machine_learning_workbench 0.3 → 0.4.0
- checksums.yaml +4 -4
- data/bin/setup +1 -1
- data/examples/image_compression.rb +5 -5
- data/lib/machine_learning_workbench.rb +17 -1
- data/lib/machine_learning_workbench/compressor/vector_quantization.rb +5 -8
- data/lib/machine_learning_workbench/monkey.rb +105 -5
- data/lib/machine_learning_workbench/neural_network/base.rb +30 -45
- data/lib/machine_learning_workbench/neural_network/recurrent.rb +1 -1
- data/lib/machine_learning_workbench/optimizer/natural_evolution_strategies/base.rb +35 -22
- data/lib/machine_learning_workbench/optimizer/natural_evolution_strategies/bdnes.rb +16 -10
- data/lib/machine_learning_workbench/optimizer/natural_evolution_strategies/rnes.rb +24 -8
- data/lib/machine_learning_workbench/optimizer/natural_evolution_strategies/snes.rb +25 -12
- data/lib/machine_learning_workbench/optimizer/natural_evolution_strategies/xnes.rb +24 -12
- data/lib/machine_learning_workbench/tools/imaging.rb +23 -17
- data/lib/machine_learning_workbench/tools/normalization.rb +10 -9
- data/machine_learning_workbench.gemspec +8 -4
- metadata +22 -9
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 743cc4c65bda521785e00dc563b20fcd5660a6af
+  data.tar.gz: 80924d34dc550df7b25d565022f50297b8bb72d5
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 2ef96c2884a6f43304f0dba63239a7469b80c7be46c97af7c8de283a448a8ee049b55cdc98b0aa5bfde82fc024fde891c982ec621bb4cafcd6070814e8366976
+  data.tar.gz: 0b5571eb26babf90015deec8425dc0a9c75bebd47ef50b4927a131e86f5f0c9e6e828e3ee1b212597222b9aa7209cd54f6b3db7a9c8ec6afcceafe94016165a1
data/examples/image_compression.rb
CHANGED
@@ -5,7 +5,8 @@ Img = MachineLearningWorkbench::Tools::Imaging
 Norm = MachineLearningWorkbench::Tools::Normalization

 ncentrs = 1
-image_files = Dir[ENV['HOME']+'/jaffe/KA.HA*.png']
+# image_files = Dir[ENV['HOME']+'/jaffe/KA.HA*.png']
+image_files = Dir[ENV['HOME']+'/jaffe/*.png']
 raise "Download the JAFFE dataset in your home dir" if image_files&.empty?
 # ... and convert the `.tiff` in `.png`: `mogrify -format png jaffe/*.tiff`
 centr_range = [-1, 1]
@@ -14,14 +15,13 @@ img_range = [0, 2**16-1]

 puts "Loading images"
 images = image_files.map do |fname|
-
-  ret = Norm.feature_scaling
+  ary = Img.narr_from_png fname, flat: true
+  ret = Norm.feature_scaling ary, from: img_range, to: centr_range
 end

 puts "Initializing VQ"
 vq = VectorQuantization.new ncentrs: ncentrs,
-  dims: images.first.shape, lrate: 0.3,
-  dtype: images.first.dtype, vrange: centr_range
+  dims: images.first.shape, lrate: 0.3, vrange: centr_range

 puts "Training"
 vq.train images, debug: true
data/lib/machine_learning_workbench.rb
CHANGED
@@ -1,4 +1,20 @@
-
+
+gpu = false # prepare for switching to GPUs
+if gpu
+  require 'cumo/narray'
+  require 'cumo/linalg'
+  Xumo = Cumo
+else
+  require 'numo/narray'
+  require 'numo/linalg'
+  # currently required for Ubuntu 16.04
+  # depends on openblas: `sudo apt install libopenblas-base`
+  Fiddle.dlopen("/usr/lib/libopenblas.so")
+  Numo::Linalg::Lapack.dlopen("/usr/lib/liblapacke.so.3")
+  Xumo = Numo
+end
+NArray = Xumo::DFloat # set a single data type across the WB for now
+NMath = Xumo::NMath # shorthand for extended math module

 module MachineLearningWorkbench
   module Compressor
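The `Xumo` alias is what makes the backend swap cheap: everything downstream refers to the global `NArray` constant rather than to `Numo` or `Cumo` directly. A minimal sketch of the idea, assuming Cumo mirrors Numo's API as intended (the values are illustrative, not from the gem):

```ruby
# Sketch: code written against the aliases runs unchanged on either backend.
require 'numo/narray'
Xumo = Numo                  # or Cumo, when running on a GPU
NArray = Xumo::DFloat        # single float type across the workbench

a = NArray.new([2, 3]).rand  # 2x3 matrix of uniform samples
b = NArray.eye(3)            # 3x3 identity from the same backend
p a.dot(b)                   # identical call on CPU (Numo) or GPU (Cumo)
```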
data/lib/machine_learning_workbench/compressor/vector_quantization.rb
CHANGED
@@ -2,13 +2,12 @@ module MachineLearningWorkbench::Compressor

   # Standard Vector Quantization
   class VectorQuantization
-    attr_reader :ncentrs, :centrs, :dims, :vrange, :
+    attr_reader :ncentrs, :centrs, :dims, :vrange, :lrate, :rng, :ntrains
     Verification = MachineLearningWorkbench::Tools::Verification

-    def initialize ncentrs:, dims:, vrange:,
+    def initialize ncentrs:, dims:, vrange:, lrate:, rseed: Random.new_seed
      @rng = Random.new rseed
       @ncentrs = ncentrs
-      @dtype = dtype
       @dims = Array(dims)
       check_lrate lrate # hack: so that we can overload it in online_vq
       @lrate = lrate
@@ -32,15 +31,13 @@ module MachineLearningWorkbench::Compressor

     # Creates a new (random) centroid
     def new_centr
-
-      # NMatrix.new(dims, dtype: dtype) { rng.rand Range.new *vrange }
-      NMatrix.random dims, dtype: dtype
+      NArray.new(*dims).rand(*vrange)
     end

     # Computes similarities between vector and all centroids
     def similarities vec
       raise NotImplementedError if vec.shape.size > 1
-      centrs.map { |c| c.dot(vec)
+      centrs.map { |c| c.dot(vec) }
       # require 'parallel'
       # Parallel.map(centrs) { |c| c.dot(vec).first }
     end
@@ -85,7 +82,7 @@ module MachineLearningWorkbench::Compressor
     end

     # Per-pixel errors in reconstructing vector
-    # @return [
+    # @return [NArray] residuals
     def reconstr_error vec
       reconstruction(vec) - vec
     end
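The constructor change is the visible API break in this file: `dtype:` is gone (the element type is now fixed by the global `NArray` alias) and `lrate:` joins the required keywords. A usage sketch with illustrative values:

```ruby
# Sketch (values are illustrative, not from the gem's examples):
vq = MachineLearningWorkbench::Compressor::VectorQuantization.new(
  ncentrs: 4, dims: [8, 8], vrange: [-1, 1], lrate: 0.3)
vq.ncentrs # => 4; centroids start as uniform samples within vrange
```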
data/lib/machine_learning_workbench/monkey.rb
CHANGED
@@ -217,12 +217,112 @@ module MachineLearningWorkbench::Monkey
       raise NotImplementedError, "There's no setter for the data pointer!"
     end
   end
+
+  module ToNArrayConvertible
+    def to_na
+      NArray[*self]
+    end
+  end
+
+  module NArrayOuterFlattable
+    # Flat-output generalized outer relationship. Same as `#outer`, but the
+    # result is a 2-dim matrix of the interactions between all the elements
+    # in `self` (as rows) and all the elements in `other` (as columns)
+    # @param other [NArray] other matrix
+    # @return [NArray]
+    def outer_flat other
+      # TODO: Numo::NArray should be able to implement this with `#outer` and some other
+      # function to flatten the right layer -- much faster
+      raise ArgumentError, "Need to pass an operand block" unless block_given?
+      self.class.zeros([self.size, other.size]).tap do |ret|
+        self.size.times do |r|
+          other.size.times do |c|
+            ret[r,c] = yield self[r], other[c]
+          end
+        end
+      end
+    end
+  end
+
+  module NArrayApproximatable
+    # Verifies if `self` and `other` are withing `epsilon` of each other.
+    # @param other [NArray]
+    # @param epsilon [NArray]
+    # @return [Boolean]
+    def approximates? other, epsilon=1e-5
+      ((self - other).abs < epsilon).all?
+    end
+  end
+
+  module Invertable
+    # Inverses matrix
+    # @return [NArray]
+    def invert
+      Numo::Linalg.inv self
+    end
+  end
+
+  module Exponentiable
+    # Matrix exponential: `e**self` (not to be confused with `self**n`)
+    # @return [NArray]
+    def exponential
+      raise ArgumentError if ndim > 2
+      # special case: one-dimensional matrix: just exponentiate the values
+      return Numo::NMath.exp(self) if (ndim == 1) || shape.include?(1)
+      # at this point we need to validate it is a square matrix
+      raise ArgumentError unless shape.reduce(&:==)
+
+      # Eigenvalue decomposition method from `scipy/linalg/matfuncs.py#expm2` (deprecated)
+      # https://github.com/scipy/scipy/commit/236e0740ba951cb455ba8b6a306abb32740131cf
+      # s, vr = eig(A)
+      # vri = inv(vr)
+      # r = dot(dot(vr, diag(exp(s))), vri)
+
+      # TODO: this is a simple but outdated method, switch to Pade approximation
+      # https://github.com/scipy/scipy/blob/11509c4a98edded6c59423ac44ca1b7f28fba1fd/scipy/sparse/linalg/matfuncs.py#L557
+
+      # e_values, l_e_vectors, r_e_vectors_t = Numo::Linalg.svd self
+      evals, _wi, _vl, r_evecs = Numo::Linalg::Lapack.call(:geev, self, jobvl: false, jobvr: true)
+      r_evecs_t = r_evecs#.transpose
+      r_evecs_inv = r_evecs_t.invert
+      evals_exp_dmat = Numo::NMath.exp(evals).diag
+
+      # l_e_vectors.dot(e_vals_exp_dmat).dot(l_e_vectors.invert)#.transpose
+      r_evecs_t.dot(evals_exp_dmat).dot(r_evecs_inv)
+    end
+  end
+
+  module Mappable
+    # Maps along a NArray dimension, and returns NArray
+    # @return [NArray]
+    # NOTE: this indexing is not consistent with NArray, which uses 0 to indicate
+    # columns rather than the 0th dimension (rows)
+    def map dim=0
+      raise ArgumentError unless dim.kind_of?(Integer) && dim.between?(0,ndim)
+      # TODO: return iterator instead of raise
+      raise NotImplementedError unless block_given?
+      indices = [true]*ndim
+      ret = []
+      shape[dim].times.each do |i|
+        indices[dim] = i
+        ret << yield(self[*indices])
+      end
+      self.class[*ret]
+    end
+  end
+
 end

 Array.include MachineLearningWorkbench::Monkey::Dimensionable
-NMatrix.extend MachineLearningWorkbench::Monkey::Buildable
-require 'nmatrix/lapack_plugin' # loads whichever is installed between atlas and lapacke
-NMatrix.include MachineLearningWorkbench::Monkey::AdvancelyOperationable
+# NMatrix.extend MachineLearningWorkbench::Monkey::Buildable
+# require 'nmatrix/lapack_plugin' # loads whichever is installed between atlas and lapacke
+# NMatrix.include MachineLearningWorkbench::Monkey::AdvancelyOperationable
 Numeric.include MachineLearningWorkbench::Monkey::NumericallyApproximatable
-NMatrix.include MachineLearningWorkbench::Monkey::MatrixApproximatable
-NMatrix.include MachineLearningWorkbench::Monkey::CPtrDumpable
+# NMatrix.include MachineLearningWorkbench::Monkey::MatrixApproximatable
+# NMatrix.include MachineLearningWorkbench::Monkey::CPtrDumpable
+Array.include MachineLearningWorkbench::Monkey::ToNArrayConvertible
+NArray.include MachineLearningWorkbench::Monkey::NArrayApproximatable
+NArray.include MachineLearningWorkbench::Monkey::NArrayOuterFlattable
+NArray.include MachineLearningWorkbench::Monkey::Exponentiable
+NArray.include MachineLearningWorkbench::Monkey::Invertable
+NArray.prepend MachineLearningWorkbench::Monkey::Mappable
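Taken together, these patches give plain Arrays and Numo arrays the small conveniences the workbench previously relied on from NMatrix. A quick sketch of what they provide, with illustrative values:

```ruby
# Sketch (not from the gem): the new monkey-patched helpers in action.
a = [1.0, 2.0, 3.0].to_na          # Array#to_na => Numo::DFloat[1, 2, 3]
p a.approximates?(a + 1e-7)        # => true, within the default epsilon 1e-5
p a.outer_flat(a) { |x, y| x * y } # 3x3 matrix of all pairwise products
p NArray.eye(2).exponential        # matrix exponential via eigendecomposition
```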
data/lib/machine_learning_workbench/neural_network/base.rb
CHANGED
@@ -7,51 +7,52 @@ module MachineLearningWorkbench::NeuralNetwork
     # List of matrices, each being the weights
     # connecting a layer's inputs (rows) to a layer's neurons (columns),
     # hence its shape is `[ninputs, nneurs]`
-    # @return [Array<
+    # @return [Array<NArray>] list of weight matrices, each uniquely describing a layer
+    # TODO: return a NArray after the usage of `#map` is figured out
     # @!attribute [r] state
     # It's a list of one-dimensional matrices, each an input to a layer, plus the output layer's output. The first element is the input to the first layer of the network, which is composed of the network's input, possibly the first layer's activation on the last input (recursion), and a bias (fixed `1`). The second to but-last entries follow the same structure, but with the previous layer's output in place of the network's input. The last entry is the activation of the output layer, without additions since it's not used as an input by anyone.
-    #
+    # TODO: return a NArray after the usage of `#map` is figured out
+    # @return [Array<NArray>] current state of the network.
     # @!attribute [r] act_fn
     # activation function, common to all neurons (for now)
     # @return [#call] activation function
     # @!attribute [r] struct
     # list of number of (inputs or) neurons in each layer
     # @return [Array<Integer>] structure of the network
-    attr_reader :layers, :state, :act_fn, :
+    attr_reader :layers, :state, :act_fn, :act_fn_name, :struct


     ## Initialization

     # @param struct [Array<Integer>] list of layer sizes
     # @param act_fn [Symbol] choice of activation function for the neurons
-
-    def initialize struct, act_fn: nil, dtype: :float32
+    def initialize struct, act_fn: nil
       @struct = struct
-      @
+      @act_fn_name = act_fn || :sigmoid
+      @act_fn = send(act_fn_name)
       # @state holds both inputs, possibly recurrency, and bias
       # it is a complete input for the next layer, hence size from layer sizes
       @state = layer_row_sizes.collect do |size|
-
+        NArray.zeros [1, size]
       end
       # to this, append a matrix to hold the final network output
-      @state.push
+      @state.push NArray.zeros [1, nneurs(-1)]
       reset_state
     end

     # Reset the network to the initial state
     def reset_state
-
-      # reset
-
-      # add bias to all but output
-        m[0,-1] = 1 unless m.object_id == @state.last.object_id
+      state.each do |s|
+        s.fill 0 # reset state to zero
+        s[0,-1] = 1 # add bias
       end
+      state[-1][0,-1] = 0 # last layer has no bias
     end

     # Initialize the network with random weights
     def init_random
-      # Will only be used for testing, no sense optimizing it (
-      # Reusing
+      # Will only be used for testing, no sense optimizing it now (NArray#rand)
+      # Reusing `#load_weights` instead helps catching bugs
       load_weights nweights.times.collect { rand(-1.0..1.0) }
     end

@@ -90,7 +91,7 @@ module MachineLearningWorkbench::NeuralNetwork
     # @return [Array] three-dimensional Array of weights: a list of weight
     #   matrices, one for each layer.
     def weights
-      layers.collect(&:
+      layers.collect(&:to_a)
     end

     # Number of neurons per layer. Although this implementation includes inputs
@@ -126,10 +127,10 @@ module MachineLearningWorkbench::NeuralNetwork
     def load_weights weights
       raise ArgumentError unless weights.size == nweights
       weights_iter = weights.each
-      @layers ||= layer_shapes.collect { |shape|
-      layers.each do |
-
-
+      @layers ||= layer_shapes.collect { |shape| NArray.zeros shape }
+      layers.each do |narr|
+        narr.each_with_index do |_val, *idxs|
+          narr[*idxs] = weights_iter.next
         end
       end
       reset_state
@@ -139,11 +140,6 @@ module MachineLearningWorkbench::NeuralNetwork

     ## Activation

-    # The "fixed `1`" used in the layer's input
-    def bias
-      @bias ||= NMatrix[[1], dtype: dtype]
-    end
-
     # Activate the network on a given input
     # @param input [Array<Float>] the given input
     # @return [Array] the activation of the output layer
@@ -153,9 +149,9 @@ module MachineLearningWorkbench::NeuralNetwork
       # load input in first state
       @state[0][0, 0..-2] = input
       # activate layers in sequence
-
+      nlayers.times.each do |i|
         act = activate_layer i
-        @state[i+1][0,0...act.size] = act
+        @state[i+1][0, 0...act.size] = act
       end
       return out
     end
@@ -163,49 +159,38 @@ module MachineLearningWorkbench::NeuralNetwork
     # Extract and convert the output layer's activation
     # @return [Array] the activation of the output layer as 1-dim Array
     def out
-      state.last.
+      state.last.to_a.flatten
     end

     # define #activate_layer in child class

     ## Activation functions

-    # Activation function caller. Allows to cleanly define the activation function as one-dimensional, by calling it over the inputs and building a NMatrix to return.
-    # @return [NMatrix] activations for one layer
-    def get_act_fn type, *args
-      fn = send(type,*args)
-      lambda do |inputs|
-        NMatrix.new([1, inputs.size], dtype: dtype) do |_,i|
-          # single-row matrix, indices are columns
-          fn.call inputs[i]
-        end
-      end
-    end
-
     # Traditional sigmoid with variable steepness
     def sigmoid k=0.5
       # k is steepness: 0<k<1 is flatter, 1<k is flatter
       # flatter makes activation less sensitive, better with large number of inputs
-      lambda { |x| 1.0 / (
+      lambda { |x| 1.0 / (Numo::NMath.exp(-k * x) + 1.0) }
     end

     # Traditional logistic
     def logistic
       lambda { |x|
-        exp =
-        exp.infinite? ? exp : exp / (1.0 + exp)
+        exp = Numo::NMath.exp(x)
+        # exp.infinite? ? exp : exp / (1.0 + exp)
+        exp / (1.0 + exp)
       }
     end

     # LeCun hyperbolic activation
     # @see http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf Section 4.4
     def lecun_hyperbolic
-      lambda { |x| 1.7159 *
+      lambda { |x| 1.7159 * Numo::NMath.tanh(2.0*x/3.0) + 1e-3*x }
     end

     # Rectified Linear Unit (ReLU)
     def relu
-      lambda { |x| x>0 && x ||
+      lambda { |x| (x>0).all? && x || x.class.zeros(x.shape) }
     end


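With `dtype:` dropped and the activation function stored by name, building and running a network goes through a small, uniform surface. A sketch, assuming the gem's FeedForward subclass (which supplies `#activate_layer`); the input values are illustrative:

```ruby
# Sketch: a 2-input, 3-hidden, 1-output network on the 0.4.0 API.
net = MachineLearningWorkbench::NeuralNetwork::FeedForward.new(
  [2, 3, 1], act_fn: :sigmoid)
net.init_random                # random weights in [-1, 1], for testing
p net.activate([0.5, -0.2])    # => 1-dim Array with the output activation
p net.act_fn_name              # => :sigmoid, now exposed by attr_reader
```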
data/lib/machine_learning_workbench/neural_network/recurrent.rb
CHANGED
@@ -23,7 +23,7 @@ module MachineLearningWorkbench::NeuralNetwork
       previous = nlay # index of previous layer (inputs)
       current = nlay + 1 # index of current layer (outputs)
       # Copy the level's last-time activation to the input (previous state)
-      #
+      # TODO: ranges in `NArray#[]` should be reliable, get rid of loop
       nneurs(current).times do |i| # for each activations to copy
         # Copy output from last-time activation to recurrency in previous state
         @state[previous][0, nneurs(previous) + i] = state[current][0, i]
data/lib/machine_learning_workbench/optimizer/natural_evolution_strategies/base.rb
CHANGED
@@ -2,7 +2,7 @@
 module MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies
   # Natural Evolution Strategies base class
   class Base
-    attr_reader :ndims, :mu, :sigma, :opt_type, :obj_fn, :parallel_fit, :
+    attr_reader :ndims, :mu, :sigma, :opt_type, :obj_fn, :parallel_fit, :eye, :rng, :last_fits, :best, :rescale_popsize, :rescale_lrate

     # NES object initialization
     # @param ndims [Integer] number of parameters to optimize
@@ -19,24 +19,23 @@ module MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies
     #   a single instance.
     # @param rescale_popsize [Float] scaling for the default population size
     # @param rescale_lrate [Float] scaling for the default learning rate
-
-    def initialize ndims, obj_fn, opt_type, rseed: nil, mu_init: 0, sigma_init: 1, parallel_fit: false, rescale_popsize: 1, rescale_lrate: 1, dtype: :float64
+    def initialize ndims, obj_fn, opt_type, rseed: nil, mu_init: 0, sigma_init: 1, parallel_fit: false, rescale_popsize: 1, rescale_lrate: 1
       raise ArgumentError unless [:min, :max].include? opt_type
       raise ArgumentError unless obj_fn.respond_to? :call
       @ndims, @opt_type, @obj_fn, @parallel_fit = ndims, opt_type, obj_fn, parallel_fit
       @rescale_popsize, @rescale_lrate = rescale_popsize, rescale_lrate
-      @
+      @eye = NArray.eye(ndims)
       rseed ||= Random.new_seed
       # puts "NES rseed: #{s}" # currently disabled
       @rng = Random.new rseed
       @best = [(opt_type==:max ? -1 : 1) * Float::INFINITY, nil]
       @last_fits = []
-      @dtype = dtype
       initialize_distribution mu_init: mu_init, sigma_init: sigma_init
     end

     # Box-Muller transform: generates standard (unit) normal distribution samples
     # @return [Float] a single sample from a standard normal distribution
+    # @note Numo::NArray implements this :) glad to have switched!
     def standard_normal_sample
       rho = Math.sqrt(-2.0 * Math.log(rng.rand))
       theta = 2 * Math::PI * rng.rand
@@ -53,7 +52,7 @@ module MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies
     def lrate; @lrate ||= cmaes_lrate * rescale_lrate end

     # Magic numbers from CMA-ES (TODO: add proper citation)
-    # @return [
+    # @return [NArray] scale-invariant utilities
     def cmaes_utilities
       # Algorithm equations are meant for fitness maximization
       # Match utilities with individuals sorted by INCREASING fitness
@@ -63,7 +62,7 @@ module MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies
       total = log_range.reduce(:+)
       buf = 1.0/popsize
       vals = log_range.collect { |v| v / total - buf }.reverse
-
+      NArray[vals]
     end

     # (see #cmaes_utilities)
@@ -78,21 +77,26 @@ module MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies
       [5, 4 + (3*Math.log(ndims)).floor].max
     end

-    # Samples a standard normal distribution to construct a
+    # Samples a standard normal distribution to construct a NArray of
     #   popsize multivariate samples of length ndims
-    # @return [
+    # @return [NArray] standard normal samples
+    # @note Numo::NArray implements this :) glad to have switched!
     def standard_normal_samples
-
+      NArray.zeros([popsize, ndims]).tap do |ret|
+        ret.each_with_index { |_,*i| ret[*i] = standard_normal_sample }
+      end
     end

     # Move standard normal samples to current distribution
-    # @return [
+    # @return [NArray] individuals
     def move_inds inds
       # TODO: can we reduce the transpositions?
-
-      multi_mu = NMatrix[*inds.rows.times.collect {mu.to_a}, dtype: dtype].transpose
-      (multi_mu + sigma.dot(inds.transpose)).transpose
-
+
+      # multi_mu = NMatrix[*inds.rows.times.collect {mu.to_a}, dtype: dtype].transpose
+      # (multi_mu + sigma.dot(inds.transpose)).transpose
+
+      mu_tile = mu.tile(inds.shape.first, 1).transpose
+      (mu_tile + sigma.dot(inds.transpose)).transpose
     end

     # Sorted individuals
@@ -100,18 +104,27 @@ module MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies
     #   matched with individuals sorted by INCREASING fitness. Then reverse order for minimization.
     # @return standard normal samples sorted by the respective individuals' fitnesses
     def sorted_inds
-      samples = standard_normal_samples
-
+      # samples = standard_normal_samples # Numo::NArray implements the Box-Muller :)
+      samples = NArray.new([popsize, ndims]).rand_norm(0,1)
+      inds = move_inds(samples)
       fits = parallel_fit ? obj_fn.call(inds) : inds.map(&obj_fn)
       # Quick cure for NaN fitnesses
-      fits.map
+      fits.map { |x| x.nan? ? (opt_type==:max ? -1 : 1) * Float::INFINITY : x }
       @last_fits = fits # allows checking for stagnation
-
-      sorted.
-
+
+      # sorted = [fits.to_a, inds, samples.to_a].transpose.sort_by(&:first)
+      # sorted.reverse! if opt_type==:min
+      # this_best = sorted.last.take(2)
+      # NArray[*sorted.map(&:last)]
+
+      sort_idxs = fits.sort_index
+      sort_idxs = sort_idxs.reverse if opt_type == :min
+      this_best = [fits[sort_idxs[-1]], inds[sort_idxs[-1]]]
+
       opt_cmp_fn = opt_type==:min ? :< : :>
       @best = this_best if this_best.first.send(opt_cmp_fn, best.first)
-
+
+      samples[sort_idxs,true]
     end

     # @!method interface_methods
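The net effect of this rewrite is that sampling and sorting now happen inside Numo (`rand_norm`, `sort_index`) instead of Ruby-land Array shuffling. A minimal end-to-end sketch, assuming the XNES subclass defined later in this diff:

```ruby
# Sketch (illustrative, not from the changelog): minimize a 5-dim quadratic.
obj = ->(ind) { (ind**2).sum }   # fitness of one individual (an NArray row)
nes = MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies::XNES.new(
  5, obj, :min)
50.times { nes.train }
p nes.mu           # distribution mean, drifting towards the optimum
p nes.convergence  # scalar spread estimate; small means the search has settled
```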
data/lib/machine_learning_workbench/optimizer/natural_evolution_strategies/bdnes.rb
CHANGED
@@ -46,8 +46,8 @@ module MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies
       end.transpose

       # Join the individuals for evaluation
-      full_inds = inds_lst.reduce
-      # Need to fix
+      full_inds = inds_lst.reduce { |mem, var| mem.concatenate var, axis: 1 }
+      # Need to fix sample dimensions for sorting
       # - current dims: nblocks x ninds x [block sizes]
       # - for sorting: ninds x nblocks x [block sizes]
       full_samples = samples_lst.transpose
@@ -55,24 +55,30 @@ module MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies
       # Evaluate fitness of complete individuals
       fits = parallel_fit ? obj_fn.call(full_inds) : full_inds.map(&obj_fn)
       # Quick cure for NaN fitnesses
-      fits.map
+      fits.map { |x| x.nan? ? (opt_type==:max ? -1 : 1) * Float::INFINITY : x }
       @last_fits = fits # allows checking for stagnation

       # Sort inds based on fit and opt_type, save best
-      sorted = [fits, full_inds, full_samples].transpose.sort_by(&:first)
-      sorted.reverse! if opt_type==:min
-      this_best = sorted.last.take(2)
+      # sorted = [fits, full_inds, full_samples].transpose.sort_by(&:first)
+      # sorted.reverse! if opt_type==:min
+      # this_best = sorted.last.take(2)
+      # opt_cmp_fn = opt_type==:min ? :< : :>
+      # @best = this_best if this_best.first.send(opt_cmp_fn, best.first)
+      # sorted_samples = sorted.map(&:last)
+      sort_idxs = fits.sort_index
+      sort_idxs = sort_idxs.reverse if opt_type == :min
+      this_best = [fits[sort_idxs[-1]], full_inds[sort_idxs[-1]]]
       opt_cmp_fn = opt_type==:min ? :< : :>
       @best = this_best if this_best.first.send(opt_cmp_fn, best.first)
-      sorted_samples =
+      sorted_samples = full_samples.values_at *sort_idxs

       # Need to bring back sample dimensions for each block
       # - current dims: ninds x nblocks x [block sizes]
       # - target blocks list: nblocks x ninds x [block sizes]
       block_samples = sorted_samples.transpose

-      # then back to
-      block_samples.map
+      # then back to NArray for usage in training
+      block_samples.map &:to_na
     end

     # duck-type the interface: [:train, :mu, :convergence, :save, :load]
@@ -84,7 +90,7 @@ module MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies
     end

     def mu
-      blocks.map(&:mu).reduce
+      blocks.map(&:mu).reduce { |mem, var| mem.concatenate var, axis: 1 }
     end

     def convergence
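The recurring `reduce { ... concatenate ..., axis: 1 }` idiom is how BDNES stitches the per-block pieces back into one full individual. An illustrative check of the idiom on its own:

```ruby
# Sketch: concatenating block means along axis 1, as BDNES#mu does.
a = NArray[[1.0, 2.0]]   # block 1 mean
b = NArray[[3.0]]        # block 2 mean
p [a, b].reduce { |mem, var| mem.concatenate var, axis: 1 }
# => Numo::DFloat [[1, 2, 3]]
```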
data/lib/machine_learning_workbench/optimizer/natural_evolution_strategies/rnes.rb
CHANGED
@@ -5,18 +5,34 @@ module MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies
     attr_reader :variance

     def initialize_distribution mu_init: 0, sigma_init: 1
-      @mu =
-
+      @mu = case mu_init
+      when Array
+        raise ArgumentError unless mu_init.size == ndims
+        NArray[mu_init]
+      when Numeric
+        NArray.new([1,ndims]).fill mu_init
+      else
+        raise ArgumentError, "Something is wrong with mu_init: #{mu_init}"
+      end
       @variance = sigma_init
-      @sigma =
+      @sigma = case sigma_init
+      when Array
+        raise ArgumentError "RNES uses single global variance"
+      when Numeric
+        NArray.new([ndims]).fill(variance).diag
+      else
+        raise ArgumentError, "Something is wrong with sigma_init: #{sigma_init}"
+      end
     end

     def train picks: sorted_inds
       g_mu = utils.dot(picks)
-      g_sigma = utils.dot(picks.row_norms**2 - ndims).first # back to scalar
+      # g_sigma = utils.dot(picks.row_norms**2 - ndims).first # back to scalar
+      row_norms = Numo::Linalg.norm picks, 2, axis:1
+      g_sigma = utils.dot(row_norms**2 - ndims)[0] # back to scalar
       @mu += sigma.dot(g_mu.transpose).transpose * lrate
       @variance *= Math.exp(g_sigma * lrate / 2)
-      @sigma =
+      @sigma = NArray.new([ndims]).fill(variance).diag
     end

     # Estimate algorithm convergence based on variance
@@ -25,14 +41,14 @@ module MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies
     end

     def save
-      [mu.
+      [mu.to_a, variance]
     end

     def load data
       raise ArgumentError unless data.size == 2
       mu_ary, @variance = data
-      @mu =
-      @sigma =
+      @mu = mu_ary.to_na
+      @sigma = eye * variance
     end
   end
 end
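The only NMatrix-ism left in RNES was `#row_norms`; its Numo replacement is a plain call to `Numo::Linalg.norm` with `axis: 1`. Illustrative check:

```ruby
# Sketch: per-row Euclidean norms, the drop-in for NMatrix#row_norms.
m = NArray[[3.0, 4.0], [6.0, 8.0]]
p Numo::Linalg.norm(m, 2, axis: 1)   # => Numo::DFloat [5, 10]
```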
data/lib/machine_learning_workbench/optimizer/natural_evolution_strategies/snes.rb
CHANGED
@@ -6,35 +6,48 @@ module MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies
     attr_reader :variances

     def initialize_distribution mu_init: 0, sigma_init: 1
-      @mu =
-
-
-
+      @mu = case mu_init
+      when Array
+        raise ArgumentError unless mu_init.size == ndims
+        NArray[mu_init]
+      when Numeric
+        NArray.new([1,ndims]).fill mu_init
+      else
+        raise ArgumentError, "Something is wrong with mu_init: #{mu_init}"
+      end
+      @variances = case sigma_init
+      when Array
+        raise ArgumentError unless sigma_init.size == ndims
+        NArray[*sigma_init]
+      when Numeric
+        NArray.new([ndims]).fill(sigma_init)
+      else
+        raise ArgumentError, "Something is wrong with sigma_init: #{sigma_init}"
+      end
+      @sigma = @variances.diag
     end

     def train picks: sorted_inds
       g_mu = utils.dot(picks)
       g_sigma = utils.dot(picks**2 - 1)
       @mu += sigma.dot(g_mu.transpose).transpose * lrate
-      @variances *= (g_sigma * lrate / 2).exponential
-      @sigma =
+      @variances *= (g_sigma * lrate / 2).exponential.flatten
+      @sigma = @variances.diag
     end

     # Estimate algorithm convergence as total variance
     def convergence
-      variances.
+      variances.sum
     end

     def save
-      [mu.
+      [mu.to_a, variances.to_a]
     end

     def load data
       raise ArgumentError unless data.size == 2
-
-      @
-      @variances = NMatrix[*variances_ary, dtype: dtype]
-      @sigma = NMatrix.diagonal(variances, dtype: dtype)
+      @mu, @variances = data.map &:to_na
+      @sigma = variances.diag
     end
   end
 end
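The `case`-based initialization means `mu_init` and `sigma_init` now accept either a scalar or a per-dimension Array, which is most useful in the separable SNES. A hedged sketch with illustrative values:

```ruby
# Sketch: wider initial search on the first dimension only.
snes = MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies::SNES.new(
  3, ->(ind) { (ind**2).sum }, :min,
  mu_init: 0, sigma_init: [5.0, 1.0, 1.0])
p snes.variances   # => Numo::DFloat [5, 1, 1]
```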
data/lib/machine_learning_workbench/optimizer/natural_evolution_strategies/xnes.rb
CHANGED
@@ -5,21 +5,35 @@ module MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies
     attr_reader :log_sigma

     def initialize_distribution mu_init: 0, sigma_init: 1
-      @mu =
-
-
+      @mu = case mu_init
+      when Array
+        raise ArgumentError unless mu_init.size == ndims
+        NArray[mu_init]
+      when Numeric
+        NArray.new([1,ndims]).fill mu_init
+      else
+        raise ArgumentError, "Something is wrong with mu_init: #{mu_init}"
+      end
+      @sigma = case sigma_init
+      when Array
+        raise ArgumentError unless sigma_init.size == ndims
+        NArray[*sigma_init].diag
+      when Numeric
+        NArray.new([ndims]).fill(sigma_init).diag
+      else
+        raise ArgumentError, "Something is wrong with sigma_init: #{sigma_init}"
+      end
       # Works with the log of sigma to avoid continuous decompositions (thanks Sun Yi)
-
-      @log_sigma = NMatrix.diag(log_sigma_init, dtype: dtype)
+      @log_sigma = Numo::NMath.log(sigma.diagonal).diag
     end

     def train picks: sorted_inds
       g_mu = utils.dot(picks)
-      g_log_sigma = popsize.times.inject(
+      g_log_sigma = popsize.times.inject(NArray.zeros sigma.shape) do |sum, i|
         u = utils[i]
-        ind = picks
+        ind = picks[i, true]
         ind_sq = ind.outer_flat(ind, &:*)
-        sum + (ind_sq -
+        sum + (ind_sq - eye) * u
       end
       @mu += sigma.dot(g_mu.transpose).transpose * lrate
       @log_sigma += g_log_sigma * (lrate/2)
@@ -32,14 +46,12 @@ module MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies
     end

     def save
-      [mu.
+      [mu.to_a, log_sigma.to_a]
     end

     def load data
       raise ArgumentError unless data.size == 2
-
-      @mu = NMatrix[*mu_ary, dtype: dtype]
-      @log_sigma = NMatrix[*log_sigma_ary, dtype: dtype]
+      @mu, @log_sigma = data.map &:to_na
       @sigma = log_sigma.exponential
     end
   end
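Working in log-space is what ties XNES to the `Exponentiable` monkey patch added above: `#load` recovers the covariance factor with a matrix exponential. For a diagonal `log_sigma` this reduces to exponentiating the diagonal entries, which gives a quick sanity check:

```ruby
# Sketch: recovering sigma from log_sigma, as XNES#load does.
log_sigma = NArray[1.0, 2.0].diag   # diagonal log-covariance factor
sigma = log_sigma.exponential       # => diag(e^1, e^2), zeros elsewhere
```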
data/lib/machine_learning_workbench/tools/imaging.rb
CHANGED
@@ -3,26 +3,32 @@ module MachineLearningWorkbench::Tools
     Forkable = MachineLearningWorkbench::Tools::Execution
     Norm = MachineLearningWorkbench::Tools::Normalization

-    # Create RMagick::Image from
-
-
+    # Create RMagick::Image from numeric matrix data
+    # @param narr [NArray] numeric matrix to display
+    # @param shape [Array<Integer>] optional reshaping
+    def self.narr_to_img narr, shape: nil
+      shape ||= narr.shape
       shape = [1, shape] if shape.kind_of?(Integer) || shape.size == 1
       # `Image::constitute` requires Float pixels to be in [0,1]
-      pixels = Norm.feature_scaling
-      Magick::Image.constitute *shape, "I", pixels.
+      pixels = Norm.feature_scaling narr, to: [0,1]
+      Magick::Image.constitute *shape, "I", pixels.to_a.flatten
     end

-    # Create PNG file from
-
-
+    # Create PNG file from numeric matrix data
+    # @param narr [NArray] numeric matrix to display
+    # @param fname [String] path to save PNG
+    # @param shape [Array<Integer>] optional reshaping before saving
+    def self.narr_to_png narr, fname, shape: nil
+      narr_to_img(narr, shape: shape).write fname
     end

-    # Show a
+    # Show a numeric matrix as image in a RMagick window
+    # @param narr [NArray] numeric matrix to display
     # @param disp_size [Array] the size of the image to display
-    # @param shape [Array] the true shape of the image (
+    # @param shape [Array] the true shape of the image (numeric matrix could be flattened)
     # @param in_fork [bool] whether to execute the display in fork (and continue running)
-    def self.display
-      img =
+    def self.display narr, disp_size: nil, shape: nil, in_fork: true
+      img = narr_to_img narr, shape: shape
       img.resize!(*disp_size, Magick::TriangleFilter,0.51) if disp_size
       if in_fork
         MachineLearningWorkbench::Tools::Execution.in_fork { img.display }
@@ -31,19 +37,19 @@ module MachineLearningWorkbench::Tools
       end
     end

-    # Create
+    # Create numeric matrix from png by filename.
     # @param fname the file name
     # @param scale optional rescaling of the image
     # @param flat [bool] whether to return a flat array
-    # @param dtype dtype for the
-    def self.
+    # @param dtype dtype for the numeric matrix, leave `nil` for automatic detection
+    def self.narr_from_png fname, scale: nil, flat: false
       img = Magick::ImageList.new(fname).first
       img.scale!(scale) if scale
       shape = [img.columns, img.rows]
       pixels = img.export_pixels(0, 0, *shape, 'I') # 'I' for intensity
       raise "Sanity check" unless shape.reduce(:*)==pixels.size
-      return pixels.
-
+      return pixels.to_na if flat
+      pixels.to_na.to_dimensions shape
     end
   end
 end
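Every public helper in this file now takes and returns NArrays under the renamed `narr_*` methods. A round-trip sketch (assumes RMagick is installed; the file path is hypothetical):

```ruby
# Sketch: matrix -> PNG -> matrix with the renamed helpers.
Img = MachineLearningWorkbench::Tools::Imaging
narr = NArray.new([28, 28]).rand           # fake grayscale image
Img.narr_to_png narr, "/tmp/demo.png"      # hypothetical output path
back = Img.narr_from_png "/tmp/demo.png", flat: true
```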
data/lib/machine_learning_workbench/tools/normalization.rb
CHANGED
@@ -1,22 +1,23 @@
 module MachineLearningWorkbench::Tools
   module Normalization
-    def self.feature_scaling
-      from ||=
+    def self.feature_scaling narr, from: nil, to: [0,1]
+      from ||= narr.minmax
       old_min, old_max = from
       new_min, new_max = to
-      (
+      (narr-old_min)*(new_max-new_min)/(old_max-old_min)+new_min
     end

     # @param per_column [bool] wheather to compute stats per-column or matrix-wise
-    def self.z_score
+    def self.z_score narr, per_column: true
       raise NotImplementedError unless per_column
-
-
+      raise "this would be a good time to test this implementation"
+      means = narr.mean
+      stddevs = narr.std
       # address edge case of zero variance
       stddevs.map! { |v| v.zero? ? 1 : v }
-      mean_mat = means.repeat
-      stddev_mat = stddevs.repeat
-      (
+      mean_mat = means.repeat narr.rows, 0
+      stddev_mat = stddevs.repeat narr.rows, 0
+      (narr - mean_mat) / stddev_mat
     end
   end
 end
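`feature_scaling` is the workhorse shared by the imaging helpers and the compression example: a plain min-max rescale, `(x - old_min) * (new_max - new_min) / (old_max - old_min) + new_min`. Worked example with the ranges used in the example script above:

```ruby
# Sketch: 16-bit pixel intensities into the centroid range [-1, 1].
Norm = MachineLearningWorkbench::Tools::Normalization
pixels = NArray[0, 32767.5, 65535]
p Norm.feature_scaling(pixels, from: [0, 2**16-1], to: [-1, 1])
# => Numo::DFloat [-1, 0, 1]
```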
data/machine_learning_workbench.gemspec
CHANGED
@@ -9,8 +9,7 @@ Gem::Specification.new do |spec|
   spec.email = "giuseppe.cuccu@gmail.com"

   spec.summary = %q[Workbench for practical machine learning in Ruby.]
-  spec.description = %q[
-    This workbench holds a collection of machine learning
+  spec.description = %q[This workbench holds a collection of machine learning
     methods in Ruby. Rather than specializing on a single task or method, this
     gem aims at providing an encompassing framework for any machine learning
     application.].gsub('  ', '')
@@ -44,7 +43,12 @@ Gem::Specification.new do |spec|
   spec.add_development_dependency "pry-stack_explorer", "~> 0.4"

   # Run
-  spec.requirements << "
-  spec.add_dependency "
+  spec.requirements << "libopenblas-base" # library for following dependency
+  spec.add_dependency "numo-linalg"
   spec.add_dependency "parallel", "~> 1.12"
+
+
+
+  # DELETEME
+  spec.add_dependency "nmatrix-atlas"
 end
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: machine_learning_workbench
 version: !ruby/object:Gem::Version
-  version:
+  version: 0.4.0
 platform: ruby
 authors:
 - Giuseppe Cuccu
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2018-03-
+date: 2018-03-25 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: bundler
@@ -123,19 +123,19 @@ dependencies:
   - !ruby/object:Gem::Version
     version: '0.4'
 - !ruby/object:Gem::Dependency
-  name:
+  name: numo-linalg
   requirement: !ruby/object:Gem::Requirement
     requirements:
-    - - "
+    - - ">="
       - !ruby/object:Gem::Version
-        version: '0
+        version: '0'
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
-    - - "
+    - - ">="
       - !ruby/object:Gem::Version
-        version: '0
+        version: '0'
 - !ruby/object:Gem::Dependency
   name: parallel
   requirement: !ruby/object:Gem::Requirement
@@ -150,8 +150,21 @@ dependencies:
     - - "~>"
       - !ruby/object:Gem::Version
        version: '1.12'
+- !ruby/object:Gem::Dependency
+  name: nmatrix-atlas
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
 description: |-
-  \
   This workbench holds a collection of machine learning
   methods in Ruby. Rather than specializing on a single task or method, this
   gem aims at providing an encompassing framework for any machine learning
@@ -219,7 +232,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
   - !ruby/object:Gem::Version
     version: '0'
 requirements:
--
+- libopenblas-base
 rubyforge_project:
 rubygems_version: 2.6.13
 signing_key: