machine_learning_workbench 0.6.1 → 0.7.0

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only, and reflects the changes between the two versions as they appear in their public registry.
Files changed (32)
  1. checksums.yaml +4 -4
  2. data/.gitignore +1 -1
  3. data/Rakefile +2 -0
  4. data/examples/image_compression.rb +4 -0
  5. data/examples/neuroevolution.rb +4 -0
  6. data/lib/machine_learning_workbench.rb +1 -0
  7. data/lib/machine_learning_workbench/compressor.rb +3 -0
  8. data/lib/machine_learning_workbench/compressor/copy_vq.rb +2 -0
  9. data/lib/machine_learning_workbench/compressor/decaying_learning_rate_vq.rb +2 -0
  10. data/lib/machine_learning_workbench/compressor/incr_dict_vq.rb +45 -0
  11. data/lib/machine_learning_workbench/compressor/vector_quantization.rb +23 -4
  12. data/lib/machine_learning_workbench/monkey.rb +1 -0
  13. data/lib/machine_learning_workbench/neural_network.rb +2 -0
  14. data/lib/machine_learning_workbench/neural_network/base.rb +6 -5
  15. data/lib/machine_learning_workbench/neural_network/feed_forward.rb +1 -0
  16. data/lib/machine_learning_workbench/neural_network/recurrent.rb +1 -0
  17. data/lib/machine_learning_workbench/optimizer.rb +2 -0
  18. data/lib/machine_learning_workbench/optimizer/natural_evolution_strategies/base.rb +13 -6
  19. data/lib/machine_learning_workbench/optimizer/natural_evolution_strategies/bdnes.rb +28 -7
  20. data/lib/machine_learning_workbench/optimizer/natural_evolution_strategies/fnes.rb +1 -0
  21. data/lib/machine_learning_workbench/optimizer/natural_evolution_strategies/rnes.rb +1 -0
  22. data/lib/machine_learning_workbench/optimizer/natural_evolution_strategies/snes.rb +1 -0
  23. data/lib/machine_learning_workbench/optimizer/natural_evolution_strategies/xnes.rb +14 -7
  24. data/lib/machine_learning_workbench/systems.rb +2 -0
  25. data/lib/machine_learning_workbench/systems/neuroevolution.rb +1 -0
  26. data/lib/machine_learning_workbench/tools.rb +2 -0
  27. data/lib/machine_learning_workbench/tools/execution.rb +2 -0
  28. data/lib/machine_learning_workbench/tools/imaging.rb +2 -0
  29. data/lib/machine_learning_workbench/tools/normalization.rb +2 -0
  30. data/lib/machine_learning_workbench/tools/verification.rb +2 -0
  31. data/machine_learning_workbench.gemspec +1 -0
  32. metadata +3 -2
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz: aa6944de3c6c7e7ef318e456aaf2479ff577773b
-  data.tar.gz: dce74b8e349c4f15e6a65c32805e8fa6f7e95253
+  metadata.gz: 8897ba173dbfa944cf55b3ca7b57eb3af87bbff7
+  data.tar.gz: 44883310f216b187d5d3ccce669e85f946e6ee5f
 SHA512:
-  metadata.gz: 86858d8e37e499ad296476a92afbaf59ecee6edf9c499c8d1e786ec25d39d676f5852f9faa7f14ed5afeac8f9b8d92a3fd478adb88d85d5d03acb3ca9ac804c6
-  data.tar.gz: 5bd9161716409f4470d4b5d11aad78c9cf907c2acde8486928d3622f175c20f7fb608e27d33b7ede9fcb2ab9339fc95d8d0f6f4d6940c029793e7a854cfa770d
+  metadata.gz: 75d8a1f4d2087746dae316ca47c07925858826bdf393eddde7bb2f82e22b47e2d9c2c6bbaa6e0ce10fea1ccb0b9df1882e80b58519a2107f59732d6f99ea1a76
+  data.tar.gz: d4945335adc99edaabd26b56ac7bb49936d0642d58a09516ac63fae27e11d871db879e12db9cc3d0ec78788363c85c29558d382deb145713fa4051985e330f28
data/.gitignore CHANGED
@@ -7,6 +7,6 @@
 /spec/reports/
 /tmp/
 Gemfile.lock
-
+/stats/
 # rspec failure tracking
 .rspec_status
data/Rakefile CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "bundler/gem_tasks"
 require "rspec/core/rake_task"
 
data/examples/image_compression.rb CHANGED
@@ -1,3 +1,7 @@
+# frozen_string_literal: true
+
+# Run as: `bundle exec ruby examples/image_compression.rb`
+
 require 'rmagick'
 require 'machine_learning_workbench'
 VectorQuantization = MachineLearningWorkbench::Compressor::VectorQuantization
data/examples/neuroevolution.rb CHANGED
@@ -1,3 +1,7 @@
+# frozen_string_literal: true
+
+# Run as: `bundle exec ruby examples/neuroevolution.rb`
+
 # Make sure the gem is installed first with `gem install machine_learning_workbench`
 # Alternatively, add `gem 'machine_learning_workbench'` to your Gemfile if using Bundler,
 # followed by a `bundle install`
data/lib/machine_learning_workbench.rb CHANGED
@@ -1,3 +1,4 @@
+# frozen_string_literal: true
 
 gpu = false # prepare for switching to GPUs
 if gpu
data/lib/machine_learning_workbench/compressor.rb CHANGED
@@ -1,3 +1,6 @@
+# frozen_string_literal: true
+
 require_relative 'compressor/vector_quantization'
 require_relative 'compressor/decaying_learning_rate_vq'
 require_relative 'compressor/copy_vq'
+require_relative 'compressor/incr_dict_vq'
data/lib/machine_learning_workbench/compressor/copy_vq.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module MachineLearningWorkbench::Compressor
   # Train-less VQ, copying new images into centroids
   # Optimized for online training.
data/lib/machine_learning_workbench/compressor/decaying_learning_rate_vq.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module MachineLearningWorkbench::Compressor
   # VQ with per-centroid decaying learning rates.
   # Optimized for online training.
data/lib/machine_learning_workbench/compressor/incr_dict_vq.rb ADDED
@@ -0,0 +1,45 @@
+# frozen_string_literal: true
+
+module MachineLearningWorkbench::Compressor
+  # Incremental Dictionary Train-less VQ, creating new centroids rather than training
+  # Optimized for online training.
+  # TODO: as the deadline grows nigh, the hacks grow foul. Refactor all VQs together.
+  class IncrDictVQ < VectorQuantization
+
+    attr_reader :equal_simil
+    undef :ntrains # centroids are not trained
+
+    def initialize **opts
+      puts "Ignoring learning rate: `lrate: #{opts[:lrate]}`" if opts[:lrate]
+      puts "Ignoring similarity: `simil_type: #{opts[:simil_type]}`" if opts[:simil_type]
+      puts "Ignoring ncentrs: `ncentrs: #{opts[:ncentrs]}`" if opts[:ncentrs]
+      # TODO: try different epsilons to reduce the number of states
+      # for example, in qbert we care what is lit and what is not, not the colors
+      @equal_simil = opts.delete(:equal_simil) || 0.0
+      super **opts.merge({ncentrs: 1, lrate: nil, simil_type: nil})
+      @ntrains = nil # will disable the counting
+    end
+
+    # Overloading lrate check from original VQ
+    def check_lrate lrate; nil; end
+
+    # Train on one vector:
+    # - train only if the image is not already in dictionary
+    # - create new centroid from the image
+    # @return [Integer] index of new centroid
+    def train_one vec, eps: equal_simil
+      mses = centrs.map do |centr|
+        ((centr-vec)**2).sum / centr.size # uhm get rid of division maybe? squares?
+      end
+      min_mse = mses.min
+      # skip training if the centr with smallest mse (most similar) has less than eps error (equal)
+      # TODO: maintain an average somewhere, make eps dynamic
+      return if min_mse < eps
+      puts "Creating centr #{ncentrs} (min_mse: #{min_mse})"
+      centrs << vec
+      @utility = @utility.concatenate 0
+      @ncentrs.tap{ @ncentrs += 1 }
+    end
+
+  end
+end
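The new `IncrDictVQ` grows its dictionary online: a vector becomes a new centroid only when even the closest existing centroid reconstructs it with an MSE above `equal_simil`. A minimal usage sketch, assuming the `dims:` and `vrange:` keywords are inherited from the parent `VectorQuantization` constructor; values are purely illustrative:

    require 'machine_learning_workbench'

    # `frames` stands in for any stream of NArray vectors to compress
    vq = MachineLearningWorkbench::Compressor::IncrDictVQ.new(
      dims: [8, 8], vrange: [0, 1], equal_simil: 0.005)
    frames.each { |vec| vq.train_one vec } # adds a centroid only for novel vectors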
data/lib/machine_learning_workbench/compressor/vector_quantization.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module MachineLearningWorkbench::Compressor
 
   # Standard Vector Quantization
@@ -15,7 +17,7 @@ module MachineLearningWorkbench::Compressor
       check_lrate lrate # hack: so that we can overload it in dlr_vq
       @lrate = lrate
       @simil_type = simil_type || :dot
-      @encoding_type = encoding_type || :ensemble_norm
+      @encoding_type = encoding_type || :norm_ensemble
       @init_centr_vrange ||= vrange
       @vrange = case vrange
         when Array
@@ -80,6 +82,12 @@ module MachineLearningWorkbench::Compressor
        @ncodes += 1
        @utility[code] += 1
        code
+      when :most_similar_ary
+        code = simils.new_zeros
+        code[simils.max_index] = 1
+        @ncodes += 1
+        @utility += code
+        code
       when :ensemble
         code = simils
         tot = simils.sum
@@ -88,10 +96,17 @@ module MachineLearningWorkbench::Compressor
         @ncodes += 1
         @utility += (contrib - utility) / ncodes # cumulative moving average
         code
-      when :ensemble_norm
+      when :norm_ensemble
         tot = simils.sum
         tot = 1 if tot < 1e-5 # HACK: avoid division by zero
         code = simils / tot
+        @ncodes += 1
+        @utility += (code - utility) / ncodes # cumulative moving average
+        code
+      when :sparse_coding
+        raise NotImplementedError, "do this next"
+
+
         @ncodes += 1
         @utility += (code - utility) / ncodes # cumulative moving average
         code
@@ -104,11 +119,15 @@ module MachineLearningWorkbench::Compressor
       case type
       when :most_similar
         centrs[code]
+      when :most_similar_ary
+        centrs[code.eq(1).where[0]]
       when :ensemble
         tot = code.reduce :+
         centrs.zip(code).map { |centr, contr| centr*contr/tot }.reduce :+
-      when :ensemble_norm
+      when :norm_ensemble
         centrs.zip(code).map { |centr, contr| centr*contr }.reduce :+
+      when :sparse_coding
+        raise NotImplementedError, "do this next"
       else raise ArgumentError, "unrecognized reconstruction type: #{type}"
       end
     end
@@ -148,7 +167,7 @@ module MachineLearningWorkbench::Compressor
       vec_lst.each_with_index do |vec, i|
        trained_idx = train_one vec
        print '.' if debug
-       @ntrains[trained_idx] += 1
+       @ntrains[trained_idx] += 1 if @ntrains
       end
     end
   end
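Note the breaking rename: the `:ensemble_norm` encoding is now `:norm_ensemble`, so 0.6.1 callers passing the old symbol must update. A hedged migration sketch (`dims:` is an assumed keyword; the others appear in the constructor above):

    # 0.6.1: encoding_type: :ensemble_norm
    # 0.7.0: the same encoding is selected with
    vq = MachineLearningWorkbench::Compressor::VectorQuantization.new(
      ncentrs: 10, dims: [8, 8], vrange: [0, 1], lrate: 0.3,
      encoding_type: :norm_ensemble) # the old symbol is no longer recognized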
data/lib/machine_learning_workbench/monkey.rb CHANGED
@@ -1,3 +1,4 @@
+# frozen_string_literal: true
 
 # Monkey patches
 
data/lib/machine_learning_workbench/neural_network.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require_relative 'neural_network/base'
 require_relative 'neural_network/feed_forward'
 require_relative 'neural_network/recurrent'
data/lib/machine_learning_workbench/neural_network/base.rb CHANGED
@@ -1,3 +1,4 @@
+# frozen_string_literal: true
 
 module MachineLearningWorkbench::NeuralNetwork
   # Neural Network base class
@@ -26,10 +27,10 @@ module MachineLearningWorkbench::NeuralNetwork
 
     # @param struct [Array<Integer>] list of layer sizes
    # @param act_fn [Symbol] choice of activation function for the neurons
-    def initialize struct, act_fn: nil
+    def initialize struct, act_fn: nil, **act_fn_args
      @struct = struct
      @act_fn_name = act_fn || :sigmoid
-      @act_fn = send(act_fn_name)
+      @act_fn = send act_fn_name, **act_fn_args
      # @state holds both inputs, possibly recurrency, and bias
      # it is a complete input for the next layer, hence size from layer sizes
      @state = layer_row_sizes.collect do |size|
@@ -163,10 +164,10 @@ module MachineLearningWorkbench::NeuralNetwork
     ## Activation functions
 
     # Traditional sigmoid (logistic) with variable steepness
-    def sigmoid k=1
-      # k is steepness: 0<k<1 is flatter, 1<k is steeper
+    def sigmoid steepness: 1
+      # steepness: 0<s<1 is flatter, 1<s is steeper
       # flatter makes activation less sensitive, better with large number of inputs
-      -> (vec) { 1.0 / (NMath.exp(-k * vec) + 1.0) }
+      -> (vec) { 1.0 / (NMath.exp(-steepness * vec) + 1.0) }
     end
     alias logistic sigmoid
 
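Extra keyword arguments now flow from the network constructor through to the activation-function builder, so the sigmoid steepness introduced above can be set per network. A minimal sketch, assuming `FeedForward` inherits this constructor unchanged:

    require 'machine_learning_workbench'

    # steepness < 1 gives a flatter sigmoid, useful with many inputs
    net = MachineLearningWorkbench::NeuralNetwork::FeedForward.new(
      [2, 4, 1], act_fn: :sigmoid, steepness: 0.5)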
data/lib/machine_learning_workbench/neural_network/feed_forward.rb CHANGED
@@ -1,3 +1,4 @@
+# frozen_string_literal: true
 
 module MachineLearningWorkbench::NeuralNetwork
   # Feed Forward Neural Network
data/lib/machine_learning_workbench/neural_network/recurrent.rb CHANGED
@@ -1,3 +1,4 @@
+# frozen_string_literal: true
 
 module MachineLearningWorkbench::NeuralNetwork
   # Recurrent Neural Network
data/lib/machine_learning_workbench/optimizer.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module MachineLearningWorkbench::Optimizer
 end
 
data/lib/machine_learning_workbench/optimizer/natural_evolution_strategies/base.rb CHANGED
@@ -1,3 +1,4 @@
+# frozen_string_literal: true
 
 module MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies
   # Natural Evolution Strategies base class
@@ -19,11 +20,15 @@ module MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies
    # a single instance.
    # @param rescale_popsize [Float] scaling for the default population size
    # @param rescale_lrate [Float] scaling for the default learning rate
-    def initialize ndims, obj_fn, opt_type, rseed: nil, mu_init: 0, sigma_init: 1, parallel_fit: false, rescale_popsize: 1, rescale_lrate: 1
-      raise ArgumentError unless [:min, :max].include? opt_type
-      raise ArgumentError unless obj_fn.respond_to? :call
+    def initialize ndims, obj_fn, opt_type, rseed: nil, mu_init: 0, sigma_init: 1, parallel_fit: false, rescale_popsize: 1, rescale_lrate: 1, utilities: nil, popsize: nil, lrate: nil
+      raise ArgumentError, "opt_type: #{opt_type}" unless [:min, :max].include? opt_type
+      raise ArgumentError, "obj_fn not callable: #{obj_fn}" unless obj_fn.respond_to? :call
+      raise ArgumentError, "utilities only if popsize" if utilities && popsize.nil?
+      raise ArgumentError, "wrong sizes" if utilities && utilities.size != popsize
+      raise ArgumentError, "minimum popsize 5 for default utilities" if popsize&.<(5) && utilities.nil?
      @ndims, @opt_type, @obj_fn, @parallel_fit = ndims, opt_type, obj_fn, parallel_fit
-      @rescale_popsize, @rescale_lrate = rescale_popsize, rescale_lrate
+      @rescale_popsize, @rescale_lrate = rescale_popsize, rescale_lrate # rescale defaults
+      @utilities, @popsize, @lrate = utilities, popsize, lrate # if not set, defaults below
      @eye = NArray.eye(ndims)
      rseed ||= Random.new_seed
      # puts "NES rseed: #{s}" # currently disabled
@@ -44,18 +49,20 @@ module MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies
    end
 
    # Memoized automatic magic numbers
+    # Initialization options allow rescaling or entirely overriding these.
    # NOTE: Doubling popsize and halving lrate often helps
    def utils; @utilities ||= cmaes_utilities end
    # (see #utils)
-    def popsize; @popsize ||= cmaes_popsize * rescale_popsize end
+    def popsize; @popsize ||= Integer(cmaes_popsize * rescale_popsize) end
    # (see #utils)
    def lrate; @lrate ||= cmaes_lrate * rescale_lrate end
 
-    # Magic numbers from CMA-ES (TODO: add proper citation)
+    # Magic numbers from CMA-ES (see `README` for citation)
    # @return [NArray] scale-invariant utilities
    def cmaes_utilities
      # Algorithm equations are meant for fitness maximization
      # Match utilities with individuals sorted by INCREASING fitness
+      raise ArgumentError, "Minimum `popsize` should be 5 (is #{popsize})" if popsize < 5
      log_range = (1..popsize).collect do |v|
        [0, Math.log(popsize.to_f/2 - 1) - Math.log(v)].max
      end
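The three new keywords let callers override the CMA-ES-derived defaults outright: `utilities:` requires an explicit matching `popsize`, and the default utilities need `popsize >= 5`. A hedged sketch on a sphere function (the objective signature, one NArray individual in and one fitness out, assumes the gem's non-parallel fit; the values are illustrative, not recommendations):

    require 'machine_learning_workbench'
    XNES = MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies::XNES

    # Minimize the 5-dimensional sphere function with explicit magic numbers
    nes = XNES.new 5, -> (ind) { (ind**2).sum }, :min, popsize: 50, lrate: 0.01
    20.times { nes.train }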
data/lib/machine_learning_workbench/optimizer/natural_evolution_strategies/bdnes.rb CHANGED
@@ -1,11 +1,13 @@
+# frozen_string_literal: true
 
 module MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies
   # Block-Diagonal Natural Evolution Strategies
   class BDNES < Base
 
-    MAX_RSEED = 10**Random.new_seed.size # same range as Random.new_seed
+    MAX_RSEED = 10**Random.new_seed.size # block random seeds to be on the same range as `Random.new_seed`
 
-    attr_reader :ndims_lst, :blocks, :popsize
+    attr_reader :ndims_lst, :blocks, :popsize, :parallel_update
+    undef :ndims # only `ndims_lst` here
 
     # Initialize a list of XNES, one for each block
     # see class `Base` for the description of the rest of the arguments.
@@ -13,7 +15,8 @@ module MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies
     # matrix. Note: entire (reconstructed) individuals will be passed to the `obj_fn`
     # regardless of the division here described.
     # @param init_opts [Hash] the rest of the options will be passed directly to XNES
-    def initialize ndims_lst, obj_fn, opt_type, parallel_fit: false, rseed: nil, **init_opts
+    # @param parallel_update [bool] whether to parallelize block updates
+    def initialize ndims_lst, obj_fn, opt_type, parallel_fit: false, rseed: nil, parallel_update: false, **init_opts
      # mu_init: 0, sigma_init: 1
      # init_opts = {rseed: rseed, mu_init: mu_init, sigma_init: sigma_init}
      # TODO: accept list of `mu_init`s and `sigma_init`s
@@ -21,9 +24,8 @@ module MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies
      block_fit = -> (*args) { raise "Should never be called" }
      # the BD-NES seed should ensure deterministic reproducibility
      # but each block should have a different seed
-      rseed ||= Random.new_seed
      # puts "BD-NES rseed: #{s}" # currently disabled
-      @rng = Random.new rseed
+      @rng = Random.new rseed || Random.new_seed
      @blocks = ndims_lst.map do |ndims|
        b_rseed = rng.rand MAX_RSEED
        XNES.new ndims, block_fit, opt_type, rseed: b_rseed, **init_opts
@@ -34,6 +36,8 @@ module MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies
 
      @best = [(opt_type==:max ? -1 : 1) * Float::INFINITY, nil]
      @last_fits = []
+      @parallel_update = parallel_update
+      require 'parallel' if parallel_update
    end
 
    def sorted_inds_lst
@@ -82,9 +86,22 @@ module MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies
 
    # duck-type the interface: [:train, :mu, :convergence, :save, :load]
 
+    # TODO: refactor DRY
    def train picks: sorted_inds_lst
-      blocks.zip(sorted_inds_lst).each do |xnes, s_inds|
-        xnes.train picks: s_inds
+      if parallel_update
+        # Parallel.each(blocks.zip(picks)) do |xnes, s_inds|
+        #   xnes.train picks: s_inds
+        # end
+        # Actually it's not this simple.
+        # Forks do not act on the parent, so I need to send back updated mu and sigma
+        # Luckily we have `NES#save` and `NES#load` at the ready
+        # Next: need to implement `#marshal_dump` and `#marshal_load` in `Base`
+        # Actually using `Cumo` rather than `Parallel` may avoid marshaling altogether
+        raise NotImplementedError, "Should dump and load each instance"
+      else
+        blocks.zip(picks).each do |xnes, s_inds|
+          xnes.train picks: s_inds
+        end
      end
    end
 
@@ -92,6 +109,10 @@ module MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies
      blocks.map(&:mu).reduce { |mem, var| mem.concatenate var, axis: 1 }
    end
 
+    def sigma
+      raise NotImplementedError, "need to write a concatenation like for mu here"
+    end
+
    def convergence
      blocks.map(&:convergence).reduce(:+)
    end
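A hedged sketch of the block decomposition (same assumed objective signature as above; `parallel_update: true` is accepted but its code path currently raises `NotImplementedError`, as shown in the diff):

    require 'machine_learning_workbench'
    BDNES = MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies::BDNES

    # Two blocks of 3 and 5 dimensions; per the comment in the diff, the
    # objective still receives whole, reconstructed 8-dimensional individuals
    bdnes = BDNES.new [3, 5], -> (ind) { (ind**2).sum }, :min
    10.times { bdnes.train }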
data/lib/machine_learning_workbench/optimizer/natural_evolution_strategies/fnes.rb CHANGED
@@ -1,3 +1,4 @@
+# frozen_string_literal: true
 
 module MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies
   # Fixed Variance Natural Evolution Strategies
data/lib/machine_learning_workbench/optimizer/natural_evolution_strategies/rnes.rb CHANGED
@@ -1,3 +1,4 @@
+# frozen_string_literal: true
 
 module MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies
   # Radial Natural Evolution Strategies
data/lib/machine_learning_workbench/optimizer/natural_evolution_strategies/snes.rb CHANGED
@@ -1,3 +1,4 @@
+# frozen_string_literal: true
 
 module MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies
   # Separable Natural Evolution Strategies
data/lib/machine_learning_workbench/optimizer/natural_evolution_strategies/xnes.rb CHANGED
@@ -1,3 +1,4 @@
+# frozen_string_literal: true
 
 module MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies
   # Exponential Natural Evolution Strategies
@@ -11,17 +12,23 @@ module MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies
        NArray[mu_init]
      when Numeric
        NArray.new([1,ndims]).fill mu_init
+      when NArray
+        raise ArgumentError unless mu_init.size == ndims
+        mu_init.ndim < 2 ? mu_init.reshape(1, ndims) : mu_init
      else
        raise ArgumentError, "Something is wrong with mu_init: #{mu_init}"
      end
      @sigma = case sigma_init
-      when Array
-        raise ArgumentError unless sigma_init.size == ndims
-        NArray[*sigma_init].diag
-      when Numeric
-        NArray.new([ndims]).fill(sigma_init).diag
-      else
-        raise ArgumentError, "Something is wrong with sigma_init: #{sigma_init}"
+        when Array
+          raise ArgumentError unless sigma_init.size == ndims
+          NArray[*sigma_init].diag
+        when Numeric
+          NArray.new([ndims]).fill(sigma_init).diag
+        when NArray
+          raise ArgumentError unless sigma_init.size == ndims**2
+          sigma_init.ndim < 2 ? sigma_init.reshape(ndims, ndims) : sigma_init
+        else
+          raise ArgumentError, "Something is wrong with sigma_init: #{sigma_init}"
      end
      # Works with the log of sigma to avoid continuous decompositions (thanks Sun Yi)
      @log_sigma = NMath.log(sigma.diagonal).diag
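The new `NArray` branches allow restarting a search from explicit distribution parameters, e.g. ones saved from a previous run. A hedged sketch (`obj_fn` stands in for your objective; `NArray` is the gem's alias used throughout this diff):

    # mu as a [1, ndims] row, sigma as a full [ndims, ndims] covariance
    mu    = NArray[[0.1, -0.2, 0.3]]
    sigma = NArray.eye(3) * 0.5
    nes = MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies::XNES.new(
      3, obj_fn, :min, mu_init: mu, sigma_init: sigma)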
data/lib/machine_learning_workbench/systems.rb CHANGED
@@ -1 +1,3 @@
+# frozen_string_literal: true
+
 require_relative 'systems/neuroevolution'
data/lib/machine_learning_workbench/systems/neuroevolution.rb CHANGED
@@ -1,2 +1,3 @@
+# frozen_string_literal: true
 
 "Work in progress"
data/lib/machine_learning_workbench/tools.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require_relative 'tools/execution'
 require_relative 'tools/normalization'
 require_relative 'tools/imaging'
data/lib/machine_learning_workbench/tools/execution.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module MachineLearningWorkbench::Tools
   module Execution
     $fork_pids ||= []
data/lib/machine_learning_workbench/tools/imaging.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module MachineLearningWorkbench::Tools
   module Imaging
     Forkable = MachineLearningWorkbench::Tools::Execution
data/lib/machine_learning_workbench/tools/normalization.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module MachineLearningWorkbench::Tools
   module Normalization
     def self.feature_scaling narr, from: nil, to: [0,1]
data/lib/machine_learning_workbench/tools/verification.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module MachineLearningWorkbench::Tools
   module Verification
     # TODO: switch to NArray
data/machine_learning_workbench.gemspec CHANGED
@@ -1,3 +1,4 @@
+# frozen_string_literal: true
 
 lib = File.expand_path("../lib", __FILE__)
 $LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: machine_learning_workbench
 version: !ruby/object:Gem::Version
-  version: 0.6.1
+  version: 0.7.0
 platform: ruby
 authors:
 - Giuseppe Cuccu
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2018-04-12 00:00:00.000000000 Z
+date: 2018-04-20 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: bundler
@@ -204,6 +204,7 @@ files:
 - lib/machine_learning_workbench/compressor.rb
 - lib/machine_learning_workbench/compressor/copy_vq.rb
 - lib/machine_learning_workbench/compressor/decaying_learning_rate_vq.rb
+- lib/machine_learning_workbench/compressor/incr_dict_vq.rb
 - lib/machine_learning_workbench/compressor/vector_quantization.rb
 - lib/machine_learning_workbench/monkey.rb
 - lib/machine_learning_workbench/neural_network.rb