machine_learning_workbench 0.4.0 → 0.4.2

Sign up to get free protection for your applications and to get access to all the features.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA1:
3
- metadata.gz: 743cc4c65bda521785e00dc563b20fcd5660a6af
4
- data.tar.gz: 80924d34dc550df7b25d565022f50297b8bb72d5
3
+ metadata.gz: 05de54fb5221e0b9c4e0a7f59f79ed3b0fb7c166
4
+ data.tar.gz: da580d26078824e27aff0a518d8a57080d06c2d7
5
5
  SHA512:
6
- metadata.gz: 2ef96c2884a6f43304f0dba63239a7469b80c7be46c97af7c8de283a448a8ee049b55cdc98b0aa5bfde82fc024fde891c982ec621bb4cafcd6070814e8366976
7
- data.tar.gz: 0b5571eb26babf90015deec8425dc0a9c75bebd47ef50b4927a131e86f5f0c9e6e828e3ee1b212597222b9aa7209cd54f6b3db7a9c8ec6afcceafe94016165a1
6
+ metadata.gz: 50df1af8dc0cfdf8c3ecf3e99553749f25fc562ab022032b31d43bbc7986f17ec2a444a1a60efb06c55f2c85a825986fd45d3f8753ff3eb81b9f51549603e476
7
+ data.tar.gz: c84df475c3be8175d7c46c83bfdc755803d63c060f1ed7ae4749602003a7f4917726a89412d63011b9d144d241cbf35c24c1cfd871beeae5d379cada304aba23
data/.travis.yml CHANGED
@@ -5,5 +5,5 @@ rvm:
5
5
  addons:
6
6
  apt:
7
7
  packages:
8
- - libatlas-base-dev
9
- before_install: gem install bundler -v 1.16.0
8
+ - libopenblas-base
9
+ before_install: gem install bundler -v 1.16.0
@@ -44,16 +44,15 @@ module MachineLearningWorkbench::NeuralNetwork
44
44
  def reset_state
45
45
  state.each do |s|
46
46
  s.fill 0 # reset state to zero
47
- s[0,-1] = 1 # add bias
47
+ s[-1] = 1 # add bias
48
48
  end
49
- state[-1][0,-1] = 0 # last layer has no bias
49
+ state[-1][-1] = 0 # last layer has no bias
50
50
  end
51
51
 
52
52
  # Initialize the network with random weights
53
53
  def init_random
54
- # Will only be used for testing, no sense optimizing it now (NArray#rand)
55
54
  # Reusing `#load_weights` instead helps catching bugs
56
- load_weights nweights.times.collect { rand(-1.0..1.0) }
55
+ load_weights NArray.new(nweights).rand(-1,1)
57
56
  end
58
57
 
59
58
  ## Weight utilities
@@ -88,10 +87,9 @@ module MachineLearningWorkbench::NeuralNetwork
88
87
  end
89
88
 
90
89
  # Returns the weight matrix
91
- # @return [Array] three-dimensional Array of weights: a list of weight
92
- # matrices, one for each layer.
90
+ # @return [Array<NArray>] list of NArray matrices of weights (one per layer).
93
91
  def weights
94
- layers.collect(&:to_a)
92
+ layers
95
93
  end
96
94
 
97
95
  # Number of neurons per layer. Although this implementation includes inputs
@@ -126,12 +124,13 @@ module MachineLearningWorkbench::NeuralNetwork
126
124
  # all goes well there's nothing to return but a confirmation to the caller.
127
125
  def load_weights weights
128
126
  raise ArgumentError unless weights.size == nweights
129
- weights_iter = weights.each
130
- @layers ||= layer_shapes.collect { |shape| NArray.zeros shape }
131
- layers.each do |narr|
132
- narr.each_with_index do |_val, *idxs|
133
- narr[*idxs] = weights_iter.next
134
- end
127
+ weights = weights.to_na unless weights.kind_of? NArray
128
+ from = 0
129
+ @layers = layer_shapes.collect do |shape|
130
+ to = from + shape.reduce(:*)
131
+ lay_w = weights[from...to].reshape *shape
132
+ from = to
133
+ lay_w
135
134
  end
136
135
  reset_state
137
136
  return true
@@ -145,52 +144,49 @@ module MachineLearningWorkbench::NeuralNetwork
145
144
  # @return [Array] the activation of the output layer
146
145
  def activate input
147
146
  raise ArgumentError unless input.size == struct.first
148
- raise ArgumentError unless input.is_a? Array
149
147
  # load input in first state
150
- @state[0][0, 0..-2] = input
148
+ state[0][0...struct.first] = input
151
149
  # activate layers in sequence
152
150
  nlayers.times.each do |i|
153
151
  act = activate_layer i
154
- @state[i+1][0, 0...act.size] = act
152
+ state[i+1][0...act.size] = act
155
153
  end
156
154
  return out
157
155
  end
158
156
 
159
157
  # Extract and convert the output layer's activation
160
- # @return [Array] the activation of the output layer as 1-dim Array
158
+ # @return [NArray] the activation of the output layer
161
159
  def out
162
- state.last.to_a.flatten
160
+ state.last
163
161
  end
164
162
 
165
- # define #activate_layer in child class
166
-
167
163
  ## Activation functions
168
164
 
169
165
  # Traditional sigmoid with variable steepness
170
166
  def sigmoid k=0.5
171
167
  # k is steepness: 0<k<1 is flatter, 1<k is steeper
172
168
  # flatter makes activation less sensitive, better with large number of inputs
173
- lambda { |x| 1.0 / (Numo::NMath.exp(-k * x) + 1.0) }
169
+ -> (x) { 1.0 / (Numo::NMath.exp(-k * x) + 1.0) }
174
170
  end
175
171
 
176
172
  # Traditional logistic
177
173
  def logistic
178
- lambda { |x|
174
+ -> (x) do
179
175
  exp = Numo::NMath.exp(x)
180
176
  # exp.infinite? ? exp : exp / (1.0 + exp)
181
177
  exp / (1.0 + exp)
182
- }
178
+ end
183
179
  end
184
180
 
185
181
  # LeCun hyperbolic activation
186
182
  # @see http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf Section 4.4
187
183
  def lecun_hyperbolic
188
- lambda { |x| 1.7159 * Numo::NMath.tanh(2.0*x/3.0) + 1e-3*x }
184
+ -> (x) { 1.7159 * Numo::NMath.tanh(2.0*x/3.0) + 1e-3*x }
189
185
  end
190
186
 
191
187
  # Rectified Linear Unit (ReLU)
192
188
  def relu
193
- lambda { |x| (x>0).all? && x || x.class.zeros(x.shape) }
189
+ -> (x) { (x>0).all? && x || x.class.zeros(x.shape) }
194
190
  end
195
191
 
196
192
 
@@ -14,21 +14,29 @@ module MachineLearningWorkbench::NeuralNetwork
14
14
  end
15
15
  end
16
16
 
17
+ # # NOTE: current layer index corresponds to index of next state!
18
+ # previous = nlay # index of previous layer (inputs)
19
+ # current = nlay + 1 # index of current layer (outputs)
20
+ # # Copy the level's last-time activation to the input (previous state)
21
+ # # TODO: ranges in `NArray#[]` should be reliable, get rid of loop
22
+ # nneurs(current).times do |i| # for each activations to copy
23
+ # # Copy output from last-time activation to recurrency in previous state
24
+ # @state[previous][0, nneurs(previous) + i] = state[current][0, i]
25
+ # end
26
+ # act_fn.call state[previous].dot layers[nlay]
27
+
17
28
  # Activates a layer of the network.
18
29
  # Bit more complex since it has to copy the layer's activation on
19
30
  # last input to its own inputs, for recursion.
20
31
  # @param i [Integer] the layer to activate, zero-indexed
21
- def activate_layer nlay #_layer
22
- # NOTE: current layer index corresponds to index of next state!
23
- previous = nlay # index of previous layer (inputs)
24
- current = nlay + 1 # index of current layer (outputs)
25
- # Copy the level's last-time activation to the input (previous state)
26
- # TODO: ranges in `NArray#[]` should be reliable, get rid of loop
27
- nneurs(current).times do |i| # for each activations to copy
28
- # Copy output from last-time activation to recurrency in previous state
29
- @state[previous][0, nneurs(previous) + i] = state[current][0, i]
30
- end
31
- act_fn.call state[previous].dot layers[nlay]
32
+ def activate_layer nlay
33
+ # Mark begin and end of recursion outputs in current state
34
+ begin_recur = nneurs(nlay)
35
+ end_recur = nneurs(nlay) + nneurs(nlay+1)
36
+ # Copy the level's last-time activation to the current input recurrency
37
+ state[nlay][begin_recur...end_recur] = state[nlay+1][0...nneurs(nlay+1)]
38
+ # Activate current layer
39
+ act_fn.call state[nlay].dot layers[nlay]
32
40
  end
33
41
 
34
42
  end
@@ -119,7 +119,7 @@ module MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies
119
119
 
120
120
  sort_idxs = fits.sort_index
121
121
  sort_idxs = sort_idxs.reverse if opt_type == :min
122
- this_best = [fits[sort_idxs[-1]], inds[sort_idxs[-1]]]
122
+ this_best = [fits[sort_idxs[-1]], inds[sort_idxs[-1], true]]
123
123
 
124
124
  opt_cmp_fn = opt_type==:min ? :< : :>
125
125
  @best = this_best if this_best.first.send(opt_cmp_fn, best.first)
@@ -5,8 +5,7 @@ module MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies
5
5
 
6
6
  MAX_RSEED = 10**Random.new_seed.size # same range as Random.new_seed
7
7
 
8
- attr_reader :ndims_lst, :obj_fn, :opt_type, :parallel_fit, :blocks, :popsize, :rng,
9
- :best, :last_fits
8
+ attr_reader :ndims_lst, :blocks, :popsize
10
9
 
11
10
  # Initialize a list of XNES, one for each block
12
11
  # see class `Base` for the description of the rest of the arguments.
@@ -67,7 +66,7 @@ module MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies
67
66
  # sorted_samples = sorted.map(&:last)
68
67
  sort_idxs = fits.sort_index
69
68
  sort_idxs = sort_idxs.reverse if opt_type == :min
70
- this_best = [fits[sort_idxs[-1]], full_inds[sort_idxs[-1]]]
69
+ this_best = [fits[sort_idxs[-1]], full_inds[sort_idxs[-1], true]]
71
70
  opt_cmp_fn = opt_type==:min ? :< : :>
72
71
  @best = this_best if this_best.first.send(opt_cmp_fn, best.first)
73
72
  sorted_samples = full_samples.values_at *sort_idxs
@@ -34,21 +34,18 @@ Gem::Specification.new do |spec|
34
34
 
35
35
  # Test
36
36
  spec.add_development_dependency "rspec", "~> 3.0"
37
- spec.add_development_dependency "rmagick" # only used for one example
37
+ spec.add_development_dependency "rmagick" # uhm would gladly drop this
38
38
 
39
39
  # Debug
40
40
  spec.add_development_dependency "pry", "~> 0.10"
41
41
  spec.add_development_dependency "pry-nav", "~> 0.2"
42
42
  spec.add_development_dependency "pry-rescue", "~> 1.4"
43
43
  spec.add_development_dependency "pry-stack_explorer", "~> 0.4"
44
+ spec.add_development_dependency "pry-doc", "~> 0.12"
44
45
 
45
46
  # Run
46
47
  spec.requirements << "libopenblas-base" # library for following dependency
47
- spec.add_dependency "numo-linalg"
48
+ spec.add_dependency "numo-narray", "~> 0.9"
49
+ spec.add_dependency "numo-linalg", "~> 0.1"
48
50
  spec.add_dependency "parallel", "~> 1.12"
49
-
50
-
51
-
52
- # DELETEME
53
- spec.add_dependency "nmatrix-atlas"
54
51
  end
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: machine_learning_workbench
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.4.0
4
+ version: 0.4.2
5
5
  platform: ruby
6
6
  authors:
7
7
  - Giuseppe Cuccu
8
8
  autorequire:
9
9
  bindir: bin
10
10
  cert_chain: []
11
- date: 2018-03-25 00:00:00.000000000 Z
11
+ date: 2018-03-26 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: bundler
@@ -123,47 +123,61 @@ dependencies:
123
123
  - !ruby/object:Gem::Version
124
124
  version: '0.4'
125
125
  - !ruby/object:Gem::Dependency
126
- name: numo-linalg
126
+ name: pry-doc
127
127
  requirement: !ruby/object:Gem::Requirement
128
128
  requirements:
129
- - - ">="
129
+ - - "~>"
130
130
  - !ruby/object:Gem::Version
131
- version: '0'
131
+ version: '0.12'
132
+ type: :development
133
+ prerelease: false
134
+ version_requirements: !ruby/object:Gem::Requirement
135
+ requirements:
136
+ - - "~>"
137
+ - !ruby/object:Gem::Version
138
+ version: '0.12'
139
+ - !ruby/object:Gem::Dependency
140
+ name: numo-narray
141
+ requirement: !ruby/object:Gem::Requirement
142
+ requirements:
143
+ - - "~>"
144
+ - !ruby/object:Gem::Version
145
+ version: '0.9'
132
146
  type: :runtime
133
147
  prerelease: false
134
148
  version_requirements: !ruby/object:Gem::Requirement
135
149
  requirements:
136
- - - ">="
150
+ - - "~>"
137
151
  - !ruby/object:Gem::Version
138
- version: '0'
152
+ version: '0.9'
139
153
  - !ruby/object:Gem::Dependency
140
- name: parallel
154
+ name: numo-linalg
141
155
  requirement: !ruby/object:Gem::Requirement
142
156
  requirements:
143
157
  - - "~>"
144
158
  - !ruby/object:Gem::Version
145
- version: '1.12'
159
+ version: '0.1'
146
160
  type: :runtime
147
161
  prerelease: false
148
162
  version_requirements: !ruby/object:Gem::Requirement
149
163
  requirements:
150
164
  - - "~>"
151
165
  - !ruby/object:Gem::Version
152
- version: '1.12'
166
+ version: '0.1'
153
167
  - !ruby/object:Gem::Dependency
154
- name: nmatrix-atlas
168
+ name: parallel
155
169
  requirement: !ruby/object:Gem::Requirement
156
170
  requirements:
157
- - - ">="
171
+ - - "~>"
158
172
  - !ruby/object:Gem::Version
159
- version: '0'
173
+ version: '1.12'
160
174
  type: :runtime
161
175
  prerelease: false
162
176
  version_requirements: !ruby/object:Gem::Requirement
163
177
  requirements:
164
- - - ">="
178
+ - - "~>"
165
179
  - !ruby/object:Gem::Version
166
- version: '0'
180
+ version: '1.12'
167
181
  description: |-
168
182
  This workbench holds a collection of machine learning
169
183
  methods in Ruby. Rather than specializing on a single task or method, this