neuronet 6.1.0 → 7.0.230416

data/lib/neuronet.rb CHANGED
@@ -1,621 +1,15 @@
1
- # Neuronet module
2
- module Neuronet
3
- VERSION = '6.1.0'
4
-
5
- # An artificial neural network uses a squash function
6
- # to determine the activation value of a neuron.
7
- # The squash function for Neuronet is the
8
- # [Sigmoid function](http://en.wikipedia.org/wiki/Sigmoid_function)
9
- # which sets the neuron's activation value between 1.0 and 0.0.
10
- # This activation value is often thought of on/off or true/false.
11
- # For classification problems, activation values near one are considered true
12
- # while activation values near 0.0 are considered false.
13
- # In Neuronet I make a distinction between the neuron's activation value and
14
- # its representation to the problem.
15
- # This attribute, activation, need never appear in an implementation of Neuronet, but
16
- # it is mapped back to its unsquashed value every time
17
- # the implementation asks for the neuron's value.
18
- # One should scale the problem with most data points between -1 and 1,
19
- # extremes under two sigmas, and no outliers beyond three sigmas.
20
- # Standard deviations from the mean are probably a good way to figure the scale of the problem.
21
- def self.squash(unsquashed)
22
- 1.0 / (1.0 + Math.exp(-unsquashed))
23
- end
24
-
25
- def self.unsquash(squashed)
26
- Math.log(squashed / (1.0 - squashed))
27
- end
28
-
29
- BZERO = 1.0/(1.0-2.0*squash(1.0))
30
- WONE = -2.0*BZERO
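A quick check of the relationships documented above, as a sketch against the 6.1.0 module functions being removed here (the sample value 0.37 is arbitrary): unsquash inverts squash, and BZERO + WONE * squash(v) reproduces v exactly at -1, 0, and 1, which is the identity the Yin/Yang mirroring further down relies on.

    require 'neuronet'  # 6.1.0

    puts Neuronet.unsquash(Neuronet.squash(0.37))   # ≈ 0.37 (round trip)
    [-1.0, 0.0, 1.0].each do |v|
      # BZERO + WONE*squash(v) mirrors v exactly at -1, 0, and 1
      puts Neuronet::BZERO + Neuronet::WONE * Neuronet.squash(v)
    end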
31
-
32
- # Although the implementation is free to set all parameters for each neuron,
33
- # Neuronet by default creates zeroed neurons.
34
- # Association between inputs and outputs are trained, and
35
- # neurons differentiate from each other randomly.
36
- # Differentiation among neurons is achieved by noise in the back-propagation of errors.
37
- # This noise is provided by Neuronet.noise.
38
- # I chose rand + rand to give the noise an average value of one and a roughly bell-shaped (triangular) distribution.
39
- def self.noise
40
- rand + rand
41
- end
42
-
43
- # In Neuronet, there are two main types of objects: Nodes and Connections.
44
- # A Node has a value which the implementation can set.
45
- # A plain Node instance is used primarily as an input neuron, and
46
- # its value is not changed by training.
47
- # It is a terminal for backpropagation of errors.
48
- # Nodes are used for the input layer.
49
- class Node
50
- attr_reader :activation
51
- # A Node is constant (Input)
52
- alias update activation
53
-
54
- # The "real world" value of a node is the value of it's activation unsquashed.
55
- def value=(val)
56
- @activation = Neuronet.squash(val)
57
- end
58
-
59
- def initialize(val=0.0)
60
- self.value = val
61
- end
62
-
63
- # The "real world" value is stored as a squashed activation.
64
- def value
65
- Neuronet.unsquash(@activation)
66
- end
67
-
68
- # Node is a terminal where backpropagation ends.
69
- def backpropagate(error)
70
- # to be over-ridden
71
- nil
72
- end
73
- end
74
-
75
- # Connections between neurons (and nodes) are their own separate objects.
76
- # In Neuronet, a neuron contains its bias and a list of its connections.
77
- # Each connection contains its weight (strength) and connected node.
78
- class Connection
79
- attr_accessor :node, :weight
80
- def initialize(node, weight=0.0)
81
- @node, @weight = node, weight
82
- end
83
-
84
- # The value of a connection is the weighted activation of the connected node.
85
- def value
86
- @node.activation * @weight
87
- end
88
-
89
- # Connection#update returns the updated value of a connection,
90
- # which is the weighted updated activation of
91
- # the node it's connected to ( weight * node.update ).
92
- # This method is the one to use
93
- # whenever the value of the inputs are changed (right after training).
94
- # Otherwise, both update and value should give the same result.
95
- # Use Connection#value instead when back calculations are not needed.
96
- def update
97
- @node.update * @weight
98
- end
99
-
100
- # Connection#backpropagate modifies the connection's weight
101
- # in proportion to the error given and passes that error
102
- # to its connected node via the node's backpropagate method.
103
- def backpropagate(error)
104
- @weight += @node.activation * error * Neuronet.noise
105
- @node.backpropagate(error)
106
- end
107
- end
108
-
109
- # A Neuron is a Node with some extra features.
110
- # It adds two attributes: connections, and bias.
111
- # The connections attribute is a list of
112
- # the neuron's connections to other neurons (or nodes).
113
- # A neuron's bias is its kicker (or deduction) to its activation value,
114
- # a sum of its connections' values.
115
- class Neuron < Node
116
- attr_reader :connections
117
- attr_accessor :bias
118
- def initialize(bias=0.0)
119
- super(bias)
120
- @connections = []
121
- @bias = bias
122
- end
123
-
124
- # Updates the activation with the current value of bias and updated values of connections.
125
- # If you're not familiar with Ruby's Array#inject method,
126
- # it is a Ruby way of doing summations. Check out:
127
- # [Jay Field's Thoughts on Ruby: inject](http://blog.jayfields.com/2008/03/ruby-inject.html)
128
- # [Induction ( for_all )](http://carlosjhr64.blogspot.com/2011/02/induction.html)
129
- def update
130
- self.value = @bias + @connections.inject(0.0){|sum,connection| sum + connection.update}
131
- end
132
-
133
- # For when connections are already updated,
134
- # Neuron#partial updates the activation with the current values of bias and connections.
135
- # It is not always necessary to burrow all the way down to the terminal input node
136
- # to update the current neuron if its connected neurons have all been updated.
137
- # The implementation should set its algorithm to use partial
138
- # instead of update, as update will most likely needlessly re-update previously updated neurons.
139
- def partial
140
- self.value = @bias + @connections.inject(0.0){|sum,connection| sum + connection.value}
141
- end
142
-
143
- # The backpropagate method modifies
144
- # the neuron's bias in proportion to the given error and
145
- # passes on this error to each of its connection's backpropagate method.
146
- # While updates flow from input to output,
147
- # back-propagation of errors flows from output to input.
148
- def backpropagate(error)
149
- # Adjusts bias according to error and...
150
- @bias += error * Neuronet.noise
151
- # backpropagates the error to the connections.
152
- @connections.each{|connection| connection.backpropagate(error)}
153
- end
154
-
155
- # Connects the neuron to another node.
156
- # Updates the activation with the new connection.
157
- # The default weight=0 means there is no initial association.
158
- # The connect method is how the implementation adds a connection,
159
- # the way to connect the neuron to another.
160
- # To connect neuron output to neuron input, for example, it is:
161
- # input = Neuronet::Neuron.new
162
- # output = Neuronet::Neuron.new
163
- # output.connect(input)
164
- # Think output connects to input.
165
- def connect(node, weight=0.0)
166
- @connections.push(Connection.new(node,weight))
167
- update
168
- end
169
- end
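A minimal sketch of the Node/Neuron API documented above; the names and numbers are invented for illustration, and it assumes the 6.1.0 gem is installed as 'neuronet'.

    require 'neuronet'  # 6.1.0

    input  = Neuronet::Node.new(1.5)   # input terminal; not changed by training
    output = Neuronet::Neuron.new      # zeroed neuron: bias 0.0, no connections
    output.connect(input)              # default weight 0.0, so no association yet
    output.backpropagate(0.1)          # nudges bias and weight by error * noise
    puts output.update                 # recomputed "real world" value, now above 0.0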
170
-
171
- # Neuronet::InputLayer is an Array of Neuronet::Node's.
172
- # It can be used for the input layer of a feed forward network.
173
- class InputLayer < Array
174
- def initialize(length) # number of nodes
175
- super(length)
176
- 0.upto(length-1){|index| self[index] = Neuronet::Node.new }
177
- end
178
-
179
- # This is where one enters the "real world" inputs.
180
- def set(inputs)
181
- 0.upto(self.length-1){|index| self[index].value = inputs[index]}
182
- end
183
- end
184
-
185
- # Just a regular Layer.
186
- # InputLayer is to Layer what Node is to Neuron.
187
- class Layer < Array
188
- def initialize(length)
189
- super(length)
190
- 0.upto(length-1){|index| self[index] = Neuronet::Neuron.new }
191
- end
192
-
193
- # Allows one to fully connect layers.
194
- def connect(layer, weight=0.0)
195
- # creates the neuron matrix... note that node can be either Neuron or Node class.
196
- self.each{|neuron| layer.each{|node| neuron.connect(node,weight) }}
197
- end
198
-
199
- # updates layer with current values of the previous layer
200
- def partial
201
- self.each{|neuron| neuron.partial}
202
- end
203
-
204
- # Takes the real world targets for each node in this layer
205
- # and backpropagates the error to each node.
206
- # Note that the learning constant is really a value
207
- # that needs to be determined for each network.
208
- def train(targets, learning)
209
- 0.upto(self.length-1) do |index|
210
- node = self[index]
211
- node.backpropagate(learning*(targets[index] - node.value))
212
- end
213
- end
214
-
215
- # Returns the real world values of this layer.
216
- def values
217
- self.map{|node| node.value}
218
- end
219
- end
220
-
221
- # A Feed Forward Network
222
- class FeedForward < Array
223
- # Whatchamacallits?
224
- # The learning constant is given different names...
225
- # often some greek letter.
226
- # It's a small number less than one.
227
- # Ideally, it divides the errors evenly among all contributors.
228
- # Contributors are the neurons' biases and the connections' weights.
229
- # Thus if one counts all the contributors as N,
230
- # the learning constant should be at most 1/N.
231
- # But there are other considerations, such as how noisy the data is.
232
- # In any case, I'm calling this N value FeedForward#mu.
233
- # 1/mu is used for the initial default value for the learning constant.
234
- def mu
235
- sum = 1.0
236
- 1.upto(self.length-1) do |i|
237
- n, m = self[i-1].length, self[i].length
238
- sum += n + n*m
239
- end
240
- return sum
241
- end
242
- # Given that the learning constant is initially set to 1/mu as defined above,
243
- # muk gives a way to modify the learning constant by some factor, k.
244
- # In theory, when there is no noise in the target data, k can be set to 1.0.
245
- # If the data is noisy, k is set to some value less than 1.0.
246
- def muk(k=1.0)
247
- @learning = k/mu
248
- end
249
- # Given that the learning constant can be modified by some factor k with #muk,
250
- # #num gives an alternate way to express
251
- # the k factor in terms of some number n greater than 1, setting k to 1/sqrt(n).
252
- # I believe that the optimal value for the learning constant
253
- # for a training set of size n is somewhere between #muk(1) and #num(n).
254
- # Whereas the learning constant can be too high,
255
- # a low learning value just increases the training time.
256
- def num(n)
257
- muk(1.0/(Math.sqrt(n)))
258
- end
259
-
260
- attr_reader :in, :out
261
- attr_reader :yin, :yang
262
- attr_accessor :learning
263
-
264
- # I find it very useful to name certain layers:
265
- # [0] @in Input Layer
266
- # [1] @yin Typically the first middle layer
267
- # [-2] @yang Typically the last middle layer
268
- # [-1] @out Output Layer
269
- def initialize(layers)
270
- super(length = layers.length)
271
- @in = self[0] = Neuronet::InputLayer.new(layers[0])
272
- (1).upto(length-1){|index|
273
- self[index] = Neuronet::Layer.new(layers[index])
274
- self[index].connect(self[index-1])
275
- }
276
- @out = self.last
277
- @yin = self[1] # first middle layer
278
- @yang = self[-2] # last middle layer
279
- @learning = 1.0/mu
280
- end
281
-
282
- def update
283
- # update up the layers
284
- (1).upto(self.length-1){|index| self[index].partial}
285
- end
286
-
287
- def set(inputs)
288
- @in.set(inputs)
289
- update
290
- end
291
-
292
- def train!(targets)
293
- @out.train(targets, @learning)
294
- update
295
- end
296
-
297
- # trains an input/output pair
298
- def exemplar(inputs, targets)
299
- set(inputs)
300
- train!(targets)
301
- end
302
-
303
- def input
304
- @in.values
305
- end
306
-
307
- def output
308
- @out.values
309
- end
310
- end
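A minimal training-loop sketch against the FeedForward API above; the layer sizes, inputs, and target are invented for illustration.

    require 'neuronet'  # 6.1.0

    net = Neuronet::FeedForward.new([3, 3, 1])  # input, middle, and output layers
    # net.num(100)  # optionally soften the learning constant for noisy data
    100.times { net.exemplar([0.1, 0.2, 0.3], [0.6]) }  # train an input/target pair
    net.set([0.1, 0.2, 0.3])
    puts net.output.first   # drifts toward 0.6 as training repeats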
311
-
312
- # Neuronet::Scale is a class to
313
- # help scale problems to fit within a network's "field of view".
314
- # Given a list of values, it finds the minimum and maximum values and
315
- # establishes a mapping to a scaled set of numbers between minus one and one (-1,1).
316
- class Scale
317
- attr_accessor :spread, :center
318
- attr_writer :init
319
-
320
- # If the value of center is provided, then
321
- # that value will be used instead of
322
- # calculating it from the values passed to method set.
323
- # Likewise, if spread is provided, that value of spread will be used.
324
- # The attribute @init flags if
325
- # there is an initiation phase to the calculation of @spread and @center.
326
- # For Scale, @init is true and the initiation phase calculates
327
- # the intermediate values @min and @max (the minimum and maximum values in the data set).
328
- # It's possible for subclasses of Scale, such as Gaussian, to not have this initiation phase.
329
- def initialize(factor=1.0,center=nil,spread=nil)
330
- @factor,@center,@spread = factor,center,spread
331
- @centered, @spreaded = center.nil?, spread.nil?
332
- @init = true
333
- end
334
-
335
- def set_init(inputs)
336
- @min, @max = inputs.minmax
337
- end
338
-
339
- # In this case, inputs is unused, but
340
- # it's there for the general case.
341
- def set_spread(inputs)
342
- @spread = (@max - @min) / 2.0
343
- end
344
-
345
- # In this case, inputs is unused, but
346
- # it's there for the general case.
347
- def set_center(inputs)
348
- @center = (@max + @min) / 2.0
349
- end
350
-
351
- def set(inputs)
352
- set_init(inputs) if @init
353
- set_center(inputs) if @centered
354
- set_spread(inputs) if @spreaded
355
- end
356
-
357
- def mapped(inputs)
358
- factor = 1.0 / (@factor*@spread)
359
- inputs.map{|value| factor*(value - @center)}
360
- end
361
- alias mapped_input mapped
362
- alias mapped_output mapped
363
-
364
- # Note that it could also unmap inputs, but
365
- # outputs is typically what's being transformed back.
366
- def unmapped(outputs)
367
- factor = @factor*@spread
368
- outputs.map{|value| factor*value + @center}
369
- end
370
- alias unmapped_input unmapped
371
- alias unmapped_output unmapped
372
- end
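A worked example of the mapping described above, using arbitrary sample data: with values 2.0 through 8.0, the center comes out to 5.0 and the spread to 3.0, so mapped values land in [-1, 1] and unmapped values return to the original range.

    require 'neuronet'  # 6.1.0

    data  = [2.0, 5.0, 8.0]
    scale = Neuronet::Scale.new
    scale.set(data)                     # center = 5.0, spread = 3.0
    p scale.mapped(data)                # => [-1.0, 0.0, 1.0]
    p scale.unmapped([-1.0, 0.0, 1.0])  # => [2.0, 5.0, 8.0]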
373
-
374
- # "Normal Distribution"
375
- # Gaussian subclasses Scale and is used exactly the same way.
376
- # The only changes are that it calculates the arithmetic mean (average) for center and
377
- # the standard deviation for spread.
378
- class Gaussian < Scale
379
- def initialize(factor=1.0,center=nil,spread=nil)
380
- super(factor, center, spread)
381
- self.init = false
382
- end
383
-
384
- def set_center(inputs)
385
- self.center = inputs.inject(0.0,:+) / inputs.length
386
- end
387
-
388
- def set_spread(inputs)
389
- self.spread = Math.sqrt(inputs.map{|value|
390
- self.center - value}.inject(0.0){|sum,value|
391
- value*value + sum} / (inputs.length - 1.0))
392
- end
393
- end
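The same usage with Gaussian, again on invented sample data: center becomes the mean and spread the sample standard deviation, so mapped values are expressed in standard deviations from the mean.

    require 'neuronet'  # 6.1.0

    data = [1.0, 2.0, 3.0, 4.0, 5.0]
    g = Neuronet::Gaussian.new
    g.set(data)
    p g.center        # => 3.0 (mean)
    p g.spread        # => ~1.58 (sample standard deviation)
    p g.mapped(data)  # => values in standard deviations from the mean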
394
-
395
- # "Log-Normal Distribution"
396
- # LogNormal subclasses Gaussian to transform the values to a logarithmic scale.
397
- class LogNormal < Gaussian
398
- def initialize(factor=1.0,center=nil,spread=nil)
399
- super(factor, center, spread)
400
- end
401
-
402
- def set(inputs)
403
- super( inputs.map{|value| Math::log(value)} )
404
- end
405
-
406
- def mapped(inputs)
407
- super( inputs.map{|value| Math::log(value)} )
408
- end
409
- alias mapped_input mapped
410
- alias mapped_output mapped
411
-
412
- def unmapped(outputs)
413
- super(outputs).map{|value| Math::exp(value)}
414
- end
415
- alias unmapped_input unmapped
416
- alias unmapped_output unmapped
417
- end
418
-
419
- # ScaledNetwork is a subclass of FeedForward.
420
- # It automatically scales the problem given to it
421
- # by using a Scale type instance set in @distribution.
422
- # The attribute, @distribution, is set to Neuronet::Gaussian.new by default,
423
- # but one can change this to Scale, LogNormal, or one's own custom mapper.
424
- class ScaledNetwork < FeedForward
425
- attr_accessor :distribution
426
-
427
- def initialize(layers)
428
- super(layers)
429
- @distribution = Gaussian.new
430
- end
431
-
432
- def train!(targets)
433
- super(@distribution.mapped_output(targets))
434
- end
435
-
436
- # @param (List of Float) inputs
437
- def set(inputs)
438
- super(@distribution.mapped_input(inputs))
439
- end
440
-
441
- # ScaledNetwork#reset works just like FeedForward's set method,
442
- # but calls distribution.set( values ) first.
443
- # Sometimes you'll want to set the distribution
444
- # with the entire data set and then use set,
445
- # and then there will be times you'll want to
446
- # set the distribution with each input and use reset.
447
- def reset(inputs)
448
- @distribution.set(inputs)
449
- set(inputs)
450
- end
451
-
452
- def output
453
- @distribution.unmapped_output(super)
454
- end
455
-
456
- def input
457
- @distribution.unmapped_input(super)
458
- end
459
- end
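A sketch of ScaledNetwork per the documentation above, with an invented series and target: reset re-derives the distribution from each input before setting it, the target is mapped through that same distribution, and output is unmapped back to the problem's scale.

    require 'neuronet'  # 6.1.0

    series = [3.0, 4.0, 5.0]
    net = Neuronet::ScaledNetwork.new([3, 3, 1])
    net.reset(series)       # distribution.set(series), then set(series)
    net.train!([6.0])       # target is mapped through the same distribution
    puts net.output.first   # unmapped back to the problem's scale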
460
-
461
- # A Perceptron Hybrid,
462
- # Tao directly connects the output layer to the input layer.
463
- module Tao
464
- # Tao's extra connections add to mu.
465
- def mu
466
- sum = super
467
- sum += self.first.length * self.last.length
468
- return sum
469
- end
470
- # Tao.bless connects the network's output layer to the input layer,
471
- # extends it with Tao, and modifies the learning constant if needed.
472
- def self.bless(myself)
473
- # @out directly connects to @in
474
- myself.out.connect(myself.in)
475
- myself.extend Tao
476
- # Save current learning and set it to muk(1).
477
- l, m = myself.learning, myself.muk
478
- # If learning was lower before, revert.
479
- myself.learning = l if l<m
480
- return myself
481
- end
482
- end
483
-
484
- # Yin is a network which has its @yin layer initially mirroring @in.
485
- module Yin
486
- # Yin.bless increments the bias of each @yin[i] by BZERO, and
487
- # the weight of pairing (@yin[i], @in[i]) connections by WONE.
488
- # This makes @yin initially mirror @in.
489
- # The pairing is done starting with (@yin[0], @in[0]).
490
- # That is, starting with (@yin.first, @in.first).
491
- def self.bless(myself)
492
- yin = myself.yin
493
- if yin.length < (in_length = myself.in.length)
494
- raise "First hidden layer, yin, needs to have at least the same length as input"
495
- end
496
- # connections from yin[i] to in[i] are WONE... mirroring to start.
497
- 0.upto(in_length-1) do |index|
498
- node = yin[index]
499
- node.connections[index].weight += WONE
500
- node.bias += BZERO
501
- end
502
- return myself
503
- end
504
- end
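A quick way to see the Yin mirror at work (a sketch; layer sizes and inputs invented): right after blessing, the @yin layer echoes the input values exactly at -1, 0, and 1.

    require 'neuronet'  # 6.1.0

    net = Neuronet::FeedForward.new([3, 3, 1])
    Neuronet::Yin.bless(net)
    net.set([-1.0, 0.0, 1.0])
    p net.yin.values   # ≈ [-1.0, 0.0, 1.0]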
505
-
506
- # Yang is a network which has its @out layer initially mirroring @yang.
507
- module Yang
508
- # Yang.bless increments the bias of each @yang[i] by BZERO, and
509
- # the weight of pairing (@out[i], @yang[i]) connections by WONE.
510
- # This makes @out initially mirror @yang.
511
- # The pairing is done starting with (@out[-1], @yang[-1]).
512
- # That is, starting with (@out.last, @yang.last).
513
- def self.bless(myself)
514
- offset = myself.yang.length - (out_length = (out = myself.out).length)
515
- raise "Last hidden layer, yang, needs to have at least the same length as output" if offset < 0
516
- # Although the algorithm here is not as described,
517
- # the net effect is to pair @out.last with @yang.last, and so on down.
518
- 0.upto(out_length-1) do |index|
519
- node = out[index]
520
- node.connections[offset+index].weight += WONE
521
- node.bias += BZERO
522
- end
523
- return myself
524
- end
525
- end
526
-
527
- # A Yin Yang composite provided for convenience.
528
- module YinYang
529
- def self.bless(myself)
530
- Yang.bless(myself)
531
- Yin.bless(myself)
532
- return myself
533
- end
534
- end
535
-
536
- # A Tao Yin Yang composite provided for convenience.
537
- module TaoYinYang
538
- def self.bless(myself)
539
- Yang.bless(myself)
540
- Yin.bless(myself)
541
- Tao.bless(myself)
542
- return myself
543
- end
544
- end
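The bless pattern decorates an already-built network in place; a sketch with an invented four-layer shape (Yin needs @yin at least as long as @in, and Yang needs @yang at least as long as @out):

    require 'neuronet'  # 6.1.0

    net = Neuronet::FeedForward.new([4, 4, 4, 4])
    Neuronet::TaoYinYang.bless(net)   # out connects to in; @yin mirrors @in; @out mirrors @yang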
545
-
546
- # A Tao Yin composite provided for convenience.
547
- module TaoYin
548
- def self.bless(myself)
549
- Yin.bless(myself)
550
- Tao.bless(myself)
551
- return myself
552
- end
553
- end
554
-
555
- # A Tao Yang composite provided for convenience.
556
- module TaoYang
557
- def self.bless(myself)
558
- Yang.bless(myself)
559
- Tao.bless(myself)
560
- return myself
561
- end
562
- end
563
-
564
- # Brahma is a network which has its @yin layer initially mirroring and "shadowing" @in.
565
- # I'm calling it shadow until I can think of a better name.
566
- # Note that Brahma and Yin blessings overwrite each other, so combining them is probably useless.
567
- module Brahma
568
- # Brahma.bless increments the weights of pairing even yin (@yin[2*i], @in[i]) connections by WONE,
569
- # and pairing odd yin (@yin[2*i+1], @in[i]) connections by negative WONE.
570
- # Likewise the bias with BZERO.
571
- # This makes @yin initially mirror and shadow @in.
572
- # The pairing is done starting with (@yin[0], @in[0]).
573
- # That is, starting with (@yin.first, @in.first).
574
- def self.bless(myself)
575
- yin = myself.yin
576
- if yin.length < 2*(in_length = myself.in.length)
577
- raise "First hidden layer, yin, needs to be at least twice the length as input"
578
- end
579
- # connections from yin[2*i] to in[i] are WONE... mirroring to start.
580
- # connections from yin[2*i+1] to in[i] are -WONE... shadowing to start.
581
- 0.upto(in_length-1) do |index|
582
- even = yin[2*index]
583
- odd = yin[(2*index)+1]
584
- even.connections[index].weight += WONE
585
- even.bias += BZERO
586
- odd.connections[index].weight -= WONE
587
- odd.bias -= BZERO
588
- end
589
- return myself
590
- end
591
- end
592
-
593
- # A Brahma Yang composite provided for convenience.
594
- module BrahmaYang
595
- def self.bless(myself)
596
- Brahma.bless(myself)
597
- Yang.bless(myself)
598
- return myself
599
- end
600
- end
601
-
602
- # A Tao Brahma composite provided for convenience.
603
- module TaoBrahma
604
- def self.bless(myself)
605
- Brahma.bless(myself)
606
- Tao.bless(myself)
607
- return myself
608
- end
609
- end
610
-
611
- # A Tao Brahma Yang composite provided for convenience.
612
- module TaoBrahmaYang
613
- def self.bless(myself)
614
- Yang.bless(myself)
615
- Brahma.bless(myself)
616
- Tao.bless(myself)
617
- return myself
618
- end
619
- end
1
+ # frozen_string_literal: true
620
2
 
3
+ # Neuronet is a neural network library for Ruby.
4
+ module Neuronet
5
+ VERSION = '7.0.230416'
6
+ require_relative 'neuronet/constants'
7
+ autoload :Connection, 'neuronet/connection'
8
+ autoload :Neuron, 'neuronet/neuron'
9
+ autoload :Layer, 'neuronet/layer'
10
+ autoload :FeedForward, 'neuronet/feed_forward'
11
+ autoload :Scale, 'neuronet/scale'
12
+ autoload :Gaussian, 'neuronet/gaussian'
13
+ autoload :LogNormal, 'neuronet/log_normal'
14
+ autoload :ScaledNetwork, 'neuronet/scaled_network'
621
15
  end
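For the 7.0 layout above, a brief sketch of the loading behavior (assuming the gem's lib directory is on the load path): constants are required eagerly, while each class file is pulled in lazily on first reference.

    require 'neuronet'  # 7.0.230416

    puts Neuronet::VERSION   # => "7.0.230416"
    Neuronet::FeedForward    # first reference triggers require 'neuronet/feed_forward'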