neuronet 7.0.230416 → 8.0.251113

This diff shows the changes between two publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
data/lib/neuronet/squash.rb ADDED
@@ -0,0 +1,12 @@
+ # frozen_string_literal: true
+
+ module Neuronet
+   # Squash provides the logistic sigmoid function.
+   module Squash
+     # Logistic sigmoid: maps Real to (0, 1).
+     def squash(value) = 1.0 / (1.0 + Math.exp(-value))
+     # Inverse sigmoid: maps (0, 1) to Real.
+     def unsquash(activation) = Math.log(activation / (1.0 - activation))
+     module_function :squash, :unsquash
+   end
+ end
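A quick sketch of the new mixin in use (assuming the v8 autoload wiring shown further below): since module_function is declared, both methods are callable directly on the module, and unsquash is the exact inverse of squash.

  require 'neuronet'
  Neuronet::Squash.squash(0.0)    # => 0.5
  Neuronet::Squash.unsquash(0.5)  # => 0.0
  # Round trip recovers the input (within floating-point error):
  Neuronet::Squash.unsquash(Neuronet::Squash.squash(1.5)) # => ~1.5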
data/lib/neuronet/trainable.rb ADDED
@@ -0,0 +1,29 @@
+ # frozen_string_literal: true
+
+ module Neuronet
+   # Trainable adds error backpropagation and training.
+   module Trainable
+     def pairs(pairs, nju: expected_nju)
+       pairs.shuffle.each { |inputs, targets| train(inputs, targets, nju:) }
+     end
+
+     def train(inputs, targets, nju:)
+       actuals = self * inputs
+       errors = targets.zip(actuals).map { |target, actual| target - actual }
+       error, index = pivot(errors)
+       neuron = output_layer[index]
+       neuron.backpropagate(error / nju)
+     end
+
+     def pivot(errors)
+       error = index = 0.0
+       errors.each_with_index do |e, i|
+         next unless e.abs > error.abs
+
+         error = e
+         index = i
+       end
+       [error, index]
+     end
+   end
+ end
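The pivot step means each train call backpropagates through only the output neuron with the largest-magnitude error. A standalone sketch of just the selection logic:

  errors = [0.1, -0.5, 0.3]
  error = index = 0.0
  errors.each_with_index do |e, i|
    error, index = e, i if e.abs > error.abs
  end
  [error, index] # => [-0.5, 1]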
data/lib/neuronet.rb CHANGED
@@ -1,15 +1,33 @@
  # frozen_string_literal: true

- # Neuronet is a neural network library for Ruby.
+ # Neuronet
  module Neuronet
-   VERSION = '7.0.230416'
-   require_relative 'neuronet/constants'
-   autoload :Connection, 'neuronet/connection'
-   autoload :Neuron, 'neuronet/neuron'
-   autoload :Layer, 'neuronet/layer'
-   autoload :FeedForward, 'neuronet/feed_forward'
-   autoload :Scale, 'neuronet/scale'
-   autoload :Gaussian, 'neuronet/gaussian'
-   autoload :LogNormal, 'neuronet/log_normal'
-   autoload :ScaledNetwork, 'neuronet/scaled_network'
+   VERSION = '8.0.251113'
+
+   autoload :Arrayable, 'neuronet/arrayable'
+   autoload :Exportable, 'neuronet/exportable'
+   autoload :Squash, 'neuronet/squash'
+   autoload :Connection, 'neuronet/connection'
+   autoload :Config, 'neuronet/config'
+   autoload :Backpropagate, 'neuronet/backpropagate'
+   autoload :NoisyBackpropagate, 'neuronet/noisy_backpropagate'
+   autoload :LayerPresets, 'neuronet/layer_presets'
+   autoload :Trainable, 'neuronet/trainable'
+   autoload :NeuronStats, 'neuronet/neuron_stats'
+   autoload :NetworkStats, 'neuronet/network_stats'
+   autoload :InputNeuron, 'neuronet/input_neuron'
+   autoload :MiddleNeuron, 'neuronet/middle_neuron'
+   autoload :OutputNeuron, 'neuronet/output_neuron'
+   autoload :Neuron, 'neuronet/neuron'
+   autoload :NoisyMiddleNeuron, 'neuronet/noisy_middle_neuron'
+   autoload :NoisyOutputNeuron, 'neuronet/noisy_output_neuron'
+   autoload :NoisyNeuron, 'neuronet/noisy_neuron'
+   autoload :InputLayer, 'neuronet/input_layer'
+   autoload :MiddleLayer, 'neuronet/middle_layer'
+   autoload :OutputLayer, 'neuronet/output_layer'
+   autoload :Layer, 'neuronet/layer'
+   autoload :Perceptron, 'neuronet/perceptron'
+   autoload :MLP, 'neuronet/mlp'
+   autoload :Deep, 'neuronet/deep'
+   autoload :FeedForward, 'neuronet/feed_forward'
  end
metadata CHANGED
@@ -1,121 +1,57 @@
  --- !ruby/object:Gem::Specification
  name: neuronet
  version: !ruby/object:Gem::Version
-   version: 7.0.230416
+   version: 8.0.251113
  platform: ruby
  authors:
  - CarlosJHR64
- autorequire:
  bindir: bin
  cert_chain: []
- date: 2023-04-16 00:00:00.000000000 Z
- dependencies:
- - !ruby/object:Gem::Dependency
-   name: colorize
-   requirement: !ruby/object:Gem::Requirement
-     requirements:
-     - - "~>"
-       - !ruby/object:Gem::Version
-         version: '0.8'
-     - - ">="
-       - !ruby/object:Gem::Version
-         version: 0.8.1
-   type: :development
-   prerelease: false
-   version_requirements: !ruby/object:Gem::Requirement
-     requirements:
-     - - "~>"
-       - !ruby/object:Gem::Version
-         version: '0.8'
-     - - ">="
-       - !ruby/object:Gem::Version
-         version: 0.8.1
- - !ruby/object:Gem::Dependency
-   name: parser
-   requirement: !ruby/object:Gem::Requirement
-     requirements:
-     - - "~>"
-       - !ruby/object:Gem::Version
-         version: '3.2'
-     - - ">="
-       - !ruby/object:Gem::Version
-         version: 3.2.1
-   type: :development
-   prerelease: false
-   version_requirements: !ruby/object:Gem::Requirement
-     requirements:
-     - - "~>"
-       - !ruby/object:Gem::Version
-         version: '3.2'
-     - - ">="
-       - !ruby/object:Gem::Version
-         version: 3.2.1
- - !ruby/object:Gem::Dependency
-   name: rubocop
-   requirement: !ruby/object:Gem::Requirement
-     requirements:
-     - - "~>"
-       - !ruby/object:Gem::Version
-         version: '1.45'
-     - - ">="
-       - !ruby/object:Gem::Version
-         version: 1.45.1
-   type: :development
-   prerelease: false
-   version_requirements: !ruby/object:Gem::Requirement
-     requirements:
-     - - "~>"
-       - !ruby/object:Gem::Version
-         version: '1.45'
-     - - ">="
-       - !ruby/object:Gem::Version
-         version: 1.45.1
- - !ruby/object:Gem::Dependency
-   name: test-unit
-   requirement: !ruby/object:Gem::Requirement
-     requirements:
-     - - "~>"
-       - !ruby/object:Gem::Version
-         version: '3.5'
-     - - ">="
-       - !ruby/object:Gem::Version
-         version: 3.5.7
-   type: :development
-   prerelease: false
-   version_requirements: !ruby/object:Gem::Requirement
-     requirements:
-     - - "~>"
-       - !ruby/object:Gem::Version
-         version: '3.5'
-     - - ">="
-       - !ruby/object:Gem::Version
-         version: 3.5.7
+ date: 2025-11-14 00:00:00.000000000 Z
+ dependencies: []
  description: |
    Library to create neural networks.

-   This is primarily a math project meant to be used to investigate the behavior of
-   different small neural networks.
+   Features perceptron, MLP, and deep feed forward networks.
+   Uses a logistic squash function.
  email: carlosjhr64@gmail.com
  executables: []
  extensions: []
  extra_rdoc_files: []
  files:
+ - CREDITS.md
  - README.md
  - lib/neuronet.rb
+ - lib/neuronet/arrayable.rb
+ - lib/neuronet/backpropagate.rb
+ - lib/neuronet/config.rb
  - lib/neuronet/connection.rb
- - lib/neuronet/constants.rb
+ - lib/neuronet/deep.rb
+ - lib/neuronet/exportable.rb
  - lib/neuronet/feed_forward.rb
- - lib/neuronet/gaussian.rb
+ - lib/neuronet/input_layer.rb
+ - lib/neuronet/input_neuron.rb
  - lib/neuronet/layer.rb
- - lib/neuronet/log_normal.rb
+ - lib/neuronet/layer_presets.rb
+ - lib/neuronet/middle_layer.rb
+ - lib/neuronet/middle_neuron.rb
+ - lib/neuronet/mlp.rb
+ - lib/neuronet/network_stats.rb
  - lib/neuronet/neuron.rb
- - lib/neuronet/scale.rb
- - lib/neuronet/scaled_network.rb
+ - lib/neuronet/neuron_stats.rb
+ - lib/neuronet/noisy_backpropagate.rb
+ - lib/neuronet/noisy_middle_neuron.rb
+ - lib/neuronet/noisy_neuron.rb
+ - lib/neuronet/noisy_output_neuron.rb
+ - lib/neuronet/output_layer.rb
+ - lib/neuronet/output_neuron.rb
+ - lib/neuronet/perceptron.rb
+ - lib/neuronet/squash.rb
+ - lib/neuronet/trainable.rb
  homepage: https://github.com/carlosjhr64/neuronet
  licenses:
  - MIT
  metadata: {}
- post_install_message:
  rdoc_options: []
  require_paths:
  - lib
@@ -123,16 +59,14 @@ required_ruby_version: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
-       version: '0'
+       version: '3.4'
  required_rubygems_version: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: '0'
- requirements:
- - 'git: 2.30'
- rubygems_version: 3.4.11
- signing_key:
+ requirements: []
+ rubygems_version: 3.7.2
  specification_version: 4
  summary: Library to create neural networks.
  test_files: []
data/lib/neuronet/constants.rb DELETED
@@ -1,110 +0,0 @@
- # frozen_string_literal: true
-
- # Neuronet module / Constants
- module Neuronet
-   # Neuronet allows one to set the format to use for displaying float values,
-   # mostly used in the inspect methods.
-   # [Docs](https://docs.ruby-lang.org/en/master/format_specifications_rdoc.html)
-   FORMAT = '%.13g'
-
-   # An artificial neural network uses a squash function to determine the
-   # activation value of a neuron. The squash function for Neuronet is the
-   # [Sigmoid function](http://en.wikipedia.org/wiki/Sigmoid_function), which
-   # sets the neuron's activation value between 0.0 and 1.0. This value is
-   # often thought of as on/off or true/false. For classification problems,
-   # activation values near one are considered true while activation values
-   # near 0.0 are considered false. In Neuronet I make a distinction between
-   # the neuron's activation value and its representation to the problem. The
-   # activation attribute need never appear in an implementation of Neuronet,
-   # but it is mapped back to its unsquashed value every time the
-   # implementation asks for the neuron's value. One should scale the problem
-   # with most data points between -1 and 1, extremes within two standard
-   # deviations, and no outliers beyond three. Standard deviations from the
-   # mean are probably a good way to figure the scale of the problem.
-   SQUASH = ->(unsquashed) { 1.0 / (1.0 + Math.exp(-unsquashed)) }
-   UNSQUASH = ->(squashed) { Math.log(squashed / (1.0 - squashed)) }
-   DERIVATIVE = ->(squash) { squash * (1.0 - squash) }
-
-   # I'll want to have a neuron roughly mirror another later. Let [v] be the
-   # squash of v. Consider:
-   #   v = b + w*[v]
-   # There are no constants b and w that satisfy the above equation for all v.
-   # But one can satisfy the equation for v in {-1, 0, 1}. Find b and w such
-   # that:
-   #   A: 0 = b + w*[0]
-   #   B: 1 = b + w*[1]
-   #   C: -1 = b + w*[-1]
-   # Use A and B to solve for b and w:
-   #   A: 0 = b + w*[0]
-   #      b = -w*[0]
-   #   B: 1 = b + w*[1]
-   #      1 = -w*[0] + w*[1]
-   #      1 = w*(-[0] + [1])
-   #      w = 1/([1] - [0])
-   #      b = -[0]/([1] - [0])
-   # Verify A, B, and C:
-   #   A: 0 = b + w*[0]
-   #      0 = -[0]/([1] - [0]) + [0]/([1] - [0])
-   #      0 = 0 # OK
-   #   B: 1 = b + w*[1]
-   #      1 = -[0]/([1] - [0]) + [1]/([1] - [0])
-   #      1 = ([1] - [0])/([1] - [0])
-   #      1 = 1 # OK
-   # Using the squash function identity, [v] = 1 - [-v]:
-   #   C: -1 = b + w*[-1]
-   #      -1 = -[0]/([1] - [0]) + [-1]/([1] - [0])
-   #      -1 = ([-1] - [0])/([1] - [0])
-   #      [0] - [1] = [-1] - [0]
-   #      [0] - [1] = 1 - [1] - [0] # Identity substitution.
-   #      [0] = 1 - [0] # OK, since [0] = 0.5.
-   # Evaluate given that [0] = 0.5:
-   #   b = -[0]/([1] - [0])
-   #   b = [0]/([0] - [1])
-   #   b = 0.5/(0.5 - [1])
-   #   w = 1/([1] - [0])
-   #   w = 1/([1] - 0.5)
-   #   w = -2 * 0.5/(0.5 - [1])
-   #   w = -2 * b
-   BZERO = 0.5 / (0.5 - SQUASH[1.0])
-   WONE = -2.0 * BZERO
-
-   # Although the implementation is free to set all parameters for each neuron,
-   # Neuronet by default creates zeroed neurons. Associations between inputs
-   # and outputs are trained, and neurons differentiate from each other
-   # randomly. Differentiation among neurons is achieved by noise in the
-   # back-propagation of errors. This noise is provided by rand + rand, chosen
-   # to give the noise an average value of one and a bell-shaped distribution.
-   NOISE = ->(error) { error * (rand + rand) }
-
-   # One may choose not to have noise.
-   NO_NOISE = ->(error) { error }
-
-   # To keep components bounded, Neuronet limits the weights, biases, and
-   # values. Note that on a 64-bit machine SQUASH[37] rounds to 1.0, and
-   # SQUASH[9] is 0.99987...
-   MAXW = 9.0  # Maximum weight
-   MAXB = 18.0 # Maximum bias
-   MAXV = 36.0 # Maximum value
-
-   # Mu learning factor.
-   LEARNING = 1.0
-
-   # The above constants are the defaults for Neuronet. They are set below in
-   # accessible module attributes. The user may change these to suit their
-   # needs.
-   class << self
-     attr_accessor :format, :squash, :unsquash, :derivative, :bzero, :wone,
-                   :noise, :maxw, :maxb, :maxv, :learning
-   end
-   self.squash = SQUASH
-   self.unsquash = UNSQUASH
-   self.derivative = DERIVATIVE
-   self.bzero = BZERO
-   self.wone = WONE
-   self.noise = NOISE
-   self.format = FORMAT
-   self.maxw = MAXW
-   self.maxb = MAXB
-   self.maxv = MAXV
-   self.learning = LEARNING
- end
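The removed BZERO/WONE derivation checks out numerically; a standalone verification in plain Ruby (no gem required):

  squash = ->(v) { 1.0 / (1.0 + Math.exp(-v)) }
  bzero  = 0.5 / (0.5 - squash[1.0])  # => ~ -2.164
  wone   = -2.0 * bzero               # => ~  4.328
  # b + w*[v] recovers v at the three anchor points:
  [-1.0, 0.0, 1.0].map { |v| bzero + wone * squash[v] } # => ~ [-1.0, 0.0, 1.0]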
data/lib/neuronet/gaussian.rb DELETED
@@ -1,19 +0,0 @@
- # frozen_string_literal: true
-
- # Neuronet module
- module Neuronet
-   # "Normal Distribution"
-   # Gaussian sub-classes Scale and is used exactly the same way. The only
-   # changes are that it calculates the arithmetic mean (average) for center
-   # and the standard deviation for spread.
-   class Gaussian < Scale
-     def set(inputs)
-       @center ||= inputs.sum.to_f / inputs.length
-       unless @spread
-         sum2 = inputs.map { @center - _1 }.sum { _1 * _1 }.to_f
-         @spread = Math.sqrt(sum2 / (inputs.length - 1.0))
-       end
-       self
-     end
-   end
- end
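For reference, the removed Gaussian computed center as the arithmetic mean and spread as the sample standard deviation; the same arithmetic by hand:

  inputs = [2.0, 4.0, 6.0]
  center = inputs.sum / inputs.length  # => 4.0
  spread = Math.sqrt(inputs.sum { (center - _1)**2 } / (inputs.length - 1))
  # => 2.0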
data/lib/neuronet/log_normal.rb DELETED
@@ -1,21 +0,0 @@
- # frozen_string_literal: true
-
- # Neuronet module
- module Neuronet
-   # "Log-Normal Distribution"
-   # LogNormal sub-classes Gaussian to transform the values to a logarithmic
-   # scale.
-   class LogNormal < Gaussian
-     def set(inputs)
-       super(inputs.map { |value| Math.log(value) })
-     end
-
-     def mapped(inputs)
-       super(inputs.map { |value| Math.log(value) })
-     end
-
-     def unmapped(outputs)
-       super(outputs).map { |value| Math.exp(value) }
-     end
-   end
- end
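LogNormal was simply Gaussian applied in log space: inputs pass through Math.log before center and spread are computed, and unmapped outputs pass back through Math.exp. For example:

  [1.0, Math::E, Math::E**2].map { Math.log(_1) } # => [0.0, 1.0, 2.0]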
data/lib/neuronet/scale.rb DELETED
@@ -1,50 +0,0 @@
- # frozen_string_literal: true
-
- # Neuronet module
- module Neuronet
-   # Neuronet::Scale is a class to help scale problems to fit within a
-   # network's "field of view". Given a list of values, it finds the minimum
-   # and maximum values and establishes a mapping to a scaled set of numbers
-   # between minus one and one (-1, 1).
-   class Scale
-     attr_accessor :spread, :center
-
-     # If the value of center is provided, then
-     # that value will be used instead of
-     # calculating it from the values passed to method #set.
-     # Likewise, if spread is provided, that value of spread will be used.
-     def initialize(factor: 1.0, center: nil, spread: nil)
-       @factor = factor
-       @center = center
-       @spread = spread
-     end
-
-     def set(inputs)
-       min, max = inputs.minmax
-       @center ||= (max + min) / 2.0
-       @spread ||= (max - min) / 2.0
-       self
-     end
-
-     def reset(inputs)
-       @center = @spread = nil
-       set(inputs)
-     end
-
-     def mapped(inputs)
-       factor = 1.0 / (@factor * @spread)
-       inputs.map { |value| factor * (value - @center) }
-     end
-     alias mapped_input mapped
-     alias mapped_output mapped
-
-     # Note that it could also unmap inputs, but
-     # outputs are typically what's being transformed back.
-     def unmapped(outputs)
-       factor = @factor * @spread
-       outputs.map { |value| (factor * value) + @center }
-     end
-     alias unmapped_input unmapped
-     alias unmapped_output unmapped
-   end
- end
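A round trip through the removed Scale class (this runs only against 7.0.230416, where the class still exists):

  require 'neuronet' # version 7.0.230416
  scale = Neuronet::Scale.new.set([2.0, 4.0, 10.0]) # center 6.0, spread 4.0
  scale.mapped([2.0, 6.0, 10.0])   # => [-1.0, 0.0, 1.0]
  scale.unmapped([-1.0, 0.0, 1.0]) # => [2.0, 6.0, 10.0]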
data/lib/neuronet/scaled_network.rb DELETED
@@ -1,50 +0,0 @@
- # frozen_string_literal: true
-
- # Neuronet module
- module Neuronet
-   # ScaledNetwork is a subclass of FeedForward.
-   # It automatically scales the problem given to it
-   # by using a Scale type instance set in @distribution.
-   # The attribute, @distribution, is set to Neuronet::Gaussian.new by default,
-   # but one can change this to Scale, LogNormal, or one's own custom mapper.
-   class ScaledNetwork < FeedForward
-     attr_accessor :distribution, :reset
-
-     def initialize(layers, distribution: Gaussian.new, reset: false)
-       super(layers)
-       @distribution = distribution
-       @reset = reset
-     end
-
-     # ScaledNetwork's set works just like FeedForward's set method,
-     # but calls @distribution.reset(input) first if @reset is true.
-     # Sometimes you'll want to set the distribution with the entire data set,
-     # and then there will be times you'll want to reset the distribution
-     # with each input.
-     def set(input)
-       @distribution.reset(input) if @reset
-       super(@distribution.mapped_input(input))
-     end
-
-     def input
-       @distribution.unmapped_input(super)
-     end
-
-     def output
-       @distribution.unmapped_output(super)
-     end
-
-     def *(_other)
-       @distribution.unmapped_output(super)
-     end
-
-     def train(target, mju = expected_mju)
-       super(@distribution.mapped_output(target), mju)
-     end
-
-     def inspect
-       distribution = @distribution.class.to_s.split(':').last
-       "#distribution:#{distribution} #reset:#{@reset}\n" + super
-     end
-   end
- end
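And a hedged usage sketch of the removed ScaledNetwork, assuming the 7.0 FeedForward constructor accepts an array of layer sizes (its code is not shown in this diff):

  require 'neuronet' # version 7.0.230416
  net = Neuronet::ScaledNetwork.new([3, 3], reset: true)
  net.set([10.0, 203.0, 0.1]) # with reset: true, the distribution re-scales per input
  net.output                  # outputs unmapped back into problem space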