neuronet 7.0.230416 → 8.0.251113

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
-   metadata.gz: 8ae4f34320ea739510cafcdf20b058723dadcb9ac4fde8313529c91c444a5ec1
-   data.tar.gz: a1b493bcc891960a96d279203fa8c50959135832cb19946ed0f21150b984a4e1
+   metadata.gz: 34f23ce75292128094b31f4eac5245ac41a7b3cf42026aa4bbb2ce74e19fd255
+   data.tar.gz: ec4105888e4bc67361a9f4d1bde5edbfe94f7363bbe1233e39fc29fc3c2e6de7
  SHA512:
-   metadata.gz: 7a9b58b4f67ebdd293af8753a80ad6f94963a9d97f20996a6e225bb34ee79344f6766ed4fac7520ad57742f818083b0879a6d7a54fab0ba878d7a3838abc513c
-   data.tar.gz: f19e93126a1f485189d6d4ca1599617aebd10582eaa5757ffb5a446a39eba0389692fea85b8b9305fdb997788bed7421252b078d7ac06fa305f3568ab400b8fb
+   metadata.gz: 5222bf678484647b6911c489d304206ed3730438b7810a0a19e08a633a2e422e0afb2ff7cbb84272ffc7080136d361dd1d1ee9cdd3424b21aea6b274bd6f306c
+   data.tar.gz: 4082141fd54fedf66e3449334b5546abe0b78ee6306eee2e3acd15864ff93b446af8f0a0ed520d27e98c991f7bd3a62222b00b652d0289616ea5ea2c167eb5c7
data/CREDITS.md ADDED
@@ -0,0 +1,10 @@
+ # Credits
+
+ We want to credit where credit is due.
+ This project owes thanks to the authors of this repository (see their commits),
+ the third-party libraries we rely on (check the dependencies),
+ and the diffuse pool of ideas and inspirations that shaped it.
+
+ We welcome contributions to this list!
+ If you feel someone or something is missing,
+ please submit a pull request with the suggested addition.
data/README.md CHANGED
@@ -1,118 +1,77 @@
  # Neuronet

- * [VERSION 7.0.230416](https://github.com/carlosjhr64/neuronet/releases)
- * [github](https://github.com/carlosjhr64/neuronet)
- * [rubygems](https://rubygems.org/gems/neuronet)
+ * [VERSION 8.0.251113](https://github.com/carlosjhr64/neuronet/releases)
+ * [github](https://www.github.com/carlosjhr64/neuronet)
+ * [rubygems](https://rubygems.org/neuronet)

- ## DESCRIPTION:
+ ## DESCRIPTION

  Library to create neural networks.

- This is primarily a math project meant to be used to investigate the behavior of
- different small neural networks.
+ Features perceptron, MLP, and deep feed forward networks.
+ Uses a logistic squash function.

- ## INSTALL:
+ ## INSTALL
  ```console
- gem install neuronet
+ $ gem install neuronet
  ```
- ## SYNOPSIS:
+ * Required Ruby version: `>= 3.4`

- The library is meant to be read, but here is a motivating example:
+ ## SYNOPSIS
+
+ The library is meant to be read, but here are some quick bits:
  ```ruby
  require 'neuronet'
- include Neuronet

- ff = FeedForward.new([3,3])
+ # Perceptron
  # It can mirror, equivalent to "copy":
- ff.last.mirror
- values = ff * [-1, 0, 1]
- values.map { '%.13g' % _1 } #=> ["-1", "0", "1"]
+ np = Neuronet::Perceptron.new(3, 3)
+ np.output_layer.mirror
+ values = np * [-1, 0, 1] #=> [-1.0, 0.0, 1.0]
  # It can anti-mirror, equivalent to "not":
- ff.last.mirror(-1)
- values = ff * [-1, 0, 1]
- values.map { '%.13g' % _1 } #=> ["1", "0", "-1"]
-
- # It can "and";
- ff = FeedForward.new([2,2,1])
- ff[1].mirror(-1)
- ff.last.connect(ff.first)
- ff.last.average
- # Training "and" pairs:
+ np.output_layer.mirror(-1)
+ values = np * [-1, 0, 1] #=> [1.0, 0.0, -1.0]
+
+ # MLP: Multi-Layer(3) Perceptron
+ # It can "and".
+ # In this example, NoisyMiddleNeuron is needed to differentiate the neurons:
+ mlp = Neuronet::MLP.new(2, 4, 1,
+                         middle_neuron: Neuronet::NoisyMiddleNeuron)
  pairs = [
    [[1, 1], [1]],
    [[-1, 1], [-1]],
    [[1, -1], [-1]],
    [[-1, -1], [-1]],
  ]
- # Train until values match:
- ff.pairs(pairs) do
-   pairs.any? { |input, target| (ff * input).map { _1.round(1) } != target }
+ while pairs.any? { |input, target| (mlp * input).map(&:round) != target }
+   mlp.pairs(pairs) # Training...
  end
- (ff * [-1, -1]).map{ _1.round } #=> [-1]
- (ff * [-1, 1]).map{ _1.round } #=> [-1]
- (ff * [ 1, -1]).map{ _1.round } #=> [-1]
- (ff * [ 1, 1]).map{ _1.round } #=> [1]
-
- # It can "or";
- ff = FeedForward.new([2,2,1])
- ff[1].mirror(-1)
- ff.last.connect(ff.first)
- ff.last.average
- # Training "or" pairs:
- pairs = [
-   [[1, 1], [1]],
-   [[-1, 1], [1]],
-   [[1, -1], [1]],
-   [[-1, -1], [-1]],
- ]
- # Train until values match:
- ff.pairs(pairs) do
-   pairs.any? { |input, target| (ff * input).map { _1.round(1) } != target }
- end
- (ff * [-1, -1]).map{ _1.round } #=> [-1]
- (ff * [-1, 1]).map{ _1.round } #=> [1]
- (ff * [ 1, -1]).map{ _1.round } #=> [1]
- (ff * [ 1, 1]).map{ _1.round } #=> [1]
+ (mlp * [1, 1]).map(&:round) #=> [1]
+ (mlp * [-1, 1]).map(&:round) #=> [-1]
+ (mlp * [1, -1]).map(&:round) #=> [-1]
+ (mlp * [-1, -1]).map(&:round) #=> [-1]
+
+ # To export to a file:
+ # mlp.export_to_file(filename)
+ # To import from a file:
+ # mlp.import_from_file(filename)
+ # These will export/import the network's biases and weights.
  ```
- ## CONTENTS:
-
- * [Neuronet wiki](https://github.com/carlosjhr64/neuronet/wiki)
+ ## HELP

- ### Mju
+ When reading the library, this is the order in which I would read it:

- Mju is a Marklar which value depends on which Marklar is asked.
- Other known Marklars are Mu and Kappa.
- Hope it's not confusing...
- I tried to give related Marklars the same name.
- ![Marklar](img/marklar.png)
-
- ### Marshal
-
- Marshal works with Neuronet to save your networks:
- ```ruby
- dump = Marshal.dump ff
- ff2 = Marshal.load dump
- ff2.inspect == ff.inspect #=> true
- ```
- ### Base
-
- * [Requires and autoloads](lib/neuronet.rb)
- * [Constants and lambdas](lib/neuronet/constants.rb)
- * [Connection](lib/neuronet/connection.rb)
  * [Neuron](lib/neuronet/neuron.rb)
  * [Layer](lib/neuronet/layer.rb)
- * [FeedForward](lib/neuronet/feed_forward.rb)
-
- ### Scaled
+ * [Feed Forward](lib/neuronet/feed_forward.rb)

- * [Scale](lib/neuronet/scale.rb)
- * [Gaussian](lib/neuronet/gaussian.rb)
- * [LogNormal](lib/neuronet/log_normal.rb)
- * [ScaledNetwork](lib/neuronet/scaled_network.rb)
+ Once you understand these files, the rest should all make sense.
+ For some math on neural networks,
+ see the [Wiki](https://github.com/carlosjhr64/neuronet/wiki).

- ## LICENSE:
+ ## LICENSE

- Copyright (c) 2023 CarlosJHR64
+ Copyright (c) 2025 CarlosJHR64

  Permission is hereby granted, free of charge,
  to any person obtaining a copy of this software and
@@ -135,3 +94,5 @@ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
  DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
  TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH
  THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ ## [CREDITS](CREDITS.md)
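The new SYNOPSIS names `export_to_file` and `import_from_file` only in comments. A minimal round-trip sketch, assuming the `Neuronet::MLP` constructor shown above; the filename `and.net` is a made-up placeholder, not from the gem:

```ruby
require 'neuronet'

# Sketch: persist a trained network, then restore it into a fresh one.
mlp = Neuronet::MLP.new(2, 4, 1,
                        middle_neuron: Neuronet::NoisyMiddleNeuron)
mlp.export_to_file('and.net') # writes biases and weights, nothing else

# A new network with the SAME layer sizes can absorb the saved parameters:
copy = Neuronet::MLP.new(2, 4, 1,
                         middle_neuron: Neuronet::NoisyMiddleNeuron)
copy.import_from_file('and.net') # import raises on any size mismatch
```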
data/lib/neuronet/arrayable.rb ADDED
@@ -0,0 +1,13 @@
+ # frozen_string_literal: true
+
+ module Neuronet
+   # Arrayable avoids explicit `to_a` calls for common Array methods.
+   module Arrayable
+     def each(&blk) = to_a.each { blk[it] }
+     def each_with_index(&blk) = to_a.each_with_index { |n, i| blk[n, i] }
+     def [](index) = to_a[index]
+     def map(&) = to_a.map(&)
+     def size = to_a.size
+     def reverse = to_a.reverse
+   end
+ end
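Arrayable delegates everything through `to_a`, so any class that defines `to_a` picks up the Array-ish surface. A sketch with a hypothetical `Trio` class (not part of the gem):

```ruby
require 'neuronet'

# Hypothetical including class: only #to_a is required.
class Trio
  include Neuronet::Arrayable
  def to_a = [10, 20, 30]
end

trio = Trio.new
trio.size           #=> 3
trio[1]             #=> 20
trio.map { it * 2 } #=> [20, 40, 60] (`it` needs Ruby >= 3.4)
```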
data/lib/neuronet/backpropagate.rb ADDED
@@ -0,0 +1,25 @@
+ # frozen_string_literal: true
+
+ module Neuronet
+   # Backpropagate provides simple, clamp-limited weight/bias updates.
+   module Backpropagate
+     # Back-propagates errors, updating bias and connection weights.
+     # Clamps updates to [-max, +max].
+     # Recursively calls on connected neurons.
+     # rubocop: disable Metrics, Style
+     def backpropagate(error)
+       bmax = Config.bias_clamp
+       b = bias + error
+       self.bias = b.abs > bmax ? (b.positive? ? bmax : -bmax) : b
+
+       wmax = Config.weight_clamp
+       connections.each do |c|
+         n = c.neuron
+         w = c.weight + (n.activation * error)
+         c.weight = w.abs > wmax ? (w.positive? ? wmax : -wmax) : w
+         n.backpropagate(error)
+       end
+     end
+     # rubocop: enable Metrics, Style
+   end
+ end
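The two ternaries are just saturating updates. Here is the same clamp arithmetic in isolation, with made-up numbers (a sketch, not gem code):

```ruby
# Saturating update, as in Backpropagate#backpropagate.
bmax = 18.0 # Config.bias_clamp default
update = ->(b) { b.abs > bmax ? (b.positive? ? bmax : -bmax) : b }

update.call(17.5 + 2.0) #=> 18.0  (19.5 clamps down to +bmax)
update.call(17.5 - 2.0) #=> 15.5  (within range, passes through)
update.call(-20.0)      #=> -18.0 (clamps to -bmax)
```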
data/lib/neuronet/config.rb ADDED
@@ -0,0 +1,10 @@
+ # frozen_string_literal: true
+
+ module Neuronet
+   # Maximum values for biases and weights
+   module Config
+     class << self; attr_accessor :bias_clamp, :weight_clamp; end
+     self.bias_clamp = 18.0
+     self.weight_clamp = 9.0
+   end
+ end
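Because the clamps are plain module-level accessors, they can presumably be retuned before training; a sketch:

```ruby
require 'neuronet'

# Defaults are 18.0 (bias) and 9.0 (weight); tighten them for a run.
Neuronet::Config.bias_clamp   = 6.0
Neuronet::Config.weight_clamp = 3.0
Neuronet::Config.weight_clamp #=> 3.0
```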
data/lib/neuronet/connection.rb CHANGED
@@ -1,65 +1,9 @@
  # frozen_string_literal: true

- # Neuronet module / Connection class
  module Neuronet
-   # Connections between neurons are there own separate objects. In Neuronet, a
-   # neuron contains it's bias, and a list of it's connections. Each connection
-   # contains it's weight (strength) and connected neuron.
-   class Connection
-     attr_accessor :neuron, :weight
-
-     # Connection#initialize takes a neuron and a weight with a default of 0.0.
-     def initialize(neuron = Neuron.new, weight: 0.0)
-       @neuron = neuron
-       @weight = weight
-     end
-
-     # The connection's mu is the activation of the connected neuron.
-     def mu = @neuron.activation
-     alias activation mu
-
-     # The connection's mju is 𝑾𝓑𝒂'.
-     def mju = @weight * @neuron.derivative
-
-     # The connection kappa is a component of the neuron's sum kappa:
-     #   𝜿 := 𝑾 𝝀'
-     def kappa = @weight * @neuron.lamda
-
-     # The weighted activation of the connected neuron.
-     def weighted_activation = @neuron.activation * @weight
-
-     # Consistent with #update
-     alias partial weighted_activation
-
-     # Connection#update returns the updated activation of a connection, which is
-     # the weighted updated activation of the neuron it's connected to:
-     #   weight * neuron.update
-     # This method is the one to use whenever the value of the inputs are changed
-     # (or right after training). Otherwise, both update and value should give
-     # the same result. When back calculation are not needed, use
-     # Connection#weighted_activation instead.
-     def update = @neuron.update * @weight
-
-     # Connection#backpropagate modifies the connection's weight in proportion to
-     # the error given and passes that error to its connected neuron via the
-     # neuron's backpropagate method.
-     def backpropagate(error)
-       @weight += @neuron.activation * Neuronet.noise[error]
-       if @weight.abs > Neuronet.maxw
-         @weight = @weight.positive? ? Neuronet.maxw : -Neuronet.maxw
-       end
-       @neuron.backpropagate(error)
-       self
-     end
-     # On how to reduce the error, the above makes it obvious how to interpret
-     # the equipartition of errors among the connections. Backpropagation is
-     # symmetric to forward propagation of errors. The error variable is the
-     # reduced error, 𝛆(see the wiki notes).
-
-     # A connection inspects itself as "weight*label:...".
-     def inspect = "#{Neuronet.format % @weight}*#{@neuron.inspect}"
-
-     # A connection puts itself as "weight*label".
-     def to_s = "#{Neuronet.format % @weight}*#{@neuron}"
+   # Connection is a lightweight struct for weighted neuron links.
+   Connection = Struct.new('Connection', :neuron, :weight) do
+     # Weighted activation value
+     def value = neuron.activation * weight
    end
  end
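The Struct keeps only `neuron`, `weight`, and the derived `value`; `#value` needs nothing from the neuron but `#activation`. A sketch with a stand-in neuron:

```ruby
require 'neuronet'

# Stand-in neuron: Connection#value only calls #activation on it.
FakeNeuron = Struct.new(:activation)

c = Neuronet::Connection.new(FakeNeuron.new(0.5), 0.8)
c.value        #=> 0.4 (activation * weight)
c.weight = 1.0 # Struct members stay writable, as backpropagation needs
c.value        #=> 0.5
```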
data/lib/neuronet/deep.rb ADDED
@@ -0,0 +1,52 @@
+ # frozen_string_literal: true
+
+ module Neuronet
+   # Feed Forward
+   class Deep
+     include NetworkStats
+     include Exportable
+     include Trainable
+     include Arrayable
+
+     # rubocop: disable Metrics
+     def initialize(*sizes, input_neuron: InputNeuron,
+                    middle_neuron: MiddleNeuron,
+                    output_neuron: OutputNeuron)
+       length = sizes.length
+       raise 'Need at least 3 layers' if length < 3
+
+       @input_layer = InputLayer.new(sizes.shift, input_neuron:)
+       @output_layer = OutputLayer.new(sizes.pop, output_neuron:)
+       @hidden_layers = sizes.map { MiddleLayer.new(it, middle_neuron:) }
+       previous = @input_layer
+       @hidden_layers.each do |layer|
+         layer.connect(previous)
+         previous = layer
+       end
+       @output_layer.connect(previous)
+     end
+     # rubocop: enable Metrics
+
+     attr_reader :input_layer, :hidden_layers, :output_layer
+
+     def set(values)
+       @input_layer.set(values)
+     end
+
+     def update
+       @hidden_layers.each(&:update)
+     end
+
+     def values
+       @output_layer.values
+     end
+
+     def *(other)
+       set(other)
+       update
+       values
+     end
+
+     def to_a = [@input_layer, *@hidden_layers, @output_layer]
+   end
+ end
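Deep differs from FeedForward (below) by wiring distinct input/middle/output neuron classes instead of one `full_neuron` class throughout. A usage sketch, assuming the default neuron and layer classes named above:

```ruby
require 'neuronet'

# 2 inputs, two hidden layers (4 then 3 neurons), 1 output.
deep = Neuronet::Deep.new(2, 4, 3, 1)
deep.hidden_layers.size #=> 2
deep * [0.1, -0.2]      # forward pass: set, update, values
```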
data/lib/neuronet/exportable.rb ADDED
@@ -0,0 +1,67 @@
+ # frozen_string_literal: true
+
+ module Neuronet
+   # Exportable serializes network biases and weights only.
+   # Human-readable, compact, excludes activations.
+   module Exportable
+     # Writes serialized network to writer (from self).
+     # rubocop: disable Metrics
+     def export(writer)
+       sizes = map(&:size)
+       writer.puts "# #{self.class}"
+       # The first "float" here is the number of layers in the FFN...
+       # Just to be consistent:
+       writer.puts "#{sizes.size.to_f} #{sizes.join(' ')}"
+       each_with_index do |layer, i|
+         next if i.zero? # skip input layer
+
+         layer.each_with_index do |neuron, j|
+           writer.puts "# neuron = FFN[#{i}, #{j}]"
+           writer.puts "#{neuron.bias} #{i} #{j}"
+           neuron.connections.each_with_index do |connection, k|
+             writer.puts "#{connection.weight} #{i} #{j} #{k}"
+           end
+         end
+       end
+     end
+     # rubocop: enable Metrics
+
+     def export_to_file(filename) = File.open(filename, 'w') { export it }
+     def import_from_file(filename) = File.open(filename, 'r') { import it }
+
+     # Reads and validates serialized network from reader to set self.
+     # rubocop: disable Metrics
+     def import(reader)
+       gets_data = lambda do |reader|
+         return nil unless (line = reader.gets)
+
+         line = reader.gets while line.start_with?('#')
+         fs, *is = line.strip.split
+         [fs.to_f, *is.map(&:to_i)]
+       end
+
+       size, *sizes = gets_data[reader]
+       raise 'Size/Sizes mismatch' unless size == sizes.size
+       raise 'Sizes mismatch' unless sizes == map(&:size)
+
+       each_with_index do |layer, i|
+         next if i.zero? # skip input layer
+
+         layer.each_with_index do |neuron, j|
+           bias, *indeces = gets_data[reader]
+           raise "bad bias index: #{indeces}" unless indeces == [i, j]
+
+           neuron.bias = bias
+           neuron.connections.each_with_index do |connection, k|
+             weight, *indeces = gets_data[reader]
+             raise "bad weight index: #{indeces}" unless indeces == [i, j, k]
+
+             connection.weight = weight
+           end
+         end
+       end
+       raise 'Expected end of file.' unless gets_data[reader].nil?
+     end
+     # rubocop: enable Metrics
+   end
+ end
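Reading `export` top to bottom gives the file format: a class-name comment, one header line with the layer count (written as a float, as the comment notes) and the layer sizes, then per neuron a `bias i j` line followed by `weight i j k` lines; `#` lines are skipped on import. A sketch that dumps a fresh 2-2-1 network to a string; the exact numbers depend on how the neurons initialize, so the commented output shows only the shape:

```ruby
require 'neuronet'
require 'stringio'

ff = Neuronet::FeedForward.new(2, 2, 1)
io = StringIO.new
ff.export(io)
print io.string
# Shape of the output (values depend on initialization):
#   # Neuronet::FeedForward
#   3.0 2 2 1
#   # neuron = FFN[1, 0]
#   <bias> 1 0
#   <weight> 1 0 0
#   <weight> 1 0 1
#   ... and so on for FFN[1, 1] and FFN[2, 0]
```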
data/lib/neuronet/feed_forward.rb CHANGED
@@ -1,89 +1,54 @@
  # frozen_string_literal: true

- # Neuronet module / FeedForward class
  module Neuronet
-   # A Feed Forward Network
-   class FeedForward < Array
+   # FeedForward is a fully connected neural network with >= 3 layers.
+   class FeedForward
+     # [NetworkStats](network_stats.rb)
+     include NetworkStats
+     # [Exportable](exportable.rb)
+     include Exportable
+     # [Trainable](trainable.rb)
+     include Trainable
+     # [Arrayable](arrayable.rb)
+     include Arrayable
+
      # Example:
-     #   ff = Neuronet::FeedForward.new([2, 3, 1])
-     def initialize(layers)
-       length = layers.length
-       raise 'Need at least 2 layers' if length < 2
+     #   ff = Neuronet::FeedForward.new(4, 8, 4)
+     def initialize(*sizes, full_neuron: Neuron)
+       length = sizes.length
+       raise 'Need at least 3 layers' if length < 3

-       super(length) { Layer.new(layers[_1]) }
-       1.upto(length - 1) { self[_1].connect(self[_1 - 1]) }
+       @layers = Array.new(length) { Layer.new(sizes[it], full_neuron:) }
+       1.upto(length - 1) { @layers[it].connect(@layers[it - 1]) }
+       @input_layer = @layers[0]
+       @output_layer = @layers[-1]
+       @hidden_layers = @layers[1...-1]
      end

-     # Set the input layer.
-     def set(input)
-       first.set(input)
-       self
-     end
+     attr_reader :input_layer, :hidden_layers, :output_layer

-     def input = first.values
+     # Sets the input values
+     def set(values)
+       @input_layer.set(values)
+     end

-     # Update the network.
+     # Updates hidden layers (input assumed set).
      def update
-       # update up the layers
-       1.upto(length - 1) { self[_1].partial }
-       self
+       @hidden_layers.each(&:update)
      end

-     def output = last.values
+     # Gets output
+     def values
+       @output_layer.values
+     end

-     # Consider:
-     #   m = Neuronet::FeedForward.new(layers)
-     # Want:
-     #   output = m * input
+     # Forward pass: set input, update, return output.
      def *(other)
        set(other)
        update
-       last.values
-     end
-
-     # 𝝁 + 𝜧 𝝁' + 𝜧 𝜧'𝝁" + 𝜧 𝜧'𝜧"𝝁"' + ...
-     # |𝜧| ~ |𝑾||𝓑𝒂|
-     # |∑𝑾| ~ √𝑁
-     # |𝓑𝒂| ~ ¼
-     # |𝝁| ~ 1+∑|𝒂'| ~ 1+½𝑁
-     def expected_mju!
-       sum = 0.0
-       mju = 1.0
-       reverse[1..].each do |layer|
-         n = layer.length
-         sum += mju * (1.0 + (0.5 * n))
-         mju *= 0.25 * Math.sqrt(layer.length)
-       end
-       @expected_mju = Neuronet.learning * sum
-     end
-
-     def expected_mju
-       @expected_mju || expected_mju!
-     end
-
-     def average_mju
-       last.average_mju
-     end
-
-     def train(target, mju = expected_mju)
-       last.train(target, mju)
-       self
-     end
-
-     def pair(input, target, mju = expected_mju)
-       set(input).update.train(target, mju)
+       values
      end

-     def pairs(pairs, mju = expected_mju)
-       pairs.shuffle.each { |input, target| pair(input, target, mju) }
-       return self unless block_given?
-
-       pairs.shuffle.each { |i, t| pair(i, t, mju) } while yield
-       self
-     end
-
-     def inspect = map(&:inspect).join("\n")
-
-     def to_s = map(&:to_s).join("\n")
+     def to_a = @layers
    end
  end
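With `Arrayable` included and `to_a` returning `@layers`, the class still reads much like the old `Array` subclass it replaces. A sketch:

```ruby
require 'neuronet'

ff = Neuronet::FeedForward.new(4, 8, 4)
ff.size                      #=> 3, via Arrayable#size
ff[0].equal?(ff.input_layer) #=> true, indexing goes through to_a
ff * [0.1, 0.2, 0.3, 0.4]    # forward pass returns 4 output values
```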
data/lib/neuronet/input_layer.rb ADDED
@@ -0,0 +1,19 @@
+ # frozen_string_literal: true
+
+ module Neuronet
+   # Input Layer
+   class InputLayer
+     include Arrayable
+
+     def initialize(length, input_neuron: InputNeuron)
+       @layer = Array.new(length) { input_neuron.new }
+       @endex = length - 1
+     end
+
+     def set(values)
+       0.upto(@endex) { @layer[it].set values[it] }
+     end
+
+     def to_a = @layer
+   end
+ end
data/lib/neuronet/input_neuron.rb ADDED
@@ -0,0 +1,27 @@
+ # frozen_string_literal: true
+
+ module Neuronet
+   # Input Neuron
+   class InputNeuron
+     include NeuronStats
+     include Squash
+
+     EMPTY = [].freeze
+
+     def initialize
+       @activation = 0.5
+     end
+
+     attr_reader :activation
+
+     def bias = nil
+     def connections = EMPTY
+     def value = nil
+
+     def set(value)
+       @activation = squash(value)
+     end
+
+     def backpropagate(_) = nil
+   end
+ end
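The `@activation = 0.5` default lines up with the logistic squash the README names: the logistic of zero is one half. A sketch of the standard logistic, which the gem's `Squash` module is assumed (not quoted) to match:

```ruby
# Standard logistic squash (sketch; assumed to match Neuronet::Squash).
def squash(value) = 1.0 / (1.0 + Math.exp(-value))

squash(0.0)  #=> 0.5, InputNeuron's initial activation
squash(6.0)  #=> ~0.9975 (saturates toward 1.0)
squash(-6.0) #=> ~0.0025 (saturates toward 0.0)
```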