qoa 0.0.1 → 0.0.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: e77f61af824088258ddbbe792ff87b1b068f1a98018aa8f1e8632a566c988b00
-  data.tar.gz: '0749da84fe9312c2f34ea00a56b3e0a58dd12af41ccb5abd9db13d144c1799ca'
+  metadata.gz: a45d057fc9994389ef429aa82b3207a2e30b8e67a880f3ad6e6b57f7e7a79b75
+  data.tar.gz: 273f73d4184545b75ec338f75c9ea39b5e4e2cc6e2582091bab036e93f06b320
 SHA512:
-  metadata.gz: 7c3bed59d35246abcd4ae32b4f4ecad352bc3834d01ef2d08e07fb2e60686c2cf906211e0bcee9741413e17ad8164d9b6e2bbb28c2a9d987e61d9b92afc40881
-  data.tar.gz: 01f25c01f8bfee1ade459953983bfc2425ca544288ac400870b5a0820f61dfc54620c36f504d909499485efab68c12028e0137c3e6cacd3654b774c8f2384a76
+  metadata.gz: 00eedcfdee31e73c559be6b8c3e75d31c72d2f802a40e9322c82b0bfe6e615ce9f2498d559722fa360d9ebb7d23328f8275c94374b40651d82e3705f869e413a
+  data.tar.gz: c125116a1e5deb7d345315eaa9954e006b198a881864b123b452906dcd1d009f473bbfc82a7cd7f09a67dca7c61de83aefb8156eed4cfde8b5d593858448c017
data/README.md CHANGED
@@ -13,12 +13,18 @@ Qoa is a simple and customizable neural network library for Ruby. It allows you
 
 ## Installation
 
-Simply copy the `neural_network.rb`, `activation_functions.rb`, and `matrix_helpers.rb` files into your project and require them.
+### Install via RubyGems
+
+You can install the gem via RubyGems:
+
+```
+gem install qoa
+```
+
+Then, require the gem in your project:
 
 ```ruby
-require_relative 'neural_network'
-require_relative 'activation_functions'
-require_relative 'matrix_helpers'
+require 'qoa'
 ```
 
 ## Usage
@@ -40,7 +46,30 @@ To create a new neural network, you can initialize an instance of `Qoa::NeuralNe
 Example:
 
 ```ruby
-nn = Qoa::NeuralNetwork.new(784, [128, 64], 10, 0.001, 0.5, :relu, 0.9, 1e-8, 32)
+require 'qoa'
+
+input_nodes = 784 # Number of input features (e.g., 28x28 pixels for MNIST dataset)
+hidden_layers = [128, 64] # Two hidden layers with 128 and 64 nodes each
+output_nodes = 10 # Number of output classes (e.g., 10 for MNIST dataset)
+learning_rate = 0.01
+dropout_rate = 0.5
+activation_func = :relu
+
+nn = Qoa::NeuralNetwork.new(input_nodes, hidden_layers, output_nodes, learning_rate, dropout_rate, activation_func)
+```
+
+### Saving and Loading Models
+
+To save the trained model to a file, call the `save_model` method:
+
+```ruby
+nn.save_model('model.json')
+```
+
+To load a previously saved model, call the `load_model` method:
+
+```ruby
+nn.load_model('model.json')
 ```
 
 ### Training the Neural Network
@@ -98,4 +127,4 @@ Bug reports and pull requests are welcome on GitHub at https://github.com/mmaton
 
 ## License
 
-The library is available as open source under the terms of the [MIT License](http://opensource.org/licenses/Apache-2.0).
+The library is available as open source under the terms of the [Apache-2.0 License](http://opensource.org/licenses/Apache-2.0).
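For orientation, here is a minimal end-to-end sketch (not taken from the package's README) that combines the constructor shown above with the `train` and `query` methods added in `lib/qoa/training.rb` below; the toy dataset, layer sizes, and hyperparameters are invented for illustration.

```ruby
require 'qoa'

# Toy dataset, invented for this sketch: 2 inputs -> 1 output.
inputs  = [[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]]
targets = [[0.0], [1.0], [1.0], [0.0]]

# 2 input nodes, one hidden layer of 4 nodes, 1 output node,
# learning rate 0.1, dropout disabled, sigmoid activation.
nn = Qoa::NeuralNetwork.new(2, [4], 1, 0.1, 0.0, :sigmoid)

500.times { nn.train(inputs, targets) }
puts nn.query([1.0, 0.0]).inspect # flattened output of the final layer
```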
data/lib/qoa/layer.rb ADDED
@@ -0,0 +1,20 @@
+module Qoa
+  class Layer
+    attr_reader :input_size, :output_size, :weights
+
+    def initialize(input_size, output_size)
+      @input_size = input_size
+      @output_size = output_size
+      @weights = random_matrix(output_size, input_size)
+    end
+
+    def random_matrix(rows, cols)
+      limit = Math.sqrt(6.0 / (rows + cols))
+      Array.new(rows) { Array.new(cols) { rand(-limit..limit) } }
+    end
+
+    def weights=(new_weights)
+      @weights = new_weights
+    end
+  end
+end
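The new `Layer` class simply owns one weight matrix, stored as a nested Array of `output_size` rows by `input_size` columns and initialized with a Glorot-style uniform limit of `sqrt(6 / (rows + cols))`. A small illustrative sketch, assuming the class is loadable via `require 'qoa'`:

```ruby
require 'qoa'

layer = Qoa::Layer.new(3, 2)  # 3 inputs -> 2 outputs

layer.input_size          # => 3
layer.output_size         # => 2
layer.weights.size        # => 2 rows (one per output node)
layer.weights.first.size  # => 3 columns (one per input node)
# Each entry is drawn uniformly from -limit..limit, limit = Math.sqrt(6.0 / (2 + 3)).
```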
data/lib/qoa/neural_network.rb CHANGED
@@ -1,10 +1,12 @@
-require 'concurrent'
+require_relative 'layer'
 require_relative 'activation_functions'
-require_relative 'matrix_helpers'
+require_relative 'training'
+require_relative 'utils'
 
 module Qoa
   class NeuralNetwork
-    include MatrixHelpers
+    include Training
+    include Utils
     attr_reader :input_nodes, :hidden_layers, :output_nodes, :learning_rate, :activation_func, :dropout_rate, :decay_rate, :epsilon, :batch_size
 
     def initialize(input_nodes, hidden_layers, output_nodes, learning_rate, dropout_rate, activation_func = :sigmoid, decay_rate = 0.9, epsilon = 1e-8, batch_size = 10)
@@ -18,101 +20,29 @@ module Qoa
       @epsilon = epsilon
       @batch_size = batch_size
 
-      @weights = []
-      @weights << random_matrix(hidden_layers[0], input_nodes)
+      @layers = []
+      @layers << Layer.new(input_nodes, hidden_layers[0])
       hidden_layers.each_cons(2) do |l1, l2|
-        @weights << random_matrix(l2, l1)
+        @layers << Layer.new(l1, l2)
       end
-      @weights << random_matrix(output_nodes, hidden_layers[-1])
+      @layers << Layer.new(hidden_layers[-1], output_nodes)
     end
 
-    def random_matrix(rows, cols)
-      limit = Math.sqrt(6.0 / (rows + cols))
-      Array.new(rows) { Array.new(cols) { rand(-limit..limit) } }
+    def query(inputs)
+      layer_outputs = forward_pass(inputs)
+      layer_outputs.last.flatten
     end
 
-    def train(inputs, targets)
+    def calculate_loss(inputs, targets)
       raise ArgumentError, 'inputs and targets must have the same length' if inputs.size != targets.size
 
-      inputs.zip(targets).each_slice(@batch_size) do |batch|
-        train_batch(batch)
+      total_loss = 0.0
+      inputs.zip(targets).each do |input, target|
+        prediction = query(input)
+        total_loss += mean_squared_error(prediction, target)
       end
-    end
-
-    def train_batch(batch)
-      derivative_func = "#{@activation_func}_derivative"
-      batch_inputs = batch.map { |x, _| x }
-      batch_targets = batch.map { |_, y| y }
-
-      # Forward pass
-      layer_outputs = batch_inputs.map { |inputs| forward_pass(inputs) }
-
-      # Backward pass
-      # Using thread pool to parallelize the backward pass for each input in the batch
-      pool = Concurrent::FixedThreadPool.new(4)
-      weight_deltas = Array.new(@weights.size) { Array.new(@weights[0].size) { Array.new(@weights[0][0].size, 0) } }
-      mutex = Mutex.new
-
-      batch.zip(layer_outputs).each do |(inputs, targets), outputs|
-        pool.post do
-          deltas = backward_pass(inputs, targets, outputs)
-          mutex.synchronize do
-            @weights.each_with_index do |_, i|
-              weight_deltas[i] = matrix_add(weight_deltas[i], deltas[i])
-            end
-          end
-        end
-      end
-
-      pool.shutdown
-      pool.wait_for_termination
-
-      # Update weights
-      @weights.each_with_index do |w, i|
-        @weights[i] = matrix_add(w, scalar_multiply(@learning_rate / batch.size, weight_deltas[i]))
-      end
-    end
-
-    def forward_pass(inputs)
-      inputs = inputs.map { |x| [x] } # Convert to column vector
-
-      layer_outputs = [inputs]
-      @weights.each_with_index do |w, i|
-        layer_inputs = matrix_multiply(w, layer_outputs[-1])
-        layer_outputs << apply_function(layer_inputs, ActivationFunctions.method(@activation_func))
 
-        # Apply dropout to hidden layers
-        layer_outputs[-1] = apply_dropout(layer_outputs[-1], @dropout_rate) if i < @weights.size - 1
-      end
-
-      layer_outputs
-    end
-
-    def backward_pass(inputs, targets, layer_outputs)
-      derivative_func = "#{@activation_func}_derivative"
-      inputs = inputs.map { |x| [x] } # Convert to column vector
-      targets = targets.map { |x| [x] } # Convert to column vector
-
-      # Compute errors
-      errors = [matrix_subtract(targets, layer_outputs.last)]
-      (@weights.size - 1).downto(1) do |i|
-        errors << matrix_multiply(transpose(@weights[i]), errors.last)
-      end
-
-      # Compute weight deltas
-      weight_deltas = []
-      @weights.each_with_index do |w, i|
-        gradients = matrix_multiply_element_wise(errors[i], apply_function(layer_outputs[i + 1], ActivationFunctions.method(derivative_func)))
-        w_delta = matrix_multiply(gradients, transpose(layer_outputs[i]))
-        weight_deltas << w_delta
-      end
-
-      weight_deltas
-    end
-
-    def query(inputs)
-      layer_outputs = forward_pass(inputs)
-      layer_outputs.last.flatten
+      total_loss / inputs.size
     end
   end
 end
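After this change `NeuralNetwork` only builds the `Layer` list and exposes `query` plus the new `calculate_loss`, which averages `mean_squared_error` from `Utils` over a dataset; training and serialization now come from the `Training` and `Utils` mixins. A minimal illustrative sketch, with invented shapes and hyperparameters:

```ruby
require 'qoa'

nn = Qoa::NeuralNetwork.new(2, [4], 1, 0.1, 0.0, :sigmoid)

inputs  = [[0.0, 0.0], [1.0, 1.0]]
targets = [[0.0], [1.0]]

puts nn.query([0.0, 1.0]).inspect        # forward pass only, flattened output
puts nn.calculate_loss(inputs, targets)  # average MSE across the two pairs
```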
data/lib/qoa/training.rb ADDED
@@ -0,0 +1,113 @@
+require_relative 'matrix_helpers'
+require 'concurrent'
+
+module Qoa
+  module Training
+    include MatrixHelpers
+
+    def train(inputs, targets)
+      raise ArgumentError, 'inputs and targets must have the same length' if inputs.size != targets.size
+
+      inputs.zip(targets).each_slice(@batch_size) do |batch|
+        train_batch(batch)
+      end
+    end
+
+    def train_batch(batch)
+      derivative_func = "#{@activation_func}_derivative"
+      batch_inputs = batch.map { |x, _| x }
+      batch_targets = batch.map { |_, y| y }
+
+      # Forward pass
+      layer_outputs = batch_inputs.map { |inputs| forward_pass(inputs) }
+
+      # Backward pass
+      # Using thread pool to parallelize the backward pass for each input in the batch
+      pool = Concurrent::FixedThreadPool.new(4)
+      # weight_deltas = Array.new(@layers.size - 1) { |i| Array.new(@layers[i].output_size) { Array.new(@layers[i].input_size, 0) } }
+      weight_deltas = Array.new(@layers.size) { |i| Array.new(@layers[i].output_size) { Array.new(@layers[i].input_size, 0) } }
+      mutex = Mutex.new
+
+      batch.zip(layer_outputs).each do |(inputs, targets), outputs|
+        pool.post do
+          deltas = backward_pass(inputs, targets, outputs)
+          mutex.synchronize do
+            @layers.each_with_index do |_, i|
+              weight_deltas[i] = matrix_add(weight_deltas[i], deltas[i])
+            end
+          end
+        end
+      end
+
+      pool.shutdown
+      pool.wait_for_termination
+
+      # Update weights
+      @layers.each_with_index do |layer, i|
+        layer.weights = matrix_add(layer.weights, scalar_multiply(@learning_rate / batch.size, weight_deltas[i]))
+      end
+    end
+
+    def train_with_early_stopping(inputs, targets, validation_inputs, validation_targets, max_epochs, patience)
+      best_validation_loss = Float::INFINITY
+      patience_left = patience
+      epoch = 0
+
+      while epoch < max_epochs && patience_left > 0
+        train(inputs, targets)
+        validation_loss = calculate_loss(validation_inputs, validation_targets)
+        puts "Epoch #{epoch + 1}: Validation loss = #{validation_loss}"
+
+        if validation_loss < best_validation_loss
+          best_validation_loss = validation_loss
+          save_model('best_model.json')
+          patience_left = patience
+        else
+          patience_left -= 1
+        end
+
+        epoch += 1
+      end
+
+      puts "Training stopped. Best validation loss = #{best_validation_loss}"
+      load_model('best_model.json')
+    end
+
+    def forward_pass(inputs)
+      inputs = inputs.map { |x| [x] } # Convert to column vector
+
+      layer_outputs = [inputs]
+      @layers.map(&:weights).each_with_index do |w, i|
+        layer_inputs = matrix_multiply(w, layer_outputs[-1])
+        layer_outputs << apply_function(layer_inputs, ActivationFunctions.method(@activation_func))
+
+        # Apply dropout to hidden layers
+        layer_outputs[-1] = apply_dropout(layer_outputs[-1], @dropout_rate) if i < @layers.size - 2
+      end
+
+      layer_outputs
+    end
+
+    def backward_pass(inputs, targets, layer_outputs)
+      derivative_func = "#{@activation_func}_derivative"
+      inputs = inputs.map { |x| [x] } # Convert to column vector
+      targets = targets.map { |x| [x] } # Convert to column vector
+
+      # Compute errors
+      errors = [matrix_subtract(targets, layer_outputs.last)]
+      (@layers.size - 2).downto(0) do |i|
+        errors << matrix_multiply(transpose(@layers[i + 1].weights), errors.last)
+      end
+
+      # Compute weight deltas
+      weight_deltas = []
+      @layers.each_with_index do |_, i|
+        gradients = matrix_multiply_element_wise(errors[i], apply_function(layer_outputs[i + 1], ActivationFunctions.method(derivative_func)))
+        w_delta = matrix_multiply(gradients, transpose(layer_outputs[i]))
+        weight_deltas << w_delta
+      end
+
+      weight_deltas
+    end
+  end
+end
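`train_with_early_stopping` trains for up to `max_epochs`, checks `calculate_loss` on the validation set after each epoch, checkpoints the best model to `best_model.json`, and stops once `patience` epochs pass without improvement before reloading the best checkpoint. An illustrative call, with invented data and settings:

```ruby
require 'qoa'

nn = Qoa::NeuralNetwork.new(2, [4], 1, 0.1, 0.0, :sigmoid)

train_x = [[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]]
train_y = [[0.0], [1.0], [1.0], [0.0]]
val_x   = [[1.0, 0.0], [0.0, 1.0]]
val_y   = [[1.0], [1.0]]

# Up to 100 epochs; stop after 5 epochs without a better validation loss.
# Note: this writes and then reloads 'best_model.json' in the working directory.
nn.train_with_early_stopping(train_x, train_y, val_x, val_y, 100, 5)
```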
data/lib/qoa/utils.rb ADDED
@@ -0,0 +1,49 @@
+require 'json'
+
+module Qoa
+  module Utils
+    def save_model(file_path)
+      model_data = {
+        input_nodes: @input_nodes,
+        hidden_layers: @hidden_layers,
+        output_nodes: @output_nodes,
+        learning_rate: @learning_rate,
+        activation_func: @activation_func,
+        dropout_rate: @dropout_rate,
+        decay_rate: @decay_rate,
+        epsilon: @epsilon,
+        batch_size: @batch_size,
+        weights: @layers.map(&:weights),
+      }
+
+      File.open(file_path, 'w') do |f|
+        f.write(JSON.pretty_generate(model_data))
+      end
+    end
+
+    def load_model(file_path)
+      model_data = JSON.parse(File.read(file_path), symbolize_names: true)
+
+      @input_nodes = model_data[:input_nodes]
+      @hidden_layers = model_data[:hidden_layers]
+      @output_nodes = model_data[:output_nodes]
+      @learning_rate = model_data[:learning_rate]
+      @activation_func = model_data[:activation_func].to_sym
+      @dropout_rate = model_data[:dropout_rate]
+      @decay_rate = model_data[:decay_rate]
+      @epsilon = model_data[:epsilon]
+      @batch_size = model_data[:batch_size]
+
+      @layers = model_data[:weights].map { |w| Layer.new(w.first.size, w.size) }
+      @layers.each_with_index do |layer, i|
+        layer.weights = model_data[:weights][i]
+      end
+    end
+
+    def mean_squared_error(prediction, target)
+      raise ArgumentError, 'prediction and target must have the same length' if prediction.size != target.size
+
+      prediction.zip(target).map { |p, t| (p - t) ** 2 }.sum / prediction.size
+    end
+  end
+end
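`save_model` writes the hyperparameters plus every layer's weight matrix as pretty-printed JSON, and `load_model` rebuilds the `@layers` array from that file, so a model can be restored into a freshly constructed network. A short illustrative round trip:

```ruby
require 'qoa'

nn = Qoa::NeuralNetwork.new(2, [4], 1, 0.1, 0.0, :sigmoid)
nn.save_model('model.json')           # hyperparameters + weights as JSON

restored = Qoa::NeuralNetwork.new(2, [4], 1, 0.1, 0.0, :sigmoid)
restored.load_model('model.json')     # replaces @layers with the saved weights

puts restored.query([1.0, 0.0]).inspect  # forward pass with the restored weights
```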
data/lib/qoa/version.rb CHANGED
@@ -1,3 +1,3 @@
 module Qoa
-  VERSION = '0.0.1'
-end
+  VERSION = '0.0.3'
+end
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: qoa
 version: !ruby/object:Gem::Version
-  version: 0.0.1
+  version: 0.0.3
 platform: ruby
 authors:
 - Daniel M. Matongo
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2023-04-29 00:00:00.000000000 Z
+date: 2023-05-01 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: bundler
@@ -65,8 +65,11 @@ files:
 - code_of_conduct.md
 - lib/qoa.rb
 - lib/qoa/activation_functions.rb
+- lib/qoa/layer.rb
 - lib/qoa/matrix_helpers.rb
 - lib/qoa/neural_network.rb
+- lib/qoa/training.rb
+- lib/qoa/utils.rb
 - lib/qoa/version.rb
 homepage: https://github.com/mmatongo/qoa
 licenses:
@@ -80,7 +83,7 @@ required_ruby_version: !ruby/object:Gem::Requirement
   requirements:
   - - ">="
     - !ruby/object:Gem::Version
-      version: '0'
+      version: 2.5.0
 required_rubygems_version: !ruby/object:Gem::Requirement
   requirements:
   - - ">="