rumale-neural_network 0.24.0

checksums.yaml ADDED
@@ -0,0 +1,7 @@
+ ---
+ SHA256:
+   metadata.gz: 48d32f0d24a38ae0fe2976d323a949c621305348e0629f3b4b99b142da8e311d
+   data.tar.gz: 8b41c40aa425001c9ee8244e0da8f4de113177fbf41c3d5a9bad2633c778ff84
+ SHA512:
+   metadata.gz: 5661b39220d75b8fbb60ce73b499a592a66a939c106d7643171fccf1ee5b5e018d139bdae2b89ec70e9987b8869aa48cf8681b90a01cd62fa4399a44f87682bd
+   data.tar.gz: 50094943ebe950cf8ea9056e545167159ac93a81378253e2e33b794db98f82c66b4fe46541631803d2f7469993212528b5b6f22e089b92ffb32a701f1ea0a674
data/LICENSE.txt ADDED
@@ -0,0 +1,27 @@
+ Copyright (c) 2022 Atsushi Tatsuma
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+ * Neither the name of the copyright holder nor the names of its
+   contributors may be used to endorse or promote products derived from
+   this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
data/README.md ADDED
@@ -0,0 +1,33 @@
+ # Rumale::NeuralNetwork
+
+ [![Gem Version](https://badge.fury.io/rb/rumale-neural_network.svg)](https://badge.fury.io/rb/rumale-neural_network)
+ [![BSD 3-Clause License](https://img.shields.io/badge/License-BSD%203--Clause-orange.svg)](https://github.com/yoshoku/rumale/blob/main/rumale-neural_network/LICENSE.txt)
+ [![Documentation](https://img.shields.io/badge/api-reference-blue.svg)](https://yoshoku.github.io/rumale/doc/Rumale/NeuralNetwork.html)
+
+ Rumale is a machine learning library in Ruby.
+ Rumale::NeuralNetwork provides a classifier and a regressor based on the multi-layer perceptron
+ with the Rumale interface.
+
+ ## Installation
+
+ Add this line to your application's Gemfile:
+
+ ```ruby
+ gem 'rumale-neural_network'
+ ```
+
+ And then execute:
+
+     $ bundle install
+
+ Or install it yourself as:
+
+     $ gem install rumale-neural_network
+
+ ## Documentation
+
+ - [Rumale API Documentation - NeuralNetwork](https://yoshoku.github.io/rumale/doc/Rumale/NeuralNetwork.html)
+
+ ## License
+
+ The gem is available as open source under the terms of the [BSD-3-Clause License](https://opensource.org/licenses/BSD-3-Clause).
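As a quick orientation before the source files below, here is a minimal end-to-end sketch of the classifier (the feature matrix and labels are illustrative placeholders; it assumes the gem and its numo-narray dependency are installed):

```ruby
require 'numo/narray'
require 'rumale/neural_network/mlp_classifier'

# Illustrative toy data: 6 samples, 2 features, binary labels.
x = Numo::DFloat[[0.0, 0.1], [0.2, 0.1], [0.1, 0.3], [0.9, 0.8], [0.8, 1.0], [1.0, 0.9]]
y = Numo::Int32[0, 0, 0, 1, 1, 1]

estimator = Rumale::NeuralNetwork::MLPClassifier.new(hidden_units: [16], max_iter: 50, random_seed: 1)
estimator.fit(x, y)
p estimator.predict(x)       # Numo::Int32 of predicted labels
p estimator.predict_proba(x) # Numo::DFloat of per-class probabilities
```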
data/lib/rumale/neural_network/base_mlp.rb ADDED
@@ -0,0 +1,293 @@
+ # frozen_string_literal: true
+
+ require 'rumale/base/estimator'
+ require 'rumale/utils'
+
+ module Rumale
+   module NeuralNetwork
+     # @!visibility private
+     # This module consists of the classes that implement layer functions of a neural network.
+     module Layer
+       # @!visibility private
+       # Affine is a class that calculates the linear transform.
+       # This class is used internally.
+       class Affine
+         # @!visibility private
+         def initialize(n_inputs: nil, n_outputs: nil, optimizer: nil, rng: nil)
+           @weight = 0.01 * ::Rumale::Utils.rand_normal([n_inputs, n_outputs], rng)
+           @bias = Numo::DFloat.zeros(n_outputs)
+           @optimizer_weight = optimizer.dup
+           @optimizer_bias = optimizer.dup
+         end
+
+         # @!visibility private
+         def forward(x)
+           out = x.dot(@weight) + @bias
+
+           backward = proc do |dout|
+             dx = dout.dot(@weight.transpose)
+             dw = x.transpose.dot(dout)
+             db = dout.sum(axis: 0)
+
+             @weight = @optimizer_weight.call(@weight, dw)
+             @bias = @optimizer_bias.call(@bias, db)
+
+             dx
+           end
+
+           [out, backward]
+         end
+       end
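Every layer in this module follows the same contract: `forward(x)` returns the layer output together with a closure that backpropagates the gradient and applies the optimizer to the layer's parameters. A minimal sketch exercising `Affine` directly (these are internal, `@!visibility private` classes, so this is illustration only; the plain-SGD lambda is a hypothetical stand-in for `Optimizer::Adam`):

```ruby
require 'numo/narray'
require 'rumale/neural_network/base_mlp'

# Hypothetical optimizer stand-in: anything responding to call(weight, gradient) works.
sgd = ->(weight, gradient) { weight - 0.1 * gradient }

layer = Rumale::NeuralNetwork::Layer::Affine.new(n_inputs: 3, n_outputs: 2, optimizer: sgd, rng: Random.new(42))
x = Numo::DFloat.new(4, 3).rand              # mini-batch of 4 samples
out, backward = layer.forward(x)             # out has shape [4, 2]
dx = backward.call(Numo::DFloat.ones(4, 2))  # updates weight/bias, returns gradient w.r.t. x
```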
+
+       # @!visibility private
+       # Dropout is a class that performs dropout regularization.
+       # This class is used internally.
+       class Dropout
+         # @!visibility private
+         def initialize(rate: 0.3, rng: nil)
+           @rate = rate
+           @rng = rng
+         end
+
+         # @!visibility private
+         def forward(x)
+           rand_mat = ::Rumale::Utils.rand_uniform(x.shape, @rng)
+           mask = rand_mat.ge(@rate)
+           out = x * mask
+           out *= 1.fdiv(1 - @rate) if @rate < 1.0
+
+           backward = proc { |dout| dout * mask }
+
+           [out, backward]
+         end
+       end
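The surviving activations are rescaled by `1 / (1 - rate)` (inverted dropout), so their expected value matches what the layers see at prediction time, once `Sequential#delete_dropout` has removed the dropout layers. A small illustrative check (internal API; the numbers are approximate):

```ruby
require 'numo/narray'
require 'rumale/neural_network/base_mlp'

dropout = Rumale::NeuralNetwork::Layer::Dropout.new(rate: 0.5, rng: Random.new(1))
x = Numo::DFloat.ones(1, 10_000)
out, = dropout.forward(x)
# About half of the entries are zeroed and the survivors are scaled to 2.0,
# so the mean stays close to the original 1.0.
puts out.mean
```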
+
+       # @!visibility private
+       # ReLU is a class that calculates the rectified linear function.
+       # This class is used internally.
+       class Relu
+         # @!visibility private
+         def forward(x)
+           mask = x.gt(0)
+           out = x * mask
+
+           backward = proc { |dout| dout * mask }
+
+           [out, backward]
+         end
+       end
+     end
+
+     # @!visibility private
+     # This module consists of the classes that implement loss functions for a neural network.
+     module Loss
+       # @!visibility private
+       # MeanSquaredError is a class that calculates mean squared error for regression tasks.
+       # This class is used internally.
+       class MeanSquaredError
+         # @!visibility private
+         def call(out, y)
+           sz_batch = y.shape[0]
+           diff = out - y
+           loss = (diff**2).sum.fdiv(sz_batch)
+           dout = 2.fdiv(sz_batch) * diff
+           [loss, dout]
+         end
+       end
+
+       # @!visibility private
+       # SoftmaxCrossEntropy is a class that calculates softmax cross-entropy for classification tasks.
+       # This class is used internally.
+       class SoftmaxCrossEntropy
+         # @!visibility private
+         def call(out, y)
+           sz_batch = y.shape[0]
+           z = softmax(out)
+           loss = -(y * Numo::NMath.log(z + 1e-8)).sum.fdiv(sz_batch)
+           dout = (z - y) / sz_batch
+           [loss, dout]
+         end
+
+         private
+
+         def softmax(x)
+           clip = x.max(-1).expand_dims(-1)
+           exp_x = Numo::NMath.exp(x - clip)
+           exp_x / exp_x.sum(axis: -1).expand_dims(-1)
+         end
+       end
+     end
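Both loss classes share one contract: `call(out, y)` returns the scalar loss and the gradient `dout` with respect to the raw network output, which is then handed to the model's backward closure. A minimal illustration with `SoftmaxCrossEntropy` (internal API; the numbers are arbitrary):

```ruby
require 'numo/narray'
require 'rumale/neural_network/base_mlp'

loss_fn = Rumale::NeuralNetwork::Loss::SoftmaxCrossEntropy.new
out = Numo::DFloat[[2.0, 0.5, -1.0]] # raw outputs (logits) for a single sample
y   = Numo::DFloat[[1.0, 0.0, 0.0]]  # one-hot encoded target
loss, dout = loss_fn.call(out, y)
# loss is a Float; dout equals (softmax(out) - y) / batch_size and has the same shape as out.
```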
+
+     # @!visibility private
+     # This module consists of the classes for implementing the neural network model.
+     module Model
+       # @!visibility private
+       attr_reader :layers
+
+       # @!visibility private
+       # Sequential is a class that implements a linear stack model.
+       # This class is used internally.
+       class Sequential
+         # @!visibility private
+         def initialize
+           @layers = []
+         end
+
+         # @!visibility private
+         def push(ops)
+           @layers.push(ops)
+           self
+         end
+
+         # @!visibility private
+         def delete_dropout
+           @layers.delete_if { |node| node.is_a?(Layer::Dropout) }
+           self
+         end
+
+         # @!visibility private
+         def forward(x)
+           backprops = []
+           out = x.dup
+
+           @layers.each do |l|
+             out, bw = l.forward(out)
+             backprops.push(bw)
+           end
+
+           backward = proc do |dout|
+             backprops.reverse_each { |bw| dout = bw.call(dout) }
+             dout
+           end
+
+           [out, backward]
+         end
+       end
+     end
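`Sequential#forward` threads the input through each layer while collecting the layers' backward closures; the returned `backward` proc replays them in reverse order, which is all the backpropagation bookkeeping the estimators need. A toy sketch with a single ReLU layer (internal API):

```ruby
require 'numo/narray'
require 'rumale/neural_network/base_mlp'

model = Rumale::NeuralNetwork::Model::Sequential.new
model.push(Rumale::NeuralNetwork::Layer::Relu.new)

out, backward = model.forward(Numo::DFloat[[-1.0, 2.0]])
# out: the negative entry is zeroed by ReLU, the positive one passes through.
grad = backward.call(Numo::DFloat[[1.0, 1.0]])
# grad: the gradient flows back only where the input was positive.
```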
+
+     # @!visibility private
+     # This module consists of the classes that implement optimizers which adaptively tune the learning rate.
+     module Optimizer
+       # @!visibility private
+       # Adam is a class that implements the Adam optimizer.
+       #
+       # *Reference*
+       # - Kingma, D. P., and Ba, J., "Adam: A Method for Stochastic Optimization," Proc. ICLR'15, 2015.
+       class Adam
+         # @!visibility private
+         # Create a new optimizer with Adam.
+         #
+         # @param learning_rate [Float] The initial value of learning rate.
+         # @param decay1 [Float] The smoothing parameter for the first moment.
+         # @param decay2 [Float] The smoothing parameter for the second moment.
+         def initialize(learning_rate: 0.001, decay1: 0.9, decay2: 0.999)
+           @params = {
+             learning_rate: learning_rate,
+             decay1: decay1,
+             decay2: decay2
+           }
+           @iter = 0
+         end
+
+         # @!visibility private
+         # Calculate the updated weight with Adam adaptive learning rate.
+         #
+         # @param weight [Numo::DFloat] (shape: [n_features]) The weight to be updated.
+         # @param gradient [Numo::DFloat] (shape: [n_features]) The gradient for updating the weight.
+         # @return [Numo::DFloat] (shape: [n_features]) The updated weight.
+         def call(weight, gradient)
+           @fst_moment ||= Numo::DFloat.zeros(weight.shape)
+           @sec_moment ||= Numo::DFloat.zeros(weight.shape)
+
+           @iter += 1
+
+           @fst_moment = @params[:decay1] * @fst_moment + (1.0 - @params[:decay1]) * gradient
+           @sec_moment = @params[:decay2] * @sec_moment + (1.0 - @params[:decay2]) * gradient**2
+           nm_fst_moment = @fst_moment / (1.0 - @params[:decay1]**@iter)
+           nm_sec_moment = @sec_moment / (1.0 - @params[:decay2]**@iter)
+
+           weight - @params[:learning_rate] * nm_fst_moment / (nm_sec_moment**0.5 + 1e-8)
+         end
+       end
+     end
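For reference, `call` above implements the standard Adam update from the cited paper. With gradient $g_t$, decay rates $\beta_1$ (`decay1`) and $\beta_2$ (`decay2`), learning rate $\alpha$, and $\epsilon = 10^{-8}$, where $m_t$ and $v_t$ correspond to `@fst_moment` and `@sec_moment`:

$$m_t = \beta_1 m_{t-1} + (1 - \beta_1) g_t, \qquad v_t = \beta_2 v_{t-1} + (1 - \beta_2) g_t^2$$

$$\hat{m}_t = \frac{m_t}{1 - \beta_1^t}, \qquad \hat{v}_t = \frac{v_t}{1 - \beta_2^t}, \qquad w_t = w_{t-1} - \alpha \, \frac{\hat{m}_t}{\sqrt{\hat{v}_t} + \epsilon}$$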
+
+     # BaseMLP is an abstract class for the implementation of multi-layer perceptron estimators.
+     # This class is used internally.
+     class BaseMLP < ::Rumale::Base::Estimator
+       # Create a multi-layer perceptron estimator.
+       #
+       # @param hidden_units [Array] The number of units in the i-th hidden layer.
+       # @param dropout_rate [Float] The rate of the units to drop.
+       # @param learning_rate [Float] The initial value of learning rate in Adam optimizer.
+       # @param decay1 [Float] The smoothing parameter for the first moment in Adam optimizer.
+       # @param decay2 [Float] The smoothing parameter for the second moment in Adam optimizer.
+       # @param max_iter [Integer] The maximum number of epochs that indicates
+       #   how many times the whole data is given to the training process.
+       # @param batch_size [Integer] The size of the mini batches.
+       # @param tol [Float] The tolerance of loss for terminating optimization.
+       # @param verbose [Boolean] The flag indicating whether to output loss during iteration.
+       # @param random_seed [Integer] The seed value used to initialize the random generator.
+       def initialize(hidden_units: [128, 128], dropout_rate: 0.4, learning_rate: 0.001, decay1: 0.9, decay2: 0.999,
+                      max_iter: 200, batch_size: 50, tol: 1e-4, verbose: false, random_seed: nil)
+         super()
+         @params = {
+           hidden_units: hidden_units,
+           dropout_rate: dropout_rate,
+           learning_rate: learning_rate,
+           decay1: decay1,
+           decay2: decay2,
+           max_iter: max_iter,
+           batch_size: batch_size,
+           tol: tol,
+           verbose: verbose,
+           random_seed: random_seed || srand
+         }
+         @rng = Random.new(@params[:random_seed])
+       end
+
+       private
+
+       def buld_network(n_inputs, n_outputs, srng = nil)
+         adam = ::Rumale::NeuralNetwork::Optimizer::Adam.new(
+           learning_rate: @params[:learning_rate], decay1: @params[:decay1], decay2: @params[:decay2]
+         )
+         model = ::Rumale::NeuralNetwork::Model::Sequential.new
+         n_units = [n_inputs, *@params[:hidden_units]]
+         n_units.each_cons(2) do |n_in, n_out|
+           model.push(::Rumale::NeuralNetwork::Layer::Affine.new(n_inputs: n_in, n_outputs: n_out, optimizer: adam, rng: srng))
+           model.push(::Rumale::NeuralNetwork::Layer::Relu.new)
+           model.push(::Rumale::NeuralNetwork::Layer::Dropout.new(rate: @params[:dropout_rate], rng: srng))
+         end
+         model.push(::Rumale::NeuralNetwork::Layer::Affine.new(n_inputs: n_units[-1], n_outputs: n_outputs, optimizer: adam,
+                                                               rng: srng))
+       end
+
+       def train(x, y, network, loss_func, srng = nil)
+         class_name = self.class.to_s.split('::').last
+         n_samples = x.shape[0]
+
+         @params[:max_iter].times do |t|
+           sample_ids = Array(0...n_samples)
+           sample_ids.shuffle!(random: srng)
+           until (subset_ids = sample_ids.shift(@params[:batch_size])).empty?
+             # random sampling
+             sub_x = x[subset_ids, true].dup
+             sub_y = y[subset_ids, true].dup
+             # forward
+             out, backward = network.forward(sub_x)
+             # calc loss function
+             loss, dout = loss_func.call(out, sub_y)
+             break if loss < @params[:tol]
+
+             # backward
+             backward.call(dout)
+           end
+           @n_iter = t + 1
+           puts "[#{class_name}] Loss after #{@n_iter} epochs: #{loss}" if @params[:verbose]
+         end
+
+         network
+       end
+     end
+   end
+ end
data/lib/rumale/neural_network/mlp_classifier.rb ADDED
@@ -0,0 +1,117 @@
+ # frozen_string_literal: true
+
+ require 'rumale/base/classifier'
+ require 'rumale/utils'
+ require 'rumale/validation'
+ require 'rumale/neural_network/base_mlp'
+
+ module Rumale
+   module NeuralNetwork
+     # MLPClassifier is a class that implements a classifier based on the multi-layer perceptron.
+     # MLPClassifier uses ReLU as the activation function, Adam as the optimization method,
+     # and softmax cross entropy as the loss function.
+     #
+     # @example
+     #   require 'rumale/neural_network/mlp_classifier'
+     #
+     #   estimator = Rumale::NeuralNetwork::MLPClassifier.new(hidden_units: [100, 100], dropout_rate: 0.3)
+     #   estimator.fit(training_samples, training_labels)
+     #   results = estimator.predict(testing_samples)
+     class MLPClassifier < BaseMLP
+       include ::Rumale::Base::Classifier
+
+       # Return the network.
+       # @return [Rumale::NeuralNetwork::Model::Sequential]
+       attr_reader :network
+
+       # Return the class labels.
+       # @return [Numo::Int32] (size: n_classes)
+       attr_reader :classes
+
+       # Return the number of iterations run for optimization.
+       # @return [Integer]
+       attr_reader :n_iter
+
+       # Return the random generator.
+       # @return [Random]
+       attr_reader :rng
+
+       # Create a new classifier with multi-layer perceptron.
+       #
+       # @param hidden_units [Array] The number of units in the i-th hidden layer.
+       # @param dropout_rate [Float] The rate of the units to drop.
+       # @param learning_rate [Float] The initial value of learning rate in Adam optimizer.
+       # @param decay1 [Float] The smoothing parameter for the first moment in Adam optimizer.
+       # @param decay2 [Float] The smoothing parameter for the second moment in Adam optimizer.
+       # @param max_iter [Integer] The maximum number of epochs that indicates
+       #   how many times the whole data is given to the training process.
+       # @param batch_size [Integer] The size of the mini batches.
+       # @param tol [Float] The tolerance of loss for terminating optimization.
+       # @param verbose [Boolean] The flag indicating whether to output loss during iteration.
+       # @param random_seed [Integer] The seed value used to initialize the random generator.
+       def initialize(hidden_units: [128, 128], dropout_rate: 0.4, learning_rate: 0.001, decay1: 0.9, decay2: 0.999,
+                      max_iter: 200, batch_size: 50, tol: 1e-4, verbose: false, random_seed: nil)
+         super
+       end
+
+       # Fit the model with given training data.
+       #
+       # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+       # @param y [Numo::Int32] (shape: [n_samples]) The labels to be used for fitting the model.
+       # @return [MLPClassifier] The learned classifier itself.
+       def fit(x, y)
+         x = ::Rumale::Validation.check_convert_sample_array(x)
+         y = ::Rumale::Validation.check_convert_label_array(y)
+         ::Rumale::Validation.check_sample_size(x, y)
+
+         @classes = Numo::Int32[*y.to_a.uniq.sort]
+         n_labels = @classes.size
+         n_features = x.shape[1]
+         sub_rng = @rng.dup
+
+         loss = ::Rumale::NeuralNetwork::Loss::SoftmaxCrossEntropy.new
+         @network = buld_network(n_features, n_labels, sub_rng)
+         @network = train(x, one_hot_encode(y), @network, loss, sub_rng)
+         @network.delete_dropout
+
+         self
+       end
+
+       # Predict class labels for samples.
+       #
+       # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the labels.
+       # @return [Numo::Int32] (shape: [n_samples]) Predicted class label per sample.
+       def predict(x)
+         x = ::Rumale::Validation.check_convert_sample_array(x)
+
+         n_samples = x.shape[0]
+         decision_values = predict_proba(x)
+         predicted = Array.new(n_samples) { |n| @classes[decision_values[n, true].max_index] }
+         Numo::Int32.asarray(predicted)
+       end
+
+       # Predict probability for samples.
+       #
+       # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the probabilities.
+       # @return [Numo::DFloat] (shape: [n_samples, n_classes]) Predicted probability of each class per sample.
+       def predict_proba(x)
+         x = ::Rumale::Validation.check_convert_sample_array(x)
+
+         out, = @network.forward(x)
+         softmax(out)
+       end
+
+       private
+
+       def one_hot_encode(y)
+         ::Rumale::Utils.binarize_labels(y)
+       end
+
+       def softmax(x)
+         clip = x.max(-1).expand_dims(-1)
+         exp_x = Numo::NMath.exp(x - clip)
+         exp_x / exp_x.sum(axis: -1).expand_dims(-1)
+       end
+     end
+   end
+ end
data/lib/rumale/neural_network/mlp_regressor.rb ADDED
@@ -0,0 +1,88 @@
+ # frozen_string_literal: true
+
+ require 'rumale/base/regressor'
+ require 'rumale/neural_network/base_mlp'
+ require 'rumale/validation'
+
+ module Rumale
+   module NeuralNetwork
+     # MLPRegressor is a class that implements a regressor based on the multi-layer perceptron.
+     # MLPRegressor uses ReLU as the activation function, Adam as the optimization method,
+     # and mean squared error as the loss function.
+     #
+     # @example
+     #   require 'rumale/neural_network/mlp_regressor'
+     #
+     #   estimator = Rumale::NeuralNetwork::MLPRegressor.new(hidden_units: [100, 100], dropout_rate: 0.3)
+     #   estimator.fit(training_samples, training_values)
+     #   results = estimator.predict(testing_samples)
+     class MLPRegressor < BaseMLP
+       include ::Rumale::Base::Regressor
+
+       # Return the network.
+       # @return [Rumale::NeuralNetwork::Model::Sequential]
+       attr_reader :network
+
+       # Return the number of iterations run for optimization.
+       # @return [Integer]
+       attr_reader :n_iter
+
+       # Return the random generator.
+       # @return [Random]
+       attr_reader :rng
+
+       # Create a new regressor with multi-layer perceptron.
+       #
+       # @param hidden_units [Array] The number of units in the i-th hidden layer.
+       # @param dropout_rate [Float] The rate of the units to drop.
+       # @param learning_rate [Float] The initial value of learning rate in Adam optimizer.
+       # @param decay1 [Float] The smoothing parameter for the first moment in Adam optimizer.
+       # @param decay2 [Float] The smoothing parameter for the second moment in Adam optimizer.
+       # @param max_iter [Integer] The maximum number of epochs that indicates
+       #   how many times the whole data is given to the training process.
+       # @param batch_size [Integer] The size of the mini batches.
+       # @param tol [Float] The tolerance of loss for terminating optimization.
+       # @param verbose [Boolean] The flag indicating whether to output loss during iteration.
+       # @param random_seed [Integer] The seed value used to initialize the random generator.
+       def initialize(hidden_units: [128, 128], dropout_rate: 0.4, learning_rate: 0.001, decay1: 0.9, decay2: 0.999,
+                      max_iter: 200, batch_size: 50, tol: 1e-4, verbose: false, random_seed: nil)
+         super
+       end
+
+       # Fit the model with given training data.
+       #
+       # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+       # @param y [Numo::DFloat] (shape: [n_samples, n_outputs]) The target values to be used for fitting the model.
+       # @return [MLPRegressor] The learned regressor itself.
+       def fit(x, y)
+         x = ::Rumale::Validation.check_convert_sample_array(x)
+         y = ::Rumale::Validation.check_convert_target_value_array(y)
+         ::Rumale::Validation.check_sample_size(x, y)
+
+         y = y.expand_dims(1) if y.ndim == 1
+         n_targets = y.shape[1]
+         n_features = x.shape[1]
+         sub_rng = @rng.dup
+
+         loss = ::Rumale::NeuralNetwork::Loss::MeanSquaredError.new
+         @network = buld_network(n_features, n_targets, sub_rng)
+         @network = train(x, y, @network, loss, sub_rng)
+         @network.delete_dropout
+
+         self
+       end
+
+       # Predict values for samples.
+       #
+       # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the values.
+       # @return [Numo::DFloat] (shape: [n_samples, n_outputs]) Predicted values per sample.
+       def predict(x)
+         x = ::Rumale::Validation.check_convert_sample_array(x)
+
+         out, = @network.forward(x)
+         out = out[true, 0] if out.shape[1] == 1
+         out
+       end
+     end
+   end
+ end
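A minimal end-to-end sketch for the regressor (the data are illustrative placeholders; it assumes the gem and numo-narray are installed). A single-target `y` may be passed as a 1-D array; multi-target regression uses a 2-D array instead:

```ruby
require 'numo/narray'
require 'rumale/neural_network/mlp_regressor'

# Illustrative toy data: y = 2 * x.
x = Numo::DFloat[[0.0], [0.25], [0.5], [0.75], [1.0]]
y = Numo::DFloat[0.0, 0.5, 1.0, 1.5, 2.0]

estimator = Rumale::NeuralNetwork::MLPRegressor.new(hidden_units: [16], max_iter: 100, random_seed: 1)
estimator.fit(x, y)
predicted = estimator.predict(x) # Numo::DFloat (shape: [5]) because y was single-target
```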
data/lib/rumale/neural_network/version.rb ADDED
@@ -0,0 +1,10 @@
+ # frozen_string_literal: true
+
+ # Rumale is a machine learning library in Ruby.
+ module Rumale
+   # This module consists of the modules and classes for the implementation of the multi-layer perceptron estimator.
+   module NeuralNetwork
+     # @!visibility private
+     VERSION = '0.24.0'
+   end
+ end
data/lib/rumale/neural_network.rb ADDED
@@ -0,0 +1,9 @@
+ # frozen_string_literal: true
+
+ require 'numo/narray'
+
+ require_relative 'neural_network/version'
+
+ require_relative 'neural_network/base_mlp'
+ require_relative 'neural_network/mlp_classifier'
+ require_relative 'neural_network/mlp_regressor'
metadata ADDED
@@ -0,0 +1,86 @@
+ --- !ruby/object:Gem::Specification
+ name: rumale-neural_network
+ version: !ruby/object:Gem::Version
+   version: 0.24.0
+ platform: ruby
+ authors:
+ - yoshoku
+ autorequire:
+ bindir: exe
+ cert_chain: []
+ date: 2022-12-31 00:00:00.000000000 Z
+ dependencies:
+ - !ruby/object:Gem::Dependency
+   name: numo-narray
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - ">="
+       - !ruby/object:Gem::Version
+         version: 0.9.1
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - ">="
+       - !ruby/object:Gem::Version
+         version: 0.9.1
+ - !ruby/object:Gem::Dependency
+   name: rumale-core
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: 0.24.0
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: 0.24.0
+ description: |
+   Rumale::NeuralNetwork provides classifier and regression
+   based on multi-layer perceptron with Rumale interface.
+ email:
+ - yoshoku@outlook.com
+ executables: []
+ extensions: []
+ extra_rdoc_files: []
+ files:
+ - LICENSE.txt
+ - README.md
+ - lib/rumale/neural_network.rb
+ - lib/rumale/neural_network/base_mlp.rb
+ - lib/rumale/neural_network/mlp_classifier.rb
+ - lib/rumale/neural_network/mlp_regressor.rb
+ - lib/rumale/neural_network/version.rb
+ homepage: https://github.com/yoshoku/rumale
+ licenses:
+ - BSD-3-Clause
+ metadata:
+   homepage_uri: https://github.com/yoshoku/rumale
+   source_code_uri: https://github.com/yoshoku/rumale/tree/main/rumale-neural_network
+   changelog_uri: https://github.com/yoshoku/rumale/blob/main/CHANGELOG.md
+   documentation_uri: https://yoshoku.github.io/rumale/doc/
+   rubygems_mfa_required: 'true'
+ post_install_message:
+ rdoc_options: []
+ require_paths:
+ - lib
+ required_ruby_version: !ruby/object:Gem::Requirement
+   requirements:
+   - - ">="
+     - !ruby/object:Gem::Version
+       version: '0'
+ required_rubygems_version: !ruby/object:Gem::Requirement
+   requirements:
+   - - ">="
+     - !ruby/object:Gem::Version
+       version: '0'
+ requirements: []
+ rubygems_version: 3.3.26
+ signing_key:
+ specification_version: 4
+ summary: Rumale::NeuralNetwork provides classifier and regression based on multi-layer
+   perceptron with Rumale interface.
+ test_files: []