rumale 0.8.0

This diff reflects the changes between publicly available package versions as they appear in their respective public registries, and is provided for informational purposes only.
Files changed (85)
  1. checksums.yaml +7 -0
  2. data/.coveralls.yml +1 -0
  3. data/.gitignore +20 -0
  4. data/.rspec +3 -0
  5. data/.rubocop.yml +47 -0
  6. data/.rubocop_todo.yml +58 -0
  7. data/.travis.yml +13 -0
  8. data/CHANGELOG.md +2 -0
  9. data/CODE_OF_CONDUCT.md +74 -0
  10. data/Gemfile +4 -0
  11. data/LICENSE.txt +23 -0
  12. data/README.md +175 -0
  13. data/Rakefile +6 -0
  14. data/bin/console +14 -0
  15. data/bin/setup +8 -0
  16. data/lib/rumale.rb +70 -0
  17. data/lib/rumale/base/base_estimator.rb +13 -0
  18. data/lib/rumale/base/classifier.rb +36 -0
  19. data/lib/rumale/base/cluster_analyzer.rb +31 -0
  20. data/lib/rumale/base/evaluator.rb +17 -0
  21. data/lib/rumale/base/regressor.rb +36 -0
  22. data/lib/rumale/base/splitter.rb +21 -0
  23. data/lib/rumale/base/transformer.rb +22 -0
  24. data/lib/rumale/clustering/dbscan.rb +125 -0
  25. data/lib/rumale/clustering/k_means.rb +138 -0
  26. data/lib/rumale/dataset.rb +110 -0
  27. data/lib/rumale/decomposition/nmf.rb +141 -0
  28. data/lib/rumale/decomposition/pca.rb +148 -0
  29. data/lib/rumale/ensemble/ada_boost_classifier.rb +196 -0
  30. data/lib/rumale/ensemble/ada_boost_regressor.rb +178 -0
  31. data/lib/rumale/ensemble/random_forest_classifier.rb +180 -0
  32. data/lib/rumale/ensemble/random_forest_regressor.rb +141 -0
  33. data/lib/rumale/evaluation_measure/accuracy.rb +29 -0
  34. data/lib/rumale/evaluation_measure/f_score.rb +50 -0
  35. data/lib/rumale/evaluation_measure/log_loss.rb +45 -0
  36. data/lib/rumale/evaluation_measure/mean_absolute_error.rb +29 -0
  37. data/lib/rumale/evaluation_measure/mean_squared_error.rb +29 -0
  38. data/lib/rumale/evaluation_measure/normalized_mutual_information.rb +62 -0
  39. data/lib/rumale/evaluation_measure/precision.rb +50 -0
  40. data/lib/rumale/evaluation_measure/precision_recall.rb +91 -0
  41. data/lib/rumale/evaluation_measure/purity.rb +40 -0
  42. data/lib/rumale/evaluation_measure/r2_score.rb +43 -0
  43. data/lib/rumale/evaluation_measure/recall.rb +50 -0
  44. data/lib/rumale/kernel_approximation/rbf.rb +121 -0
  45. data/lib/rumale/kernel_machine/kernel_svc.rb +193 -0
  46. data/lib/rumale/linear_model/base_linear_model.rb +89 -0
  47. data/lib/rumale/linear_model/lasso.rb +136 -0
  48. data/lib/rumale/linear_model/linear_regression.rb +110 -0
  49. data/lib/rumale/linear_model/logistic_regression.rb +159 -0
  50. data/lib/rumale/linear_model/ridge.rb +110 -0
  51. data/lib/rumale/linear_model/svc.rb +183 -0
  52. data/lib/rumale/linear_model/svr.rb +122 -0
  53. data/lib/rumale/model_selection/cross_validation.rb +123 -0
  54. data/lib/rumale/model_selection/grid_search_cv.rb +247 -0
  55. data/lib/rumale/model_selection/k_fold.rb +76 -0
  56. data/lib/rumale/model_selection/stratified_k_fold.rb +94 -0
  57. data/lib/rumale/multiclass/one_vs_rest_classifier.rb +100 -0
  58. data/lib/rumale/naive_bayes/naive_bayes.rb +315 -0
  59. data/lib/rumale/nearest_neighbors/k_neighbors_classifier.rb +111 -0
  60. data/lib/rumale/nearest_neighbors/k_neighbors_regressor.rb +93 -0
  61. data/lib/rumale/optimizer/nadam.rb +90 -0
  62. data/lib/rumale/optimizer/rmsprop.rb +69 -0
  63. data/lib/rumale/optimizer/sgd.rb +65 -0
  64. data/lib/rumale/optimizer/yellow_fin.rb +144 -0
  65. data/lib/rumale/pairwise_metric.rb +91 -0
  66. data/lib/rumale/pipeline/pipeline.rb +197 -0
  67. data/lib/rumale/polynomial_model/base_factorization_machine.rb +99 -0
  68. data/lib/rumale/polynomial_model/factorization_machine_classifier.rb +197 -0
  69. data/lib/rumale/polynomial_model/factorization_machine_regressor.rb +131 -0
  70. data/lib/rumale/preprocessing/l2_normalizer.rb +62 -0
  71. data/lib/rumale/preprocessing/label_encoder.rb +94 -0
  72. data/lib/rumale/preprocessing/min_max_scaler.rb +92 -0
  73. data/lib/rumale/preprocessing/one_hot_encoder.rb +98 -0
  74. data/lib/rumale/preprocessing/standard_scaler.rb +86 -0
  75. data/lib/rumale/probabilistic_output.rb +112 -0
  76. data/lib/rumale/tree/base_decision_tree.rb +153 -0
  77. data/lib/rumale/tree/decision_tree_classifier.rb +163 -0
  78. data/lib/rumale/tree/decision_tree_regressor.rb +135 -0
  79. data/lib/rumale/tree/node.rb +70 -0
  80. data/lib/rumale/utils.rb +37 -0
  81. data/lib/rumale/validation.rb +79 -0
  82. data/lib/rumale/values.rb +13 -0
  83. data/lib/rumale/version.rb +6 -0
  84. data/rumale.gemspec +41 -0
  85. metadata +204 -0
data/lib/rumale/evaluation_measure/recall.rb
@@ -0,0 +1,50 @@
+ # frozen_string_literal: true
+
+ require 'rumale/base/evaluator'
+ require 'rumale/evaluation_measure/precision_recall'
+
+ module Rumale
+   # This module consists of the classes for model evaluation.
+   module EvaluationMeasure
+     # Recall is a class that calculates the recall of the predicted labels.
+     #
+     # @example
+     #   evaluator = Rumale::EvaluationMeasure::Recall.new
+     #   puts evaluator.score(ground_truth, predicted)
+     class Recall
+       include Base::Evaluator
+       include EvaluationMeasure::PrecisionRecall
+
+       # Return the average type for calculation of recall.
+       # @return [String] ('binary', 'micro', 'macro')
+       attr_reader :average
+
+       # Create a new evaluation measure calculator for recall score.
+       #
+       # @param average [String] The average type ('binary', 'micro', 'macro').
+       def initialize(average: 'binary')
+         check_params_string(average: average)
+         @average = average
+       end
+
+       # Calculate average recall.
+       #
+       # @param y_true [Numo::Int32] (shape: [n_samples]) Ground truth labels.
+       # @param y_pred [Numo::Int32] (shape: [n_samples]) Predicted labels.
+       # @return [Float] Average recall.
+       def score(y_true, y_pred)
+         check_label_array(y_true)
+         check_label_array(y_pred)
+
+         case @average
+         when 'binary'
+           recall_each_class(y_true, y_pred).last
+         when 'micro'
+           micro_average_recall(y_true, y_pred)
+         when 'macro'
+           macro_average_recall(y_true, y_pred)
+         end
+       end
+     end
+   end
+ end
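
A minimal usage sketch of the Recall class above, with illustrative label arrays. With 'binary' averaging, score returns the recall of the larger label (here, 1); two of the three true positives are predicted correctly, so the result is 2/3:

    require 'numo/narray'
    require 'rumale'

    # Illustrative binary labels (+1 / -1).
    ground_truth = Numo::Int32[1, 1, -1, -1, 1]
    predicted    = Numo::Int32[1, -1, -1, -1, 1]

    evaluator = Rumale::EvaluationMeasure::Recall.new(average: 'binary')
    puts evaluator.score(ground_truth, predicted) # => 0.666...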
data/lib/rumale/kernel_approximation/rbf.rb
@@ -0,0 +1,121 @@
+ # frozen_string_literal: true
+
+ require 'rumale/utils'
+ require 'rumale/base/base_estimator'
+ require 'rumale/base/transformer'
+
+ module Rumale
+   # Module for kernel approximation algorithms.
+   module KernelApproximation
+     # Class for RBF kernel feature mapping.
+     #
+     # @example
+     #   transformer = Rumale::KernelApproximation::RBF.new(gamma: 1.0, n_components: 128, random_seed: 1)
+     #   new_training_samples = transformer.fit_transform(training_samples)
+     #   new_testing_samples = transformer.transform(testing_samples)
+     #
+     # *Reference*:
+     # 1. A. Rahimi and B. Recht, "Random Features for Large-Scale Kernel Machines," Proc. NIPS'07, pp. 1177--1184, 2007.
+     class RBF
+       include Base::BaseEstimator
+       include Base::Transformer
+
+       # Return the random matrix for transformation.
+       # @return [Numo::DFloat] (shape: [n_features, n_components])
+       attr_reader :random_mat
+
+       # Return the random vector for transformation.
+       # @return [Numo::DFloat] (shape: [n_components])
+       attr_reader :random_vec
+
+       # Return the random generator for transformation.
+       # @return [Random]
+       attr_reader :rng
+
+       # Create a new transformer for mapping to RBF kernel feature space.
+       #
+       # @param gamma [Float] The parameter of the RBF kernel: exp(-gamma * x^2).
+       # @param n_components [Integer] The number of dimensions of the RBF kernel feature space.
+       # @param random_seed [Integer] The seed value used to initialize the random generator.
+       def initialize(gamma: 1.0, n_components: 128, random_seed: nil)
+         check_params_float(gamma: gamma)
+         check_params_integer(n_components: n_components)
+         check_params_type_or_nil(Integer, random_seed: random_seed)
+         check_params_positive(gamma: gamma, n_components: n_components)
+         @params = {}
+         @params[:gamma] = gamma
+         @params[:n_components] = n_components
+         @params[:random_seed] = random_seed
+         @params[:random_seed] ||= srand
+         @random_mat = nil
+         @random_vec = nil
+         @rng = Random.new(@params[:random_seed])
+       end
+
+       # Fit the model with given training data.
+       #
+       # @overload fit(x) -> RBF
+       #
+       # @param x [Numo::NArray] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+       #   This method uses only the number of features of the data.
+       # @return [RBF] The learned transformer itself.
+       def fit(x, _y = nil)
+         check_sample_array(x)
+
+         n_features = x.shape[1]
+         @params[:n_components] = 2 * n_features if @params[:n_components] <= 0
+         @random_mat = Rumale::Utils.rand_normal([n_features, @params[:n_components]], @rng) * (2.0 * @params[:gamma])**0.5
+         n_half_components = @params[:n_components] / 2
+         @random_vec = Numo::DFloat.zeros(@params[:n_components] - n_half_components).concatenate(
+           Numo::DFloat.ones(n_half_components) * (0.5 * Math::PI)
+         )
+         self
+       end
+
+       # Fit the model with training data, and then transform them with the learned model.
+       #
+       # @overload fit_transform(x) -> Numo::DFloat
+       #
+       # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+       # @return [Numo::DFloat] (shape: [n_samples, n_components]) The transformed data.
+       def fit_transform(x, _y = nil)
+         check_sample_array(x)
+
+         fit(x).transform(x)
+       end
+
+       # Transform the given data with the learned model.
+       #
+       # @overload transform(x) -> Numo::DFloat
+       #
+       # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The data to be transformed with the learned model.
+       # @return [Numo::DFloat] (shape: [n_samples, n_components]) The transformed data.
+       def transform(x)
+         check_sample_array(x)
+
+         n_samples, = x.shape
+         projection = x.dot(@random_mat) + @random_vec.tile(n_samples, 1)
+         Numo::NMath.sin(projection) * ((2.0 / @params[:n_components])**0.5)
+       end
+
+       # Dump marshal data.
+       # @return [Hash] The marshal data about RBF.
+       def marshal_dump
+         { params: @params,
+           random_mat: @random_mat,
+           random_vec: @random_vec,
+           rng: @rng }
+       end
+
+       # Load marshal data.
+       # @return [nil]
+       def marshal_load(obj)
+         @params = obj[:params]
+         @random_mat = obj[:random_mat]
+         @random_vec = obj[:random_vec]
+         @rng = obj[:rng]
+         nil
+       end
+     end
+   end
+ end
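
The class above implements random Fourier features (the Rahimi-Recht reference): inner products of the mapped vectors approximate the RBF kernel exp(-gamma * ||x - y||^2). A small sketch with made-up data; gamma, the sample sizes, and the seed are illustrative:

    require 'numo/narray'
    require 'rumale'

    x = Numo::DFloat.new(100, 4).rand # toy samples

    transformer = Rumale::KernelApproximation::RBF.new(gamma: 0.5, n_components: 1024, random_seed: 1)
    z = transformer.fit_transform(x) # shape: [100, 1024]

    # The dot product in the mapped space approximates the exact kernel value.
    approx = z[0, true].dot(z[1, true])
    exact  = Math.exp(-0.5 * ((x[0, true] - x[1, true])**2).sum)
    puts format('approx: %.3f / exact: %.3f', approx, exact)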
data/lib/rumale/kernel_machine/kernel_svc.rb
@@ -0,0 +1,193 @@
+ # frozen_string_literal: true
+
+ require 'rumale/base/base_estimator'
+ require 'rumale/base/classifier'
+ require 'rumale/probabilistic_output'
+
+ module Rumale
+   # This module consists of the classes that implement kernel method-based estimators.
+   module KernelMachine
+     # KernelSVC is a class that implements a (nonlinear) kernel Support Vector Classifier
+     # with stochastic gradient descent (SGD) optimization.
+     # For multiclass classification problems, it uses the one-vs-the-rest strategy.
+     #
+     # @example
+     #   training_kernel_matrix = Rumale::PairwiseMetric::rbf_kernel(training_samples)
+     #   estimator =
+     #     Rumale::KernelMachine::KernelSVC.new(reg_param: 1.0, max_iter: 1000, random_seed: 1)
+     #   estimator.fit(training_kernel_matrix, training_labels)
+     #   testing_kernel_matrix = Rumale::PairwiseMetric::rbf_kernel(testing_samples, training_samples)
+     #   results = estimator.predict(testing_kernel_matrix)
+     #
+     # *Reference*
+     # 1. S. Shalev-Shwartz, Y. Singer, N. Srebro, and A. Cotter, "Pegasos: Primal Estimated sub-GrAdient SOlver for SVM," Mathematical Programming, vol. 127 (1), pp. 3--30, 2011.
+     class KernelSVC
+       include Base::BaseEstimator
+       include Base::Classifier
+
+       # Return the weight vector for Kernel SVC.
+       # @return [Numo::DFloat] (shape: [n_classes, n_training_samples])
+       attr_reader :weight_vec
+
+       # Return the class labels.
+       # @return [Numo::Int32] (shape: [n_classes])
+       attr_reader :classes
+
+       # Return the random generator for performing random sampling.
+       # @return [Random]
+       attr_reader :rng
+
+       # Create a new classifier with Kernel Support Vector Machine by the SGD optimization.
+       #
+       # @param reg_param [Float] The regularization parameter.
+       # @param max_iter [Integer] The maximum number of iterations.
+       # @param probability [Boolean] The flag indicating whether to perform probability estimation.
+       # @param random_seed [Integer] The seed value used to initialize the random generator.
+       def initialize(reg_param: 1.0, max_iter: 1000, probability: false, random_seed: nil)
+         check_params_float(reg_param: reg_param)
+         check_params_integer(max_iter: max_iter)
+         check_params_boolean(probability: probability)
+         check_params_type_or_nil(Integer, random_seed: random_seed)
+         check_params_positive(reg_param: reg_param, max_iter: max_iter)
+         @params = {}
+         @params[:reg_param] = reg_param
+         @params[:max_iter] = max_iter
+         @params[:probability] = probability
+         @params[:random_seed] = random_seed
+         @params[:random_seed] ||= srand
+         @weight_vec = nil
+         @prob_param = nil
+         @classes = nil
+         @rng = Random.new(@params[:random_seed])
+       end
+
+       # Fit the model with given training data.
+       #
+       # @param x [Numo::DFloat] (shape: [n_training_samples, n_training_samples])
+       #   The kernel matrix of the training data to be used for fitting the model.
+       # @param y [Numo::Int32] (shape: [n_training_samples]) The labels to be used for fitting the model.
+       # @return [KernelSVC] The learned classifier itself.
+       def fit(x, y)
+         check_sample_array(x)
+         check_label_array(y)
+         check_sample_label_size(x, y)
+
+         @classes = Numo::Int32[*y.to_a.uniq.sort]
+         n_classes = @classes.size
+         _n_samples, n_features = x.shape
+
+         if n_classes > 2
+           @weight_vec = Numo::DFloat.zeros(n_classes, n_features)
+           @prob_param = Numo::DFloat.zeros(n_classes, 2)
+           n_classes.times do |n|
+             bin_y = Numo::Int32.cast(y.eq(@classes[n])) * 2 - 1
+             @weight_vec[n, true] = binary_fit(x, bin_y)
+             @prob_param[n, true] = if @params[:probability]
+                                      Rumale::ProbabilisticOutput.fit_sigmoid(x.dot(@weight_vec[n, true].transpose), bin_y)
+                                    else
+                                      Numo::DFloat[1, 0]
+                                    end
+           end
+         else
+           negative_label = y.to_a.uniq.min
+           bin_y = Numo::Int32.cast(y.ne(negative_label)) * 2 - 1
+           @weight_vec = binary_fit(x, bin_y)
+           @prob_param = if @params[:probability]
+                           Rumale::ProbabilisticOutput.fit_sigmoid(x.dot(@weight_vec.transpose), bin_y)
+                         else
+                           Numo::DFloat[1, 0]
+                         end
+         end
+
+         self
+       end
+
+       # Calculate confidence scores for samples.
+       #
+       # @param x [Numo::DFloat] (shape: [n_testing_samples, n_training_samples])
+       #   The kernel matrix between testing samples and training samples to compute the scores.
+       # @return [Numo::DFloat] (shape: [n_testing_samples, n_classes]) Confidence score per sample.
+       def decision_function(x)
+         check_sample_array(x)
+
+         x.dot(@weight_vec.transpose)
+       end
+
+       # Predict class labels for samples.
+       #
+       # @param x [Numo::DFloat] (shape: [n_testing_samples, n_training_samples])
+       #   The kernel matrix between testing samples and training samples to predict the labels.
+       # @return [Numo::Int32] (shape: [n_testing_samples]) Predicted class label per sample.
+       def predict(x)
+         check_sample_array(x)
+
+         return Numo::Int32.cast(decision_function(x).ge(0.0)) * 2 - 1 if @classes.size <= 2
+
+         n_samples, = x.shape
+         decision_values = decision_function(x)
+         Numo::Int32.asarray(Array.new(n_samples) { |n| @classes[decision_values[n, true].max_index] })
+       end
+
+       # Predict probability for samples.
+       #
+       # @param x [Numo::DFloat] (shape: [n_testing_samples, n_training_samples])
+       #   The kernel matrix between testing samples and training samples to predict the labels.
+       # @return [Numo::DFloat] (shape: [n_samples, n_classes]) Predicted probability of each class per sample.
+       def predict_proba(x)
+         check_sample_array(x)
+
+         if @classes.size > 2
+           probs = 1.0 / (Numo::NMath.exp(@prob_param[true, 0] * decision_function(x) + @prob_param[true, 1]) + 1.0)
+           return (probs.transpose / probs.sum(axis: 1)).transpose
+         end
+
+         n_samples, = x.shape
+         probs = Numo::DFloat.zeros(n_samples, 2)
+         probs[true, 1] = 1.0 / (Numo::NMath.exp(@prob_param[0] * decision_function(x) + @prob_param[1]) + 1.0)
+         probs[true, 0] = 1.0 - probs[true, 1]
+         probs
+       end
+
+       # Dump marshal data.
+       # @return [Hash] The marshal data about KernelSVC.
+       def marshal_dump
+         { params: @params,
+           weight_vec: @weight_vec,
+           prob_param: @prob_param,
+           classes: @classes,
+           rng: @rng }
+       end
+
+       # Load marshal data.
+       # @return [nil]
+       def marshal_load(obj)
+         @params = obj[:params]
+         @weight_vec = obj[:weight_vec]
+         @prob_param = obj[:prob_param]
+         @classes = obj[:classes]
+         @rng = obj[:rng]
+         nil
+       end
+
+       private
+
+       def binary_fit(x, bin_y)
+         # Initialize some variables.
+         n_training_samples = x.shape[0]
+         rand_ids = []
+         weight_vec = Numo::DFloat.zeros(n_training_samples)
+         # Start optimization.
+         @params[:max_iter].times do |t|
+           # Random sampling.
+           rand_ids = [*0...n_training_samples].shuffle(random: @rng) if rand_ids.empty?
+           target_id = rand_ids.shift
+           # Update the weight vector.
+           func = (weight_vec * bin_y).dot(x[target_id, true].transpose).to_f
+           func *= bin_y[target_id] / (@params[:reg_param] * (t + 1))
+           weight_vec[target_id] += 1.0 if func < 1.0
+         end
+         weight_vec * bin_y
+       end
+     end
+   end
+ end
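
Since KernelSVC consumes precomputed kernel matrices rather than raw samples, training and prediction pair naturally with Rumale::PairwiseMetric, following the @example in the class documentation above. A self-contained sketch; the synthetic data, sizes, and seed are illustrative:

    require 'numo/narray'
    require 'rumale'

    # Synthetic two-class data with +/-1 labels derived from the first feature.
    samples = Numo::DFloat.new(60, 2).rand - 0.5
    labels  = Numo::Int32.cast(samples[true, 0].gt(0.0)) * 2 - 1

    kernel_mat = Rumale::PairwiseMetric.rbf_kernel(samples)
    estimator = Rumale::KernelMachine::KernelSVC.new(reg_param: 1.0, max_iter: 1000, random_seed: 1)
    estimator.fit(kernel_mat, labels)

    # Prediction needs the kernel between new samples and the training samples.
    new_samples = Numo::DFloat.new(5, 2).rand - 0.5
    new_kernel_mat = Rumale::PairwiseMetric.rbf_kernel(new_samples, samples)
    p estimator.predict(new_kernel_mat)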
data/lib/rumale/linear_model/base_linear_model.rb
@@ -0,0 +1,89 @@
+ # frozen_string_literal: true
+
+ require 'rumale/base/base_estimator'
+ require 'rumale/optimizer/nadam'
+
+ module Rumale
+   module LinearModel
+     # BaseLinearModel is an abstract class for the implementation of linear estimators
+     # with mini-batch stochastic gradient descent optimization.
+     # This class is used internally.
+     class BaseLinearModel
+       include Base::BaseEstimator
+
+       # Initialize a linear estimator.
+       #
+       # @param reg_param [Float] The regularization parameter.
+       # @param fit_bias [Boolean] The flag indicating whether to fit the bias term.
+       # @param bias_scale [Float] The scale of the bias term.
+       # @param max_iter [Integer] The maximum number of iterations.
+       # @param batch_size [Integer] The size of the mini batches.
+       # @param optimizer [Optimizer] The optimizer to calculate adaptive learning rate.
+       #   If nil is given, Nadam is used.
+       # @param random_seed [Integer] The seed value used to initialize the random generator.
+       def initialize(reg_param: 1.0, fit_bias: false, bias_scale: 1.0,
+                      max_iter: 1000, batch_size: 10, optimizer: nil, random_seed: nil)
+         @params = {}
+         @params[:reg_param] = reg_param
+         @params[:fit_bias] = fit_bias
+         @params[:bias_scale] = bias_scale
+         @params[:max_iter] = max_iter
+         @params[:batch_size] = batch_size
+         @params[:optimizer] = optimizer
+         @params[:optimizer] ||= Optimizer::Nadam.new
+         @params[:random_seed] = random_seed
+         @params[:random_seed] ||= srand
+         @weight_vec = nil
+         @bias_term = nil
+         @rng = Random.new(@params[:random_seed])
+       end
+
+       private
+
+       def partial_fit(x, y)
+         # Expand feature vectors for bias term.
+         samples = @params[:fit_bias] ? expand_feature(x) : x
+         # Initialize some variables.
+         n_samples, n_features = samples.shape
+         rand_ids = [*0...n_samples].shuffle(random: @rng)
+         weight = Numo::DFloat.zeros(n_features)
+         optimizer = @params[:optimizer].dup
+         # Optimization.
+         @params[:max_iter].times do |_t|
+           # Random sampling.
+           subset_ids = rand_ids.shift(@params[:batch_size])
+           rand_ids.concat(subset_ids)
+           sub_samples = samples[subset_ids, true]
+           sub_targets = y[subset_ids]
+           # Update weight.
+           loss_gradient = calc_loss_gradient(sub_samples, sub_targets, weight)
+           next if loss_gradient.ne(0.0).count.zero?
+           weight = calc_new_weight(optimizer, sub_samples, weight, loss_gradient)
+         end
+         split_weight(weight)
+       end
+
+       def calc_loss_gradient(_x, _y, _weight)
+         raise NotImplementedError, "#{__method__} has to be implemented in #{self.class}."
+       end
+
+       def calc_new_weight(optimizer, x, weight, loss_gradient)
+         weight_gradient = x.transpose.dot(loss_gradient) / @params[:batch_size] + @params[:reg_param] * weight
+         optimizer.call(weight, weight_gradient)
+       end
+
+       def expand_feature(x)
+         n_samples = x.shape[0]
+         Numo::NArray.hstack([x, Numo::DFloat.ones([n_samples, 1]) * @params[:bias_scale]])
+       end
+
+       def split_weight(weight)
+         if @params[:fit_bias]
+           [weight[0...-1].dup, weight[-1]]
+         else
+           [weight, 0.0]
+         end
+       end
+     end
+   end
+ end
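
BaseLinearModel leaves only calc_loss_gradient to its subclasses; partial_fit handles bias expansion, mini-batch sampling, regularization, and the optimizer updates. A hypothetical subclass (not part of the gem) sketching that contract with a squared-error loss, whose gradient with respect to the decision values is f(x) - y:

    require 'numo/narray'
    require 'rumale'

    # Hypothetical least-squares regressor built on the abstract class above.
    class ToyLeastSquares < Rumale::LinearModel::BaseLinearModel
      def fit(x, y)
        # partial_fit returns [weight vector, bias term].
        @weight_vec, @bias_term = partial_fit(x, y)
        self
      end

      def predict(x)
        x.dot(@weight_vec) + @bias_term
      end

      private

      # Gradient of the squared-error loss: x.dot(weight) - y.
      def calc_loss_gradient(x, y, weight)
        x.dot(weight) - y
      end
    end

    # Illustrative targets: y = 2x, so predict(0.5) should approach 1.0.
    x = Numo::DFloat.new(100, 1).rand
    y = 2.0 * x[true, 0]
    model = ToyLeastSquares.new(reg_param: 0.0001, max_iter: 500, random_seed: 1)
    p model.fit(x, y).predict(Numo::DFloat[[0.5]]) # roughly 1.0 if training converged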