rumale-linear_model 0.25.0 → 0.27.0
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 
-  data.tar.gz: 
+  metadata.gz: da770eaccf1809e9507caddea80332adf08759ae8af86179e418f7f787447153
+  data.tar.gz: '08b4a248944649b0b1216207fdeca4dd578d75a4280fc22a98ecca717b37f3b9'
 SHA512:
-  metadata.gz: 
-  data.tar.gz: 
+  metadata.gz: 8f160cac5b9cd6abad11ad4f6d095629f22075fa1e27642892c955cb7682fb114eeee330a54e835e48b5f2b40a32e7841f2b909011bda1d67033a34308ad43cf
+  data.tar.gz: 2b6d68a3f573d3d1394278f2a4ba5bf1a820be2eb1866967ec0e27e2b35be660f1b3367c765ecbd42e8aa41b53a1851b87163ffc81695a4b28eed6b483a9492f
@@ -114,6 +114,32 @@ module Rumale
         self
       end
 
+      # Perform 1-epoch of stochastic gradient descent optimization with given training data.
+      #
+      # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+      # @param y [Numo::Int32] (shape: [n_samples]) The binary labels to be used for fitting the model.
+      # @return [SGDClassifier] The learned classifier itself.
+      def partial_fit(x, y)
+        x = Rumale::Validation.check_convert_sample_array(x)
+        y = Rumale::Validation.check_convert_label_array(y)
+        Rumale::Validation.check_sample_size(x, y)
+
+        n_features = x.shape[1]
+        n_features += 1 if fit_bias?
+        need_init = @weight.nil? || @weight.shape[0] != n_features
+
+        @classes = Numo::Int32[*y.to_a.uniq.sort] if need_init
+        negative_label = @classes[0]
+        bin_y = Numo::Int32.cast(y.ne(negative_label)) * 2 - 1
+
+        @weight_vec, @bias_term = partial_fit_(x, bin_y, max_iter: 1, init: need_init)
+        if @loss_func.name == Rumale::LinearModel::Loss::HingeLoss::NAME
+          @prob_param = Rumale::ProbabilisticOutput.fit_sigmoid(x.dot(@weight_vec.transpose) + @bias_term, bin_y)
+        end
+
+        self
+      end
+
       # Calculate confidence scores for samples.
       #
       # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to compute the scores.
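This hunk gives SGDClassifier a public partial_fit: each call runs exactly one SGD epoch, and the weights and classes are (re)initialized only when the incoming feature count no longer matches. A minimal usage sketch; the require path, the 'hinge' loss name, and the synthetic data are assumptions for illustration, not taken from this diff:

require 'numo/narray'
require 'rumale/linear_model/sgd_classifier'

estimator = Rumale::LinearModel::SGDClassifier.new(loss: 'hinge', random_seed: 1)

# Stream mini-batches; each partial_fit call performs a single epoch.
5.times do
  x = Numo::DFloat.new(100, 4).rand
  y = Numo::Int32.cast(x.sum(axis: 1) > 2.0) # synthetic 0/1 labels
  estimator.partial_fit(x, y)
end

Because @classes is captured only when initialization is needed, the batch that triggers initialization should contain every class.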
@@ -156,14 +182,14 @@ module Rumale
           models = if enable_parallel?
                      parallel_map(n_classes) do |n|
                        bin_y = Numo::Int32.cast(y.eq(@classes[n])) * 2 - 1
-                       w, b = partial_fit(x, bin_y)
+                       w, b = partial_fit_(x, bin_y)
                        prb = Rumale::ProbabilisticOutput.fit_sigmoid(x.dot(w.transpose) + b, bin_y)
                        [w, b, prb]
                      end
                    else
                      Array.new(n_classes) do |n|
                        bin_y = Numo::Int32.cast(y.eq(@classes[n])) * 2 - 1
-                       w, b = partial_fit(x, bin_y)
+                       w, b = partial_fit_(x, bin_y)
                        prb = Rumale::ProbabilisticOutput.fit_sigmoid(x.dot(w.transpose) + b, bin_y)
                        [w, b, prb]
                      end
@@ -173,7 +199,7 @@ module Rumale
       else
         negative_label = @classes[0]
         bin_y = Numo::Int32.cast(y.ne(negative_label)) * 2 - 1
-        @weight_vec, @bias_term = partial_fit(x, bin_y)
+        @weight_vec, @bias_term = partial_fit_(x, bin_y)
         @prob_param = Rumale::ProbabilisticOutput.fit_sigmoid(x.dot(@weight_vec.transpose) + @bias_term, bin_y)
       end
     end
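The @prob_param refitted here is the coefficient pair produced by Rumale::ProbabilisticOutput.fit_sigmoid, i.e. Platt scaling, which lets the hinge-loss model report probabilities. A sketch of how such a pair is consumed; the formula is standard Platt scaling, and the function below is a hypothetical helper, not Rumale API:

# Platt scaling: map a decision value df to a probability using fitted (a, b).
def platt_probability(df, a, b)
  1.0 / (1.0 + Math.exp(a * df + b))
end

puts platt_probability(1.5, -2.0, 0.0) # ~0.95 for a confidently positive score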
@@ -187,19 +213,19 @@ module Rumale
         if enable_parallel?
           models = parallel_map(n_classes) do |n|
             bin_y = Numo::Int32.cast(y.eq(@classes[n])) * 2 - 1
-            partial_fit(x, bin_y)
+            partial_fit_(x, bin_y)
           end
           n_classes.times { |n| @weight_vec[n, true], @bias_term[n] = models[n] }
         else
           n_classes.times do |n|
             bin_y = Numo::Int32.cast(y.eq(@classes[n])) * 2 - 1
-            @weight_vec[n, true], @bias_term[n] = partial_fit(x, bin_y)
+            @weight_vec[n, true], @bias_term[n] = partial_fit_(x, bin_y)
           end
         end
       else
         negative_label = @classes[0]
         bin_y = Numo::Int32.cast(y.ne(negative_label)) * 2 - 1
-        @weight_vec, @bias_term = partial_fit(x, bin_y)
+        @weight_vec, @bias_term = partial_fit_(x, bin_y)
       end
     end
 
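Across all three fit paths the change is a mechanical rename: the private trainer becomes partial_fit_, freeing the partial_fit name for the new public method, while the one-vs-rest label encoding stays the same. For reference, Numo::Int32.cast(y.eq(@classes[n])) * 2 - 1 maps members of class n to +1 and everything else to -1, the signed targets the loss functions expect:

require 'numo/narray'

y = Numo::Int32[0, 1, 2, 1]
classes = Numo::Int32[0, 1, 2]

bin_y = Numo::Int32.cast(y.eq(classes[1])) * 2 - 1
p bin_y.to_a # => [-1, 1, -1, 1]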
@@ -215,45 +215,49 @@ module Rumale
 
       private_constant :L2_PENALTY, :L1_PENALTY, :ELASTICNET_PENALTY
 
-      def partial_fit(x, y)
+      def init_vars(n_features)
+        @sub_rng = @rng.dup
+        @weight = Numo::DFloat.zeros(n_features)
+        @optimizer = ::Rumale::LinearModel::Optimizer::SGD.new(
+          learning_rate: @params[:learning_rate], momentum: @params[:momentum], decay: @params[:decay]
+        )
+        @l2_penalty = ::Rumale::LinearModel::Penalty::L2Penalty.new(reg_param: l2_reg_param)
+        @l1_penalty = ::Rumale::LinearModel::Penalty::L1Penalty.new(reg_param: l1_reg_param)
+      end
+
+      def partial_fit_(x, y, max_iter: @params[:max_iter], init: true)
         class_name = self.class.to_s.split('::').last if @params[:verbose]
         narr = x.class
         # Expand feature vectors for bias term.
         x = expand_feature(x) if fit_bias?
         # Initialize some variables.
-        sub_rng = @rng.dup
         n_samples, n_features = x.shape
-        weight = Numo::DFloat.zeros(n_features)
-        optimizer = ::Rumale::LinearModel::Optimizer::SGD.new(
-          learning_rate: @params[:learning_rate], momentum: @params[:momentum], decay: @params[:decay]
-        )
-        l2_penalty = ::Rumale::LinearModel::Penalty::L2Penalty.new(reg_param: l2_reg_param) if apply_l2_penalty?
-        l1_penalty = ::Rumale::LinearModel::Penalty::L1Penalty.new(reg_param: l1_reg_param) if apply_l1_penalty?
+        init_vars(n_features) if init
         # Optimization.
-        @params[:max_iter].times do |t|
+        max_iter.times do |t|
           sample_ids = Array(0...n_samples)
-          sample_ids.shuffle!(random: sub_rng)
+          sample_ids.shuffle!(random: @sub_rng)
           until (subset_ids = sample_ids.shift(@params[:batch_size])).empty?
             # sampling
             sub_x = x[subset_ids, true]
             sub_y = y[subset_ids]
             # calculate gradient
-            dloss = @loss_func.dloss(sub_x.dot(weight), sub_y)
+            dloss = @loss_func.dloss(sub_x.dot(@weight), sub_y)
             dloss = narr.minimum(1e12, narr.maximum(-1e12, dloss))
             gradient = dloss.dot(sub_x)
             # update weight
-            lr = optimizer.current_learning_rate
-            weight = optimizer.call(weight, gradient)
+            lr = @optimizer.current_learning_rate
+            @weight = @optimizer.call(@weight, gradient)
             # l2 regularization
-            weight = l2_penalty.call(weight, lr) if apply_l2_penalty?
+            @weight = @l2_penalty.call(@weight, lr) if apply_l2_penalty?
             # l1 regularization
-            weight = l1_penalty.call(weight, lr) if apply_l1_penalty?
+            @weight = @l1_penalty.call(@weight, lr) if apply_l1_penalty?
           end
-          loss = @loss_func.loss(x.dot(weight), y)
+          loss = @loss_func.loss(x.dot(@weight), y)
           puts "[#{class_name}] Loss after #{t + 1} epochs: #{loss}" if @params[:verbose]
           break if loss < @params[:tol]
         end
-        split_weight(weight)
+        split_weight(@weight)
       end
 
       def apply_l2_penalty?
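The substance of this hunk is that optimizer state no longer lives in locals: init_vars hoists @sub_rng, @weight, @optimizer, and the penalty objects into instance variables, and partial_fit_ gains init: and max_iter: keywords, so a caller can run one epoch at a time while the weight vector, momentum, and learning-rate decay all carry over between calls. A stripped-down sketch of the same init-once-then-resume pattern; TinySGD and everything in it is invented for illustration and is not Rumale code:

# Minimal stateful SGD: @weight persists across partial_fit_ calls.
class TinySGD
  def initialize(lr: 0.01)
    @lr = lr
  end

  # init: true resets the state; init: false resumes from the previous call.
  def partial_fit_(x, y, max_iter: 10, init: true)
    init_vars(x.first.size) if init
    max_iter.times do
      x.zip(y).each do |xi, yi|
        pred = xi.zip(@weight).sum { |v, w| v * w }
        # squared-error gradient: (pred - y) * x
        @weight = @weight.each_with_index.map { |w, j| w - @lr * (pred - yi) * xi[j] }
      end
    end
    @weight
  end

  private

  def init_vars(n_features)
    @weight = Array.new(n_features, 0.0)
  end
end

model = TinySGD.new
model.partial_fit_([[1.0, 2.0], [2.0, 0.5]], [3.0, 2.0], max_iter: 1)
model.partial_fit_([[1.0, 2.0], [2.0, 0.5]], [3.0, 2.0], max_iter: 1, init: false) # resumes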
@@ -112,18 +112,37 @@ module Rumale
           @weight_vec = Numo::DFloat.zeros(n_outputs, n_features)
           @bias_term = Numo::DFloat.zeros(n_outputs)
           if enable_parallel?
-            models = parallel_map(n_outputs) { |n| partial_fit(x, y[true, n]) }
+            models = parallel_map(n_outputs) { |n| partial_fit_(x, y[true, n]) }
             n_outputs.times { |n| @weight_vec[n, true], @bias_term[n] = models[n] }
           else
-            n_outputs.times { |n| @weight_vec[n, true], @bias_term[n] = partial_fit(x, y[true, n]) }
+            n_outputs.times { |n| @weight_vec[n, true], @bias_term[n] = partial_fit_(x, y[true, n]) }
           end
         else
-          @weight_vec, @bias_term = partial_fit(x, y)
+          @weight_vec, @bias_term = partial_fit_(x, y)
         end
 
         self
       end
 
+      # Perform 1-epoch of stochastic gradient descent optimization with given training data.
+      #
+      # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
+      # @param y [Numo::DFloat] (shape: [n_samples]) The single target variables to be used for fitting the model.
+      # @return [SGDRegressor] The learned regressor itself.
+      def partial_fit(x, y)
+        x = Rumale::Validation.check_convert_sample_array(x)
+        y = Rumale::Validation.check_convert_target_value_array(y)
+        Rumale::Validation.check_sample_size(x, y)
+
+        n_features = x.shape[1]
+        n_features += 1 if fit_bias?
+        need_init = @weight.nil? || @weight.shape[0] != n_features
+
+        @weight_vec, @bias_term = partial_fit_(x, y, max_iter: 1, init: need_init)
+
+        self
+      end
+
       # Predict values for samples.
       #
       # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the values.
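SGDRegressor gets the same treatment: fit delegates to the renamed partial_fit_, and the new public partial_fit runs one epoch per call, reinitializing only on a feature-count change. A usage sketch under the same caveats as the classifier example (require path, loss name, and data are assumptions):

require 'numo/narray'
require 'rumale/linear_model/sgd_regressor'

estimator = Rumale::LinearModel::SGDRegressor.new(loss: 'squared_error', random_seed: 1)

# One SGD epoch per call; convenient when data arrives in chunks.
10.times do
  x = Numo::DFloat.new(200, 3).rand
  y = x.dot(Numo::DFloat[2.0, -1.0, 0.5]) # synthetic linear targets
  estimator.partial_fit(x, y)
end

p estimator.weight_vec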
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: rumale-linear_model
 version: !ruby/object:Gem::Version
-  version: 0.25.0
+  version: 0.27.0
 platform: ruby
 authors:
 - yoshoku
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2023-
+date: 2023-08-26 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: lbfgsb
@@ -44,14 +44,14 @@ dependencies:
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: 0.25.0
+        version: 0.27.0
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: 0.25.0
+        version: 0.27.0
 description: |
   Rumale::LinearModel provides linear model algorithms,
   such as Logistic Regression, Support Vector Machine, Lasso, and Ridge Regression