svmkit 0.7.3 → 0.8.1
This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
- checksums.yaml +4 -4
- data/.gitignore +0 -9
- data/.rspec +1 -0
- data/.travis.yml +4 -12
- data/LICENSE.txt +1 -1
- data/README.md +11 -13
- data/lib/svmkit.rb +3 -66
- data/svmkit.gemspec +12 -7
- metadata +16 -81
- data/.coveralls.yml +0 -1
- data/.rubocop.yml +0 -47
- data/.rubocop_todo.yml +0 -58
- data/HISTORY.md +0 -168
- data/lib/svmkit/base/base_estimator.rb +0 -13
- data/lib/svmkit/base/classifier.rb +0 -34
- data/lib/svmkit/base/cluster_analyzer.rb +0 -29
- data/lib/svmkit/base/evaluator.rb +0 -13
- data/lib/svmkit/base/regressor.rb +0 -34
- data/lib/svmkit/base/splitter.rb +0 -17
- data/lib/svmkit/base/transformer.rb +0 -18
- data/lib/svmkit/clustering/dbscan.rb +0 -127
- data/lib/svmkit/clustering/k_means.rb +0 -140
- data/lib/svmkit/dataset.rb +0 -109
- data/lib/svmkit/decomposition/nmf.rb +0 -147
- data/lib/svmkit/decomposition/pca.rb +0 -150
- data/lib/svmkit/ensemble/ada_boost_classifier.rb +0 -198
- data/lib/svmkit/ensemble/ada_boost_regressor.rb +0 -180
- data/lib/svmkit/ensemble/random_forest_classifier.rb +0 -182
- data/lib/svmkit/ensemble/random_forest_regressor.rb +0 -143
- data/lib/svmkit/evaluation_measure/accuracy.rb +0 -30
- data/lib/svmkit/evaluation_measure/f_score.rb +0 -51
- data/lib/svmkit/evaluation_measure/log_loss.rb +0 -46
- data/lib/svmkit/evaluation_measure/mean_absolute_error.rb +0 -30
- data/lib/svmkit/evaluation_measure/mean_squared_error.rb +0 -30
- data/lib/svmkit/evaluation_measure/normalized_mutual_information.rb +0 -63
- data/lib/svmkit/evaluation_measure/precision.rb +0 -51
- data/lib/svmkit/evaluation_measure/precision_recall.rb +0 -91
- data/lib/svmkit/evaluation_measure/purity.rb +0 -41
- data/lib/svmkit/evaluation_measure/r2_score.rb +0 -44
- data/lib/svmkit/evaluation_measure/recall.rb +0 -51
- data/lib/svmkit/kernel_approximation/rbf.rb +0 -136
- data/lib/svmkit/kernel_machine/kernel_svc.rb +0 -194
- data/lib/svmkit/linear_model/lasso.rb +0 -138
- data/lib/svmkit/linear_model/linear_regression.rb +0 -112
- data/lib/svmkit/linear_model/logistic_regression.rb +0 -161
- data/lib/svmkit/linear_model/ridge.rb +0 -112
- data/lib/svmkit/linear_model/sgd_linear_estimator.rb +0 -89
- data/lib/svmkit/linear_model/svc.rb +0 -184
- data/lib/svmkit/linear_model/svr.rb +0 -123
- data/lib/svmkit/model_selection/cross_validation.rb +0 -121
- data/lib/svmkit/model_selection/grid_search_cv.rb +0 -247
- data/lib/svmkit/model_selection/k_fold.rb +0 -77
- data/lib/svmkit/model_selection/stratified_k_fold.rb +0 -95
- data/lib/svmkit/multiclass/one_vs_rest_classifier.rb +0 -101
- data/lib/svmkit/naive_bayes/naive_bayes.rb +0 -316
- data/lib/svmkit/nearest_neighbors/k_neighbors_classifier.rb +0 -112
- data/lib/svmkit/nearest_neighbors/k_neighbors_regressor.rb +0 -94
- data/lib/svmkit/optimizer/nadam.rb +0 -90
- data/lib/svmkit/optimizer/rmsprop.rb +0 -69
- data/lib/svmkit/optimizer/sgd.rb +0 -65
- data/lib/svmkit/optimizer/yellow_fin.rb +0 -144
- data/lib/svmkit/pairwise_metric.rb +0 -91
- data/lib/svmkit/pipeline/pipeline.rb +0 -197
- data/lib/svmkit/polynomial_model/factorization_machine_classifier.rb +0 -262
- data/lib/svmkit/polynomial_model/factorization_machine_regressor.rb +0 -194
- data/lib/svmkit/preprocessing/l2_normalizer.rb +0 -63
- data/lib/svmkit/preprocessing/label_encoder.rb +0 -95
- data/lib/svmkit/preprocessing/min_max_scaler.rb +0 -93
- data/lib/svmkit/preprocessing/one_hot_encoder.rb +0 -99
- data/lib/svmkit/preprocessing/standard_scaler.rb +0 -87
- data/lib/svmkit/probabilistic_output.rb +0 -112
- data/lib/svmkit/tree/decision_tree_classifier.rb +0 -276
- data/lib/svmkit/tree/decision_tree_regressor.rb +0 -251
- data/lib/svmkit/tree/node.rb +0 -70
- data/lib/svmkit/utils.rb +0 -22
- data/lib/svmkit/validation.rb +0 -79
- data/lib/svmkit/values.rb +0 -13
- data/lib/svmkit/version.rb +0 -7
data/lib/svmkit/pipeline/pipeline.rb
@@ -1,197 +0,0 @@
-# frozen_string_literal: true
-
-require 'svmkit/validation'
-require 'svmkit/base/base_estimator'
-
-module SVMKit
-  # This module implements utilities for a pipeline that consists of a chain of transformers and estimators.
-  module Pipeline
-    # Pipeline is a class that implements the function to perform the transformers and estimators sequentially.
-    #
-    # @example
-    #   rbf = SVMKit::KernelApproximation::RBF.new(gamma: 1.0, n_components: 128, random_seed: 1)
-    #   svc = SVMKit::LinearModel::SVC.new(reg_param: 1.0, fit_bias: true, max_iter: 5000, random_seed: 1)
-    #   pipeline = SVMKit::Pipeline::Pipeline.new(steps: { trs: rbf, est: svc })
-    #   pipeline.fit(training_samples, training_labels)
-    #   results = pipeline.predict(testing_samples)
-    #
-    class Pipeline
-      include Base::BaseEstimator
-      include Validation
-
-      # Return the steps.
-      # @return [Hash]
-      attr_reader :steps
-
-      # Create a new pipeline.
-      #
-      # @param steps [Hash] List of transformers and estimators. The order of transforms follows the insertion order of hash keys.
-      #   The last entry is considered an estimator.
-      def initialize(steps:)
-        check_params_type(Hash, steps: steps)
-        validate_steps(steps)
-        @params = {}
-        @steps = steps
-      end
-
-      # Fit the model with given training data.
-      #
-      # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be transformed and used for fitting the model.
-      # @param y [Numo::NArray] (shape: [n_samples, n_outputs]) The target values or labels to be used for fitting the model.
-      # @return [Pipeline] The learned pipeline itself.
-      def fit(x, y)
-        check_sample_array(x)
-        trans_x = apply_transforms(x, y, fit: true)
-        last_estimator.fit(trans_x, y) unless last_estimator.nil?
-        self
-      end
-
-      # Call the fit_predict method of the last estimator after applying all transforms.
-      #
-      # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be transformed and used for fitting the model.
-      # @param y [Numo::NArray] (shape: [n_samples, n_outputs], default: nil) The target values or labels to be used for fitting the model.
-      # @return [Numo::NArray] The predicted results by the last estimator.
-      def fit_predict(x, y = nil)
-        check_sample_array(x)
-        trans_x = apply_transforms(x, y, fit: true)
-        last_estimator.fit_predict(trans_x)
-      end
-
-      # Call the fit_transform method of the last estimator after applying all transforms.
-      #
-      # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be transformed and used for fitting the model.
-      # @param y [Numo::NArray] (shape: [n_samples, n_outputs], default: nil) The target values or labels to be used for fitting the model.
-      # @return [Numo::NArray] The transformed data by the last estimator.
-      def fit_transform(x, y = nil)
-        check_sample_array(x)
-        trans_x = apply_transforms(x, y, fit: true)
-        last_estimator.fit_transform(trans_x, y)
-      end
-
-      # Call the decision_function method of the last estimator after applying all transforms.
-      #
-      # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to compute the scores.
-      # @return [Numo::DFloat] (shape: [n_samples]) Confidence score per sample.
-      def decision_function(x)
-        check_sample_array(x)
-        trans_x = apply_transforms(x)
-        last_estimator.decision_function(trans_x)
-      end
-
-      # Call the predict method of the last estimator after applying all transforms.
-      #
-      # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to obtain prediction results.
-      # @return [Numo::NArray] The predicted results by the last estimator.
-      def predict(x)
-        check_sample_array(x)
-        trans_x = apply_transforms(x)
-        last_estimator.predict(trans_x)
-      end
-
-      # Call the predict_log_proba method of the last estimator after applying all transforms.
-      #
-      # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the log-probabilities.
-      # @return [Numo::DFloat] (shape: [n_samples, n_classes]) Predicted log-probability of each class per sample.
-      def predict_log_proba(x)
-        check_sample_array(x)
-        trans_x = apply_transforms(x)
-        last_estimator.predict_log_proba(trans_x)
-      end
-
-      # Call the predict_proba method of the last estimator after applying all transforms.
-      #
-      # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the probabilities.
-      # @return [Numo::DFloat] (shape: [n_samples, n_classes]) Predicted probability of each class per sample.
-      def predict_proba(x)
-        check_sample_array(x)
-        trans_x = apply_transforms(x)
-        last_estimator.predict_proba(trans_x)
-      end
-
-      # Call the transform method of the last estimator after applying all transforms.
-      #
-      # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to be transformed.
-      # @return [Numo::DFloat] (shape: [n_samples, n_components]) The transformed samples.
-      def transform(x)
-        check_sample_array(x)
-        trans_x = apply_transforms(x)
-        last_estimator.nil? ? trans_x : last_estimator.transform(trans_x)
-      end
-
-      # Call the inverse_transform method of each transformer in reverse order.
-      #
-      # @param z [Numo::DFloat] (shape: [n_samples, n_components]) The transformed samples to be restored into the original space.
-      # @return [Numo::DFloat] (shape: [n_samples, n_features]) The restored samples.
-      def inverse_transform(z)
-        check_sample_array(z)
-        itrans_z = z
-        @steps.keys.reverse_each do |name|
-          transformer = @steps[name]
-          next if transformer.nil?
-          itrans_z = transformer.inverse_transform(itrans_z)
-        end
-        itrans_z
-      end
-
-      # Call the score method of the last estimator after applying all transforms.
-      #
-      # @param x [Numo::DFloat] (shape: [n_samples, n_features]) Testing data.
-      # @param y [Numo::NArray] (shape: [n_samples, n_outputs]) True target values or labels for testing data.
-      # @return [Float] The score of the last estimator.
-      def score(x, y)
-        check_sample_array(x)
-        trans_x = apply_transforms(x)
-        last_estimator.score(trans_x, y)
-      end
-
-      # Dump marshal data.
-      # @return [Hash] The marshal data about Pipeline.
-      def marshal_dump
-        { params: @params,
-          steps: @steps }
-      end
-
-      # Load marshal data.
-      # @return [nil]
-      def marshal_load(obj)
-        @params = obj[:params]
-        @steps = obj[:steps]
-        nil
-      end
-
-      private
-
-      def validate_steps(steps)
-        steps.keys[0...-1].each do |name|
-          transformer = steps[name]
-          next if transformer.nil? || %i[fit transform].all? { |m| transformer.class.method_defined?(m) }
-          raise TypeError,
-                'Class of intermediate step in pipeline should implement fit and transform methods: ' \
-                "#{name} => #{transformer.class}"
-        end
-
-        estimator = steps[steps.keys.last]
-        unless estimator.nil? || estimator.class.method_defined?(:fit)
-          raise TypeError,
-                'Class of last step in pipeline should implement a fit method: ' \
-                "#{steps.keys.last} => #{estimator.class}"
-        end
-      end
-
-      def apply_transforms(x, y = nil, fit: false)
-        trans_x = x
-        @steps.keys[0...-1].each do |name|
-          transformer = @steps[name]
-          next if transformer.nil?
-          transformer.fit(trans_x, y) if fit
-          trans_x = transformer.transform(trans_x)
-        end
-        trans_x
-      end
-
-      def last_estimator
-        @steps[@steps.keys.last]
-      end
-    end
-  end
-end
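For context, here is a minimal usage sketch of the Pipeline class removed above. It assumes the svmkit 0.7.3 gem (and its numo-narray dependency) is installed; the toy data and the step names trs/est are illustrative, not part of the diff.

    require 'svmkit'

    # 100 random 2-D samples; label +1 when the first feature is positive, -1 otherwise.
    samples = Numo::DFloat.new(100, 2).rand - 0.5
    labels  = Numo::Int32.cast(samples[true, 0].gt(0.0)) * 2 - 1

    # Transformer and estimator taken from the class's own @example.
    rbf = SVMKit::KernelApproximation::RBF.new(gamma: 1.0, n_components: 64, random_seed: 1)
    svc = SVMKit::LinearModel::SVC.new(reg_param: 1.0, fit_bias: true, max_iter: 1000, random_seed: 1)

    # Hash insertion order fixes the execution order; the last entry acts as the estimator.
    pipeline = SVMKit::Pipeline::Pipeline.new(steps: { trs: rbf, est: svc })
    pipeline.fit(samples, labels)
    puts pipeline.score(samples, labels) # mean accuracy on the training data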
data/lib/svmkit/polynomial_model/factorization_machine_classifier.rb
@@ -1,262 +0,0 @@
-# frozen_string_literal: true
-
-require 'svmkit/validation'
-require 'svmkit/base/base_estimator'
-require 'svmkit/base/classifier'
-require 'svmkit/optimizer/nadam'
-
-module SVMKit
-  # This module consists of the classes that implement polynomial models.
-  module PolynomialModel
-    # FactorizationMachineClassifier is a class that implements Factorization Machine
-    # with stochastic gradient descent (SGD) optimization.
-    # For multiclass classification problems, it uses the one-vs-the-rest strategy.
-    #
-    # @example
-    #   estimator =
-    #     SVMKit::PolynomialModel::FactorizationMachineClassifier.new(
-    #       n_factors: 10, loss: 'hinge', reg_param_linear: 0.001, reg_param_factor: 0.001,
-    #       max_iter: 5000, batch_size: 50, random_seed: 1)
-    #   estimator.fit(training_samples, training_labels)
-    #   results = estimator.predict(testing_samples)
-    #
-    # *Reference*
-    # - S. Rendle, "Factorization Machines with libFM," ACM TIST, vol. 3 (3), pp. 57:1--57:22, 2012.
-    # - S. Rendle, "Factorization Machines," Proc. ICDM'10, pp. 995--1000, 2010.
-    class FactorizationMachineClassifier
-      include Base::BaseEstimator
-      include Base::Classifier
-      include Validation
-
-      # Return the factor matrix for Factorization Machine.
-      # @return [Numo::DFloat] (shape: [n_classes, n_factors, n_features])
-      attr_reader :factor_mat
-
-      # Return the weight vector for Factorization Machine.
-      # @return [Numo::DFloat] (shape: [n_classes, n_features])
-      attr_reader :weight_vec
-
-      # Return the bias term for Factorization Machine.
-      # @return [Numo::DFloat] (shape: [n_classes])
-      attr_reader :bias_term
-
-      # Return the class labels.
-      # @return [Numo::Int32] (shape: [n_classes])
-      attr_reader :classes
-
-      # Return the random generator for random sampling.
-      # @return [Random]
-      attr_reader :rng
-
-      # Create a new classifier with Factorization Machine.
-      #
-      # @param n_factors [Integer] The number of latent factors.
-      # @param loss [String] The loss function ('hinge' or 'logistic').
-      # @param reg_param_linear [Float] The regularization parameter for the linear model.
-      # @param reg_param_factor [Float] The regularization parameter for the factor matrix.
-      # @param max_iter [Integer] The maximum number of iterations.
-      # @param batch_size [Integer] The size of the mini batches.
-      # @param optimizer [Optimizer] The optimizer to calculate adaptive learning rate.
-      #   If nil is given, Nadam is used.
-      # @param random_seed [Integer] The seed value used to initialize the random generator.
-      def initialize(n_factors: 2, loss: 'hinge', reg_param_linear: 1.0, reg_param_factor: 1.0,
-                     max_iter: 1000, batch_size: 10, optimizer: nil, random_seed: nil)
-        check_params_float(reg_param_linear: reg_param_linear, reg_param_factor: reg_param_factor)
-        check_params_integer(n_factors: n_factors, max_iter: max_iter, batch_size: batch_size)
-        check_params_string(loss: loss)
-        check_params_type_or_nil(Integer, random_seed: random_seed)
-        check_params_positive(n_factors: n_factors,
-                              reg_param_linear: reg_param_linear, reg_param_factor: reg_param_factor,
-                              max_iter: max_iter, batch_size: batch_size)
-        @params = {}
-        @params[:n_factors] = n_factors
-        @params[:loss] = loss
-        @params[:reg_param_linear] = reg_param_linear
-        @params[:reg_param_factor] = reg_param_factor
-        @params[:max_iter] = max_iter
-        @params[:batch_size] = batch_size
-        @params[:optimizer] = optimizer
-        @params[:optimizer] ||= Optimizer::Nadam.new
-        @params[:random_seed] = random_seed
-        @params[:random_seed] ||= srand
-        @factor_mat = nil
-        @weight_vec = nil
-        @bias_term = nil
-        @classes = nil
-        @rng = Random.new(@params[:random_seed])
-      end
-
-      # Fit the model with given training data.
-      #
-      # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The training data to be used for fitting the model.
-      # @param y [Numo::Int32] (shape: [n_samples]) The labels to be used for fitting the model.
-      # @return [FactorizationMachineClassifier] The learned classifier itself.
-      def fit(x, y)
-        check_sample_array(x)
-        check_label_array(y)
-        check_sample_label_size(x, y)
-
-        @classes = Numo::Int32[*y.to_a.uniq.sort]
-        n_classes = @classes.size
-        _n_samples, n_features = x.shape
-
-        if n_classes > 2
-          @factor_mat = Numo::DFloat.zeros(n_classes, @params[:n_factors], n_features)
-          @weight_vec = Numo::DFloat.zeros(n_classes, n_features)
-          @bias_term = Numo::DFloat.zeros(n_classes)
-          n_classes.times do |n|
-            bin_y = Numo::Int32.cast(y.eq(@classes[n])) * 2 - 1
-            @factor_mat[n, true, true], @weight_vec[n, true], @bias_term[n] = binary_fit(x, bin_y)
-          end
-        else
-          negative_label = y.to_a.uniq.min
-          bin_y = Numo::Int32.cast(y.ne(negative_label)) * 2 - 1
-          @factor_mat, @weight_vec, @bias_term = binary_fit(x, bin_y)
-        end
-
-        self
-      end
-
-      # Calculate confidence scores for samples.
-      #
-      # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to compute the scores.
-      # @return [Numo::DFloat] (shape: [n_samples] for binary, [n_samples, n_classes] for multiclass) Confidence score per sample.
-      def decision_function(x)
-        check_sample_array(x)
-        linear_term = @bias_term + x.dot(@weight_vec.transpose)
-        factor_term = if @classes.size <= 2
-                        0.5 * (@factor_mat.dot(x.transpose)**2 - (@factor_mat**2).dot(x.transpose**2)).sum(0)
-                      else
-                        0.5 * (@factor_mat.dot(x.transpose)**2 - (@factor_mat**2).dot(x.transpose**2)).sum(1).transpose
-                      end
-        linear_term + factor_term
-      end
-
-      # Predict class labels for samples.
-      #
-      # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the labels.
-      # @return [Numo::Int32] (shape: [n_samples]) Predicted class label per sample.
-      def predict(x)
-        check_sample_array(x)
-        return Numo::Int32.cast(decision_function(x).ge(0.0)) * 2 - 1 if @classes.size <= 2
-
-        n_samples, = x.shape
-        decision_values = decision_function(x)
-        Numo::Int32.asarray(Array.new(n_samples) { |n| @classes[decision_values[n, true].max_index] })
-      end
-
-      # Predict probability for samples.
-      #
-      # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the probabilities.
-      # @return [Numo::DFloat] (shape: [n_samples, n_classes]) Predicted probability of each class per sample.
-      def predict_proba(x)
-        check_sample_array(x)
-        proba = 1.0 / (Numo::NMath.exp(-decision_function(x)) + 1.0)
-        return (proba.transpose / proba.sum(axis: 1)).transpose if @classes.size > 2
-
-        n_samples, = x.shape
-        probs = Numo::DFloat.zeros(n_samples, 2)
-        probs[true, 1] = proba
-        probs[true, 0] = 1.0 - proba
-        probs
-      end
-
-      # Dump marshal data.
-      # @return [Hash] The marshal data about FactorizationMachineClassifier.
-      def marshal_dump
-        { params: @params,
-          factor_mat: @factor_mat,
-          weight_vec: @weight_vec,
-          bias_term: @bias_term,
-          classes: @classes,
-          rng: @rng }
-      end
-
-      # Load marshal data.
-      # @return [nil]
-      def marshal_load(obj)
-        @params = obj[:params]
-        @factor_mat = obj[:factor_mat]
-        @weight_vec = obj[:weight_vec]
-        @bias_term = obj[:bias_term]
-        @classes = obj[:classes]
-        @rng = obj[:rng]
-        nil
-      end
-
-      private
-
-      def binary_fit(x, y)
-        # Initialize some variables.
-        n_samples, n_features = x.shape
-        rand_ids = [*0...n_samples].shuffle(random: @rng)
-        weight_vec = Numo::DFloat.zeros(n_features + 1)
-        factor_mat = Numo::DFloat.zeros(@params[:n_factors], n_features)
-        weight_optimizer = @params[:optimizer].dup
-        factor_optimizers = Array.new(@params[:n_factors]) { @params[:optimizer].dup }
-        # Start optimization.
-        @params[:max_iter].times do |_t|
-          # Random sampling.
-          subset_ids = rand_ids.shift(@params[:batch_size])
-          rand_ids.concat(subset_ids)
-          data = x[subset_ids, true]
-          ex_data = expand_feature(data)
-          label = y[subset_ids]
-          # Calculate gradients for loss function.
-          loss_grad = loss_gradient(data, ex_data, label, factor_mat, weight_vec)
-          next if loss_grad.ne(0.0).count.zero?
-          # Update each parameter.
-          weight_vec = weight_optimizer.call(weight_vec, weight_gradient(loss_grad, ex_data, weight_vec))
-          @params[:n_factors].times do |n|
-            factor_mat[n, true] = factor_optimizers[n].call(factor_mat[n, true],
-                                                            factor_gradient(loss_grad, data, factor_mat[n, true]))
-          end
-        end
-        [factor_mat, *split_weight_vec_bias(weight_vec)]
-      end
-
-      def bin_decision_function(x, ex_x, factor, weight)
-        ex_x.dot(weight) + 0.5 * (factor.dot(x.transpose)**2 - (factor**2).dot(x.transpose**2)).sum(0)
-      end
-
-      def hinge_loss_gradient(x, ex_x, y, factor, weight)
-        evaluated = y * bin_decision_function(x, ex_x, factor, weight)
-        gradient = Numo::DFloat.zeros(evaluated.size)
-        gradient[evaluated < 1.0] = -y[evaluated < 1.0]
-        gradient
-      end
-
-      def logistic_loss_gradient(x, ex_x, y, factor, weight)
-        evaluated = y * bin_decision_function(x, ex_x, factor, weight)
-        sigmoid_func = 1.0 / (Numo::NMath.exp(-evaluated) + 1.0)
-        (sigmoid_func - 1.0) * y
-      end
-
-      def loss_gradient(x, ex_x, y, factor, weight)
-        if @params[:loss] == 'hinge'
-          hinge_loss_gradient(x, ex_x, y, factor, weight)
-        else
-          logistic_loss_gradient(x, ex_x, y, factor, weight)
-        end
-      end
-
-      def weight_gradient(loss_grad, data, weight)
-        (loss_grad.expand_dims(1) * data).mean(0) + @params[:reg_param_linear] * weight
-      end
-
-      def factor_gradient(loss_grad, data, factor)
-        (loss_grad.expand_dims(1) * (data * data.dot(factor).expand_dims(1) - factor * (data**2))).mean(0) + @params[:reg_param_factor] * factor
-      end
-
-      def expand_feature(x)
-        Numo::NArray.hstack([x, Numo::DFloat.ones([x.shape[0], 1])])
-      end
-
-      def split_weight_vec_bias(weight_vec)
-        weights = weight_vec[0...-1].dup
-        bias = weight_vec[-1]
-        [weights, bias]
-      end
-    end
-  end
-end
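Likewise, a minimal sketch of the removed FactorizationMachineClassifier, again assuming svmkit 0.7.3 is installed; the data and hyperparameter values are illustrative only. With two classes the model fits a single bipolar (-1/+1) problem; with more classes it falls back to one-vs-the-rest as described in the class comment.

    require 'svmkit'

    samples = Numo::DFloat.new(80, 4).rand
    labels  = Numo::Int32.cast(samples[true, 0].gt(0.5)) * 2 - 1 # bipolar labels for the binary case

    estimator = SVMKit::PolynomialModel::FactorizationMachineClassifier.new(
      n_factors: 4, loss: 'logistic', reg_param_linear: 0.001, reg_param_factor: 0.001,
      max_iter: 500, batch_size: 20, random_seed: 1)
    estimator.fit(samples, labels)
    predicted = estimator.predict(samples)       # Numo::Int32, shape: [80]
    probs     = estimator.predict_proba(samples) # Numo::DFloat, shape: [80, 2]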