svmkit 0.7.3 → 0.8.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (78)
  1. checksums.yaml +4 -4
  2. data/.gitignore +0 -9
  3. data/.rspec +1 -0
  4. data/.travis.yml +4 -12
  5. data/LICENSE.txt +1 -1
  6. data/README.md +11 -13
  7. data/lib/svmkit.rb +3 -66
  8. data/svmkit.gemspec +12 -7
  9. metadata +16 -81
  10. data/.coveralls.yml +0 -1
  11. data/.rubocop.yml +0 -47
  12. data/.rubocop_todo.yml +0 -58
  13. data/HISTORY.md +0 -168
  14. data/lib/svmkit/base/base_estimator.rb +0 -13
  15. data/lib/svmkit/base/classifier.rb +0 -34
  16. data/lib/svmkit/base/cluster_analyzer.rb +0 -29
  17. data/lib/svmkit/base/evaluator.rb +0 -13
  18. data/lib/svmkit/base/regressor.rb +0 -34
  19. data/lib/svmkit/base/splitter.rb +0 -17
  20. data/lib/svmkit/base/transformer.rb +0 -18
  21. data/lib/svmkit/clustering/dbscan.rb +0 -127
  22. data/lib/svmkit/clustering/k_means.rb +0 -140
  23. data/lib/svmkit/dataset.rb +0 -109
  24. data/lib/svmkit/decomposition/nmf.rb +0 -147
  25. data/lib/svmkit/decomposition/pca.rb +0 -150
  26. data/lib/svmkit/ensemble/ada_boost_classifier.rb +0 -198
  27. data/lib/svmkit/ensemble/ada_boost_regressor.rb +0 -180
  28. data/lib/svmkit/ensemble/random_forest_classifier.rb +0 -182
  29. data/lib/svmkit/ensemble/random_forest_regressor.rb +0 -143
  30. data/lib/svmkit/evaluation_measure/accuracy.rb +0 -30
  31. data/lib/svmkit/evaluation_measure/f_score.rb +0 -51
  32. data/lib/svmkit/evaluation_measure/log_loss.rb +0 -46
  33. data/lib/svmkit/evaluation_measure/mean_absolute_error.rb +0 -30
  34. data/lib/svmkit/evaluation_measure/mean_squared_error.rb +0 -30
  35. data/lib/svmkit/evaluation_measure/normalized_mutual_information.rb +0 -63
  36. data/lib/svmkit/evaluation_measure/precision.rb +0 -51
  37. data/lib/svmkit/evaluation_measure/precision_recall.rb +0 -91
  38. data/lib/svmkit/evaluation_measure/purity.rb +0 -41
  39. data/lib/svmkit/evaluation_measure/r2_score.rb +0 -44
  40. data/lib/svmkit/evaluation_measure/recall.rb +0 -51
  41. data/lib/svmkit/kernel_approximation/rbf.rb +0 -136
  42. data/lib/svmkit/kernel_machine/kernel_svc.rb +0 -194
  43. data/lib/svmkit/linear_model/lasso.rb +0 -138
  44. data/lib/svmkit/linear_model/linear_regression.rb +0 -112
  45. data/lib/svmkit/linear_model/logistic_regression.rb +0 -161
  46. data/lib/svmkit/linear_model/ridge.rb +0 -112
  47. data/lib/svmkit/linear_model/sgd_linear_estimator.rb +0 -89
  48. data/lib/svmkit/linear_model/svc.rb +0 -184
  49. data/lib/svmkit/linear_model/svr.rb +0 -123
  50. data/lib/svmkit/model_selection/cross_validation.rb +0 -121
  51. data/lib/svmkit/model_selection/grid_search_cv.rb +0 -247
  52. data/lib/svmkit/model_selection/k_fold.rb +0 -77
  53. data/lib/svmkit/model_selection/stratified_k_fold.rb +0 -95
  54. data/lib/svmkit/multiclass/one_vs_rest_classifier.rb +0 -101
  55. data/lib/svmkit/naive_bayes/naive_bayes.rb +0 -316
  56. data/lib/svmkit/nearest_neighbors/k_neighbors_classifier.rb +0 -112
  57. data/lib/svmkit/nearest_neighbors/k_neighbors_regressor.rb +0 -94
  58. data/lib/svmkit/optimizer/nadam.rb +0 -90
  59. data/lib/svmkit/optimizer/rmsprop.rb +0 -69
  60. data/lib/svmkit/optimizer/sgd.rb +0 -65
  61. data/lib/svmkit/optimizer/yellow_fin.rb +0 -144
  62. data/lib/svmkit/pairwise_metric.rb +0 -91
  63. data/lib/svmkit/pipeline/pipeline.rb +0 -197
  64. data/lib/svmkit/polynomial_model/factorization_machine_classifier.rb +0 -262
  65. data/lib/svmkit/polynomial_model/factorization_machine_regressor.rb +0 -194
  66. data/lib/svmkit/preprocessing/l2_normalizer.rb +0 -63
  67. data/lib/svmkit/preprocessing/label_encoder.rb +0 -95
  68. data/lib/svmkit/preprocessing/min_max_scaler.rb +0 -93
  69. data/lib/svmkit/preprocessing/one_hot_encoder.rb +0 -99
  70. data/lib/svmkit/preprocessing/standard_scaler.rb +0 -87
  71. data/lib/svmkit/probabilistic_output.rb +0 -112
  72. data/lib/svmkit/tree/decision_tree_classifier.rb +0 -276
  73. data/lib/svmkit/tree/decision_tree_regressor.rb +0 -251
  74. data/lib/svmkit/tree/node.rb +0 -70
  75. data/lib/svmkit/utils.rb +0 -22
  76. data/lib/svmkit/validation.rb +0 -79
  77. data/lib/svmkit/values.rb +0 -13
  78. data/lib/svmkit/version.rb +0 -7
data/lib/svmkit/optimizer/nadam.rb
@@ -1,90 +0,0 @@
- # frozen_string_literal: true
-
- require 'svmkit/validation'
- require 'svmkit/base/base_estimator'
-
- module SVMKit
-   # This module consists of the classes that implement optimizers adaptively tuning hyperparameters.
-   module Optimizer
-     # Nadam is a class that implements the Nadam optimizer.
-     #
-     # @example
-     #   optimizer = SVMKit::Optimizer::Nadam.new(learning_rate: 0.01, momentum: 0.9, decay1: 0.9, decay2: 0.999)
-     #   estimator = SVMKit::LinearModel::LinearRegression.new(optimizer: optimizer, random_seed: 1)
-     #   estimator.fit(samples, values)
-     #
-     # *Reference*
-     # - T. Dozat, "Incorporating Nesterov Momentum into Adam," Tech. Rep., Stanford University, 2015.
-     class Nadam
-       include Base::BaseEstimator
-       include Validation
-
-       # Create a new optimizer with Nadam.
-       #
-       # @param learning_rate [Float] The initial value of learning rate.
-       # @param momentum [Float] The initial value of momentum.
-       # @param decay1 [Float] The smoothing parameter for the first moment.
-       # @param decay2 [Float] The smoothing parameter for the second moment.
-       def initialize(learning_rate: 0.01, momentum: 0.9, decay1: 0.9, decay2: 0.999)
-         check_params_float(learning_rate: learning_rate, momentum: momentum, decay1: decay1, decay2: decay2)
-         check_params_positive(learning_rate: learning_rate, momentum: momentum, decay1: decay1, decay2: decay2)
-         @params = {}
-         @params[:learning_rate] = learning_rate
-         @params[:momentum] = momentum
-         @params[:decay1] = decay1
-         @params[:decay2] = decay2
-         @fst_moment = nil
-         @sec_moment = nil
-         @decay1_prod = 1.0
-         @iter = 0
-       end
-
-       # Calculate the updated weight with the Nadam adaptive learning rate.
-       #
-       # @param weight [Numo::DFloat] (shape: [n_features]) The weight to be updated.
-       # @param gradient [Numo::DFloat] (shape: [n_features]) The gradient for updating the weight.
-       # @return [Numo::DFloat] (shape: [n_features]) The updated weight.
-       def call(weight, gradient)
-         @fst_moment ||= Numo::DFloat.zeros(weight.shape[0])
-         @sec_moment ||= Numo::DFloat.zeros(weight.shape[0])
-
-         @iter += 1
-
-         decay1_curr = @params[:decay1] * (1.0 - 0.5 * 0.96**(@iter * 0.004))
-         decay1_next = @params[:decay1] * (1.0 - 0.5 * 0.96**((@iter + 1) * 0.004))
-         decay1_prod_curr = @decay1_prod * decay1_curr
-         decay1_prod_next = @decay1_prod * decay1_curr * decay1_next
-         @decay1_prod = decay1_prod_curr
-
-         @fst_moment = @params[:decay1] * @fst_moment + (1.0 - @params[:decay1]) * gradient
-         @sec_moment = @params[:decay2] * @sec_moment + (1.0 - @params[:decay2]) * gradient**2
-         nm_gradient = gradient / (1.0 - decay1_prod_curr)
-         nm_fst_moment = @fst_moment / (1.0 - decay1_prod_next)
-         nm_sec_moment = @sec_moment / (1.0 - @params[:decay2]**@iter)
-
-         weight - (@params[:learning_rate] / (nm_sec_moment**0.5 + 1e-8)) * ((1 - decay1_curr) * nm_gradient + decay1_next * nm_fst_moment)
-       end
-
-       # Dump marshal data.
-       # @return [Hash] The marshal data.
-       def marshal_dump
-         { params: @params,
-           fst_moment: @fst_moment,
-           sec_moment: @sec_moment,
-           decay1_prod: @decay1_prod,
-           iter: @iter }
-       end
-
-       # Load marshal data.
-       # @return [nil]
-       def marshal_load(obj)
-         @params = obj[:params]
-         @fst_moment = obj[:fst_moment]
-         @sec_moment = obj[:sec_moment]
-         @decay1_prod = obj[:decay1_prod]
-         @iter = obj[:iter]
-         nil
-       end
-     end
-   end
- end
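For readers tracking what was dropped: the distinctive part of the removed Nadam#call is the iteration-scheduled momentum coefficient (Dozat, 2015). Below is a minimal standalone sketch of the same update, assuming only the numo-narray gem; the method name, Hash-based state, and variable names are illustrative, not part of any svmkit API.

```ruby
require 'numo/narray'

# Minimal sketch of the Nadam update removed above; state lives in a Hash
# instead of instance variables.
def nadam_step(weight, gradient, state, learning_rate: 0.01, decay1: 0.9, decay2: 0.999)
  state[:fst_moment] ||= Numo::DFloat.zeros(weight.shape[0])
  state[:sec_moment] ||= Numo::DFloat.zeros(weight.shape[0])
  state[:decay1_prod] ||= 1.0
  state[:iter] = (state[:iter] || 0) + 1

  # Nadam schedules the momentum coefficient over iterations.
  decay1_curr = decay1 * (1.0 - 0.5 * 0.96**(state[:iter] * 0.004))
  decay1_next = decay1 * (1.0 - 0.5 * 0.96**((state[:iter] + 1) * 0.004))
  decay1_prod_curr = state[:decay1_prod] * decay1_curr
  decay1_prod_next = decay1_prod_curr * decay1_next
  state[:decay1_prod] = decay1_prod_curr

  # Moving averages of the gradient and squared gradient, bias-corrected below.
  state[:fst_moment] = decay1 * state[:fst_moment] + (1.0 - decay1) * gradient
  state[:sec_moment] = decay2 * state[:sec_moment] + (1.0 - decay2) * gradient**2
  nm_gradient = gradient / (1.0 - decay1_prod_curr)
  nm_fst_moment = state[:fst_moment] / (1.0 - decay1_prod_next)
  nm_sec_moment = state[:sec_moment] / (1.0 - decay2**state[:iter])

  weight - (learning_rate / (nm_sec_moment**0.5 + 1e-8)) *
           ((1 - decay1_curr) * nm_gradient + decay1_next * nm_fst_moment)
end

# Usage: fold the update over a stream of gradients.
w = Numo::DFloat.zeros(3)
state = {}
w = nadam_step(w, Numo::DFloat[0.1, -0.2, 0.3], state)
```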
data/lib/svmkit/optimizer/rmsprop.rb
@@ -1,69 +0,0 @@
- # frozen_string_literal: true
-
- require 'svmkit/validation'
- require 'svmkit/base/base_estimator'
-
- module SVMKit
-   module Optimizer
-     # RMSProp is a class that implements the RMSProp optimizer.
-     #
-     # @example
-     #   optimizer = SVMKit::Optimizer::RMSProp.new(learning_rate: 0.01, momentum: 0.9, decay: 0.9)
-     #   estimator = SVMKit::LinearModel::LinearRegression.new(optimizer: optimizer, random_seed: 1)
-     #   estimator.fit(samples, values)
-     #
-     # *Reference*
-     # - I. Sutskever, J. Martens, G. Dahl, and G. Hinton, "On the importance of initialization and momentum in deep learning," Proc. ICML'13, pp. 1139--1147, 2013.
-     # - G. Hinton, N. Srivastava, and K. Swersky, "Lecture 6e rmsprop," Neural Networks for Machine Learning, 2012.
-     class RMSProp
-       include Base::BaseEstimator
-       include Validation
-
-       # Create a new optimizer with RMSProp.
-       #
-       # @param learning_rate [Float] The initial value of learning rate.
-       # @param momentum [Float] The initial value of momentum.
-       # @param decay [Float] The smoothing parameter.
-       def initialize(learning_rate: 0.01, momentum: 0.9, decay: 0.9)
-         check_params_float(learning_rate: learning_rate, momentum: momentum, decay: decay)
-         check_params_positive(learning_rate: learning_rate, momentum: momentum, decay: decay)
-         @params = {}
-         @params[:learning_rate] = learning_rate
-         @params[:momentum] = momentum
-         @params[:decay] = decay
-         @moment = nil
-         @update = nil
-       end
-
-       # Calculate the updated weight with the RMSProp adaptive learning rate.
-       #
-       # @param weight [Numo::DFloat] (shape: [n_features]) The weight to be updated.
-       # @param gradient [Numo::DFloat] (shape: [n_features]) The gradient for updating the weight.
-       # @return [Numo::DFloat] (shape: [n_features]) The updated weight.
-       def call(weight, gradient)
-         @moment ||= Numo::DFloat.zeros(weight.shape[0])
-         @update ||= Numo::DFloat.zeros(weight.shape[0])
-         @moment = @params[:decay] * @moment + (1.0 - @params[:decay]) * gradient**2
-         @update = @params[:momentum] * @update - (@params[:learning_rate] / (@moment**0.5 + 1.0e-8)) * gradient
-         weight + @update
-       end
-
-       # Dump marshal data.
-       # @return [Hash] The marshal data.
-       def marshal_dump
-         { params: @params,
-           moment: @moment,
-           update: @update }
-       end
-
-       # Load marshal data.
-       # @return [nil]
-       def marshal_load(obj)
-         @params = obj[:params]
-         @moment = obj[:moment]
-         @update = obj[:update]
-         nil
-       end
-     end
-   end
- end
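The removed RMSProp#call combines a momentum term with a per-feature step size scaled by a moving average of squared gradients. A minimal sketch under the same numo-narray assumption (the method name and Hash-based state are illustrative):

```ruby
require 'numo/narray'

# Minimal sketch of the RMSProp-with-momentum update removed above.
def rmsprop_step(weight, gradient, state, learning_rate: 0.01, momentum: 0.9, decay: 0.9)
  state[:moment] ||= Numo::DFloat.zeros(weight.shape[0])
  state[:update] ||= Numo::DFloat.zeros(weight.shape[0])

  # Exponential moving average of squared gradients scales the step per feature.
  state[:moment] = decay * state[:moment] + (1.0 - decay) * gradient**2
  state[:update] = momentum * state[:update] -
                   (learning_rate / (state[:moment]**0.5 + 1.0e-8)) * gradient
  weight + state[:update]
end

w = Numo::DFloat.zeros(3)
state = {}
w = rmsprop_step(w, Numo::DFloat[0.1, -0.2, 0.3], state)
```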
data/lib/svmkit/optimizer/sgd.rb
@@ -1,65 +0,0 @@
- # frozen_string_literal: true
-
- require 'svmkit/validation'
- require 'svmkit/base/base_estimator'
-
- module SVMKit
-   module Optimizer
-     # SGD is a class that implements the SGD optimizer.
-     #
-     # @example
-     #   optimizer = SVMKit::Optimizer::SGD.new(learning_rate: 0.01, momentum: 0.9, decay: 0.9)
-     #   estimator = SVMKit::LinearModel::LinearRegression.new(optimizer: optimizer, random_seed: 1)
-     #   estimator.fit(samples, values)
-     class SGD
-       include Base::BaseEstimator
-       include Validation
-
-       # Create a new optimizer with SGD.
-       #
-       # @param learning_rate [Float] The initial value of learning rate.
-       # @param momentum [Float] The initial value of momentum.
-       # @param decay [Float] The smoothing parameter.
-       def initialize(learning_rate: 0.01, momentum: 0.0, decay: 0.0)
-         check_params_float(learning_rate: learning_rate, momentum: momentum, decay: decay)
-         check_params_positive(learning_rate: learning_rate, momentum: momentum, decay: decay)
-         @params = {}
-         @params[:learning_rate] = learning_rate
-         @params[:momentum] = momentum
-         @params[:decay] = decay
-         @iter = 0
-         @update = nil
-       end
-
-       # Calculate the updated weight with SGD.
-       #
-       # @param weight [Numo::DFloat] (shape: [n_features]) The weight to be updated.
-       # @param gradient [Numo::DFloat] (shape: [n_features]) The gradient for updating the weight.
-       # @return [Numo::DFloat] (shape: [n_features]) The updated weight.
-       def call(weight, gradient)
-         @update ||= Numo::DFloat.zeros(weight.shape[0])
-         current_learning_rate = @params[:learning_rate] / (1.0 + @params[:decay] * @iter)
-         @iter += 1
-         @update = @params[:momentum] * @update - current_learning_rate * gradient
-         weight + @update
-       end
-
-       # Dump marshal data.
-       # @return [Hash] The marshal data.
-       def marshal_dump
-         { params: @params,
-           iter: @iter,
-           update: @update }
-       end
-
-       # Load marshal data.
-       # @return [nil]
-       def marshal_load(obj)
-         @params = obj[:params]
-         @iter = obj[:iter]
-         @update = obj[:update]
-         nil
-       end
-     end
-   end
- end
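The removed SGD#call anneals its step size as learning_rate / (1 + decay * iter) before applying momentum. A tiny pure-Ruby illustration of that schedule (the settings are hypothetical, chosen only to make the decay visible):

```ruby
# Print the first few annealed learning rates of the schedule used above.
learning_rate = 0.01
decay = 0.1
(0..4).each do |iter|
  puts format('iter %d: lr = %.5f', iter, learning_rate / (1.0 + decay * iter))
end
# iter 0: lr = 0.01000
# iter 1: lr = 0.00909
# ...
```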
data/lib/svmkit/optimizer/yellow_fin.rb
@@ -1,144 +0,0 @@
- # frozen_string_literal: true
-
- require 'svmkit/validation'
- require 'svmkit/base/base_estimator'
-
- module SVMKit
-   module Optimizer
-     # YellowFin is a class that implements the YellowFin optimizer.
-     #
-     # @example
-     #   optimizer = SVMKit::Optimizer::YellowFin.new(learning_rate: 0.01, momentum: 0.9, decay: 0.999, window_width: 20)
-     #   estimator = SVMKit::LinearModel::LinearRegression.new(optimizer: optimizer, random_seed: 1)
-     #   estimator.fit(samples, values)
-     #
-     # *Reference*
-     # - J. Zhang and I. Mitliagkas, "YellowFin and the Art of Momentum Tuning," CoRR abs/1706.03471, 2017.
-     class YellowFin
-       include Base::BaseEstimator
-       include Validation
-
-       # Create a new optimizer with YellowFin.
-       #
-       # @param learning_rate [Float] The initial value of learning rate.
-       # @param momentum [Float] The initial value of momentum.
-       # @param decay [Float] The smoothing parameter.
-       # @param window_width [Integer] The sliding window width for searching curvature range.
-       def initialize(learning_rate: 0.01, momentum: 0.9, decay: 0.999, window_width: 20)
-         check_params_float(learning_rate: learning_rate, momentum: momentum, decay: decay)
-         check_params_integer(window_width: window_width)
-         check_params_positive(learning_rate: learning_rate, momentum: momentum, decay: decay, window_width: window_width)
-         @params = {}
-         @params[:learning_rate] = learning_rate
-         @params[:momentum] = momentum
-         @params[:decay] = decay
-         @params[:window_width] = window_width
-         @smth_learning_rate = learning_rate
-         @smth_momentum = momentum
-         @grad_norms = nil
-         @grad_norm_min = 0.0
-         @grad_norm_max = 0.0
-         @grad_mean_sqr = 0.0
-         @grad_mean = 0.0
-         @grad_var = 0.0
-         @grad_norm_mean = 0.0
-         @curve_mean = 0.0
-         @distance_mean = 0.0
-         @update = nil
-       end
-
-       # Calculate the updated weight with adaptive momentum coefficient and learning rate.
-       #
-       # @param weight [Numo::DFloat] (shape: [n_features]) The weight to be updated.
-       # @param gradient [Numo::DFloat] (shape: [n_features]) The gradient for updating the weight.
-       # @return [Numo::DFloat] (shape: [n_features]) The updated weight.
-       def call(weight, gradient)
-         @update ||= Numo::DFloat.zeros(weight.shape[0])
-         curvature_range(gradient)
-         gradient_variance(gradient)
-         distance_to_optimum(gradient)
-         @smth_momentum = @params[:decay] * @smth_momentum + (1 - @params[:decay]) * current_momentum
-         @smth_learning_rate = @params[:decay] * @smth_learning_rate + (1 - @params[:decay]) * current_learning_rate
-         @update = @smth_momentum * @update - @smth_learning_rate * gradient
-         weight + @update
-       end
-
-       private
-
-       def current_momentum
-         dr = Math.sqrt(@grad_norm_max / @grad_norm_min + 1.0e-8)
-         [cubic_root**2, ((dr - 1) / (dr + 1))**2].max
-       end
-
-       def current_learning_rate
-         (1.0 - Math.sqrt(@params[:momentum]))**2 / (@grad_norm_min + 1.0e-8)
-       end
-
-       def cubic_root
-         p = (@distance_mean**2 * @grad_norm_min**2) / (2 * @grad_var + 1.0e-8)
-         w3 = (-Math.sqrt(p**2 + 4.fdiv(27) * p**3) - p).fdiv(2)
-         w = (w3 >= 0.0 ? 1 : -1) * w3.abs**1.fdiv(3)
-         y = w - p / (3 * w + 1.0e-8)
-         y + 1
-       end
-
-       def curvature_range(gradient)
-         @grad_norms ||= []
-         @grad_norms.push((gradient**2).sum)
-         @grad_norms.shift(@grad_norms.size - @params[:window_width]) if @grad_norms.size > @params[:window_width]
-         @grad_norm_min = @params[:decay] * @grad_norm_min + (1 - @params[:decay]) * @grad_norms.min
-         @grad_norm_max = @params[:decay] * @grad_norm_max + (1 - @params[:decay]) * @grad_norms.max
-       end
-
-       def gradient_variance(gradient)
-         @grad_mean_sqr = @params[:decay] * @grad_mean_sqr + (1 - @params[:decay]) * gradient**2
-         @grad_mean = @params[:decay] * @grad_mean + (1 - @params[:decay]) * gradient
-         @grad_var = (@grad_mean_sqr - @grad_mean**2).sum
-       end
-
-       def distance_to_optimum(gradient)
-         grad_sqr = (gradient**2).sum
-         @grad_norm_mean = @params[:decay] * @grad_norm_mean + (1 - @params[:decay]) * Math.sqrt(grad_sqr + 1.0e-8)
-         @curve_mean = @params[:decay] * @curve_mean + (1 - @params[:decay]) * grad_sqr
-         @distance_mean = @params[:decay] * @distance_mean + (1 - @params[:decay]) * (@grad_norm_mean / @curve_mean)
-       end
-
-       # Dump marshal data.
-       # @return [Hash] The marshal data.
-       def marshal_dump
-         { params: @params,
-           smth_learning_rate: @smth_learning_rate,
-           smth_momentum: @smth_momentum,
-           grad_norms: @grad_norms,
-           grad_norm_min: @grad_norm_min,
-           grad_norm_max: @grad_norm_max,
-           grad_mean_sqr: @grad_mean_sqr,
-           grad_mean: @grad_mean,
-           grad_var: @grad_var,
-           grad_norm_mean: @grad_norm_mean,
-           curve_mean: @curve_mean,
-           distance_mean: @distance_mean,
-           update: @update }
-       end
-
-       # Load marshal data.
-       # @return [nil]
-       def marshal_load(obj)
-         @params = obj[:params]
-         @smth_learning_rate = obj[:smth_learning_rate]
-         @smth_momentum = obj[:smth_momentum]
-         @grad_norms = obj[:grad_norms]
-         @grad_norm_min = obj[:grad_norm_min]
-         @grad_norm_max = obj[:grad_norm_max]
-         @grad_mean_sqr = obj[:grad_mean_sqr]
-         @grad_mean = obj[:grad_mean]
-         @grad_var = obj[:grad_var]
-         @grad_norm_mean = obj[:grad_norm_mean]
-         @curve_mean = obj[:curve_mean]
-         @distance_mean = obj[:distance_mean]
-         @update = obj[:update]
-         nil
-       end
-     end
-   end
- end
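Of the removed YellowFin helpers, curvature_range is the one that maintains state beyond simple moving averages: a sliding window of squared gradient norms whose smoothed min/max bound the curvature estimate. An illustrative pure-Ruby reimplementation of just that step (the Hash-based state and names are ours, not the gem's):

```ruby
# Track the curvature range the way the removed YellowFin#curvature_range does:
# keep the last window_width squared gradient norms, then smooth their min/max.
def track_curvature_range(state, sq_grad_norm, decay: 0.999, window_width: 20)
  state[:grad_norms] ||= []
  state[:grad_norms].push(sq_grad_norm)
  # Drop entries that fall outside the sliding window.
  excess = state[:grad_norms].size - window_width
  state[:grad_norms].shift(excess) if excess > 0
  # Exponentially smooth the window's min/max curvature estimates.
  state[:h_min] = decay * (state[:h_min] || 0.0) + (1 - decay) * state[:grad_norms].min
  state[:h_max] = decay * (state[:h_max] || 0.0) + (1 - decay) * state[:grad_norms].max
  state
end

state = {}
[4.0, 1.0, 9.0].each { |sq| track_curvature_range(state, sq) }
p state[:h_min], state[:h_max]
```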
data/lib/svmkit/pairwise_metric.rb
@@ -1,91 +0,0 @@
- # frozen_string_literal: true
-
- require 'svmkit/validation'
-
- module SVMKit
-   # Module for calculating pairwise distances, similarities, and kernels.
-   module PairwiseMetric
-     class << self
-       # Calculate the pairwise Euclidean distances between x and y.
-       #
-       # @param x [Numo::DFloat] (shape: [n_samples_x, n_features])
-       # @param y [Numo::DFloat] (shape: [n_samples_y, n_features])
-       # @return [Numo::DFloat] (shape: [n_samples_x, n_samples_x] or [n_samples_x, n_samples_y] if y is given)
-       def euclidean_distance(x, y = nil)
-         y = x if y.nil?
-         SVMKit::Validation.check_sample_array(x)
-         SVMKit::Validation.check_sample_array(y)
-         sum_x_vec = (x**2).sum(1)
-         sum_y_vec = (y**2).sum(1)
-         dot_xy_mat = x.dot(y.transpose)
-         distance_matrix = dot_xy_mat * -2.0 +
-                           sum_x_vec.tile(y.shape[0], 1).transpose +
-                           sum_y_vec.tile(x.shape[0], 1)
-         Numo::NMath.sqrt(distance_matrix.abs)
-       end
-
-       # Calculate the RBF kernel between x and y.
-       #
-       # @param x [Numo::DFloat] (shape: [n_samples_x, n_features])
-       # @param y [Numo::DFloat] (shape: [n_samples_y, n_features])
-       # @param gamma [Float] The parameter of the RBF kernel; if nil, it is 1 / n_features.
-       # @return [Numo::DFloat] (shape: [n_samples_x, n_samples_x] or [n_samples_x, n_samples_y] if y is given)
-       def rbf_kernel(x, y = nil, gamma = nil)
-         y = x if y.nil?
-         gamma ||= 1.0 / x.shape[1]
-         SVMKit::Validation.check_sample_array(x)
-         SVMKit::Validation.check_sample_array(y)
-         SVMKit::Validation.check_params_float(gamma: gamma)
-         distance_matrix = euclidean_distance(x, y)
-         Numo::NMath.exp((distance_matrix**2) * -gamma)
-       end
-
-       # Calculate the linear kernel between x and y.
-       #
-       # @param x [Numo::DFloat] (shape: [n_samples_x, n_features])
-       # @param y [Numo::DFloat] (shape: [n_samples_y, n_features])
-       # @return [Numo::DFloat] (shape: [n_samples_x, n_samples_x] or [n_samples_x, n_samples_y] if y is given)
-       def linear_kernel(x, y = nil)
-         y = x if y.nil?
-         SVMKit::Validation.check_sample_array(x)
-         SVMKit::Validation.check_sample_array(y)
-         x.dot(y.transpose)
-       end
-
-       # Calculate the polynomial kernel between x and y.
-       #
-       # @param x [Numo::DFloat] (shape: [n_samples_x, n_features])
-       # @param y [Numo::DFloat] (shape: [n_samples_y, n_features])
-       # @param degree [Integer] The degree parameter of the polynomial kernel.
-       # @param gamma [Float] The parameter of the polynomial kernel; if nil, it is 1 / n_features.
-       # @param coef [Integer] The constant term of the polynomial kernel.
-       # @return [Numo::DFloat] (shape: [n_samples_x, n_samples_x] or [n_samples_x, n_samples_y] if y is given)
-       def polynomial_kernel(x, y = nil, degree = 3, gamma = nil, coef = 1)
-         y = x if y.nil?
-         gamma ||= 1.0 / x.shape[1]
-         SVMKit::Validation.check_sample_array(x)
-         SVMKit::Validation.check_sample_array(y)
-         SVMKit::Validation.check_params_float(gamma: gamma)
-         SVMKit::Validation.check_params_integer(degree: degree, coef: coef)
-         (x.dot(y.transpose) * gamma + coef)**degree
-       end
-
-       # Calculate the sigmoid kernel between x and y.
-       #
-       # @param x [Numo::DFloat] (shape: [n_samples_x, n_features])
-       # @param y [Numo::DFloat] (shape: [n_samples_y, n_features])
-       # @param gamma [Float] The parameter of the sigmoid kernel; if nil, it is 1 / n_features.
-       # @param coef [Integer] The constant term of the sigmoid kernel.
-       # @return [Numo::DFloat] (shape: [n_samples_x, n_samples_x] or [n_samples_x, n_samples_y] if y is given)
-       def sigmoid_kernel(x, y = nil, gamma = nil, coef = 1)
-         y = x if y.nil?
-         gamma ||= 1.0 / x.shape[1]
-         SVMKit::Validation.check_sample_array(x)
-         SVMKit::Validation.check_sample_array(y)
-         SVMKit::Validation.check_params_float(gamma: gamma)
-         SVMKit::Validation.check_params_integer(coef: coef)
-         Numo::NMath.tanh(x.dot(y.transpose) * gamma + coef)
-       end
-     end
-   end
- end
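The removed euclidean_distance avoids an explicit pairwise loop by using the identity ||x - y||^2 = ||x||^2 + ||y||^2 - 2 x.y over whole matrices. A minimal runnable sketch of the same trick, assuming numo-narray (the method name is illustrative):

```ruby
require 'numo/narray'

# Pairwise Euclidean distances via the expansion
#   ||x - y||^2 = ||x||^2 + ||y||^2 - 2 * x.y
# computed for all pairs at once.
def pairwise_euclidean(x, y = x)
  sum_x = (x**2).sum(1)                      # shape: [n_samples_x]
  sum_y = (y**2).sum(1)                      # shape: [n_samples_y]
  dot_xy = x.dot(y.transpose)                # shape: [n_samples_x, n_samples_y]
  sq = dot_xy * -2.0 +
       sum_x.tile(y.shape[0], 1).transpose +
       sum_y.tile(x.shape[0], 1)
  Numo::NMath.sqrt(sq.abs)                   # abs guards tiny negatives from rounding
end

x = Numo::DFloat[[0.0, 0.0], [3.0, 4.0]]
p pairwise_euclidean(x)  # distance between the two rows is 5.0
```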