rumale-linear_model 0.24.0 → 0.26.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
-   metadata.gz: 3a7999fbdb27dc6ed43710083da0cecf2336bfb08277fa4240be7b56c2603c9f
-   data.tar.gz: 6bc61b3d80fe71c1d7ed806c810a0eb454244033506ab2078ccd0987e7891455
+   metadata.gz: 68480352c1ab25d2242da8364a3377ee6ef0a1241c3299b5ffda0d3b699e81be
+   data.tar.gz: c02626de09115b2ce65dd6d4174d89fe1854a8385c33fa0c942822ba6d0a4c54
  SHA512:
-   metadata.gz: 4c73e2b03dfb0f14c94b880769103bbb05f77de7ac08f8b9af2dddc0ab6bcada758c0c7c51353a79e052e051999b4afc7610012caf4dbb7c4edc505152fdf1d6
-   data.tar.gz: 91ce194539b8abc95fb3ec7335bd4843aea126774ebb566eb9953955f775427afd032f9d02894c8507337d20f03baafa39903eb9dd9f9157ccf6a8d22028fe8b
+   metadata.gz: 23ed6fe1871d0fc6e8f10e32076257624683954f6f13fbd164289297ca845eaceb9829ae64e5a2af66b119bfa9a13cc3d609a767d9e97fc39ec88a59fcceaa7f
+   data.tar.gz: ab43f09a3fcecbe7b8c82a29f33380d363a4bfa056392f83cca931a4d839805250ad05640b0f1219cd78b72e824c6303427b62c02a362a26d08a1c0f9b220bfe
data/LICENSE.txt CHANGED
@@ -1,4 +1,4 @@
- Copyright (c) 2022 Atsushi Tatsuma
+ Copyright (c) 2022-2023 Atsushi Tatsuma
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
data/lib/rumale/linear_model/base_estimator.rb ADDED
@@ -0,0 +1,46 @@
+ # frozen_string_literal: true
+
+ require 'rumale/base/estimator'
+
+ module Rumale
+   # This module consists of the classes that implement generalized linear models.
+   module LinearModel
+     # BaseEstimator is an abstract class for implementation of linear model. This class is used internally.
+     class BaseEstimator < Rumale::Base::Estimator
+       # Return the weight vector.
+       # @return [Numo::DFloat] (shape: [n_outputs/n_classes, n_features])
+       attr_reader :weight_vec
+
+       # Return the bias term (a.k.a. intercept).
+       # @return [Numo::DFloat] (shape: [n_outputs/n_classes])
+       attr_reader :bias_term
+
+       # Create an initial linear model.
+
+       private
+
+       def expand_feature(x)
+         n_samples = x.shape[0]
+         Numo::NArray.hstack([x, Numo::DFloat.ones([n_samples, 1]) * @params[:bias_scale]])
+       end
+
+       def split_weight(w)
+         if w.ndim == 1
+           if fit_bias?
+             [w[0...-1].dup, w[-1]]
+           else
+             [w, 0.0]
+           end
+         elsif fit_bias?
+           [w[true, 0...-1].dup, w[true, -1].dup]
+         else
+           [w, Numo::DFloat.zeros(w.shape[0])]
+         end
+       end
+
+       def fit_bias?
+         @params[:fit_bias] == true
+       end
+     end
+   end
+ end
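
The helpers above are shared by the rewritten regressors that follow: expand_feature appends a constant column so the bias can be learned as one extra weight, and split_weight separates that weight back out after fitting. A minimal standalone sketch of the same idea, assuming only the numo-narray gem; the values and variable names are illustrative and not part of the gem:

    require 'numo/narray'

    bias_scale = 1.0
    x = Numo::DFloat[[1.0, 2.0], [3.0, 4.0]]     # 2 samples, 2 features

    # expand_feature: append a constant bias column scaled by bias_scale.
    x_expanded = Numo::NArray.hstack([x, Numo::DFloat.ones([x.shape[0], 1]) * bias_scale])
    # => shape [2, 3]; the last column is all ones

    # split_weight (single-target case with fit_bias enabled):
    # the last element of the learned weight vector is the intercept.
    w = Numo::DFloat[0.5, -0.2, 0.1]
    weight_vec = w[0...-1].dup                   # => Numo::DFloat[0.5, -0.2]
    bias_term  = w[-1]                           # => 0.1
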
data/lib/rumale/linear_model/elastic_net.rb CHANGED
@@ -1,48 +1,34 @@
  # frozen_string_literal: true

  require 'rumale/base/regressor'
+ require 'rumale/utils'
  require 'rumale/validation'
- require 'rumale/linear_model/base_sgd'
+
+ require_relative 'base_estimator'

  module Rumale
    module LinearModel
-     # ElasticNet is a class that implements Elastic-net Regression
-     # with stochastic gradient descent (SGD) optimization.
+     # ElasticNet is a class that implements Elastic-net Regression with coordinate descent optimization.
      #
      # @example
      #   require 'rumale/linear_model/elastic_net'
      #
-     #   estimator =
-     #     Rumale::LinearModel::ElasticNet.new(reg_param: 0.1, l1_ratio: 0.5, max_iter: 1000, batch_size: 50, random_seed: 1)
+     #   estimator = Rumale::LinearModel::ElasticNet.new(reg_param: 0.1, l1_ratio: 0.5)
      #   estimator.fit(training_samples, training_values)
      #   results = estimator.predict(testing_samples)
      #
      # *Reference*
-     #   - Shalev-Shwartz, S., and Singer, Y., "Pegasos: Primal Estimated sub-GrAdient SOlver for SVM," Proc. ICML'07, pp. 807--814, 2007.
-     #   - Tsuruoka, Y., Tsujii, J., and Ananiadou, S., "Stochastic Gradient Descent Training for L1-regularized Log-linear Models with Cumulative Penalty," Proc. ACL'09, pp. 477--485, 2009.
-     #   - Bottou, L., "Large-Scale Machine Learning with Stochastic Gradient Descent," Proc. COMPSTAT'10, pp. 177--186, 2010.
-     class ElasticNet < BaseSGD
-       include ::Rumale::Base::Regressor
-
-       # Return the weight vector.
-       # @return [Numo::DFloat] (shape: [n_outputs, n_features])
-       attr_reader :weight_vec
-
-       # Return the bias term (a.k.a. intercept).
-       # @return [Numo::DFloat] (shape: [n_outputs])
-       attr_reader :bias_term
+     #   - Friedman, J., Hastie, T., and Tibshirani, R., "Regularization Paths for Generalized Linear Models via Coordinate Descent," Journal of Statistical Software, 33 (1), pp. 1--22, 2010.
+     #   - Simon, N., Friedman, J., and Hastie, T., "A Blockwise Descent Algorithm for Group-penalized Multiresponse and Multinomial Regression," arXiv preprint arXiv:1311.6529, 2013.
+     class ElasticNet < Rumale::LinearModel::BaseEstimator
+       include Rumale::Base::Regressor

-       # Return the random generator for random sampling.
-       # @return [Random]
-       attr_reader :rng
+       # Return the number of iterations performed in coordinate descent optimization.
+       # @return [Integer]
+       attr_reader :n_iter

        # Create a new Elastic-net regressor.
        #
-       # @param learning_rate [Float] The initial value of learning rate.
-       #   The learning rate decreases as the iteration proceeds according to the equation: learning_rate / (1 + decay * t).
-       # @param decay [Float] The smoothing parameter for decreasing learning rate as the iteration proceeds.
-       #   If nil is given, the decay sets to 'reg_param * learning_rate'.
-       # @param momentum [Float] The momentum factor.
        # @param reg_param [Float] The regularization parameter.
        # @param l1_ratio [Float] The elastic-net mixing parameter.
        #   If l1_ratio = 1, the regularization is similar to Lasso.
@@ -52,25 +38,17 @@ module Rumale
        # @param bias_scale [Float] The scale of the bias term.
        # @param max_iter [Integer] The maximum number of epochs that indicates
        #   how many times the whole data is given to the training process.
-       # @param batch_size [Integer] The size of the mini batches.
        # @param tol [Float] The tolerance of loss for terminating optimization.
-       # @param n_jobs [Integer] The number of jobs for running the fit method in parallel.
-       #   If nil is given, the method does not execute in parallel.
-       #   If zero or less is given, it becomes equal to the number of processors.
-       #   This parameter is ignored if the Parallel gem is not loaded.
-       # @param verbose [Boolean] The flag indicating whether to output loss during iteration.
-       # @param random_seed [Integer] The seed value using to initialize the random generator.
-       def initialize(learning_rate: 0.01, decay: nil, momentum: 0.9,
-                      reg_param: 1.0, l1_ratio: 0.5, fit_bias: true, bias_scale: 1.0,
-                      max_iter: 1000, batch_size: 50, tol: 1e-4,
-                      n_jobs: nil, verbose: false, random_seed: nil)
+       def initialize(reg_param: 1.0, l1_ratio: 0.5, fit_bias: true, bias_scale: 1.0, max_iter: 1000, tol: 1e-4)
          super()
-         @params.merge!(method(:initialize).parameters.to_h { |_t, arg| [arg, binding.local_variable_get(arg)] })
-         @params[:decay] ||= @params[:reg_param] * @params[:learning_rate]
-         @params[:random_seed] ||= srand
-         @rng = Random.new(@params[:random_seed])
-         @penalty_type = ELASTICNET_PENALTY
-         @loss_func = ::Rumale::LinearModel::Loss::MeanSquaredError.new
+         @params = {
+           reg_param: reg_param,
+           l1_ratio: l1_ratio,
+           fit_bias: fit_bias,
+           bias_scale: bias_scale,
+           max_iter: max_iter,
+           tol: tol
+         }
        end

        # Fit the model with given training data.
@@ -79,25 +57,19 @@ module Rumale
        # @param y [Numo::DFloat] (shape: [n_samples, n_outputs]) The target values to be used for fitting the model.
        # @return [ElasticNet] The learned regressor itself.
        def fit(x, y)
-         x = ::Rumale::Validation.check_convert_sample_array(x)
-         y = ::Rumale::Validation.check_convert_target_value_array(y)
-         ::Rumale::Validation.check_sample_size(x, y)
+         x = Rumale::Validation.check_convert_sample_array(x)
+         y = Rumale::Validation.check_convert_target_value_array(y)
+         Rumale::Validation.check_sample_size(x, y)

-         n_outputs = y.shape[1].nil? ? 1 : y.shape[1]
-         n_features = x.shape[1]
+         @n_iter = 0
+         x = expand_feature(x) if fit_bias?
+
+         @weight_vec, @bias_term = if single_target?(y)
+                                     partial_fit(x, y)
+                                   else
+                                     partial_fit_multi(x, y)
+                                   end

-         if n_outputs > 1
-           @weight_vec = Numo::DFloat.zeros(n_outputs, n_features)
-           @bias_term = Numo::DFloat.zeros(n_outputs)
-           if enable_parallel?
-             models = parallel_map(n_outputs) { |n| partial_fit(x, y[true, n]) }
-             n_outputs.times { |n| @weight_vec[n, true], @bias_term[n] = models[n] }
-           else
-             n_outputs.times { |n| @weight_vec[n, true], @bias_term[n] = partial_fit(x, y[true, n]) }
-           end
-         else
-           @weight_vec, @bias_term = partial_fit(x, y)
-         end
          self
        end
 
@@ -106,10 +78,89 @@ module Rumale
        # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the values.
        # @return [Numo::DFloat] (shape: [n_samples, n_outputs]) Predicted values per sample.
        def predict(x)
-         x = ::Rumale::Validation.check_convert_sample_array(x)
+         x = Rumale::Validation.check_convert_sample_array(x)

          x.dot(@weight_vec.transpose) + @bias_term
        end
+
+       private
+
+       def partial_fit(x, y)
+         l1_reg = @params[:reg_param] * @params[:l1_ratio]
+         l2_reg = @params[:reg_param] * (1.0 - @params[:l1_ratio])
+         n_features = x.shape[1]
+         w = Numo::DFloat.zeros(n_features)
+         x_norms = (x**2).sum(axis: 0)
+         residual = y - x.dot(w)
+
+         @params[:max_iter].times do |iter|
+           w_err = 0.0
+           n_features.times do |j|
+             next if x_norms[j].zero?
+
+             w_prev = w[j]
+
+             residual += w[j] * x[true, j]
+             z = x[true, j].dot(residual)
+             w[j] = soft_threshold(z, l1_reg).fdiv(x_norms[j] + l2_reg)
+             residual -= w[j] * x[true, j]
+
+             w_err = [w_err, (w[j] - w_prev).abs].max
+           end
+
+           @n_iter = iter + 1
+
+           break if w_err <= @params[:tol]
+         end
+
+         split_weight(w)
+       end
+
+       def partial_fit_multi(x, y)
+         l1_reg = @params[:reg_param] * @params[:l1_ratio]
+         l2_reg = @params[:reg_param] * (1.0 - @params[:l1_ratio])
+         n_features = x.shape[1]
+         n_outputs = y.shape[1]
+         w = Numo::DFloat.zeros(n_outputs, n_features)
+         x_norms = (x**2).sum(axis: 0)
+         residual = y - x.dot(w.transpose)
+
+         @params[:max_iter].times do |iter|
+           w_err = 0.0
+           n_features.times do |j|
+             next if x_norms[j].zero?
+
+             w_prev = w[true, j]
+
+             residual += x[true, j].expand_dims(1) * w[true, j]
+             z = x[true, j].dot(residual)
+             w[true, j] = [1.0 - l1_reg.fdiv(Math.sqrt((z**2).sum)), 0.0].max.fdiv(x_norms[j] + l2_reg) * z
+             residual -= x[true, j].expand_dims(1) * w[true, j]
+
+             w_err = [w_err, (w[true, j] - w_prev).abs.max].max
+           end
+
+           @n_iter = iter + 1
+
+           break if w_err <= @params[:tol]
+         end
+
+         split_weight(w)
+       end
+
+       def soft_threshold(z, threshold)
+         sign(z) * [z.abs - threshold, 0].max
+       end
+
+       def sign(z)
+         return 0.0 if z.zero?
+
+         z.positive? ? 1.0 : -1.0
+       end
+
+       def single_target?(y)
+         y.ndim == 1
+       end
      end
    end
  end
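
The partial_fit and partial_fit_multi methods above implement cyclic coordinate descent: for each feature j, the residual is updated with that feature's current contribution, the correlation z = x_j . residual is computed, and the new w[j] is the soft-thresholded z divided by ||x_j||^2 plus the L2 penalty. A standalone sketch of one such single-coordinate update in plain Ruby; the numbers are made up for illustration, and the gem itself operates on Numo arrays:

    # Soft-thresholding operator: shrink z toward zero by `threshold`,
    # clipping to zero when |z| <= threshold.
    def soft_threshold(z, threshold)
      sign = z.zero? ? 0.0 : (z.positive? ? 1.0 : -1.0)
      sign * [z.abs - threshold, 0.0].max
    end

    reg_param = 1.0
    l1_ratio  = 0.5
    l1_reg = reg_param * l1_ratio           # L1 part of the penalty => 0.5
    l2_reg = reg_param * (1.0 - l1_ratio)   # L2 part of the penalty => 0.5

    z        = 2.4   # x_j . residual, with w_j's own contribution added back
    x_norm_j = 4.0   # squared norm of feature column j

    w_j = soft_threshold(z, l1_reg).fdiv(x_norm_j + l2_reg)
    # => (2.4 - 0.5) / 4.5, roughly 0.422; with l1_ratio = 1 this reduces to the Lasso update
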
data/lib/rumale/linear_model/lasso.rb CHANGED
@@ -1,72 +1,49 @@
  # frozen_string_literal: true

+ require 'rumale/base/estimator'
  require 'rumale/base/regressor'
  require 'rumale/validation'
- require 'rumale/linear_model/base_sgd'
+
+ require_relative 'base_estimator'

  module Rumale
    module LinearModel
-     # Lasso is a class that implements Lasso Regression
-     # with stochastic gradient descent (SGD) optimization.
+     # Lasso is a class that implements Lasso Regression with coordinate descent optimization.
      #
      # @example
      #   require 'rumale/linear_model/lasso'
      #
-     #   estimator =
-     #     Rumale::LinearModel::Lasso.new(reg_param: 0.1, max_iter: 1000, batch_size: 20, random_seed: 1)
+     #   estimator = Rumale::LinearModel::Lasso.new(reg_param: 0.1)
      #   estimator.fit(training_samples, training_values)
      #   results = estimator.predict(testing_samples)
      #
      # *Reference*
-     #   - Shalev-Shwartz, S., and Singer, Y., "Pegasos: Primal Estimated sub-GrAdient SOlver for SVM," Proc. ICML'07, pp. 807--814, 2007.
-     #   - Tsuruoka, Y., Tsujii, J., and Ananiadou, S., "Stochastic Gradient Descent Training for L1-regularized Log-linear Models with Cumulative Penalty," Proc. ACL'09, pp. 477--485, 2009.
-     #   - Bottou, L., "Large-Scale Machine Learning with Stochastic Gradient Descent," Proc. COMPSTAT'10, pp. 177--186, 2010.
-     class Lasso < BaseSGD
-       include ::Rumale::Base::Regressor
-
-       # Return the weight vector.
-       # @return [Numo::DFloat] (shape: [n_outputs, n_features])
-       attr_reader :weight_vec
-
-       # Return the bias term (a.k.a. intercept).
-       # @return [Numo::DFloat] (shape: [n_outputs])
-       attr_reader :bias_term
+     #   - Friedman, J., Hastie, T., and Tibshirani, R., "Regularization Paths for Generalized Linear Models via Coordinate Descent," Journal of Statistical Software, 33 (1), pp. 1--22, 2010.
+     #   - Simon, N., Friedman, J., and Hastie, T., "A Blockwise Descent Algorithm for Group-penalized Multiresponse and Multinomial Regression," arXiv preprint arXiv:1311.6529, 2013.
+     class Lasso < Rumale::LinearModel::BaseEstimator
+       include Rumale::Base::Regressor

-       # Return the random generator for random sampling.
-       # @return [Random]
-       attr_reader :rng
+       # Return the number of iterations performed in coordinate descent optimization.
+       # @return [Integer]
+       attr_reader :n_iter

        # Create a new Lasso regressor.
        #
-       # @param learning_rate [Float] The initial value of learning rate.
-       #   The learning rate decreases as the iteration proceeds according to the equation: learning_rate / (1 + decay * t).
-       # @param decay [Float] The smoothing parameter for decreasing learning rate as the iteration proceeds.
-       #   If nil is given, the decay sets to 'reg_param * learning_rate'.
-       # @param momentum [Float] The momentum factor.
        # @param reg_param [Float] The regularization parameter.
        # @param fit_bias [Boolean] The flag indicating whether to fit the bias term.
        # @param bias_scale [Float] The scale of the bias term.
        # @param max_iter [Integer] The maximum number of epochs that indicates
        #   how many times the whole data is given to the training process.
-       # @param batch_size [Integer] The size of the mini batches.
        # @param tol [Float] The tolerance of loss for terminating optimization.
-       # @param n_jobs [Integer] The number of jobs for running the fit method in parallel.
-       #   If nil is given, the method does not execute in parallel.
-       #   If zero or less is given, it becomes equal to the number of processors.
-       #   This parameter is ignored if the Parallel gem is not loaded.
-       # @param verbose [Boolean] The flag indicating whether to output loss during iteration.
-       # @param random_seed [Integer] The seed value using to initialize the random generator.
-       def initialize(learning_rate: 0.01, decay: nil, momentum: 0.9,
-                      reg_param: 1.0, fit_bias: true, bias_scale: 1.0,
-                      max_iter: 1000, batch_size: 50, tol: 1e-4,
-                      n_jobs: nil, verbose: false, random_seed: nil)
+       def initialize(reg_param: 1.0, fit_bias: true, bias_scale: 1.0, max_iter: 1000, tol: 1e-4)
          super()
-         @params.merge!(method(:initialize).parameters.to_h { |_t, arg| [arg, binding.local_variable_get(arg)] })
-         @params[:decay] ||= @params[:reg_param] * @params[:learning_rate]
-         @params[:random_seed] ||= srand
-         @rng = Random.new(@params[:random_seed])
-         @penalty_type = L1_PENALTY
-         @loss_func = ::Rumale::LinearModel::Loss::MeanSquaredError.new
+         @params = {
+           reg_param: reg_param,
+           fit_bias: fit_bias,
+           bias_scale: bias_scale,
+           max_iter: max_iter,
+           tol: tol
+         }
        end

        # Fit the model with given training data.
@@ -75,25 +52,19 @@ module Rumale
        # @param y [Numo::DFloat] (shape: [n_samples, n_outputs]) The target values to be used for fitting the model.
        # @return [Lasso] The learned regressor itself.
        def fit(x, y)
-         x = ::Rumale::Validation.check_convert_sample_array(x)
-         y = ::Rumale::Validation.check_convert_target_value_array(y)
-         ::Rumale::Validation.check_sample_size(x, y)
+         x = Rumale::Validation.check_convert_sample_array(x)
+         y = Rumale::Validation.check_convert_target_value_array(y)
+         Rumale::Validation.check_sample_size(x, y)

-         n_outputs = y.shape[1].nil? ? 1 : y.shape[1]
-         n_features = x.shape[1]
+         @n_iter = 0
+         x = expand_feature(x) if fit_bias?
+
+         @weight_vec, @bias_term = if single_target?(y)
+                                     partial_fit(x, y)
+                                   else
+                                     partial_fit_multi(x, y)
+                                   end

-         if n_outputs > 1
-           @weight_vec = Numo::DFloat.zeros(n_outputs, n_features)
-           @bias_term = Numo::DFloat.zeros(n_outputs)
-           if enable_parallel?
-             models = parallel_map(n_outputs) { |n| partial_fit(x, y[true, n]) }
-             n_outputs.times { |n| @weight_vec[n, true], @bias_term[n] = models[n] }
-           else
-             n_outputs.times { |n| @weight_vec[n, true], @bias_term[n] = partial_fit(x, y[true, n]) }
-           end
-         else
-           @weight_vec, @bias_term = partial_fit(x, y)
-         end
          self
        end
 
@@ -102,10 +73,85 @@ module Rumale
        # @param x [Numo::DFloat] (shape: [n_samples, n_features]) The samples to predict the values.
        # @return [Numo::DFloat] (shape: [n_samples, n_outputs]) Predicted values per sample.
        def predict(x)
-         x = ::Rumale::Validation.check_convert_sample_array(x)
+         x = Rumale::Validation.check_convert_sample_array(x)

          x.dot(@weight_vec.transpose) + @bias_term
        end
+
+       private
+
+       def partial_fit(x, y)
+         n_features = x.shape[1]
+         w = Numo::DFloat.zeros(n_features)
+         x_norms = (x**2).sum(axis: 0)
+         residual = y - x.dot(w)
+
+         @params[:max_iter].times do |iter|
+           w_err = 0.0
+           n_features.times do |j|
+             next if x_norms[j].zero?
+
+             w_prev = w[j]
+
+             residual += w[j] * x[true, j]
+             z = x[true, j].dot(residual)
+             w[j] = soft_threshold(z, @params[:reg_param]).fdiv(x_norms[j])
+             residual -= w[j] * x[true, j]
+
+             w_err = [w_err, (w[j] - w_prev).abs].max
+           end
+
+           @n_iter = iter + 1
+
+           break if w_err <= @params[:tol]
+         end
+
+         split_weight(w)
+       end
+
+       def partial_fit_multi(x, y)
+         n_features = x.shape[1]
+         n_outputs = y.shape[1]
+         w = Numo::DFloat.zeros(n_outputs, n_features)
+         x_norms = (x**2).sum(axis: 0)
+         residual = y - x.dot(w.transpose)
+
+         @params[:max_iter].times do |iter|
+           w_err = 0.0
+           n_features.times do |j|
+             next if x_norms[j].zero?
+
+             w_prev = w[true, j]
+
+             residual += x[true, j].expand_dims(1) * w[true, j]
+             z = x[true, j].dot(residual)
+             w[true, j] = [1.0 - @params[:reg_param].fdiv(Math.sqrt((z**2).sum)), 0.0].max.fdiv(x_norms[j]) * z
+             residual -= x[true, j].expand_dims(1) * w[true, j]
+
+             w_err = [w_err, (w[true, j] - w_prev).abs.max].max
+           end
+
+           @n_iter = iter + 1
+
+           break if w_err <= @params[:tol]
+         end
+
+         split_weight(w)
+       end
+
+       def soft_threshold(z, threshold)
+         sign(z) * [z.abs - threshold, 0].max
+       end
+
+       def sign(z)
+         return 0.0 if z.zero?
+
+         z.positive? ? 1.0 : -1.0
+       end
+
+       def single_target?(y)
+         y.ndim == 1
+       end
      end
    end
  end
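
Taken together, the 0.26.0 versions of ElasticNet and Lasso drop every SGD-specific option (learning_rate, decay, momentum, batch_size, n_jobs, verbose, random_seed), replace the rng reader with n_iter, and keep the rest of the fit/predict interface unchanged. A usage sketch under the new constructor shown above; training_samples, training_values, and testing_samples are placeholders, as in the gem's own doc examples:

    require 'rumale/linear_model/lasso'

    # Coordinate descent stops after max_iter epochs or once the largest
    # coordinate-wise weight change falls below tol.
    estimator = Rumale::LinearModel::Lasso.new(reg_param: 0.1, max_iter: 1000, tol: 1e-4)
    estimator.fit(training_samples, training_values)
    puts estimator.n_iter                  # number of epochs actually performed
    results = estimator.predict(testing_samples)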