snowflake-ml-python 1.1.0__py3-none-any.whl → 1.1.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- snowflake/cortex/_complete.py +1 -1
- snowflake/cortex/_extract_answer.py +1 -1
- snowflake/cortex/_sentiment.py +1 -1
- snowflake/cortex/_summarize.py +1 -1
- snowflake/cortex/_translate.py +1 -1
- snowflake/ml/_internal/env_utils.py +68 -6
- snowflake/ml/_internal/file_utils.py +34 -4
- snowflake/ml/_internal/telemetry.py +79 -91
- snowflake/ml/_internal/utils/identifier.py +78 -72
- snowflake/ml/_internal/utils/retryable_http.py +16 -4
- snowflake/ml/_internal/utils/spcs_attribution_utils.py +122 -0
- snowflake/ml/dataset/dataset.py +1 -1
- snowflake/ml/model/_api.py +21 -14
- snowflake/ml/model/_client/model/model_impl.py +176 -0
- snowflake/ml/model/_client/model/model_method_info.py +19 -0
- snowflake/ml/model/_client/model/model_version_impl.py +291 -0
- snowflake/ml/model/_client/ops/metadata_ops.py +107 -0
- snowflake/ml/model/_client/ops/model_ops.py +308 -0
- snowflake/ml/model/_client/sql/model.py +75 -0
- snowflake/ml/model/_client/sql/model_version.py +213 -0
- snowflake/ml/model/_client/sql/stage.py +40 -0
- snowflake/ml/model/_deploy_client/image_builds/server_image_builder.py +3 -4
- snowflake/ml/model/_deploy_client/image_builds/templates/image_build_job_spec_template +24 -8
- snowflake/ml/model/_deploy_client/image_builds/templates/kaniko_shell_script_template +23 -0
- snowflake/ml/model/_deploy_client/snowservice/deploy.py +14 -2
- snowflake/ml/model/_deploy_client/utils/constants.py +1 -0
- snowflake/ml/model/_deploy_client/warehouse/deploy.py +2 -2
- snowflake/ml/model/_model_composer/model_composer.py +31 -9
- snowflake/ml/model/_model_composer/model_manifest/model_manifest.py +25 -10
- snowflake/ml/model/_model_composer/model_manifest/model_manifest_schema.py +2 -2
- snowflake/ml/model/_model_composer/model_method/infer_function.py_template +2 -1
- snowflake/ml/model/_model_composer/model_method/model_method.py +34 -3
- snowflake/ml/model/_model_composer/model_runtime/model_runtime.py +1 -1
- snowflake/ml/model/_packager/model_handlers/huggingface_pipeline.py +3 -1
- snowflake/ml/model/_packager/model_handlers/snowmlmodel.py +10 -28
- snowflake/ml/model/_packager/model_meta/model_meta.py +18 -16
- snowflake/ml/model/_signatures/snowpark_handler.py +1 -1
- snowflake/ml/model/model_signature.py +108 -53
- snowflake/ml/model/type_hints.py +1 -0
- snowflake/ml/modeling/_internal/distributed_hpo_trainer.py +554 -0
- snowflake/ml/modeling/_internal/estimator_protocols.py +1 -60
- snowflake/ml/modeling/_internal/model_specifications.py +146 -0
- snowflake/ml/modeling/_internal/model_trainer.py +13 -0
- snowflake/ml/modeling/_internal/model_trainer_builder.py +78 -0
- snowflake/ml/modeling/_internal/pandas_trainer.py +54 -0
- snowflake/ml/modeling/_internal/snowpark_handlers.py +6 -760
- snowflake/ml/modeling/_internal/snowpark_trainer.py +331 -0
- snowflake/ml/modeling/calibration/calibrated_classifier_cv.py +108 -135
- snowflake/ml/modeling/cluster/affinity_propagation.py +106 -135
- snowflake/ml/modeling/cluster/agglomerative_clustering.py +106 -135
- snowflake/ml/modeling/cluster/birch.py +106 -135
- snowflake/ml/modeling/cluster/bisecting_k_means.py +106 -135
- snowflake/ml/modeling/cluster/dbscan.py +106 -135
- snowflake/ml/modeling/cluster/feature_agglomeration.py +106 -135
- snowflake/ml/modeling/cluster/k_means.py +105 -135
- snowflake/ml/modeling/cluster/mean_shift.py +106 -135
- snowflake/ml/modeling/cluster/mini_batch_k_means.py +105 -135
- snowflake/ml/modeling/cluster/optics.py +106 -135
- snowflake/ml/modeling/cluster/spectral_biclustering.py +106 -135
- snowflake/ml/modeling/cluster/spectral_clustering.py +106 -135
- snowflake/ml/modeling/cluster/spectral_coclustering.py +106 -135
- snowflake/ml/modeling/compose/column_transformer.py +106 -135
- snowflake/ml/modeling/compose/transformed_target_regressor.py +108 -135
- snowflake/ml/modeling/covariance/elliptic_envelope.py +106 -135
- snowflake/ml/modeling/covariance/empirical_covariance.py +99 -128
- snowflake/ml/modeling/covariance/graphical_lasso.py +106 -135
- snowflake/ml/modeling/covariance/graphical_lasso_cv.py +106 -135
- snowflake/ml/modeling/covariance/ledoit_wolf.py +104 -133
- snowflake/ml/modeling/covariance/min_cov_det.py +106 -135
- snowflake/ml/modeling/covariance/oas.py +99 -128
- snowflake/ml/modeling/covariance/shrunk_covariance.py +103 -132
- snowflake/ml/modeling/decomposition/dictionary_learning.py +106 -135
- snowflake/ml/modeling/decomposition/factor_analysis.py +106 -135
- snowflake/ml/modeling/decomposition/fast_ica.py +106 -135
- snowflake/ml/modeling/decomposition/incremental_pca.py +106 -135
- snowflake/ml/modeling/decomposition/kernel_pca.py +106 -135
- snowflake/ml/modeling/decomposition/mini_batch_dictionary_learning.py +106 -135
- snowflake/ml/modeling/decomposition/mini_batch_sparse_pca.py +106 -135
- snowflake/ml/modeling/decomposition/pca.py +106 -135
- snowflake/ml/modeling/decomposition/sparse_pca.py +106 -135
- snowflake/ml/modeling/decomposition/truncated_svd.py +106 -135
- snowflake/ml/modeling/discriminant_analysis/linear_discriminant_analysis.py +108 -135
- snowflake/ml/modeling/discriminant_analysis/quadratic_discriminant_analysis.py +108 -135
- snowflake/ml/modeling/ensemble/ada_boost_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/ada_boost_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/bagging_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/bagging_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/extra_trees_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/extra_trees_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/gradient_boosting_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/gradient_boosting_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/hist_gradient_boosting_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/hist_gradient_boosting_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/isolation_forest.py +106 -135
- snowflake/ml/modeling/ensemble/random_forest_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/random_forest_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/stacking_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/voting_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/voting_regressor.py +108 -135
- snowflake/ml/modeling/feature_selection/generic_univariate_select.py +101 -128
- snowflake/ml/modeling/feature_selection/select_fdr.py +99 -126
- snowflake/ml/modeling/feature_selection/select_fpr.py +99 -126
- snowflake/ml/modeling/feature_selection/select_fwe.py +99 -126
- snowflake/ml/modeling/feature_selection/select_k_best.py +100 -127
- snowflake/ml/modeling/feature_selection/select_percentile.py +99 -126
- snowflake/ml/modeling/feature_selection/sequential_feature_selector.py +106 -135
- snowflake/ml/modeling/feature_selection/variance_threshold.py +95 -124
- snowflake/ml/modeling/framework/base.py +83 -1
- snowflake/ml/modeling/gaussian_process/gaussian_process_classifier.py +108 -135
- snowflake/ml/modeling/gaussian_process/gaussian_process_regressor.py +108 -135
- snowflake/ml/modeling/impute/iterative_imputer.py +106 -135
- snowflake/ml/modeling/impute/knn_imputer.py +106 -135
- snowflake/ml/modeling/impute/missing_indicator.py +106 -135
- snowflake/ml/modeling/impute/simple_imputer.py +9 -1
- snowflake/ml/modeling/kernel_approximation/additive_chi2_sampler.py +96 -125
- snowflake/ml/modeling/kernel_approximation/nystroem.py +106 -135
- snowflake/ml/modeling/kernel_approximation/polynomial_count_sketch.py +106 -135
- snowflake/ml/modeling/kernel_approximation/rbf_sampler.py +105 -134
- snowflake/ml/modeling/kernel_approximation/skewed_chi2_sampler.py +103 -132
- snowflake/ml/modeling/kernel_ridge/kernel_ridge.py +108 -135
- snowflake/ml/modeling/lightgbm/lgbm_classifier.py +90 -118
- snowflake/ml/modeling/lightgbm/lgbm_regressor.py +90 -118
- snowflake/ml/modeling/linear_model/ard_regression.py +108 -135
- snowflake/ml/modeling/linear_model/bayesian_ridge.py +108 -135
- snowflake/ml/modeling/linear_model/elastic_net.py +108 -135
- snowflake/ml/modeling/linear_model/elastic_net_cv.py +108 -135
- snowflake/ml/modeling/linear_model/gamma_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/huber_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/lars.py +108 -135
- snowflake/ml/modeling/linear_model/lars_cv.py +108 -135
- snowflake/ml/modeling/linear_model/lasso.py +108 -135
- snowflake/ml/modeling/linear_model/lasso_cv.py +108 -135
- snowflake/ml/modeling/linear_model/lasso_lars.py +108 -135
- snowflake/ml/modeling/linear_model/lasso_lars_cv.py +108 -135
- snowflake/ml/modeling/linear_model/lasso_lars_ic.py +108 -135
- snowflake/ml/modeling/linear_model/linear_regression.py +108 -135
- snowflake/ml/modeling/linear_model/logistic_regression.py +108 -135
- snowflake/ml/modeling/linear_model/logistic_regression_cv.py +108 -135
- snowflake/ml/modeling/linear_model/multi_task_elastic_net.py +108 -135
- snowflake/ml/modeling/linear_model/multi_task_elastic_net_cv.py +108 -135
- snowflake/ml/modeling/linear_model/multi_task_lasso.py +108 -135
- snowflake/ml/modeling/linear_model/multi_task_lasso_cv.py +108 -135
- snowflake/ml/modeling/linear_model/orthogonal_matching_pursuit.py +108 -135
- snowflake/ml/modeling/linear_model/passive_aggressive_classifier.py +108 -135
- snowflake/ml/modeling/linear_model/passive_aggressive_regressor.py +107 -135
- snowflake/ml/modeling/linear_model/perceptron.py +107 -135
- snowflake/ml/modeling/linear_model/poisson_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/ransac_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/ridge.py +108 -135
- snowflake/ml/modeling/linear_model/ridge_classifier.py +108 -135
- snowflake/ml/modeling/linear_model/ridge_classifier_cv.py +108 -135
- snowflake/ml/modeling/linear_model/ridge_cv.py +108 -135
- snowflake/ml/modeling/linear_model/sgd_classifier.py +108 -135
- snowflake/ml/modeling/linear_model/sgd_one_class_svm.py +106 -135
- snowflake/ml/modeling/linear_model/sgd_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/theil_sen_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/tweedie_regressor.py +108 -135
- snowflake/ml/modeling/manifold/isomap.py +106 -135
- snowflake/ml/modeling/manifold/mds.py +106 -135
- snowflake/ml/modeling/manifold/spectral_embedding.py +106 -135
- snowflake/ml/modeling/manifold/tsne.py +106 -135
- snowflake/ml/modeling/metrics/classification.py +196 -55
- snowflake/ml/modeling/metrics/correlation.py +4 -2
- snowflake/ml/modeling/metrics/covariance.py +7 -4
- snowflake/ml/modeling/metrics/ranking.py +32 -16
- snowflake/ml/modeling/metrics/regression.py +60 -32
- snowflake/ml/modeling/mixture/bayesian_gaussian_mixture.py +106 -135
- snowflake/ml/modeling/mixture/gaussian_mixture.py +106 -135
- snowflake/ml/modeling/model_selection/grid_search_cv.py +91 -148
- snowflake/ml/modeling/model_selection/randomized_search_cv.py +93 -154
- snowflake/ml/modeling/multiclass/one_vs_one_classifier.py +105 -132
- snowflake/ml/modeling/multiclass/one_vs_rest_classifier.py +108 -135
- snowflake/ml/modeling/multiclass/output_code_classifier.py +108 -135
- snowflake/ml/modeling/naive_bayes/bernoulli_nb.py +108 -135
- snowflake/ml/modeling/naive_bayes/categorical_nb.py +108 -135
- snowflake/ml/modeling/naive_bayes/complement_nb.py +108 -135
- snowflake/ml/modeling/naive_bayes/gaussian_nb.py +98 -125
- snowflake/ml/modeling/naive_bayes/multinomial_nb.py +107 -134
- snowflake/ml/modeling/neighbors/k_neighbors_classifier.py +108 -135
- snowflake/ml/modeling/neighbors/k_neighbors_regressor.py +108 -135
- snowflake/ml/modeling/neighbors/kernel_density.py +106 -135
- snowflake/ml/modeling/neighbors/local_outlier_factor.py +106 -135
- snowflake/ml/modeling/neighbors/nearest_centroid.py +108 -135
- snowflake/ml/modeling/neighbors/nearest_neighbors.py +106 -135
- snowflake/ml/modeling/neighbors/neighborhood_components_analysis.py +108 -135
- snowflake/ml/modeling/neighbors/radius_neighbors_classifier.py +108 -135
- snowflake/ml/modeling/neighbors/radius_neighbors_regressor.py +108 -135
- snowflake/ml/modeling/neural_network/bernoulli_rbm.py +106 -135
- snowflake/ml/modeling/neural_network/mlp_classifier.py +108 -135
- snowflake/ml/modeling/neural_network/mlp_regressor.py +108 -135
- snowflake/ml/modeling/parameters/disable_distributed_hpo.py +2 -6
- snowflake/ml/modeling/preprocessing/binarizer.py +25 -8
- snowflake/ml/modeling/preprocessing/k_bins_discretizer.py +9 -4
- snowflake/ml/modeling/preprocessing/label_encoder.py +31 -11
- snowflake/ml/modeling/preprocessing/max_abs_scaler.py +27 -9
- snowflake/ml/modeling/preprocessing/min_max_scaler.py +42 -14
- snowflake/ml/modeling/preprocessing/normalizer.py +9 -4
- snowflake/ml/modeling/preprocessing/one_hot_encoder.py +26 -10
- snowflake/ml/modeling/preprocessing/ordinal_encoder.py +37 -13
- snowflake/ml/modeling/preprocessing/polynomial_features.py +106 -135
- snowflake/ml/modeling/preprocessing/robust_scaler.py +39 -13
- snowflake/ml/modeling/preprocessing/standard_scaler.py +36 -12
- snowflake/ml/modeling/semi_supervised/label_propagation.py +108 -135
- snowflake/ml/modeling/semi_supervised/label_spreading.py +108 -135
- snowflake/ml/modeling/svm/linear_svc.py +108 -135
- snowflake/ml/modeling/svm/linear_svr.py +108 -135
- snowflake/ml/modeling/svm/nu_svc.py +108 -135
- snowflake/ml/modeling/svm/nu_svr.py +108 -135
- snowflake/ml/modeling/svm/svc.py +108 -135
- snowflake/ml/modeling/svm/svr.py +108 -135
- snowflake/ml/modeling/tree/decision_tree_classifier.py +108 -135
- snowflake/ml/modeling/tree/decision_tree_regressor.py +108 -135
- snowflake/ml/modeling/tree/extra_tree_classifier.py +108 -135
- snowflake/ml/modeling/tree/extra_tree_regressor.py +108 -135
- snowflake/ml/modeling/xgboost/xgb_classifier.py +108 -136
- snowflake/ml/modeling/xgboost/xgb_regressor.py +108 -136
- snowflake/ml/modeling/xgboost/xgbrf_classifier.py +108 -136
- snowflake/ml/modeling/xgboost/xgbrf_regressor.py +108 -136
- snowflake/ml/registry/model_registry.py +2 -0
- snowflake/ml/registry/registry.py +215 -0
- snowflake/ml/version.py +1 -1
- {snowflake_ml_python-1.1.0.dist-info → snowflake_ml_python-1.1.2.dist-info}/METADATA +34 -1
- snowflake_ml_python-1.1.2.dist-info/RECORD +347 -0
- snowflake_ml_python-1.1.0.dist-info/RECORD +0 -331
- {snowflake_ml_python-1.1.0.dist-info → snowflake_ml_python-1.1.2.dist-info}/WHEEL +0 -0
The rendered file diff below is from snowflake/ml/modeling/ensemble/gradient_boosting_classifier.py:

```diff
@@ -22,17 +22,19 @@ from sklearn.utils.metaestimators import available_if
 from snowflake.ml.modeling.framework.base import BaseTransformer, _process_cols
 from snowflake.ml._internal import telemetry
 from snowflake.ml._internal.exceptions import error_codes, exceptions, modeling_error_messages
+from snowflake.ml._internal.env_utils import SNOWML_SPROC_ENV
 from snowflake.ml._internal.utils import pkg_version_utils, identifier
-from snowflake.snowpark import DataFrame
+from snowflake.snowpark import DataFrame, Session
 from snowflake.snowpark._internal.type_utils import convert_sp_to_sf_type
 from snowflake.ml.modeling._internal.snowpark_handlers import SnowparkHandlers as HandlersImpl
+from snowflake.ml.modeling._internal.model_trainer_builder import ModelTrainerBuilder
+from snowflake.ml.modeling._internal.model_trainer import ModelTrainer
 from snowflake.ml.modeling._internal.estimator_utils import (
     gather_dependencies,
     original_estimator_has_callable,
     transform_snowml_obj_to_sklearn_obj,
     validate_sklearn_args,
 )
-from snowflake.ml.modeling._internal.snowpark_handlers import SklearnWrapperProvider
 from snowflake.ml.modeling._internal.estimator_protocols import FitPredictHandlers

 from snowflake.ml.model.model_signature import (
@@ -52,7 +54,6 @@ _PROJECT = "ModelDevelopment"
 _SUBPROJECT = "".join([s.capitalize() for s in "sklearn.ensemble".replace("sklearn.", "").split("_")])


-
 class GradientBoostingClassifier(BaseTransformer):
     r"""Gradient Boosting for classification
     For more details on this class, see [sklearn.ensemble.GradientBoostingClassifier]
@@ -60,6 +61,51 @@ class GradientBoostingClassifier(BaseTransformer):

     Parameters
     ----------
+
+    input_cols: Optional[Union[str, List[str]]]
+        A string or list of strings representing column names that contain features.
+        If this parameter is not specified, all columns in the input DataFrame except
+        the columns specified by label_cols, sample_weight_col, and passthrough_cols
+        parameters are considered input columns. Input columns can also be set after
+        initialization with the `set_input_cols` method.
+
+    label_cols: Optional[Union[str, List[str]]]
+        A string or list of strings representing column names that contain labels.
+        Label columns must be specified with this parameter during initialization
+        or with the `set_label_cols` method before fitting.
+
+    output_cols: Optional[Union[str, List[str]]]
+        A string or list of strings representing column names that will store the
+        output of predict and transform operations. The length of output_cols must
+        match the expected number of output columns from the specific predictor or
+        transformer class used.
+        If you omit this parameter, output column names are derived by adding an
+        OUTPUT_ prefix to the label column names for supervised estimators, or
+        OUTPUT_<IDX> for unsupervised estimators. These inferred output column names
+        work for predictors, but output_cols must be set explicitly for transformers.
+        In general, explicitly specifying output column names is clearer, especially
+        if you don't specify the input column names.
+        To transform in place, pass the same names for input_cols and output_cols.
+        be set explicitly for transformers. Output columns can also be set after
+        initialization with the `set_output_cols` method.
+
+    sample_weight_col: Optional[str]
+        A string representing the column name containing the sample weights.
+        This argument is only required when working with weighted datasets. Sample
+        weight column can also be set after initialization with the
+        `set_sample_weight_col` method.
+
+    passthrough_cols: Optional[Union[str, List[str]]]
+        A string or a list of strings indicating column names to be excluded from any
+        operations (such as train, transform, or inference). These specified column(s)
+        will remain untouched throughout the process. This option is helpful in scenarios
+        requiring automatic input_cols inference, but need to avoid using specific
+        columns, like index columns, during training or inference. Passthrough columns
+        can also be set after initialization with the `set_passthrough_cols` method.
+
+    drop_input_cols: Optional[bool], default=False
+        If set, the response of predict(), transform() methods will not contain input columns.
+
     loss: {'log_loss', 'exponential'}, default='log_loss'
         The loss function to be optimized. 'log_loss' refers to binomial and
         multinomial deviance, the same as used in logistic regression.
```
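The hunk above moves the column-handling parameters (input_cols, label_cols, output_cols, sample_weight_col, the new passthrough_cols, and drop_input_cols) to the top of the estimator docstring. A minimal construction sketch based on those descriptions; the column names are hypothetical:

```python
# Minimal sketch based on the parameter descriptions above; column names are made up.
from snowflake.ml.modeling.ensemble import GradientBoostingClassifier

clf = GradientBoostingClassifier(
    input_cols=["FEATURE_1", "FEATURE_2"],  # optional; inferred from the DataFrame if omitted
    label_cols=["LABEL"],                   # required before fitting
    output_cols=["PREDICTION"],             # defaults to OUTPUT_-prefixed label names
    passthrough_cols=["ROW_ID"],            # added in this diff: excluded from train/transform/inference
    drop_input_cols=False,
)
```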
```diff
@@ -219,35 +265,6 @@ class GradientBoostingClassifier(BaseTransformer):
         ``ccp_alpha`` will be chosen. By default, no pruning is performed.
         Values must be in the range `[0.0, inf)`.
         See :ref:`minimal_cost_complexity_pruning` for details.
-
-    input_cols: Optional[Union[str, List[str]]]
-        A string or list of strings representing column names that contain features.
-        If this parameter is not specified, all columns in the input DataFrame except
-        the columns specified by label_cols and sample_weight_col parameters are
-        considered input columns.
-
-    label_cols: Optional[Union[str, List[str]]]
-        A string or list of strings representing column names that contain labels.
-        This is a required param for estimators, as there is no way to infer these
-        columns. If this parameter is not specified, then object is fitted without
-        labels (like a transformer).
-
-    output_cols: Optional[Union[str, List[str]]]
-        A string or list of strings representing column names that will store the
-        output of predict and transform operations. The length of output_cols must
-        match the expected number of output columns from the specific estimator or
-        transformer class used.
-        If this parameter is not specified, output column names are derived by
-        adding an OUTPUT_ prefix to the label column names. These inferred output
-        column names work for estimator's predict() method, but output_cols must
-        be set explicitly for transformers.
-
-    sample_weight_col: Optional[str]
-        A string representing the column name containing the sample weights.
-        This argument is only required when working with weighted datasets.
-
-    drop_input_cols: Optional[bool], default=False
-        If set, the response of predict(), transform() methods will not contain input columns.
     """

     def __init__(  # type: ignore[no-untyped-def]
@@ -276,6 +293,7 @@ class GradientBoostingClassifier(BaseTransformer):
         input_cols: Optional[Union[str, Iterable[str]]] = None,
         output_cols: Optional[Union[str, Iterable[str]]] = None,
         label_cols: Optional[Union[str, Iterable[str]]] = None,
+        passthrough_cols: Optional[Union[str, Iterable[str]]] = None,
         drop_input_cols: Optional[bool] = False,
         sample_weight_col: Optional[str] = None,
     ) -> None:
@@ -284,9 +302,10 @@ class GradientBoostingClassifier(BaseTransformer):
         self.set_input_cols(input_cols)
         self.set_output_cols(output_cols)
         self.set_label_cols(label_cols)
+        self.set_passthrough_cols(passthrough_cols)
         self.set_drop_input_cols(drop_input_cols)
         self.set_sample_weight_col(sample_weight_col)
-        deps = set(
+        deps: Set[str] = set([f'numpy=={np.__version__}', f'scikit-learn=={sklearn.__version__}', f'cloudpickle=={cp.__version__}'])

         self._deps = list(deps)

```
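The constructor now pins the estimator's core dependencies to the exact versions installed in the client environment. A small illustration of what that set evaluates to; the printed versions are only examples:

```python
# Illustration of the dependency pinning shown in the hunk above; the printed
# versions are examples and depend on the local environment.
import cloudpickle as cp
import numpy as np
import sklearn

deps = {
    f"numpy=={np.__version__}",
    f"scikit-learn=={sklearn.__version__}",
    f"cloudpickle=={cp.__version__}",
}
print(sorted(deps))  # e.g. ['cloudpickle==2.2.1', 'numpy==1.24.3', 'scikit-learn==1.3.0']
```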
```diff
@@ -314,13 +333,14 @@ class GradientBoostingClassifier(BaseTransformer):
             args=init_args,
             klass=sklearn.ensemble.GradientBoostingClassifier
         )
-        self._sklearn_object = sklearn.ensemble.GradientBoostingClassifier(
+        self._sklearn_object: Any = sklearn.ensemble.GradientBoostingClassifier(
             **cleaned_up_init_args,
         )
         self._model_signature_dict: Optional[Dict[str, ModelSignature]] = None
         # If user used snowpark dataframe during fit, here it stores the snowpark input_cols, otherwise the processed input_cols
         self._snowpark_cols: Optional[List[str]] = self.input_cols
-        self._handlers: FitPredictHandlers = HandlersImpl(class_name=GradientBoostingClassifier.__class__.__name__, subproject=_SUBPROJECT, autogenerated=True
+        self._handlers: FitPredictHandlers = HandlersImpl(class_name=GradientBoostingClassifier.__class__.__name__, subproject=_SUBPROJECT, autogenerated=True)
+        self._autogenerated = True

     def _get_rand_id(self) -> str:
         """
@@ -331,24 +351,6 @@ class GradientBoostingClassifier(BaseTransformer):
         """
         return str(uuid4()).replace("-", "_").upper()

-    def _infer_input_output_cols(self, dataset: Union[DataFrame, pd.DataFrame]) -> None:
-        """
-        Infer `self.input_cols` and `self.output_cols` if they are not explicitly set.
-
-        Args:
-            dataset: Input dataset.
-        """
-        if not self.input_cols:
-            cols = [
-                c for c in dataset.columns
-                if c not in self.get_label_cols() and c != self.sample_weight_col
-            ]
-            self.set_input_cols(input_cols=cols)
-
-        if not self.output_cols:
-            cols = [identifier.concat_names(ids=['OUTPUT_', c]) for c in self.label_cols]
-            self.set_output_cols(output_cols=cols)
-
     def set_input_cols(self, input_cols: Optional[Union[str, Iterable[str]]]) -> "GradientBoostingClassifier":
         """
         Input columns setter.
@@ -394,54 +396,48 @@ class GradientBoostingClassifier(BaseTransformer):
             self
         """
         self._infer_input_output_cols(dataset)
-        if isinstance(dataset,
-        ...
-        self.
-        ...
+        if isinstance(dataset, DataFrame):
+            session = dataset._session
+            assert session is not None  # keep mypy happy
+            # Validate that key package version in user workspace are supported in snowflake conda channel
+            # If customer doesn't have package in conda channel, replace the ones have the closest versions
+            self._deps = pkg_version_utils.get_valid_pkg_versions_supported_in_snowflake_conda_channel(
+                pkg_versions=self._get_dependencies(), session=session, subproject=_SUBPROJECT)
+
+            # Specify input columns so column pruning will be enforced
+            selected_cols = self._get_active_columns()
+            if len(selected_cols) > 0:
+                dataset = dataset.select(selected_cols)
+
+            self._snowpark_cols = dataset.select(self.input_cols).columns
+
+        # If we are already in a stored procedure, no need to kick off another one.
+        if SNOWML_SPROC_ENV in os.environ:
+            statement_params = telemetry.get_function_usage_statement_params(
+                project=_PROJECT,
+                subproject=_SUBPROJECT,
+                function_name=telemetry.get_statement_params_full_func_name(inspect.currentframe(), GradientBoostingClassifier.__class__.__name__),
+                api_calls=[Session.call],
+                custom_tags=dict([("autogen", True)]) if self._autogenerated else None,
+            )
+            pd_df: pd.DataFrame = dataset.to_pandas(statement_params=statement_params)
+            pd_df.columns = dataset.columns
+            dataset = pd_df
+
+        model_trainer = ModelTrainerBuilder.build(
+            estimator=self._sklearn_object,
+            dataset=dataset,
+            input_cols=self.input_cols,
+            label_cols=self.label_cols,
+            sample_weight_col=self.sample_weight_col,
+            autogenerated=self._autogenerated,
+            subproject=_SUBPROJECT
+        )
+        self._sklearn_object = model_trainer.train()
         self._is_fitted = True
         self._get_model_signatures(dataset)
         return self

-    def _fit_snowpark(self, dataset: DataFrame) -> None:
-        session = dataset._session
-        assert session is not None  # keep mypy happy
-        # Validate that key package version in user workspace are supported in snowflake conda channel
-        # If customer doesn't have package in conda channel, replace the ones have the closest versions
-        self._deps = pkg_version_utils.get_valid_pkg_versions_supported_in_snowflake_conda_channel(
-            pkg_versions=self._get_dependencies(), session=session, subproject=_SUBPROJECT)
-
-        # Specify input columns so column pruning will be enforced
-        selected_cols = self._get_active_columns()
-        if len(selected_cols) > 0:
-            dataset = dataset.select(selected_cols)
-
-        estimator = self._sklearn_object
-        assert estimator is not None  # Keep mypy happy
-
-        self._snowpark_cols = dataset.select(self.input_cols).columns
-
-        self._sklearn_object = self._handlers.fit_snowpark(
-            dataset,
-            session,
-            estimator,
-            ["snowflake-snowpark-python"] + self._get_dependencies(),
-            self.input_cols,
-            self.label_cols,
-            self.sample_weight_col,
-        )
-
     def _get_pass_through_columns(self, dataset: DataFrame) -> List[str]:
         if self._drop_input_cols:
             return []
```
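The rewritten fit() above replaces the old isinstance-based dispatch and the dedicated _fit_snowpark helper (the removed body is partly truncated in the source diff, shown as `...`) with a single path that delegates to ModelTrainerBuilder. A hedged, caller-level sketch of the two entry points; `session` and the table name are hypothetical:

```python
# Caller-level sketch of the unified fit() path; `session` and the table name are
# hypothetical, and the tiny pandas frame is only for illustration.
import pandas as pd
from snowflake.ml.modeling.ensemble import GradientBoostingClassifier

clf = GradientBoostingClassifier(label_cols=["LABEL"])

# Local path: a pandas DataFrame is trained in-process via the builder's pandas trainer.
clf.fit(pd.DataFrame({"F1": [0.1, 0.9, 0.4, 0.7], "F2": [1.0, 0.2, 0.7, 0.3], "LABEL": [0, 1, 0, 1]}))

# Remote path: a Snowpark DataFrame is validated against the Snowflake conda channel,
# pruned to the active columns, and fitted through the Snowpark trainer.
# clf.fit(session.table("TRAINING_DATA"))
```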
```diff
@@ -629,11 +625,6 @@ class GradientBoostingClassifier(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def predict(self, dataset: Union[DataFrame, pd.DataFrame]) -> Union[DataFrame, pd.DataFrame]:
         """Predict class for X
         For more details on this function, see [sklearn.ensemble.GradientBoostingClassifier.predict]
@@ -687,11 +678,6 @@ class GradientBoostingClassifier(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def transform(self, dataset: Union[DataFrame, pd.DataFrame]) -> Union[DataFrame, pd.DataFrame]:
         """Method not supported for this class.

@@ -748,7 +734,8 @@ class GradientBoostingClassifier(BaseTransformer):
         if False:
             self.fit(dataset)
             assert self._sklearn_object is not None
-
+            labels : npt.NDArray[Any] = self._sklearn_object.labels_
+            return labels
         else:
             raise NotImplementedError

@@ -784,6 +771,7 @@ class GradientBoostingClassifier(BaseTransformer):
             output_cols = []

         # Make sure column names are valid snowflake identifiers.
+        assert output_cols is not None  # Make MyPy happy
         rv = [identifier.rename_to_valid_snowflake_identifier(c) for c in output_cols]

         return rv
@@ -794,11 +782,6 @@ class GradientBoostingClassifier(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def predict_proba(
         self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "predict_proba_"
     ) -> Union[DataFrame, pd.DataFrame]:
@@ -841,11 +824,6 @@ class GradientBoostingClassifier(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def predict_log_proba(
         self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "predict_log_proba_"
     ) -> Union[DataFrame, pd.DataFrame]:
@@ -884,16 +862,6 @@ class GradientBoostingClassifier(BaseTransformer):
         return output_df

     @available_if(original_estimator_has_callable("decision_function"))  # type: ignore[misc]
-    @telemetry.send_api_usage_telemetry(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def decision_function(
         self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "decision_function_"
     ) -> Union[DataFrame, pd.DataFrame]:
@@ -996,11 +964,6 @@ class GradientBoostingClassifier(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def kneighbors(
         self,
         dataset: Union[DataFrame, pd.DataFrame],
@@ -1060,18 +1023,28 @@ class GradientBoostingClassifier(BaseTransformer):
         # For classifier, the type of predict is the same as the type of label
         if self._sklearn_object._estimator_type == 'classifier':
             # label columns is the desired type for output
-            outputs = _infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True)
+            outputs = list(_infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True))
             # rename the output columns
-            outputs = model_signature_utils.rename_features(outputs, self.output_cols)
+            outputs = list(model_signature_utils.rename_features(outputs, self.output_cols))
             self._model_signature_dict["predict"] = ModelSignature(inputs,
                                                                    ([] if self._drop_input_cols else inputs)
                                                                    + outputs)
+        # For mixture models that use the density mixin, `predict` returns the argmax of the log prob.
+        # For outlier models, returns -1 for outliers and 1 for inliers.
+        # Clusterer returns int64 cluster labels.
+        elif self._sklearn_object._estimator_type in ["DensityEstimator", "clusterer", "outlier_detector"]:
+            outputs = [FeatureSpec(dtype=DataType.INT64, name=c) for c in self.output_cols]
+            self._model_signature_dict["predict"] = ModelSignature(inputs,
+                                                                   ([] if self._drop_input_cols else inputs)
+                                                                   + outputs)
+
         # For regressor, the type of predict is float64
         elif self._sklearn_object._estimator_type == 'regressor':
             outputs = [FeatureSpec(dtype=DataType.DOUBLE, name=c) for c in self.output_cols]
             self._model_signature_dict["predict"] = ModelSignature(inputs,
                                                                    ([] if self._drop_input_cols else inputs)
                                                                    + outputs)
+
         for prob_func in PROB_FUNCTIONS:
             if hasattr(self, prob_func):
                 output_cols_prefix: str = f"{prob_func}_"
```