snowflake-ml-python 1.1.0__py3-none-any.whl → 1.1.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- snowflake/cortex/_complete.py +1 -1
- snowflake/cortex/_extract_answer.py +1 -1
- snowflake/cortex/_sentiment.py +1 -1
- snowflake/cortex/_summarize.py +1 -1
- snowflake/cortex/_translate.py +1 -1
- snowflake/ml/_internal/env_utils.py +68 -6
- snowflake/ml/_internal/file_utils.py +34 -4
- snowflake/ml/_internal/telemetry.py +79 -91
- snowflake/ml/_internal/utils/identifier.py +78 -72
- snowflake/ml/_internal/utils/retryable_http.py +16 -4
- snowflake/ml/_internal/utils/spcs_attribution_utils.py +122 -0
- snowflake/ml/dataset/dataset.py +1 -1
- snowflake/ml/model/_api.py +21 -14
- snowflake/ml/model/_client/model/model_impl.py +176 -0
- snowflake/ml/model/_client/model/model_method_info.py +19 -0
- snowflake/ml/model/_client/model/model_version_impl.py +291 -0
- snowflake/ml/model/_client/ops/metadata_ops.py +107 -0
- snowflake/ml/model/_client/ops/model_ops.py +308 -0
- snowflake/ml/model/_client/sql/model.py +75 -0
- snowflake/ml/model/_client/sql/model_version.py +213 -0
- snowflake/ml/model/_client/sql/stage.py +40 -0
- snowflake/ml/model/_deploy_client/image_builds/server_image_builder.py +3 -4
- snowflake/ml/model/_deploy_client/image_builds/templates/image_build_job_spec_template +24 -8
- snowflake/ml/model/_deploy_client/image_builds/templates/kaniko_shell_script_template +23 -0
- snowflake/ml/model/_deploy_client/snowservice/deploy.py +14 -2
- snowflake/ml/model/_deploy_client/utils/constants.py +1 -0
- snowflake/ml/model/_deploy_client/warehouse/deploy.py +2 -2
- snowflake/ml/model/_model_composer/model_composer.py +31 -9
- snowflake/ml/model/_model_composer/model_manifest/model_manifest.py +25 -10
- snowflake/ml/model/_model_composer/model_manifest/model_manifest_schema.py +2 -2
- snowflake/ml/model/_model_composer/model_method/infer_function.py_template +2 -1
- snowflake/ml/model/_model_composer/model_method/model_method.py +34 -3
- snowflake/ml/model/_model_composer/model_runtime/model_runtime.py +1 -1
- snowflake/ml/model/_packager/model_handlers/huggingface_pipeline.py +3 -1
- snowflake/ml/model/_packager/model_handlers/snowmlmodel.py +10 -28
- snowflake/ml/model/_packager/model_meta/model_meta.py +18 -16
- snowflake/ml/model/_signatures/snowpark_handler.py +1 -1
- snowflake/ml/model/model_signature.py +108 -53
- snowflake/ml/model/type_hints.py +1 -0
- snowflake/ml/modeling/_internal/distributed_hpo_trainer.py +554 -0
- snowflake/ml/modeling/_internal/estimator_protocols.py +1 -60
- snowflake/ml/modeling/_internal/model_specifications.py +146 -0
- snowflake/ml/modeling/_internal/model_trainer.py +13 -0
- snowflake/ml/modeling/_internal/model_trainer_builder.py +78 -0
- snowflake/ml/modeling/_internal/pandas_trainer.py +54 -0
- snowflake/ml/modeling/_internal/snowpark_handlers.py +6 -760
- snowflake/ml/modeling/_internal/snowpark_trainer.py +331 -0
- snowflake/ml/modeling/calibration/calibrated_classifier_cv.py +108 -135
- snowflake/ml/modeling/cluster/affinity_propagation.py +106 -135
- snowflake/ml/modeling/cluster/agglomerative_clustering.py +106 -135
- snowflake/ml/modeling/cluster/birch.py +106 -135
- snowflake/ml/modeling/cluster/bisecting_k_means.py +106 -135
- snowflake/ml/modeling/cluster/dbscan.py +106 -135
- snowflake/ml/modeling/cluster/feature_agglomeration.py +106 -135
- snowflake/ml/modeling/cluster/k_means.py +105 -135
- snowflake/ml/modeling/cluster/mean_shift.py +106 -135
- snowflake/ml/modeling/cluster/mini_batch_k_means.py +105 -135
- snowflake/ml/modeling/cluster/optics.py +106 -135
- snowflake/ml/modeling/cluster/spectral_biclustering.py +106 -135
- snowflake/ml/modeling/cluster/spectral_clustering.py +106 -135
- snowflake/ml/modeling/cluster/spectral_coclustering.py +106 -135
- snowflake/ml/modeling/compose/column_transformer.py +106 -135
- snowflake/ml/modeling/compose/transformed_target_regressor.py +108 -135
- snowflake/ml/modeling/covariance/elliptic_envelope.py +106 -135
- snowflake/ml/modeling/covariance/empirical_covariance.py +99 -128
- snowflake/ml/modeling/covariance/graphical_lasso.py +106 -135
- snowflake/ml/modeling/covariance/graphical_lasso_cv.py +106 -135
- snowflake/ml/modeling/covariance/ledoit_wolf.py +104 -133
- snowflake/ml/modeling/covariance/min_cov_det.py +106 -135
- snowflake/ml/modeling/covariance/oas.py +99 -128
- snowflake/ml/modeling/covariance/shrunk_covariance.py +103 -132
- snowflake/ml/modeling/decomposition/dictionary_learning.py +106 -135
- snowflake/ml/modeling/decomposition/factor_analysis.py +106 -135
- snowflake/ml/modeling/decomposition/fast_ica.py +106 -135
- snowflake/ml/modeling/decomposition/incremental_pca.py +106 -135
- snowflake/ml/modeling/decomposition/kernel_pca.py +106 -135
- snowflake/ml/modeling/decomposition/mini_batch_dictionary_learning.py +106 -135
- snowflake/ml/modeling/decomposition/mini_batch_sparse_pca.py +106 -135
- snowflake/ml/modeling/decomposition/pca.py +106 -135
- snowflake/ml/modeling/decomposition/sparse_pca.py +106 -135
- snowflake/ml/modeling/decomposition/truncated_svd.py +106 -135
- snowflake/ml/modeling/discriminant_analysis/linear_discriminant_analysis.py +108 -135
- snowflake/ml/modeling/discriminant_analysis/quadratic_discriminant_analysis.py +108 -135
- snowflake/ml/modeling/ensemble/ada_boost_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/ada_boost_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/bagging_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/bagging_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/extra_trees_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/extra_trees_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/gradient_boosting_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/gradient_boosting_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/hist_gradient_boosting_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/hist_gradient_boosting_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/isolation_forest.py +106 -135
- snowflake/ml/modeling/ensemble/random_forest_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/random_forest_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/stacking_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/voting_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/voting_regressor.py +108 -135
- snowflake/ml/modeling/feature_selection/generic_univariate_select.py +101 -128
- snowflake/ml/modeling/feature_selection/select_fdr.py +99 -126
- snowflake/ml/modeling/feature_selection/select_fpr.py +99 -126
- snowflake/ml/modeling/feature_selection/select_fwe.py +99 -126
- snowflake/ml/modeling/feature_selection/select_k_best.py +100 -127
- snowflake/ml/modeling/feature_selection/select_percentile.py +99 -126
- snowflake/ml/modeling/feature_selection/sequential_feature_selector.py +106 -135
- snowflake/ml/modeling/feature_selection/variance_threshold.py +95 -124
- snowflake/ml/modeling/framework/base.py +83 -1
- snowflake/ml/modeling/gaussian_process/gaussian_process_classifier.py +108 -135
- snowflake/ml/modeling/gaussian_process/gaussian_process_regressor.py +108 -135
- snowflake/ml/modeling/impute/iterative_imputer.py +106 -135
- snowflake/ml/modeling/impute/knn_imputer.py +106 -135
- snowflake/ml/modeling/impute/missing_indicator.py +106 -135
- snowflake/ml/modeling/impute/simple_imputer.py +9 -1
- snowflake/ml/modeling/kernel_approximation/additive_chi2_sampler.py +96 -125
- snowflake/ml/modeling/kernel_approximation/nystroem.py +106 -135
- snowflake/ml/modeling/kernel_approximation/polynomial_count_sketch.py +106 -135
- snowflake/ml/modeling/kernel_approximation/rbf_sampler.py +105 -134
- snowflake/ml/modeling/kernel_approximation/skewed_chi2_sampler.py +103 -132
- snowflake/ml/modeling/kernel_ridge/kernel_ridge.py +108 -135
- snowflake/ml/modeling/lightgbm/lgbm_classifier.py +90 -118
- snowflake/ml/modeling/lightgbm/lgbm_regressor.py +90 -118
- snowflake/ml/modeling/linear_model/ard_regression.py +108 -135
- snowflake/ml/modeling/linear_model/bayesian_ridge.py +108 -135
- snowflake/ml/modeling/linear_model/elastic_net.py +108 -135
- snowflake/ml/modeling/linear_model/elastic_net_cv.py +108 -135
- snowflake/ml/modeling/linear_model/gamma_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/huber_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/lars.py +108 -135
- snowflake/ml/modeling/linear_model/lars_cv.py +108 -135
- snowflake/ml/modeling/linear_model/lasso.py +108 -135
- snowflake/ml/modeling/linear_model/lasso_cv.py +108 -135
- snowflake/ml/modeling/linear_model/lasso_lars.py +108 -135
- snowflake/ml/modeling/linear_model/lasso_lars_cv.py +108 -135
- snowflake/ml/modeling/linear_model/lasso_lars_ic.py +108 -135
- snowflake/ml/modeling/linear_model/linear_regression.py +108 -135
- snowflake/ml/modeling/linear_model/logistic_regression.py +108 -135
- snowflake/ml/modeling/linear_model/logistic_regression_cv.py +108 -135
- snowflake/ml/modeling/linear_model/multi_task_elastic_net.py +108 -135
- snowflake/ml/modeling/linear_model/multi_task_elastic_net_cv.py +108 -135
- snowflake/ml/modeling/linear_model/multi_task_lasso.py +108 -135
- snowflake/ml/modeling/linear_model/multi_task_lasso_cv.py +108 -135
- snowflake/ml/modeling/linear_model/orthogonal_matching_pursuit.py +108 -135
- snowflake/ml/modeling/linear_model/passive_aggressive_classifier.py +108 -135
- snowflake/ml/modeling/linear_model/passive_aggressive_regressor.py +107 -135
- snowflake/ml/modeling/linear_model/perceptron.py +107 -135
- snowflake/ml/modeling/linear_model/poisson_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/ransac_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/ridge.py +108 -135
- snowflake/ml/modeling/linear_model/ridge_classifier.py +108 -135
- snowflake/ml/modeling/linear_model/ridge_classifier_cv.py +108 -135
- snowflake/ml/modeling/linear_model/ridge_cv.py +108 -135
- snowflake/ml/modeling/linear_model/sgd_classifier.py +108 -135
- snowflake/ml/modeling/linear_model/sgd_one_class_svm.py +106 -135
- snowflake/ml/modeling/linear_model/sgd_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/theil_sen_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/tweedie_regressor.py +108 -135
- snowflake/ml/modeling/manifold/isomap.py +106 -135
- snowflake/ml/modeling/manifold/mds.py +106 -135
- snowflake/ml/modeling/manifold/spectral_embedding.py +106 -135
- snowflake/ml/modeling/manifold/tsne.py +106 -135
- snowflake/ml/modeling/metrics/classification.py +196 -55
- snowflake/ml/modeling/metrics/correlation.py +4 -2
- snowflake/ml/modeling/metrics/covariance.py +7 -4
- snowflake/ml/modeling/metrics/ranking.py +32 -16
- snowflake/ml/modeling/metrics/regression.py +60 -32
- snowflake/ml/modeling/mixture/bayesian_gaussian_mixture.py +106 -135
- snowflake/ml/modeling/mixture/gaussian_mixture.py +106 -135
- snowflake/ml/modeling/model_selection/grid_search_cv.py +91 -148
- snowflake/ml/modeling/model_selection/randomized_search_cv.py +93 -154
- snowflake/ml/modeling/multiclass/one_vs_one_classifier.py +105 -132
- snowflake/ml/modeling/multiclass/one_vs_rest_classifier.py +108 -135
- snowflake/ml/modeling/multiclass/output_code_classifier.py +108 -135
- snowflake/ml/modeling/naive_bayes/bernoulli_nb.py +108 -135
- snowflake/ml/modeling/naive_bayes/categorical_nb.py +108 -135
- snowflake/ml/modeling/naive_bayes/complement_nb.py +108 -135
- snowflake/ml/modeling/naive_bayes/gaussian_nb.py +98 -125
- snowflake/ml/modeling/naive_bayes/multinomial_nb.py +107 -134
- snowflake/ml/modeling/neighbors/k_neighbors_classifier.py +108 -135
- snowflake/ml/modeling/neighbors/k_neighbors_regressor.py +108 -135
- snowflake/ml/modeling/neighbors/kernel_density.py +106 -135
- snowflake/ml/modeling/neighbors/local_outlier_factor.py +106 -135
- snowflake/ml/modeling/neighbors/nearest_centroid.py +108 -135
- snowflake/ml/modeling/neighbors/nearest_neighbors.py +106 -135
- snowflake/ml/modeling/neighbors/neighborhood_components_analysis.py +108 -135
- snowflake/ml/modeling/neighbors/radius_neighbors_classifier.py +108 -135
- snowflake/ml/modeling/neighbors/radius_neighbors_regressor.py +108 -135
- snowflake/ml/modeling/neural_network/bernoulli_rbm.py +106 -135
- snowflake/ml/modeling/neural_network/mlp_classifier.py +108 -135
- snowflake/ml/modeling/neural_network/mlp_regressor.py +108 -135
- snowflake/ml/modeling/parameters/disable_distributed_hpo.py +2 -6
- snowflake/ml/modeling/preprocessing/binarizer.py +25 -8
- snowflake/ml/modeling/preprocessing/k_bins_discretizer.py +9 -4
- snowflake/ml/modeling/preprocessing/label_encoder.py +31 -11
- snowflake/ml/modeling/preprocessing/max_abs_scaler.py +27 -9
- snowflake/ml/modeling/preprocessing/min_max_scaler.py +42 -14
- snowflake/ml/modeling/preprocessing/normalizer.py +9 -4
- snowflake/ml/modeling/preprocessing/one_hot_encoder.py +26 -10
- snowflake/ml/modeling/preprocessing/ordinal_encoder.py +37 -13
- snowflake/ml/modeling/preprocessing/polynomial_features.py +106 -135
- snowflake/ml/modeling/preprocessing/robust_scaler.py +39 -13
- snowflake/ml/modeling/preprocessing/standard_scaler.py +36 -12
- snowflake/ml/modeling/semi_supervised/label_propagation.py +108 -135
- snowflake/ml/modeling/semi_supervised/label_spreading.py +108 -135
- snowflake/ml/modeling/svm/linear_svc.py +108 -135
- snowflake/ml/modeling/svm/linear_svr.py +108 -135
- snowflake/ml/modeling/svm/nu_svc.py +108 -135
- snowflake/ml/modeling/svm/nu_svr.py +108 -135
- snowflake/ml/modeling/svm/svc.py +108 -135
- snowflake/ml/modeling/svm/svr.py +108 -135
- snowflake/ml/modeling/tree/decision_tree_classifier.py +108 -135
- snowflake/ml/modeling/tree/decision_tree_regressor.py +108 -135
- snowflake/ml/modeling/tree/extra_tree_classifier.py +108 -135
- snowflake/ml/modeling/tree/extra_tree_regressor.py +108 -135
- snowflake/ml/modeling/xgboost/xgb_classifier.py +108 -136
- snowflake/ml/modeling/xgboost/xgb_regressor.py +108 -136
- snowflake/ml/modeling/xgboost/xgbrf_classifier.py +108 -136
- snowflake/ml/modeling/xgboost/xgbrf_regressor.py +108 -136
- snowflake/ml/registry/model_registry.py +2 -0
- snowflake/ml/registry/registry.py +215 -0
- snowflake/ml/version.py +1 -1
- {snowflake_ml_python-1.1.0.dist-info → snowflake_ml_python-1.1.2.dist-info}/METADATA +34 -1
- snowflake_ml_python-1.1.2.dist-info/RECORD +347 -0
- snowflake_ml_python-1.1.0.dist-info/RECORD +0 -331
- {snowflake_ml_python-1.1.0.dist-info → snowflake_ml_python-1.1.2.dist-info}/WHEEL +0 -0
--- a/snowflake/ml/modeling/feature_selection/select_percentile.py
+++ b/snowflake/ml/modeling/feature_selection/select_percentile.py
@@ -23,17 +23,19 @@ from sklearn.utils.metaestimators import available_if
 from snowflake.ml.modeling.framework.base import BaseTransformer, _process_cols
 from snowflake.ml._internal import telemetry
 from snowflake.ml._internal.exceptions import error_codes, exceptions, modeling_error_messages
+from snowflake.ml._internal.env_utils import SNOWML_SPROC_ENV
 from snowflake.ml._internal.utils import pkg_version_utils, identifier
-from snowflake.snowpark import DataFrame
+from snowflake.snowpark import DataFrame, Session
 from snowflake.snowpark._internal.type_utils import convert_sp_to_sf_type
 from snowflake.ml.modeling._internal.snowpark_handlers import SnowparkHandlers as HandlersImpl
+from snowflake.ml.modeling._internal.model_trainer_builder import ModelTrainerBuilder
+from snowflake.ml.modeling._internal.model_trainer import ModelTrainer
 from snowflake.ml.modeling._internal.estimator_utils import (
     gather_dependencies,
     original_estimator_has_callable,
     transform_snowml_obj_to_sklearn_obj,
     validate_sklearn_args,
 )
-from snowflake.ml.modeling._internal.snowpark_handlers import SklearnWrapperProvider
 from snowflake.ml.modeling._internal.estimator_protocols import FitPredictHandlers
 
 from snowflake.ml.model.model_signature import (
@@ -53,7 +55,6 @@ _PROJECT = "ModelDevelopment"
 _SUBPROJECT = "".join([s.capitalize() for s in "sklearn.feature_selection".replace("sklearn.", "").split("_")])
 
 
-
 class SelectPercentile(BaseTransformer):
     r"""Select features according to a percentile of the highest scores
     For more details on this class, see [sklearn.feature_selection.SelectPercentile]
@@ -61,43 +62,59 @@ class SelectPercentile(BaseTransformer):
 
     Parameters
     ----------
-    score_func: callable, default=f_classif
-        Function taking two arrays X and y, and returning a pair of arrays
-        (scores, pvalues) or a single array with scores.
-        Default is f_classif (see below "See Also"). The default function only
-        works with classification tasks.
-
-    percentile: int, default=10
-        Percent of features to keep.
 
     input_cols: Optional[Union[str, List[str]]]
         A string or list of strings representing column names that contain features.
         If this parameter is not specified, all columns in the input DataFrame except
-        the columns specified by label_cols
-        considered input columns.
-
+        the columns specified by label_cols, sample_weight_col, and passthrough_cols
+        parameters are considered input columns. Input columns can also be set after
+        initialization with the `set_input_cols` method.
+
     label_cols: Optional[Union[str, List[str]]]
         A string or list of strings representing column names that contain labels.
-
-
-        labels (like a transformer).
+        Label columns must be specified with this parameter during initialization
+        or with the `set_label_cols` method before fitting.
 
     output_cols: Optional[Union[str, List[str]]]
         A string or list of strings representing column names that will store the
        output of predict and transform operations. The length of output_cols must
-        match the expected number of output columns from the specific
+        match the expected number of output columns from the specific predictor or
         transformer class used.
-        If this parameter
-
-
-        be set explicitly for transformers.
+        If you omit this parameter, output column names are derived by adding an
+        OUTPUT_ prefix to the label column names for supervised estimators, or
+        OUTPUT_<IDX>for unsupervised estimators. These inferred output column names
+        work for predictors, but output_cols must be set explicitly for transformers.
+        In general, explicitly specifying output column names is clearer, especially
+        if you don’t specify the input column names.
+        To transform in place, pass the same names for input_cols and output_cols.
+        be set explicitly for transformers. Output columns can also be set after
+        initialization with the `set_output_cols` method.
 
     sample_weight_col: Optional[str]
         A string representing the column name containing the sample weights.
-        This argument is only required when working with weighted datasets.
+        This argument is only required when working with weighted datasets. Sample
+        weight column can also be set after initialization with the
+        `set_sample_weight_col` method.
+
+    passthrough_cols: Optional[Union[str, List[str]]]
+        A string or a list of strings indicating column names to be excluded from any
+        operations (such as train, transform, or inference). These specified column(s)
+        will remain untouched throughout the process. This option is helpful in scenarios
+        requiring automatic input_cols inference, but need to avoid using specific
+        columns, like index columns, during training or inference. Passthrough columns
+        can also be set after initialization with the `set_passthrough_cols` method.
 
     drop_input_cols: Optional[bool], default=False
         If set, the response of predict(), transform() methods will not contain input columns.
+
+    score_func: callable, default=f_classif
+        Function taking two arrays X and y, and returning a pair of arrays
+        (scores, pvalues) or a single array with scores.
+        Default is f_classif (see below "See Also"). The default function only
+        works with classification tasks.
+
+    percentile: int, default=10
+        Percent of features to keep.
     """
 
     def __init__( # type: ignore[no-untyped-def]
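For orientation, here is a minimal usage sketch of the column-handling parameters documented above. It is not part of the diff; the DataFrame, column names, and the chosen output column name are hypothetical, and output_cols must match the number of features the selector keeps.

```python
# Illustrative sketch only: exercises the documented parameters with made-up data.
import pandas as pd
from snowflake.ml.modeling.feature_selection import SelectPercentile

df = pd.DataFrame({
    "ROW_ID": [1, 2, 3, 4],          # excluded from training via passthrough_cols
    "F1": [0.1, 0.4, 0.3, 0.9],
    "F2": [1.0, 0.2, 0.5, 0.7],
    "LABEL": [0, 1, 0, 1],
})

selector = SelectPercentile(
    percentile=50,                   # keep the top 50% of features
    label_cols="LABEL",              # set here, or later via set_label_cols()
    passthrough_cols="ROW_ID",       # never used as an input column
    output_cols=["SELECTED_0"],      # transformers require explicit output_cols
)
# input_cols is inferred as every column except LABEL and ROW_ID.
selector.fit(df)
print(selector.transform(df).columns)
```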
@@ -108,6 +125,7 @@ class SelectPercentile(BaseTransformer):
         input_cols: Optional[Union[str, Iterable[str]]] = None,
         output_cols: Optional[Union[str, Iterable[str]]] = None,
         label_cols: Optional[Union[str, Iterable[str]]] = None,
+        passthrough_cols: Optional[Union[str, Iterable[str]]] = None,
         drop_input_cols: Optional[bool] = False,
         sample_weight_col: Optional[str] = None,
     ) -> None:
@@ -116,9 +134,10 @@ class SelectPercentile(BaseTransformer):
         self.set_input_cols(input_cols)
         self.set_output_cols(output_cols)
         self.set_label_cols(label_cols)
+        self.set_passthrough_cols(passthrough_cols)
         self.set_drop_input_cols(drop_input_cols)
         self.set_sample_weight_col(sample_weight_col)
-        deps = set(
+        deps: Set[str] = set([f'numpy=={np.__version__}', f'scikit-learn=={sklearn.__version__}', f'cloudpickle=={cp.__version__}'])
 
         self._deps = list(deps)
 
@@ -128,13 +147,14 @@ class SelectPercentile(BaseTransformer):
             args=init_args,
             klass=sklearn.feature_selection.SelectPercentile
         )
-        self._sklearn_object = sklearn.feature_selection.SelectPercentile(
+        self._sklearn_object: Any = sklearn.feature_selection.SelectPercentile(
             **cleaned_up_init_args,
         )
         self._model_signature_dict: Optional[Dict[str, ModelSignature]] = None
         # If user used snowpark dataframe during fit, here it stores the snowpark input_cols, otherwise the processed input_cols
         self._snowpark_cols: Optional[List[str]] = self.input_cols
-        self._handlers: FitPredictHandlers = HandlersImpl(class_name=SelectPercentile.__class__.__name__, subproject=_SUBPROJECT, autogenerated=True
+        self._handlers: FitPredictHandlers = HandlersImpl(class_name=SelectPercentile.__class__.__name__, subproject=_SUBPROJECT, autogenerated=True)
+        self._autogenerated = True
 
     def _get_rand_id(self) -> str:
         """
@@ -145,24 +165,6 @@ class SelectPercentile(BaseTransformer):
         """
         return str(uuid4()).replace("-", "_").upper()
 
-    def _infer_input_output_cols(self, dataset: Union[DataFrame, pd.DataFrame]) -> None:
-        """
-        Infer `self.input_cols` and `self.output_cols` if they are not explicitly set.
-
-        Args:
-            dataset: Input dataset.
-        """
-        if not self.input_cols:
-            cols = [
-                c for c in dataset.columns
-                if c not in self.get_label_cols() and c != self.sample_weight_col
-            ]
-            self.set_input_cols(input_cols=cols)
-
-        if not self.output_cols:
-            cols = [identifier.concat_names(ids=['OUTPUT_', c]) for c in self.label_cols]
-            self.set_output_cols(output_cols=cols)
-
     def set_input_cols(self, input_cols: Optional[Union[str, Iterable[str]]]) -> "SelectPercentile":
         """
         Input columns setter.
@@ -208,54 +210,48 @@ class SelectPercentile(BaseTransformer):
             self
         """
         self._infer_input_output_cols(dataset)
-        if isinstance(dataset,
-
-
-
-
-
-            self.
-
-
-
-
-
-
-
-
-
+        if isinstance(dataset, DataFrame):
+            session = dataset._session
+            assert session is not None # keep mypy happy
+            # Validate that key package version in user workspace are supported in snowflake conda channel
+            # If customer doesn't have package in conda channel, replace the ones have the closest versions
+            self._deps = pkg_version_utils.get_valid_pkg_versions_supported_in_snowflake_conda_channel(
+                pkg_versions=self._get_dependencies(), session=session, subproject=_SUBPROJECT)
+
+            # Specify input columns so column pruning will be enforced
+            selected_cols = self._get_active_columns()
+            if len(selected_cols) > 0:
+                dataset = dataset.select(selected_cols)
+
+            self._snowpark_cols = dataset.select(self.input_cols).columns
+
+            # If we are already in a stored procedure, no need to kick off another one.
+            if SNOWML_SPROC_ENV in os.environ:
+                statement_params = telemetry.get_function_usage_statement_params(
+                    project=_PROJECT,
+                    subproject=_SUBPROJECT,
+                    function_name=telemetry.get_statement_params_full_func_name(inspect.currentframe(), SelectPercentile.__class__.__name__),
+                    api_calls=[Session.call],
+                    custom_tags=dict([("autogen", True)]) if self._autogenerated else None,
+                )
+                pd_df: pd.DataFrame = dataset.to_pandas(statement_params=statement_params)
+                pd_df.columns = dataset.columns
+                dataset = pd_df
+
+        model_trainer = ModelTrainerBuilder.build(
+            estimator=self._sklearn_object,
+            dataset=dataset,
+            input_cols=self.input_cols,
+            label_cols=self.label_cols,
+            sample_weight_col=self.sample_weight_col,
+            autogenerated=self._autogenerated,
+            subproject=_SUBPROJECT
+        )
+        self._sklearn_object = model_trainer.train()
         self._is_fitted = True
         self._get_model_signatures(dataset)
         return self
 
-    def _fit_snowpark(self, dataset: DataFrame) -> None:
-        session = dataset._session
-        assert session is not None # keep mypy happy
-        # Validate that key package version in user workspace are supported in snowflake conda channel
-        # If customer doesn't have package in conda channel, replace the ones have the closest versions
-        self._deps = pkg_version_utils.get_valid_pkg_versions_supported_in_snowflake_conda_channel(
-            pkg_versions=self._get_dependencies(), session=session, subproject=_SUBPROJECT)
-
-        # Specify input columns so column pruning will be enforced
-        selected_cols = self._get_active_columns()
-        if len(selected_cols) > 0:
-            dataset = dataset.select(selected_cols)
-
-        estimator = self._sklearn_object
-        assert estimator is not None # Keep mypy happy
-
-        self._snowpark_cols = dataset.select(self.input_cols).columns
-
-        self._sklearn_object = self._handlers.fit_snowpark(
-            dataset,
-            session,
-            estimator,
-            ["snowflake-snowpark-python"] + self._get_dependencies(),
-            self.input_cols,
-            self.label_cols,
-            self.sample_weight_col,
-        )
-
     def _get_pass_through_columns(self, dataset: DataFrame) -> List[str]:
         if self._drop_input_cols:
             return []
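The fit() body above now delegates training to ModelTrainerBuilder (new in this release together with model_trainer.py, pandas_trainer.py, and snowpark_trainer.py), replacing the per-estimator _fit_snowpark path. The builder itself is not shown in this diff, so the sketch below is only a guess at the dispatch it implies; every name other than ModelTrainerBuilder.build(...) and train() is an assumption.

```python
# Hypothetical sketch, not taken from the package source: a trainer builder that
# routes pandas input to an in-process trainer and would route Snowpark input to
# a trainer running inside Snowflake. Class names are illustrative stand-ins.
from dataclasses import dataclass
from typing import Any, List, Optional, Protocol

import pandas as pd


class ModelTrainer(Protocol):
    def train(self) -> Any:
        """Fit the wrapped estimator and return the fitted object."""
        ...


@dataclass
class PandasModelTrainer:
    estimator: Any
    dataset: pd.DataFrame
    input_cols: List[str]
    label_cols: Optional[List[str]] = None
    sample_weight_col: Optional[str] = None

    def train(self) -> Any:
        # Local, in-process fit on the pandas data.
        kwargs: dict = {}
        if self.label_cols:
            kwargs["y"] = self.dataset[self.label_cols].squeeze()
        if self.sample_weight_col:
            kwargs["sample_weight"] = self.dataset[self.sample_weight_col]
        return self.estimator.fit(self.dataset[self.input_cols], **kwargs)


class ModelTrainerBuilder:
    @classmethod
    def build(cls, estimator: Any, dataset: Any, input_cols: List[str],
              label_cols: Optional[List[str]] = None,
              sample_weight_col: Optional[str] = None,
              **_ignored: Any) -> ModelTrainer:
        # Plausible dispatch: pandas trains in-process; a Snowpark DataFrame
        # would instead get a trainer that pushes fitting into Snowflake
        # (omitted in this sketch).
        if isinstance(dataset, pd.DataFrame):
            return PandasModelTrainer(estimator, dataset, input_cols, label_cols, sample_weight_col)
        raise NotImplementedError("Snowpark path omitted in this sketch")
```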
@@ -443,11 +439,6 @@ class SelectPercentile(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def predict(self, dataset: Union[DataFrame, pd.DataFrame]) -> Union[DataFrame, pd.DataFrame]:
         """Method not supported for this class.
 
@@ -499,11 +490,6 @@ class SelectPercentile(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def transform(self, dataset: Union[DataFrame, pd.DataFrame]) -> Union[DataFrame, pd.DataFrame]:
         """Reduce X to the selected features
         For more details on this function, see [sklearn.feature_selection.SelectPercentile.transform]
@@ -562,7 +548,8 @@ class SelectPercentile(BaseTransformer):
         if False:
             self.fit(dataset)
             assert self._sklearn_object is not None
-
+            labels : npt.NDArray[Any] = self._sklearn_object.labels_
+            return labels
         else:
             raise NotImplementedError
 
@@ -598,6 +585,7 @@ class SelectPercentile(BaseTransformer):
             output_cols = []
 
         # Make sure column names are valid snowflake identifiers.
+        assert output_cols is not None # Make MyPy happy
         rv = [identifier.rename_to_valid_snowflake_identifier(c) for c in output_cols]
 
         return rv
@@ -608,11 +596,6 @@ class SelectPercentile(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def predict_proba(
         self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "predict_proba_"
     ) -> Union[DataFrame, pd.DataFrame]:
@@ -653,11 +636,6 @@ class SelectPercentile(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def predict_log_proba(
         self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "predict_log_proba_"
     ) -> Union[DataFrame, pd.DataFrame]:
@@ -694,16 +672,6 @@ class SelectPercentile(BaseTransformer):
         return output_df
 
     @available_if(original_estimator_has_callable("decision_function")) # type: ignore[misc]
-    @telemetry.send_api_usage_telemetry(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def decision_function(
         self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "decision_function_"
     ) -> Union[DataFrame, pd.DataFrame]:
@@ -802,11 +770,6 @@ class SelectPercentile(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def kneighbors(
         self,
         dataset: Union[DataFrame, pd.DataFrame],
@@ -866,18 +829,28 @@ class SelectPercentile(BaseTransformer):
         # For classifier, the type of predict is the same as the type of label
         if self._sklearn_object._estimator_type == 'classifier':
             # label columns is the desired type for output
-            outputs = _infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True)
+            outputs = list(_infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True))
             # rename the output columns
-            outputs = model_signature_utils.rename_features(outputs, self.output_cols)
+            outputs = list(model_signature_utils.rename_features(outputs, self.output_cols))
             self._model_signature_dict["predict"] = ModelSignature(inputs,
                                                                    ([] if self._drop_input_cols else inputs)
                                                                    + outputs)
+        # For mixture models that use the density mixin, `predict` returns the argmax of the log prob.
+        # For outlier models, returns -1 for outliers and 1 for inliers.
+        # Clusterer returns int64 cluster labels.
+        elif self._sklearn_object._estimator_type in ["DensityEstimator", "clusterer", "outlier_detector"]:
+            outputs = [FeatureSpec(dtype=DataType.INT64, name=c) for c in self.output_cols]
+            self._model_signature_dict["predict"] = ModelSignature(inputs,
+                                                                   ([] if self._drop_input_cols else inputs)
+                                                                   + outputs)
+
         # For regressor, the type of predict is float64
         elif self._sklearn_object._estimator_type == 'regressor':
             outputs = [FeatureSpec(dtype=DataType.DOUBLE, name=c) for c in self.output_cols]
             self._model_signature_dict["predict"] = ModelSignature(inputs,
                                                                    ([] if self._drop_input_cols else inputs)
                                                                    + outputs)
+
         for prob_func in PROB_FUNCTIONS:
             if hasattr(self, prob_func):
                 output_cols_prefix: str = f"{prob_func}_"
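For reference, a sketch of the predict signatures the branches above produce (illustrative only; the feature names and OUTPUT_* column names here are hypothetical, and the classifier branch instead infers the output dtype from the label column):

```python
# Illustrative sketch mirroring the signature-building branches above.
from snowflake.ml.model.model_signature import DataType, FeatureSpec, ModelSignature

inputs = [FeatureSpec(dtype=DataType.DOUBLE, name=c) for c in ["F1", "F2"]]

# Clusterer / outlier detector / density estimator: predict emits INT64 outputs.
cluster_predict = ModelSignature(
    inputs, inputs + [FeatureSpec(dtype=DataType.INT64, name="OUTPUT_0")]
)

# Regressor: predict emits DOUBLE outputs named after the output columns.
regressor_predict = ModelSignature(
    inputs, inputs + [FeatureSpec(dtype=DataType.DOUBLE, name="OUTPUT_LABEL")]
)
```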
|