snowflake-ml-python 1.1.0__py3-none-any.whl → 1.1.2__py3-none-any.whl
This diff shows the changes between two publicly released versions of this package, as they appear in their public registry. It is provided for informational purposes only.
- snowflake/cortex/_complete.py +1 -1
- snowflake/cortex/_extract_answer.py +1 -1
- snowflake/cortex/_sentiment.py +1 -1
- snowflake/cortex/_summarize.py +1 -1
- snowflake/cortex/_translate.py +1 -1
- snowflake/ml/_internal/env_utils.py +68 -6
- snowflake/ml/_internal/file_utils.py +34 -4
- snowflake/ml/_internal/telemetry.py +79 -91
- snowflake/ml/_internal/utils/identifier.py +78 -72
- snowflake/ml/_internal/utils/retryable_http.py +16 -4
- snowflake/ml/_internal/utils/spcs_attribution_utils.py +122 -0
- snowflake/ml/dataset/dataset.py +1 -1
- snowflake/ml/model/_api.py +21 -14
- snowflake/ml/model/_client/model/model_impl.py +176 -0
- snowflake/ml/model/_client/model/model_method_info.py +19 -0
- snowflake/ml/model/_client/model/model_version_impl.py +291 -0
- snowflake/ml/model/_client/ops/metadata_ops.py +107 -0
- snowflake/ml/model/_client/ops/model_ops.py +308 -0
- snowflake/ml/model/_client/sql/model.py +75 -0
- snowflake/ml/model/_client/sql/model_version.py +213 -0
- snowflake/ml/model/_client/sql/stage.py +40 -0
- snowflake/ml/model/_deploy_client/image_builds/server_image_builder.py +3 -4
- snowflake/ml/model/_deploy_client/image_builds/templates/image_build_job_spec_template +24 -8
- snowflake/ml/model/_deploy_client/image_builds/templates/kaniko_shell_script_template +23 -0
- snowflake/ml/model/_deploy_client/snowservice/deploy.py +14 -2
- snowflake/ml/model/_deploy_client/utils/constants.py +1 -0
- snowflake/ml/model/_deploy_client/warehouse/deploy.py +2 -2
- snowflake/ml/model/_model_composer/model_composer.py +31 -9
- snowflake/ml/model/_model_composer/model_manifest/model_manifest.py +25 -10
- snowflake/ml/model/_model_composer/model_manifest/model_manifest_schema.py +2 -2
- snowflake/ml/model/_model_composer/model_method/infer_function.py_template +2 -1
- snowflake/ml/model/_model_composer/model_method/model_method.py +34 -3
- snowflake/ml/model/_model_composer/model_runtime/model_runtime.py +1 -1
- snowflake/ml/model/_packager/model_handlers/huggingface_pipeline.py +3 -1
- snowflake/ml/model/_packager/model_handlers/snowmlmodel.py +10 -28
- snowflake/ml/model/_packager/model_meta/model_meta.py +18 -16
- snowflake/ml/model/_signatures/snowpark_handler.py +1 -1
- snowflake/ml/model/model_signature.py +108 -53
- snowflake/ml/model/type_hints.py +1 -0
- snowflake/ml/modeling/_internal/distributed_hpo_trainer.py +554 -0
- snowflake/ml/modeling/_internal/estimator_protocols.py +1 -60
- snowflake/ml/modeling/_internal/model_specifications.py +146 -0
- snowflake/ml/modeling/_internal/model_trainer.py +13 -0
- snowflake/ml/modeling/_internal/model_trainer_builder.py +78 -0
- snowflake/ml/modeling/_internal/pandas_trainer.py +54 -0
- snowflake/ml/modeling/_internal/snowpark_handlers.py +6 -760
- snowflake/ml/modeling/_internal/snowpark_trainer.py +331 -0
- snowflake/ml/modeling/calibration/calibrated_classifier_cv.py +108 -135
- snowflake/ml/modeling/cluster/affinity_propagation.py +106 -135
- snowflake/ml/modeling/cluster/agglomerative_clustering.py +106 -135
- snowflake/ml/modeling/cluster/birch.py +106 -135
- snowflake/ml/modeling/cluster/bisecting_k_means.py +106 -135
- snowflake/ml/modeling/cluster/dbscan.py +106 -135
- snowflake/ml/modeling/cluster/feature_agglomeration.py +106 -135
- snowflake/ml/modeling/cluster/k_means.py +105 -135
- snowflake/ml/modeling/cluster/mean_shift.py +106 -135
- snowflake/ml/modeling/cluster/mini_batch_k_means.py +105 -135
- snowflake/ml/modeling/cluster/optics.py +106 -135
- snowflake/ml/modeling/cluster/spectral_biclustering.py +106 -135
- snowflake/ml/modeling/cluster/spectral_clustering.py +106 -135
- snowflake/ml/modeling/cluster/spectral_coclustering.py +106 -135
- snowflake/ml/modeling/compose/column_transformer.py +106 -135
- snowflake/ml/modeling/compose/transformed_target_regressor.py +108 -135
- snowflake/ml/modeling/covariance/elliptic_envelope.py +106 -135
- snowflake/ml/modeling/covariance/empirical_covariance.py +99 -128
- snowflake/ml/modeling/covariance/graphical_lasso.py +106 -135
- snowflake/ml/modeling/covariance/graphical_lasso_cv.py +106 -135
- snowflake/ml/modeling/covariance/ledoit_wolf.py +104 -133
- snowflake/ml/modeling/covariance/min_cov_det.py +106 -135
- snowflake/ml/modeling/covariance/oas.py +99 -128
- snowflake/ml/modeling/covariance/shrunk_covariance.py +103 -132
- snowflake/ml/modeling/decomposition/dictionary_learning.py +106 -135
- snowflake/ml/modeling/decomposition/factor_analysis.py +106 -135
- snowflake/ml/modeling/decomposition/fast_ica.py +106 -135
- snowflake/ml/modeling/decomposition/incremental_pca.py +106 -135
- snowflake/ml/modeling/decomposition/kernel_pca.py +106 -135
- snowflake/ml/modeling/decomposition/mini_batch_dictionary_learning.py +106 -135
- snowflake/ml/modeling/decomposition/mini_batch_sparse_pca.py +106 -135
- snowflake/ml/modeling/decomposition/pca.py +106 -135
- snowflake/ml/modeling/decomposition/sparse_pca.py +106 -135
- snowflake/ml/modeling/decomposition/truncated_svd.py +106 -135
- snowflake/ml/modeling/discriminant_analysis/linear_discriminant_analysis.py +108 -135
- snowflake/ml/modeling/discriminant_analysis/quadratic_discriminant_analysis.py +108 -135
- snowflake/ml/modeling/ensemble/ada_boost_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/ada_boost_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/bagging_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/bagging_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/extra_trees_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/extra_trees_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/gradient_boosting_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/gradient_boosting_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/hist_gradient_boosting_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/hist_gradient_boosting_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/isolation_forest.py +106 -135
- snowflake/ml/modeling/ensemble/random_forest_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/random_forest_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/stacking_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/voting_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/voting_regressor.py +108 -135
- snowflake/ml/modeling/feature_selection/generic_univariate_select.py +101 -128
- snowflake/ml/modeling/feature_selection/select_fdr.py +99 -126
- snowflake/ml/modeling/feature_selection/select_fpr.py +99 -126
- snowflake/ml/modeling/feature_selection/select_fwe.py +99 -126
- snowflake/ml/modeling/feature_selection/select_k_best.py +100 -127
- snowflake/ml/modeling/feature_selection/select_percentile.py +99 -126
- snowflake/ml/modeling/feature_selection/sequential_feature_selector.py +106 -135
- snowflake/ml/modeling/feature_selection/variance_threshold.py +95 -124
- snowflake/ml/modeling/framework/base.py +83 -1
- snowflake/ml/modeling/gaussian_process/gaussian_process_classifier.py +108 -135
- snowflake/ml/modeling/gaussian_process/gaussian_process_regressor.py +108 -135
- snowflake/ml/modeling/impute/iterative_imputer.py +106 -135
- snowflake/ml/modeling/impute/knn_imputer.py +106 -135
- snowflake/ml/modeling/impute/missing_indicator.py +106 -135
- snowflake/ml/modeling/impute/simple_imputer.py +9 -1
- snowflake/ml/modeling/kernel_approximation/additive_chi2_sampler.py +96 -125
- snowflake/ml/modeling/kernel_approximation/nystroem.py +106 -135
- snowflake/ml/modeling/kernel_approximation/polynomial_count_sketch.py +106 -135
- snowflake/ml/modeling/kernel_approximation/rbf_sampler.py +105 -134
- snowflake/ml/modeling/kernel_approximation/skewed_chi2_sampler.py +103 -132
- snowflake/ml/modeling/kernel_ridge/kernel_ridge.py +108 -135
- snowflake/ml/modeling/lightgbm/lgbm_classifier.py +90 -118
- snowflake/ml/modeling/lightgbm/lgbm_regressor.py +90 -118
- snowflake/ml/modeling/linear_model/ard_regression.py +108 -135
- snowflake/ml/modeling/linear_model/bayesian_ridge.py +108 -135
- snowflake/ml/modeling/linear_model/elastic_net.py +108 -135
- snowflake/ml/modeling/linear_model/elastic_net_cv.py +108 -135
- snowflake/ml/modeling/linear_model/gamma_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/huber_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/lars.py +108 -135
- snowflake/ml/modeling/linear_model/lars_cv.py +108 -135
- snowflake/ml/modeling/linear_model/lasso.py +108 -135
- snowflake/ml/modeling/linear_model/lasso_cv.py +108 -135
- snowflake/ml/modeling/linear_model/lasso_lars.py +108 -135
- snowflake/ml/modeling/linear_model/lasso_lars_cv.py +108 -135
- snowflake/ml/modeling/linear_model/lasso_lars_ic.py +108 -135
- snowflake/ml/modeling/linear_model/linear_regression.py +108 -135
- snowflake/ml/modeling/linear_model/logistic_regression.py +108 -135
- snowflake/ml/modeling/linear_model/logistic_regression_cv.py +108 -135
- snowflake/ml/modeling/linear_model/multi_task_elastic_net.py +108 -135
- snowflake/ml/modeling/linear_model/multi_task_elastic_net_cv.py +108 -135
- snowflake/ml/modeling/linear_model/multi_task_lasso.py +108 -135
- snowflake/ml/modeling/linear_model/multi_task_lasso_cv.py +108 -135
- snowflake/ml/modeling/linear_model/orthogonal_matching_pursuit.py +108 -135
- snowflake/ml/modeling/linear_model/passive_aggressive_classifier.py +108 -135
- snowflake/ml/modeling/linear_model/passive_aggressive_regressor.py +107 -135
- snowflake/ml/modeling/linear_model/perceptron.py +107 -135
- snowflake/ml/modeling/linear_model/poisson_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/ransac_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/ridge.py +108 -135
- snowflake/ml/modeling/linear_model/ridge_classifier.py +108 -135
- snowflake/ml/modeling/linear_model/ridge_classifier_cv.py +108 -135
- snowflake/ml/modeling/linear_model/ridge_cv.py +108 -135
- snowflake/ml/modeling/linear_model/sgd_classifier.py +108 -135
- snowflake/ml/modeling/linear_model/sgd_one_class_svm.py +106 -135
- snowflake/ml/modeling/linear_model/sgd_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/theil_sen_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/tweedie_regressor.py +108 -135
- snowflake/ml/modeling/manifold/isomap.py +106 -135
- snowflake/ml/modeling/manifold/mds.py +106 -135
- snowflake/ml/modeling/manifold/spectral_embedding.py +106 -135
- snowflake/ml/modeling/manifold/tsne.py +106 -135
- snowflake/ml/modeling/metrics/classification.py +196 -55
- snowflake/ml/modeling/metrics/correlation.py +4 -2
- snowflake/ml/modeling/metrics/covariance.py +7 -4
- snowflake/ml/modeling/metrics/ranking.py +32 -16
- snowflake/ml/modeling/metrics/regression.py +60 -32
- snowflake/ml/modeling/mixture/bayesian_gaussian_mixture.py +106 -135
- snowflake/ml/modeling/mixture/gaussian_mixture.py +106 -135
- snowflake/ml/modeling/model_selection/grid_search_cv.py +91 -148
- snowflake/ml/modeling/model_selection/randomized_search_cv.py +93 -154
- snowflake/ml/modeling/multiclass/one_vs_one_classifier.py +105 -132
- snowflake/ml/modeling/multiclass/one_vs_rest_classifier.py +108 -135
- snowflake/ml/modeling/multiclass/output_code_classifier.py +108 -135
- snowflake/ml/modeling/naive_bayes/bernoulli_nb.py +108 -135
- snowflake/ml/modeling/naive_bayes/categorical_nb.py +108 -135
- snowflake/ml/modeling/naive_bayes/complement_nb.py +108 -135
- snowflake/ml/modeling/naive_bayes/gaussian_nb.py +98 -125
- snowflake/ml/modeling/naive_bayes/multinomial_nb.py +107 -134
- snowflake/ml/modeling/neighbors/k_neighbors_classifier.py +108 -135
- snowflake/ml/modeling/neighbors/k_neighbors_regressor.py +108 -135
- snowflake/ml/modeling/neighbors/kernel_density.py +106 -135
- snowflake/ml/modeling/neighbors/local_outlier_factor.py +106 -135
- snowflake/ml/modeling/neighbors/nearest_centroid.py +108 -135
- snowflake/ml/modeling/neighbors/nearest_neighbors.py +106 -135
- snowflake/ml/modeling/neighbors/neighborhood_components_analysis.py +108 -135
- snowflake/ml/modeling/neighbors/radius_neighbors_classifier.py +108 -135
- snowflake/ml/modeling/neighbors/radius_neighbors_regressor.py +108 -135
- snowflake/ml/modeling/neural_network/bernoulli_rbm.py +106 -135
- snowflake/ml/modeling/neural_network/mlp_classifier.py +108 -135
- snowflake/ml/modeling/neural_network/mlp_regressor.py +108 -135
- snowflake/ml/modeling/parameters/disable_distributed_hpo.py +2 -6
- snowflake/ml/modeling/preprocessing/binarizer.py +25 -8
- snowflake/ml/modeling/preprocessing/k_bins_discretizer.py +9 -4
- snowflake/ml/modeling/preprocessing/label_encoder.py +31 -11
- snowflake/ml/modeling/preprocessing/max_abs_scaler.py +27 -9
- snowflake/ml/modeling/preprocessing/min_max_scaler.py +42 -14
- snowflake/ml/modeling/preprocessing/normalizer.py +9 -4
- snowflake/ml/modeling/preprocessing/one_hot_encoder.py +26 -10
- snowflake/ml/modeling/preprocessing/ordinal_encoder.py +37 -13
- snowflake/ml/modeling/preprocessing/polynomial_features.py +106 -135
- snowflake/ml/modeling/preprocessing/robust_scaler.py +39 -13
- snowflake/ml/modeling/preprocessing/standard_scaler.py +36 -12
- snowflake/ml/modeling/semi_supervised/label_propagation.py +108 -135
- snowflake/ml/modeling/semi_supervised/label_spreading.py +108 -135
- snowflake/ml/modeling/svm/linear_svc.py +108 -135
- snowflake/ml/modeling/svm/linear_svr.py +108 -135
- snowflake/ml/modeling/svm/nu_svc.py +108 -135
- snowflake/ml/modeling/svm/nu_svr.py +108 -135
- snowflake/ml/modeling/svm/svc.py +108 -135
- snowflake/ml/modeling/svm/svr.py +108 -135
- snowflake/ml/modeling/tree/decision_tree_classifier.py +108 -135
- snowflake/ml/modeling/tree/decision_tree_regressor.py +108 -135
- snowflake/ml/modeling/tree/extra_tree_classifier.py +108 -135
- snowflake/ml/modeling/tree/extra_tree_regressor.py +108 -135
- snowflake/ml/modeling/xgboost/xgb_classifier.py +108 -136
- snowflake/ml/modeling/xgboost/xgb_regressor.py +108 -136
- snowflake/ml/modeling/xgboost/xgbrf_classifier.py +108 -136
- snowflake/ml/modeling/xgboost/xgbrf_regressor.py +108 -136
- snowflake/ml/registry/model_registry.py +2 -0
- snowflake/ml/registry/registry.py +215 -0
- snowflake/ml/version.py +1 -1
- {snowflake_ml_python-1.1.0.dist-info → snowflake_ml_python-1.1.2.dist-info}/METADATA +34 -1
- snowflake_ml_python-1.1.2.dist-info/RECORD +347 -0
- snowflake_ml_python-1.1.0.dist-info/RECORD +0 -331
- {snowflake_ml_python-1.1.0.dist-info → snowflake_ml_python-1.1.2.dist-info}/WHEEL +0 -0
@@ -22,17 +22,19 @@ from sklearn.utils.metaestimators import available_if
 from snowflake.ml.modeling.framework.base import BaseTransformer, _process_cols
 from snowflake.ml._internal import telemetry
 from snowflake.ml._internal.exceptions import error_codes, exceptions, modeling_error_messages
+from snowflake.ml._internal.env_utils import SNOWML_SPROC_ENV
 from snowflake.ml._internal.utils import pkg_version_utils, identifier
-from snowflake.snowpark import DataFrame
+from snowflake.snowpark import DataFrame, Session
 from snowflake.snowpark._internal.type_utils import convert_sp_to_sf_type
 from snowflake.ml.modeling._internal.snowpark_handlers import SnowparkHandlers as HandlersImpl
+from snowflake.ml.modeling._internal.model_trainer_builder import ModelTrainerBuilder
+from snowflake.ml.modeling._internal.model_trainer import ModelTrainer
 from snowflake.ml.modeling._internal.estimator_utils import (
     gather_dependencies,
     original_estimator_has_callable,
     transform_snowml_obj_to_sklearn_obj,
     validate_sklearn_args,
 )
-from snowflake.ml.modeling._internal.snowpark_handlers import SklearnWrapperProvider
 from snowflake.ml.modeling._internal.estimator_protocols import FitPredictHandlers
 
 from snowflake.ml.model.model_signature import (
@@ -52,7 +54,6 @@ _PROJECT = "ModelDevelopment"
 _SUBPROJECT = "".join([s.capitalize() for s in "sklearn.kernel_approximation".replace("sklearn.", "").split("_")])
 
 
-
 class PolynomialCountSketch(BaseTransformer):
     r"""Polynomial kernel approximation via Tensor Sketch
     For more details on this class, see [sklearn.kernel_approximation.PolynomialCountSketch]
@@ -60,6 +61,49 @@ class PolynomialCountSketch(BaseTransformer):
 
     Parameters
     ----------
+
+    input_cols: Optional[Union[str, List[str]]]
+        A string or list of strings representing column names that contain features.
+        If this parameter is not specified, all columns in the input DataFrame except
+        the columns specified by label_cols, sample_weight_col, and passthrough_cols
+        parameters are considered input columns. Input columns can also be set after
+        initialization with the `set_input_cols` method.
+
+    label_cols: Optional[Union[str, List[str]]]
+        This parameter is optional and will be ignored during fit. It is present here for API consistency by convention.
+
+    output_cols: Optional[Union[str, List[str]]]
+        A string or list of strings representing column names that will store the
+        output of predict and transform operations. The length of output_cols must
+        match the expected number of output columns from the specific predictor or
+        transformer class used.
+        If you omit this parameter, output column names are derived by adding an
+        OUTPUT_ prefix to the label column names for supervised estimators, or
+        OUTPUT_<IDX>for unsupervised estimators. These inferred output column names
+        work for predictors, but output_cols must be set explicitly for transformers.
+        In general, explicitly specifying output column names is clearer, especially
+        if you don’t specify the input column names.
+        To transform in place, pass the same names for input_cols and output_cols.
+        be set explicitly for transformers. Output columns can also be set after
+        initialization with the `set_output_cols` method.
+
+    sample_weight_col: Optional[str]
+        A string representing the column name containing the sample weights.
+        This argument is only required when working with weighted datasets. Sample
+        weight column can also be set after initialization with the
+        `set_sample_weight_col` method.
+
+    passthrough_cols: Optional[Union[str, List[str]]]
+        A string or a list of strings indicating column names to be excluded from any
+        operations (such as train, transform, or inference). These specified column(s)
+        will remain untouched throughout the process. This option is helpful in scenarios
+        requiring automatic input_cols inference, but need to avoid using specific
+        columns, like index columns, during training or inference. Passthrough columns
+        can also be set after initialization with the `set_passthrough_cols` method.
+
+    drop_input_cols: Optional[bool], default=False
+        If set, the response of predict(), transform() methods will not contain input columns.
+
     gamma: float, default=1.0
         Parameter of the polynomial kernel whose feature map
         will be approximated.
@@ -83,35 +127,6 @@ class PolynomialCountSketch(BaseTransformer):
         Determines random number generation for indexHash and bitHash
         initialization. Pass an int for reproducible results across multiple
         function calls. See :term:`Glossary <random_state>`.
-
-    input_cols: Optional[Union[str, List[str]]]
-        A string or list of strings representing column names that contain features.
-        If this parameter is not specified, all columns in the input DataFrame except
-        the columns specified by label_cols and sample_weight_col parameters are
-        considered input columns.
-
-    label_cols: Optional[Union[str, List[str]]]
-        A string or list of strings representing column names that contain labels.
-        This is a required param for estimators, as there is no way to infer these
-        columns. If this parameter is not specified, then object is fitted without
-        labels (like a transformer).
-
-    output_cols: Optional[Union[str, List[str]]]
-        A string or list of strings representing column names that will store the
-        output of predict and transform operations. The length of output_cols must
-        match the expected number of output columns from the specific estimator or
-        transformer class used.
-        If this parameter is not specified, output column names are derived by
-        adding an OUTPUT_ prefix to the label column names. These inferred output
-        column names work for estimator's predict() method, but output_cols must
-        be set explicitly for transformers.
-
-    sample_weight_col: Optional[str]
-        A string representing the column name containing the sample weights.
-        This argument is only required when working with weighted datasets.
-
-    drop_input_cols: Optional[bool], default=False
-        If set, the response of predict(), transform() methods will not contain input columns.
     """
 
     def __init__( # type: ignore[no-untyped-def]
@@ -125,6 +140,7 @@ class PolynomialCountSketch(BaseTransformer):
         input_cols: Optional[Union[str, Iterable[str]]] = None,
         output_cols: Optional[Union[str, Iterable[str]]] = None,
         label_cols: Optional[Union[str, Iterable[str]]] = None,
+        passthrough_cols: Optional[Union[str, Iterable[str]]] = None,
         drop_input_cols: Optional[bool] = False,
         sample_weight_col: Optional[str] = None,
     ) -> None:
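Taken together, the docstring and constructor changes above add `passthrough_cols` alongside the existing column-handling parameters. A minimal usage sketch under assumed names (the connection parameters, table, and column names are illustrative, not taken from this diff; the import path follows the file layout in the listing above):

    # Minimal sketch, assuming a configured Snowpark session and a feature table.
    from snowflake.snowpark import Session
    from snowflake.ml.modeling.kernel_approximation import PolynomialCountSketch

    session = Session.builder.configs(connection_parameters).create()  # connection_parameters is assumed
    df = session.table("MY_DB.MY_SCHEMA.MY_FEATURES")                  # hypothetical table

    pcs = PolynomialCountSketch(
        n_components=4,                                  # standard sklearn parameter, passed through
        input_cols=["FEATURE1", "FEATURE2"],             # feature columns
        output_cols=[f"OUTPUT_{i}" for i in range(4)],   # one name per approximated component
        passthrough_cols=["ID"],                         # excluded from training, kept in the output
    )
    transformed = pcs.fit(df).transform(df)              # fit() returns self, as shown in the fit hunk below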
@@ -133,9 +149,10 @@ class PolynomialCountSketch(BaseTransformer):
         self.set_input_cols(input_cols)
         self.set_output_cols(output_cols)
         self.set_label_cols(label_cols)
+        self.set_passthrough_cols(passthrough_cols)
         self.set_drop_input_cols(drop_input_cols)
         self.set_sample_weight_col(sample_weight_col)
-        deps = set(
+        deps: Set[str] = set([f'numpy=={np.__version__}', f'scikit-learn=={sklearn.__version__}', f'cloudpickle=={cp.__version__}'])
 
         self._deps = list(deps)
 
@@ -148,13 +165,14 @@ class PolynomialCountSketch(BaseTransformer):
             args=init_args,
             klass=sklearn.kernel_approximation.PolynomialCountSketch
         )
-        self._sklearn_object = sklearn.kernel_approximation.PolynomialCountSketch(
+        self._sklearn_object: Any = sklearn.kernel_approximation.PolynomialCountSketch(
             **cleaned_up_init_args,
         )
         self._model_signature_dict: Optional[Dict[str, ModelSignature]] = None
         # If user used snowpark dataframe during fit, here it stores the snowpark input_cols, otherwise the processed input_cols
         self._snowpark_cols: Optional[List[str]] = self.input_cols
-        self._handlers: FitPredictHandlers = HandlersImpl(class_name=PolynomialCountSketch.__class__.__name__, subproject=_SUBPROJECT, autogenerated=True
+        self._handlers: FitPredictHandlers = HandlersImpl(class_name=PolynomialCountSketch.__class__.__name__, subproject=_SUBPROJECT, autogenerated=True)
+        self._autogenerated = True
 
     def _get_rand_id(self) -> str:
         """
@@ -165,24 +183,6 @@ class PolynomialCountSketch(BaseTransformer):
         """
         return str(uuid4()).replace("-", "_").upper()
 
-    def _infer_input_output_cols(self, dataset: Union[DataFrame, pd.DataFrame]) -> None:
-        """
-        Infer `self.input_cols` and `self.output_cols` if they are not explicitly set.
-
-        Args:
-            dataset: Input dataset.
-        """
-        if not self.input_cols:
-            cols = [
-                c for c in dataset.columns
-                if c not in self.get_label_cols() and c != self.sample_weight_col
-            ]
-            self.set_input_cols(input_cols=cols)
-
-        if not self.output_cols:
-            cols = [identifier.concat_names(ids=['OUTPUT_', c]) for c in self.label_cols]
-            self.set_output_cols(output_cols=cols)
-
     def set_input_cols(self, input_cols: Optional[Union[str, Iterable[str]]]) -> "PolynomialCountSketch":
         """
         Input columns setter.
@@ -228,54 +228,48 @@ class PolynomialCountSketch(BaseTransformer):
             self
         """
         self._infer_input_output_cols(dataset)
-        if isinstance(dataset,
-
-
-
-
-
-        self.
-
-
-
-
-
-
-
-
-
+        if isinstance(dataset, DataFrame):
+            session = dataset._session
+            assert session is not None # keep mypy happy
+            # Validate that key package version in user workspace are supported in snowflake conda channel
+            # If customer doesn't have package in conda channel, replace the ones have the closest versions
+            self._deps = pkg_version_utils.get_valid_pkg_versions_supported_in_snowflake_conda_channel(
+                pkg_versions=self._get_dependencies(), session=session, subproject=_SUBPROJECT)
+
+            # Specify input columns so column pruning will be enforced
+            selected_cols = self._get_active_columns()
+            if len(selected_cols) > 0:
+                dataset = dataset.select(selected_cols)
+
+            self._snowpark_cols = dataset.select(self.input_cols).columns
+
+            # If we are already in a stored procedure, no need to kick off another one.
+            if SNOWML_SPROC_ENV in os.environ:
+                statement_params = telemetry.get_function_usage_statement_params(
+                    project=_PROJECT,
+                    subproject=_SUBPROJECT,
+                    function_name=telemetry.get_statement_params_full_func_name(inspect.currentframe(), PolynomialCountSketch.__class__.__name__),
+                    api_calls=[Session.call],
+                    custom_tags=dict([("autogen", True)]) if self._autogenerated else None,
+                )
+                pd_df: pd.DataFrame = dataset.to_pandas(statement_params=statement_params)
+                pd_df.columns = dataset.columns
+                dataset = pd_df
+
+        model_trainer = ModelTrainerBuilder.build(
+            estimator=self._sklearn_object,
+            dataset=dataset,
+            input_cols=self.input_cols,
+            label_cols=self.label_cols,
+            sample_weight_col=self.sample_weight_col,
+            autogenerated=self._autogenerated,
+            subproject=_SUBPROJECT
+        )
+        self._sklearn_object = model_trainer.train()
         self._is_fitted = True
         self._get_model_signatures(dataset)
         return self
 
-    def _fit_snowpark(self, dataset: DataFrame) -> None:
-        session = dataset._session
-        assert session is not None # keep mypy happy
-        # Validate that key package version in user workspace are supported in snowflake conda channel
-        # If customer doesn't have package in conda channel, replace the ones have the closest versions
-        self._deps = pkg_version_utils.get_valid_pkg_versions_supported_in_snowflake_conda_channel(
-            pkg_versions=self._get_dependencies(), session=session, subproject=_SUBPROJECT)
-
-        # Specify input columns so column pruning will be enforced
-        selected_cols = self._get_active_columns()
-        if len(selected_cols) > 0:
-            dataset = dataset.select(selected_cols)
-
-        estimator = self._sklearn_object
-        assert estimator is not None # Keep mypy happy
-
-        self._snowpark_cols = dataset.select(self.input_cols).columns
-
-        self._sklearn_object = self._handlers.fit_snowpark(
-            dataset,
-            session,
-            estimator,
-            ["snowflake-snowpark-python"] + self._get_dependencies(),
-            self.input_cols,
-            self.label_cols,
-            self.sample_weight_col,
-        )
-
     def _get_pass_through_columns(self, dataset: DataFrame) -> List[str]:
         if self._drop_input_cols:
             return []
@@ -463,11 +457,6 @@ class PolynomialCountSketch(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def predict(self, dataset: Union[DataFrame, pd.DataFrame]) -> Union[DataFrame, pd.DataFrame]:
         """Method not supported for this class.
 
@@ -519,11 +508,6 @@ class PolynomialCountSketch(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def transform(self, dataset: Union[DataFrame, pd.DataFrame]) -> Union[DataFrame, pd.DataFrame]:
         """Generate the feature map approximation for X
         For more details on this function, see [sklearn.kernel_approximation.PolynomialCountSketch.transform]
@@ -582,7 +566,8 @@ class PolynomialCountSketch(BaseTransformer):
         if False:
             self.fit(dataset)
             assert self._sklearn_object is not None
-
+            labels : npt.NDArray[Any] = self._sklearn_object.labels_
+            return labels
         else:
             raise NotImplementedError
 
@@ -618,6 +603,7 @@ class PolynomialCountSketch(BaseTransformer):
             output_cols = []
 
         # Make sure column names are valid snowflake identifiers.
+        assert output_cols is not None # Make MyPy happy
         rv = [identifier.rename_to_valid_snowflake_identifier(c) for c in output_cols]
 
         return rv
@@ -628,11 +614,6 @@ class PolynomialCountSketch(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def predict_proba(
         self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "predict_proba_"
     ) -> Union[DataFrame, pd.DataFrame]:
@@ -673,11 +654,6 @@ class PolynomialCountSketch(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def predict_log_proba(
         self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "predict_log_proba_"
     ) -> Union[DataFrame, pd.DataFrame]:
@@ -714,16 +690,6 @@ class PolynomialCountSketch(BaseTransformer):
         return output_df
 
     @available_if(original_estimator_has_callable("decision_function")) # type: ignore[misc]
-    @telemetry.send_api_usage_telemetry(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def decision_function(
         self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "decision_function_"
     ) -> Union[DataFrame, pd.DataFrame]:
@@ -822,11 +788,6 @@ class PolynomialCountSketch(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def kneighbors(
         self,
         dataset: Union[DataFrame, pd.DataFrame],
@@ -886,18 +847,28 @@ class PolynomialCountSketch(BaseTransformer):
         # For classifier, the type of predict is the same as the type of label
         if self._sklearn_object._estimator_type == 'classifier':
             # label columns is the desired type for output
-            outputs = _infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True)
+            outputs = list(_infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True))
             # rename the output columns
-            outputs = model_signature_utils.rename_features(outputs, self.output_cols)
+            outputs = list(model_signature_utils.rename_features(outputs, self.output_cols))
+            self._model_signature_dict["predict"] = ModelSignature(inputs,
+                                                                   ([] if self._drop_input_cols else inputs)
+                                                                   + outputs)
+        # For mixture models that use the density mixin, `predict` returns the argmax of the log prob.
+        # For outlier models, returns -1 for outliers and 1 for inliers.
+        # Clusterer returns int64 cluster labels.
+        elif self._sklearn_object._estimator_type in ["DensityEstimator", "clusterer", "outlier_detector"]:
+            outputs = [FeatureSpec(dtype=DataType.INT64, name=c) for c in self.output_cols]
             self._model_signature_dict["predict"] = ModelSignature(inputs,
                                                                    ([] if self._drop_input_cols else inputs)
                                                                    + outputs)
+
         # For regressor, the type of predict is float64
         elif self._sklearn_object._estimator_type == 'regressor':
             outputs = [FeatureSpec(dtype=DataType.DOUBLE, name=c) for c in self.output_cols]
             self._model_signature_dict["predict"] = ModelSignature(inputs,
                                                                    ([] if self._drop_input_cols else inputs)
                                                                    + outputs)
+
         for prob_func in PROB_FUNCTIONS:
             if hasattr(self, prob_func):
                 output_cols_prefix: str = f"{prob_func}_"
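For reference, the `predict` signature assembled in this hunk is built from the public model_signature types imported at the top of the file. A minimal hand-built equivalent might look roughly like the sketch below (the column names are illustrative assumptions; only the types and keywords shown in the hunk above are taken from the diff):

    # Minimal sketch of the signature shape produced above (column names are assumed).
    from snowflake.ml.model.model_signature import DataType, FeatureSpec, ModelSignature

    predict_signature = ModelSignature(
        inputs=[FeatureSpec(dtype=DataType.DOUBLE, name="FEATURE1"),
                FeatureSpec(dtype=DataType.DOUBLE, name="FEATURE2")],
        outputs=[FeatureSpec(dtype=DataType.DOUBLE, name="OUTPUT_1")],  # INT64 for clusterer/outlier/density estimators
    )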