snowflake-ml-python 1.1.0__py3-none-any.whl → 1.1.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- snowflake/cortex/_complete.py +1 -1
- snowflake/cortex/_extract_answer.py +1 -1
- snowflake/cortex/_sentiment.py +1 -1
- snowflake/cortex/_summarize.py +1 -1
- snowflake/cortex/_translate.py +1 -1
- snowflake/ml/_internal/env_utils.py +68 -6
- snowflake/ml/_internal/file_utils.py +34 -4
- snowflake/ml/_internal/telemetry.py +79 -91
- snowflake/ml/_internal/utils/identifier.py +78 -72
- snowflake/ml/_internal/utils/retryable_http.py +16 -4
- snowflake/ml/_internal/utils/spcs_attribution_utils.py +122 -0
- snowflake/ml/dataset/dataset.py +1 -1
- snowflake/ml/model/_api.py +21 -14
- snowflake/ml/model/_client/model/model_impl.py +176 -0
- snowflake/ml/model/_client/model/model_method_info.py +19 -0
- snowflake/ml/model/_client/model/model_version_impl.py +291 -0
- snowflake/ml/model/_client/ops/metadata_ops.py +107 -0
- snowflake/ml/model/_client/ops/model_ops.py +308 -0
- snowflake/ml/model/_client/sql/model.py +75 -0
- snowflake/ml/model/_client/sql/model_version.py +213 -0
- snowflake/ml/model/_client/sql/stage.py +40 -0
- snowflake/ml/model/_deploy_client/image_builds/server_image_builder.py +3 -4
- snowflake/ml/model/_deploy_client/image_builds/templates/image_build_job_spec_template +24 -8
- snowflake/ml/model/_deploy_client/image_builds/templates/kaniko_shell_script_template +23 -0
- snowflake/ml/model/_deploy_client/snowservice/deploy.py +14 -2
- snowflake/ml/model/_deploy_client/utils/constants.py +1 -0
- snowflake/ml/model/_deploy_client/warehouse/deploy.py +2 -2
- snowflake/ml/model/_model_composer/model_composer.py +31 -9
- snowflake/ml/model/_model_composer/model_manifest/model_manifest.py +25 -10
- snowflake/ml/model/_model_composer/model_manifest/model_manifest_schema.py +2 -2
- snowflake/ml/model/_model_composer/model_method/infer_function.py_template +2 -1
- snowflake/ml/model/_model_composer/model_method/model_method.py +34 -3
- snowflake/ml/model/_model_composer/model_runtime/model_runtime.py +1 -1
- snowflake/ml/model/_packager/model_handlers/huggingface_pipeline.py +3 -1
- snowflake/ml/model/_packager/model_handlers/snowmlmodel.py +10 -28
- snowflake/ml/model/_packager/model_meta/model_meta.py +18 -16
- snowflake/ml/model/_signatures/snowpark_handler.py +1 -1
- snowflake/ml/model/model_signature.py +108 -53
- snowflake/ml/model/type_hints.py +1 -0
- snowflake/ml/modeling/_internal/distributed_hpo_trainer.py +554 -0
- snowflake/ml/modeling/_internal/estimator_protocols.py +1 -60
- snowflake/ml/modeling/_internal/model_specifications.py +146 -0
- snowflake/ml/modeling/_internal/model_trainer.py +13 -0
- snowflake/ml/modeling/_internal/model_trainer_builder.py +78 -0
- snowflake/ml/modeling/_internal/pandas_trainer.py +54 -0
- snowflake/ml/modeling/_internal/snowpark_handlers.py +6 -760
- snowflake/ml/modeling/_internal/snowpark_trainer.py +331 -0
- snowflake/ml/modeling/calibration/calibrated_classifier_cv.py +108 -135
- snowflake/ml/modeling/cluster/affinity_propagation.py +106 -135
- snowflake/ml/modeling/cluster/agglomerative_clustering.py +106 -135
- snowflake/ml/modeling/cluster/birch.py +106 -135
- snowflake/ml/modeling/cluster/bisecting_k_means.py +106 -135
- snowflake/ml/modeling/cluster/dbscan.py +106 -135
- snowflake/ml/modeling/cluster/feature_agglomeration.py +106 -135
- snowflake/ml/modeling/cluster/k_means.py +105 -135
- snowflake/ml/modeling/cluster/mean_shift.py +106 -135
- snowflake/ml/modeling/cluster/mini_batch_k_means.py +105 -135
- snowflake/ml/modeling/cluster/optics.py +106 -135
- snowflake/ml/modeling/cluster/spectral_biclustering.py +106 -135
- snowflake/ml/modeling/cluster/spectral_clustering.py +106 -135
- snowflake/ml/modeling/cluster/spectral_coclustering.py +106 -135
- snowflake/ml/modeling/compose/column_transformer.py +106 -135
- snowflake/ml/modeling/compose/transformed_target_regressor.py +108 -135
- snowflake/ml/modeling/covariance/elliptic_envelope.py +106 -135
- snowflake/ml/modeling/covariance/empirical_covariance.py +99 -128
- snowflake/ml/modeling/covariance/graphical_lasso.py +106 -135
- snowflake/ml/modeling/covariance/graphical_lasso_cv.py +106 -135
- snowflake/ml/modeling/covariance/ledoit_wolf.py +104 -133
- snowflake/ml/modeling/covariance/min_cov_det.py +106 -135
- snowflake/ml/modeling/covariance/oas.py +99 -128
- snowflake/ml/modeling/covariance/shrunk_covariance.py +103 -132
- snowflake/ml/modeling/decomposition/dictionary_learning.py +106 -135
- snowflake/ml/modeling/decomposition/factor_analysis.py +106 -135
- snowflake/ml/modeling/decomposition/fast_ica.py +106 -135
- snowflake/ml/modeling/decomposition/incremental_pca.py +106 -135
- snowflake/ml/modeling/decomposition/kernel_pca.py +106 -135
- snowflake/ml/modeling/decomposition/mini_batch_dictionary_learning.py +106 -135
- snowflake/ml/modeling/decomposition/mini_batch_sparse_pca.py +106 -135
- snowflake/ml/modeling/decomposition/pca.py +106 -135
- snowflake/ml/modeling/decomposition/sparse_pca.py +106 -135
- snowflake/ml/modeling/decomposition/truncated_svd.py +106 -135
- snowflake/ml/modeling/discriminant_analysis/linear_discriminant_analysis.py +108 -135
- snowflake/ml/modeling/discriminant_analysis/quadratic_discriminant_analysis.py +108 -135
- snowflake/ml/modeling/ensemble/ada_boost_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/ada_boost_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/bagging_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/bagging_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/extra_trees_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/extra_trees_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/gradient_boosting_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/gradient_boosting_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/hist_gradient_boosting_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/hist_gradient_boosting_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/isolation_forest.py +106 -135
- snowflake/ml/modeling/ensemble/random_forest_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/random_forest_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/stacking_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/voting_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/voting_regressor.py +108 -135
- snowflake/ml/modeling/feature_selection/generic_univariate_select.py +101 -128
- snowflake/ml/modeling/feature_selection/select_fdr.py +99 -126
- snowflake/ml/modeling/feature_selection/select_fpr.py +99 -126
- snowflake/ml/modeling/feature_selection/select_fwe.py +99 -126
- snowflake/ml/modeling/feature_selection/select_k_best.py +100 -127
- snowflake/ml/modeling/feature_selection/select_percentile.py +99 -126
- snowflake/ml/modeling/feature_selection/sequential_feature_selector.py +106 -135
- snowflake/ml/modeling/feature_selection/variance_threshold.py +95 -124
- snowflake/ml/modeling/framework/base.py +83 -1
- snowflake/ml/modeling/gaussian_process/gaussian_process_classifier.py +108 -135
- snowflake/ml/modeling/gaussian_process/gaussian_process_regressor.py +108 -135
- snowflake/ml/modeling/impute/iterative_imputer.py +106 -135
- snowflake/ml/modeling/impute/knn_imputer.py +106 -135
- snowflake/ml/modeling/impute/missing_indicator.py +106 -135
- snowflake/ml/modeling/impute/simple_imputer.py +9 -1
- snowflake/ml/modeling/kernel_approximation/additive_chi2_sampler.py +96 -125
- snowflake/ml/modeling/kernel_approximation/nystroem.py +106 -135
- snowflake/ml/modeling/kernel_approximation/polynomial_count_sketch.py +106 -135
- snowflake/ml/modeling/kernel_approximation/rbf_sampler.py +105 -134
- snowflake/ml/modeling/kernel_approximation/skewed_chi2_sampler.py +103 -132
- snowflake/ml/modeling/kernel_ridge/kernel_ridge.py +108 -135
- snowflake/ml/modeling/lightgbm/lgbm_classifier.py +90 -118
- snowflake/ml/modeling/lightgbm/lgbm_regressor.py +90 -118
- snowflake/ml/modeling/linear_model/ard_regression.py +108 -135
- snowflake/ml/modeling/linear_model/bayesian_ridge.py +108 -135
- snowflake/ml/modeling/linear_model/elastic_net.py +108 -135
- snowflake/ml/modeling/linear_model/elastic_net_cv.py +108 -135
- snowflake/ml/modeling/linear_model/gamma_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/huber_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/lars.py +108 -135
- snowflake/ml/modeling/linear_model/lars_cv.py +108 -135
- snowflake/ml/modeling/linear_model/lasso.py +108 -135
- snowflake/ml/modeling/linear_model/lasso_cv.py +108 -135
- snowflake/ml/modeling/linear_model/lasso_lars.py +108 -135
- snowflake/ml/modeling/linear_model/lasso_lars_cv.py +108 -135
- snowflake/ml/modeling/linear_model/lasso_lars_ic.py +108 -135
- snowflake/ml/modeling/linear_model/linear_regression.py +108 -135
- snowflake/ml/modeling/linear_model/logistic_regression.py +108 -135
- snowflake/ml/modeling/linear_model/logistic_regression_cv.py +108 -135
- snowflake/ml/modeling/linear_model/multi_task_elastic_net.py +108 -135
- snowflake/ml/modeling/linear_model/multi_task_elastic_net_cv.py +108 -135
- snowflake/ml/modeling/linear_model/multi_task_lasso.py +108 -135
- snowflake/ml/modeling/linear_model/multi_task_lasso_cv.py +108 -135
- snowflake/ml/modeling/linear_model/orthogonal_matching_pursuit.py +108 -135
- snowflake/ml/modeling/linear_model/passive_aggressive_classifier.py +108 -135
- snowflake/ml/modeling/linear_model/passive_aggressive_regressor.py +107 -135
- snowflake/ml/modeling/linear_model/perceptron.py +107 -135
- snowflake/ml/modeling/linear_model/poisson_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/ransac_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/ridge.py +108 -135
- snowflake/ml/modeling/linear_model/ridge_classifier.py +108 -135
- snowflake/ml/modeling/linear_model/ridge_classifier_cv.py +108 -135
- snowflake/ml/modeling/linear_model/ridge_cv.py +108 -135
- snowflake/ml/modeling/linear_model/sgd_classifier.py +108 -135
- snowflake/ml/modeling/linear_model/sgd_one_class_svm.py +106 -135
- snowflake/ml/modeling/linear_model/sgd_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/theil_sen_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/tweedie_regressor.py +108 -135
- snowflake/ml/modeling/manifold/isomap.py +106 -135
- snowflake/ml/modeling/manifold/mds.py +106 -135
- snowflake/ml/modeling/manifold/spectral_embedding.py +106 -135
- snowflake/ml/modeling/manifold/tsne.py +106 -135
- snowflake/ml/modeling/metrics/classification.py +196 -55
- snowflake/ml/modeling/metrics/correlation.py +4 -2
- snowflake/ml/modeling/metrics/covariance.py +7 -4
- snowflake/ml/modeling/metrics/ranking.py +32 -16
- snowflake/ml/modeling/metrics/regression.py +60 -32
- snowflake/ml/modeling/mixture/bayesian_gaussian_mixture.py +106 -135
- snowflake/ml/modeling/mixture/gaussian_mixture.py +106 -135
- snowflake/ml/modeling/model_selection/grid_search_cv.py +91 -148
- snowflake/ml/modeling/model_selection/randomized_search_cv.py +93 -154
- snowflake/ml/modeling/multiclass/one_vs_one_classifier.py +105 -132
- snowflake/ml/modeling/multiclass/one_vs_rest_classifier.py +108 -135
- snowflake/ml/modeling/multiclass/output_code_classifier.py +108 -135
- snowflake/ml/modeling/naive_bayes/bernoulli_nb.py +108 -135
- snowflake/ml/modeling/naive_bayes/categorical_nb.py +108 -135
- snowflake/ml/modeling/naive_bayes/complement_nb.py +108 -135
- snowflake/ml/modeling/naive_bayes/gaussian_nb.py +98 -125
- snowflake/ml/modeling/naive_bayes/multinomial_nb.py +107 -134
- snowflake/ml/modeling/neighbors/k_neighbors_classifier.py +108 -135
- snowflake/ml/modeling/neighbors/k_neighbors_regressor.py +108 -135
- snowflake/ml/modeling/neighbors/kernel_density.py +106 -135
- snowflake/ml/modeling/neighbors/local_outlier_factor.py +106 -135
- snowflake/ml/modeling/neighbors/nearest_centroid.py +108 -135
- snowflake/ml/modeling/neighbors/nearest_neighbors.py +106 -135
- snowflake/ml/modeling/neighbors/neighborhood_components_analysis.py +108 -135
- snowflake/ml/modeling/neighbors/radius_neighbors_classifier.py +108 -135
- snowflake/ml/modeling/neighbors/radius_neighbors_regressor.py +108 -135
- snowflake/ml/modeling/neural_network/bernoulli_rbm.py +106 -135
- snowflake/ml/modeling/neural_network/mlp_classifier.py +108 -135
- snowflake/ml/modeling/neural_network/mlp_regressor.py +108 -135
- snowflake/ml/modeling/parameters/disable_distributed_hpo.py +2 -6
- snowflake/ml/modeling/preprocessing/binarizer.py +25 -8
- snowflake/ml/modeling/preprocessing/k_bins_discretizer.py +9 -4
- snowflake/ml/modeling/preprocessing/label_encoder.py +31 -11
- snowflake/ml/modeling/preprocessing/max_abs_scaler.py +27 -9
- snowflake/ml/modeling/preprocessing/min_max_scaler.py +42 -14
- snowflake/ml/modeling/preprocessing/normalizer.py +9 -4
- snowflake/ml/modeling/preprocessing/one_hot_encoder.py +26 -10
- snowflake/ml/modeling/preprocessing/ordinal_encoder.py +37 -13
- snowflake/ml/modeling/preprocessing/polynomial_features.py +106 -135
- snowflake/ml/modeling/preprocessing/robust_scaler.py +39 -13
- snowflake/ml/modeling/preprocessing/standard_scaler.py +36 -12
- snowflake/ml/modeling/semi_supervised/label_propagation.py +108 -135
- snowflake/ml/modeling/semi_supervised/label_spreading.py +108 -135
- snowflake/ml/modeling/svm/linear_svc.py +108 -135
- snowflake/ml/modeling/svm/linear_svr.py +108 -135
- snowflake/ml/modeling/svm/nu_svc.py +108 -135
- snowflake/ml/modeling/svm/nu_svr.py +108 -135
- snowflake/ml/modeling/svm/svc.py +108 -135
- snowflake/ml/modeling/svm/svr.py +108 -135
- snowflake/ml/modeling/tree/decision_tree_classifier.py +108 -135
- snowflake/ml/modeling/tree/decision_tree_regressor.py +108 -135
- snowflake/ml/modeling/tree/extra_tree_classifier.py +108 -135
- snowflake/ml/modeling/tree/extra_tree_regressor.py +108 -135
- snowflake/ml/modeling/xgboost/xgb_classifier.py +108 -136
- snowflake/ml/modeling/xgboost/xgb_regressor.py +108 -136
- snowflake/ml/modeling/xgboost/xgbrf_classifier.py +108 -136
- snowflake/ml/modeling/xgboost/xgbrf_regressor.py +108 -136
- snowflake/ml/registry/model_registry.py +2 -0
- snowflake/ml/registry/registry.py +215 -0
- snowflake/ml/version.py +1 -1
- {snowflake_ml_python-1.1.0.dist-info → snowflake_ml_python-1.1.2.dist-info}/METADATA +34 -1
- snowflake_ml_python-1.1.2.dist-info/RECORD +347 -0
- snowflake_ml_python-1.1.0.dist-info/RECORD +0 -331
- {snowflake_ml_python-1.1.0.dist-info → snowflake_ml_python-1.1.2.dist-info}/WHEEL +0 -0
--- snowflake/ml/modeling/linear_model/lars.py (1.1.0)
+++ snowflake/ml/modeling/linear_model/lars.py (1.1.2)
@@ -22,17 +22,19 @@ from sklearn.utils.metaestimators import available_if
 from snowflake.ml.modeling.framework.base import BaseTransformer, _process_cols
 from snowflake.ml._internal import telemetry
 from snowflake.ml._internal.exceptions import error_codes, exceptions, modeling_error_messages
+from snowflake.ml._internal.env_utils import SNOWML_SPROC_ENV
 from snowflake.ml._internal.utils import pkg_version_utils, identifier
-from snowflake.snowpark import DataFrame
+from snowflake.snowpark import DataFrame, Session
 from snowflake.snowpark._internal.type_utils import convert_sp_to_sf_type
 from snowflake.ml.modeling._internal.snowpark_handlers import SnowparkHandlers as HandlersImpl
+from snowflake.ml.modeling._internal.model_trainer_builder import ModelTrainerBuilder
+from snowflake.ml.modeling._internal.model_trainer import ModelTrainer
 from snowflake.ml.modeling._internal.estimator_utils import (
     gather_dependencies,
     original_estimator_has_callable,
     transform_snowml_obj_to_sklearn_obj,
     validate_sklearn_args,
 )
-from snowflake.ml.modeling._internal.snowpark_handlers import SklearnWrapperProvider
 from snowflake.ml.modeling._internal.estimator_protocols import FitPredictHandlers

 from snowflake.ml.model.model_signature import (
@@ -52,7 +54,6 @@ _PROJECT = "ModelDevelopment"
 _SUBPROJECT = "".join([s.capitalize() for s in "sklearn.linear_model".replace("sklearn.", "").split("_")])


-
 class Lars(BaseTransformer):
     r"""Least Angle Regression model a
     For more details on this class, see [sklearn.linear_model.Lars]
@@ -60,6 +61,51 @@ class Lars(BaseTransformer):

     Parameters
     ----------
+
+    input_cols: Optional[Union[str, List[str]]]
+        A string or list of strings representing column names that contain features.
+        If this parameter is not specified, all columns in the input DataFrame except
+        the columns specified by label_cols, sample_weight_col, and passthrough_cols
+        parameters are considered input columns. Input columns can also be set after
+        initialization with the `set_input_cols` method.
+
+    label_cols: Optional[Union[str, List[str]]]
+        A string or list of strings representing column names that contain labels.
+        Label columns must be specified with this parameter during initialization
+        or with the `set_label_cols` method before fitting.
+
+    output_cols: Optional[Union[str, List[str]]]
+        A string or list of strings representing column names that will store the
+        output of predict and transform operations. The length of output_cols must
+        match the expected number of output columns from the specific predictor or
+        transformer class used.
+        If you omit this parameter, output column names are derived by adding an
+        OUTPUT_ prefix to the label column names for supervised estimators, or
+        OUTPUT_<IDX>for unsupervised estimators. These inferred output column names
+        work for predictors, but output_cols must be set explicitly for transformers.
+        In general, explicitly specifying output column names is clearer, especially
+        if you don’t specify the input column names.
+        To transform in place, pass the same names for input_cols and output_cols.
+        be set explicitly for transformers. Output columns can also be set after
+        initialization with the `set_output_cols` method.
+
+    sample_weight_col: Optional[str]
+        A string representing the column name containing the sample weights.
+        This argument is only required when working with weighted datasets. Sample
+        weight column can also be set after initialization with the
+        `set_sample_weight_col` method.
+
+    passthrough_cols: Optional[Union[str, List[str]]]
+        A string or a list of strings indicating column names to be excluded from any
+        operations (such as train, transform, or inference). These specified column(s)
+        will remain untouched throughout the process. This option is helpful in scenarios
+        requiring automatic input_cols inference, but need to avoid using specific
+        columns, like index columns, during training or inference. Passthrough columns
+        can also be set after initialization with the `set_passthrough_cols` method.
+
+    drop_input_cols: Optional[bool], default=False
+        If set, the response of predict(), transform() methods will not contain input columns.
+
     fit_intercept: bool, default=True
         Whether to calculate the intercept for this model. If set
         to false, no intercept will be used in calculations
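The docstring added above documents the shared column-handling parameters that now appear on every autogenerated estimator. As a quick illustration (a minimal sketch with hypothetical data and column names, not an example shipped with the package), the new passthrough_cols option and the other column parameters would be used roughly like this:

import pandas as pd
from snowflake.ml.modeling.linear_model import Lars

# Hypothetical toy data; ROW_ID is an index-like column to keep out of training.
df = pd.DataFrame({
    "ROW_ID": [1, 2, 3, 4],
    "FEATURE_A": [0.1, 0.4, 0.7, 1.0],
    "FEATURE_B": [1.0, 0.8, 0.6, 0.4],
    "PRICE": [10.0, 12.0, 14.0, 16.0],
})

lars = Lars(
    label_cols=["PRICE"],              # must be set at init or via set_label_cols before fit
    output_cols=["PRICE_PREDICTION"],  # otherwise OUTPUT_<label> would be derived
    passthrough_cols=["ROW_ID"],       # new parameter: excluded from train/transform/inference
)
lars.fit(df)                           # input_cols are inferred from the remaining columns
predictions = lars.predict(df)

The near-identical +/- line counts across the other modeling estimators in the file list suggest they receive the same autogenerated change, so the same constructor pattern should apply there as well.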
@@ -109,35 +155,6 @@ class Lars(BaseTransformer):
         Determines random number generation for jittering. Pass an int
         for reproducible output across multiple function calls.
         See :term:`Glossary <random_state>`. Ignored if `jitter` is None.
-
-    input_cols: Optional[Union[str, List[str]]]
-        A string or list of strings representing column names that contain features.
-        If this parameter is not specified, all columns in the input DataFrame except
-        the columns specified by label_cols and sample_weight_col parameters are
-        considered input columns.
-
-    label_cols: Optional[Union[str, List[str]]]
-        A string or list of strings representing column names that contain labels.
-        This is a required param for estimators, as there is no way to infer these
-        columns. If this parameter is not specified, then object is fitted without
-        labels (like a transformer).
-
-    output_cols: Optional[Union[str, List[str]]]
-        A string or list of strings representing column names that will store the
-        output of predict and transform operations. The length of output_cols must
-        match the expected number of output columns from the specific estimator or
-        transformer class used.
-        If this parameter is not specified, output column names are derived by
-        adding an OUTPUT_ prefix to the label column names. These inferred output
-        column names work for estimator's predict() method, but output_cols must
-        be set explicitly for transformers.
-
-    sample_weight_col: Optional[str]
-        A string representing the column name containing the sample weights.
-        This argument is only required when working with weighted datasets.
-
-    drop_input_cols: Optional[bool], default=False
-        If set, the response of predict(), transform() methods will not contain input columns.
     """

     def __init__(  # type: ignore[no-untyped-def]
@@ -156,6 +173,7 @@ class Lars(BaseTransformer):
         input_cols: Optional[Union[str, Iterable[str]]] = None,
         output_cols: Optional[Union[str, Iterable[str]]] = None,
         label_cols: Optional[Union[str, Iterable[str]]] = None,
+        passthrough_cols: Optional[Union[str, Iterable[str]]] = None,
         drop_input_cols: Optional[bool] = False,
         sample_weight_col: Optional[str] = None,
     ) -> None:
@@ -164,9 +182,10 @@ class Lars(BaseTransformer):
         self.set_input_cols(input_cols)
         self.set_output_cols(output_cols)
         self.set_label_cols(label_cols)
+        self.set_passthrough_cols(passthrough_cols)
         self.set_drop_input_cols(drop_input_cols)
         self.set_sample_weight_col(sample_weight_col)
-        deps = set(
+        deps: Set[str] = set([f'numpy=={np.__version__}', f'scikit-learn=={sklearn.__version__}', f'cloudpickle=={cp.__version__}'])

         self._deps = list(deps)

@@ -184,13 +203,14 @@ class Lars(BaseTransformer):
             args=init_args,
             klass=sklearn.linear_model.Lars
         )
-        self._sklearn_object = sklearn.linear_model.Lars(
+        self._sklearn_object: Any = sklearn.linear_model.Lars(
             **cleaned_up_init_args,
         )
         self._model_signature_dict: Optional[Dict[str, ModelSignature]] = None
         # If user used snowpark dataframe during fit, here it stores the snowpark input_cols, otherwise the processed input_cols
         self._snowpark_cols: Optional[List[str]] = self.input_cols
-        self._handlers: FitPredictHandlers = HandlersImpl(class_name=Lars.__class__.__name__, subproject=_SUBPROJECT, autogenerated=True
+        self._handlers: FitPredictHandlers = HandlersImpl(class_name=Lars.__class__.__name__, subproject=_SUBPROJECT, autogenerated=True)
+        self._autogenerated = True

     def _get_rand_id(self) -> str:
         """
@@ -201,24 +221,6 @@ class Lars(BaseTransformer):
         """
         return str(uuid4()).replace("-", "_").upper()

-    def _infer_input_output_cols(self, dataset: Union[DataFrame, pd.DataFrame]) -> None:
-        """
-        Infer `self.input_cols` and `self.output_cols` if they are not explicitly set.
-
-        Args:
-            dataset: Input dataset.
-        """
-        if not self.input_cols:
-            cols = [
-                c for c in dataset.columns
-                if c not in self.get_label_cols() and c != self.sample_weight_col
-            ]
-            self.set_input_cols(input_cols=cols)
-
-        if not self.output_cols:
-            cols = [identifier.concat_names(ids=['OUTPUT_', c]) for c in self.label_cols]
-            self.set_output_cols(output_cols=cols)
-
     def set_input_cols(self, input_cols: Optional[Union[str, Iterable[str]]]) -> "Lars":
         """
         Input columns setter.
@@ -264,54 +266,48 @@ class Lars(BaseTransformer):
             self
         """
         self._infer_input_output_cols(dataset)
-        if isinstance(dataset,
-
-
-
-
-
-            self.
-
-
-
-
-
-
-
-
-
+        if isinstance(dataset, DataFrame):
+            session = dataset._session
+            assert session is not None  # keep mypy happy
+            # Validate that key package version in user workspace are supported in snowflake conda channel
+            # If customer doesn't have package in conda channel, replace the ones have the closest versions
+            self._deps = pkg_version_utils.get_valid_pkg_versions_supported_in_snowflake_conda_channel(
+                pkg_versions=self._get_dependencies(), session=session, subproject=_SUBPROJECT)
+
+            # Specify input columns so column pruning will be enforced
+            selected_cols = self._get_active_columns()
+            if len(selected_cols) > 0:
+                dataset = dataset.select(selected_cols)
+
+            self._snowpark_cols = dataset.select(self.input_cols).columns
+
+            # If we are already in a stored procedure, no need to kick off another one.
+            if SNOWML_SPROC_ENV in os.environ:
+                statement_params = telemetry.get_function_usage_statement_params(
+                    project=_PROJECT,
+                    subproject=_SUBPROJECT,
+                    function_name=telemetry.get_statement_params_full_func_name(inspect.currentframe(), Lars.__class__.__name__),
+                    api_calls=[Session.call],
+                    custom_tags=dict([("autogen", True)]) if self._autogenerated else None,
+                )
+                pd_df: pd.DataFrame = dataset.to_pandas(statement_params=statement_params)
+                pd_df.columns = dataset.columns
+                dataset = pd_df
+
+        model_trainer = ModelTrainerBuilder.build(
+            estimator=self._sklearn_object,
+            dataset=dataset,
+            input_cols=self.input_cols,
+            label_cols=self.label_cols,
+            sample_weight_col=self.sample_weight_col,
+            autogenerated=self._autogenerated,
+            subproject=_SUBPROJECT
+        )
+        self._sklearn_object = model_trainer.train()
         self._is_fitted = True
         self._get_model_signatures(dataset)
         return self

-    def _fit_snowpark(self, dataset: DataFrame) -> None:
-        session = dataset._session
-        assert session is not None  # keep mypy happy
-        # Validate that key package version in user workspace are supported in snowflake conda channel
-        # If customer doesn't have package in conda channel, replace the ones have the closest versions
-        self._deps = pkg_version_utils.get_valid_pkg_versions_supported_in_snowflake_conda_channel(
-            pkg_versions=self._get_dependencies(), session=session, subproject=_SUBPROJECT)
-
-        # Specify input columns so column pruning will be enforced
-        selected_cols = self._get_active_columns()
-        if len(selected_cols) > 0:
-            dataset = dataset.select(selected_cols)
-
-        estimator = self._sklearn_object
-        assert estimator is not None  # Keep mypy happy
-
-        self._snowpark_cols = dataset.select(self.input_cols).columns
-
-        self._sklearn_object = self._handlers.fit_snowpark(
-            dataset,
-            session,
-            estimator,
-            ["snowflake-snowpark-python"] + self._get_dependencies(),
-            self.input_cols,
-            self.label_cols,
-            self.sample_weight_col,
-        )
-
     def _get_pass_through_columns(self, dataset: DataFrame) -> List[str]:
         if self._drop_input_cols:
             return []
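For orientation on the fit rewrite above: training is now handed to ModelTrainerBuilder.build instead of the removed _fit_snowpark helper, and the file list introduces pandas_trainer.py and snowpark_trainer.py alongside the builder. The sketch below only illustrates the dispatch this implies; the trainer class names and constructors are hypothetical stand-ins, not the library's actual implementation.

from dataclasses import dataclass
from typing import Any, Union

import pandas as pd
from snowflake.snowpark import DataFrame, Session


@dataclass
class PandasTrainerSketch:
    """Hypothetical stand-in for pandas_trainer.py: fit runs in the local process."""
    estimator: Any
    dataset: pd.DataFrame

    def train(self) -> Any:
        return self.estimator  # placeholder; a real trainer would fit the estimator here


@dataclass
class SnowparkTrainerSketch:
    """Hypothetical stand-in for snowpark_trainer.py: fit runs inside a Snowflake stored procedure."""
    estimator: Any
    dataset: DataFrame
    session: Session

    def train(self) -> Any:
        return self.estimator  # placeholder


def build_trainer(estimator: Any, dataset: Union[DataFrame, pd.DataFrame]) -> Any:
    # Dispatch on dataset type, mirroring what ModelTrainerBuilder.build appears to do:
    # local training for pandas input, pushdown training for Snowpark input.
    if isinstance(dataset, pd.DataFrame):
        return PandasTrainerSketch(estimator, dataset)
    if isinstance(dataset, DataFrame):
        return SnowparkTrainerSketch(estimator, dataset, dataset._session)
    raise TypeError(f"Unsupported dataset type: {type(dataset)}")

In the generated estimator the builder call also receives input_cols, label_cols, sample_weight_col, autogenerated, and subproject, as the hunk above shows.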
@@ -499,11 +495,6 @@ class Lars(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def predict(self, dataset: Union[DataFrame, pd.DataFrame]) -> Union[DataFrame, pd.DataFrame]:
         """Predict using the linear model
         For more details on this function, see [sklearn.linear_model.Lars.predict]
@@ -557,11 +548,6 @@ class Lars(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def transform(self, dataset: Union[DataFrame, pd.DataFrame]) -> Union[DataFrame, pd.DataFrame]:
         """Method not supported for this class.

@@ -618,7 +604,8 @@ class Lars(BaseTransformer):
         if False:
             self.fit(dataset)
             assert self._sklearn_object is not None
-
+            labels : npt.NDArray[Any] = self._sklearn_object.labels_
+            return labels
         else:
             raise NotImplementedError

@@ -654,6 +641,7 @@ class Lars(BaseTransformer):
             output_cols = []

         # Make sure column names are valid snowflake identifiers.
+        assert output_cols is not None  # Make MyPy happy
         rv = [identifier.rename_to_valid_snowflake_identifier(c) for c in output_cols]

         return rv
@@ -664,11 +652,6 @@ class Lars(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def predict_proba(
         self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "predict_proba_"
     ) -> Union[DataFrame, pd.DataFrame]:
@@ -709,11 +692,6 @@ class Lars(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def predict_log_proba(
         self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "predict_log_proba_"
     ) -> Union[DataFrame, pd.DataFrame]:
@@ -750,16 +728,6 @@ class Lars(BaseTransformer):
         return output_df

     @available_if(original_estimator_has_callable("decision_function")) # type: ignore[misc]
-    @telemetry.send_api_usage_telemetry(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def decision_function(
         self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "decision_function_"
     ) -> Union[DataFrame, pd.DataFrame]:
@@ -860,11 +828,6 @@ class Lars(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def kneighbors(
         self,
         dataset: Union[DataFrame, pd.DataFrame],
@@ -924,18 +887,28 @@ class Lars(BaseTransformer):
             # For classifier, the type of predict is the same as the type of label
             if self._sklearn_object._estimator_type == 'classifier':
                 # label columns is the desired type for output
-                outputs = _infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True)
+                outputs = list(_infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True))
                 # rename the output columns
-                outputs = model_signature_utils.rename_features(outputs, self.output_cols)
+                outputs = list(model_signature_utils.rename_features(outputs, self.output_cols))
                 self._model_signature_dict["predict"] = ModelSignature(inputs,
                                                                        ([] if self._drop_input_cols else inputs)
                                                                        + outputs)
+            # For mixture models that use the density mixin, `predict` returns the argmax of the log prob.
+            # For outlier models, returns -1 for outliers and 1 for inliers.
+            # Clusterer returns int64 cluster labels.
+            elif self._sklearn_object._estimator_type in ["DensityEstimator", "clusterer", "outlier_detector"]:
+                outputs = [FeatureSpec(dtype=DataType.INT64, name=c) for c in self.output_cols]
+                self._model_signature_dict["predict"] = ModelSignature(inputs,
+                                                                       ([] if self._drop_input_cols else inputs)
+                                                                       + outputs)
+
             # For regressor, the type of predict is float64
             elif self._sklearn_object._estimator_type == 'regressor':
                 outputs = [FeatureSpec(dtype=DataType.DOUBLE, name=c) for c in self.output_cols]
                 self._model_signature_dict["predict"] = ModelSignature(inputs,
                                                                        ([] if self._drop_input_cols else inputs)
                                                                        + outputs)
+
             for prob_func in PROB_FUNCTIONS:
                 if hasattr(self, prob_func):
                     output_cols_prefix: str = f"{prob_func}_"