snowflake-ml-python 1.1.1__py3-none-any.whl → 1.1.2__py3-none-any.whl
This diff compares the contents of two publicly available package versions as released to their public registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in that registry.
- snowflake/cortex/_complete.py +1 -1
- snowflake/cortex/_extract_answer.py +1 -1
- snowflake/cortex/_sentiment.py +1 -1
- snowflake/cortex/_summarize.py +1 -1
- snowflake/cortex/_translate.py +1 -1
- snowflake/ml/_internal/env_utils.py +68 -6
- snowflake/ml/_internal/file_utils.py +34 -4
- snowflake/ml/_internal/telemetry.py +79 -91
- snowflake/ml/_internal/utils/retryable_http.py +16 -4
- snowflake/ml/_internal/utils/spcs_attribution_utils.py +122 -0
- snowflake/ml/dataset/dataset.py +1 -1
- snowflake/ml/model/_api.py +21 -14
- snowflake/ml/model/_client/model/model_impl.py +176 -0
- snowflake/ml/model/_client/model/model_method_info.py +19 -0
- snowflake/ml/model/_client/model/model_version_impl.py +291 -0
- snowflake/ml/model/_client/ops/metadata_ops.py +107 -0
- snowflake/ml/model/_client/ops/model_ops.py +308 -0
- snowflake/ml/model/_client/sql/model.py +75 -0
- snowflake/ml/model/_client/sql/model_version.py +213 -0
- snowflake/ml/model/_client/sql/stage.py +40 -0
- snowflake/ml/model/_deploy_client/image_builds/server_image_builder.py +3 -4
- snowflake/ml/model/_deploy_client/image_builds/templates/image_build_job_spec_template +24 -8
- snowflake/ml/model/_deploy_client/image_builds/templates/kaniko_shell_script_template +23 -0
- snowflake/ml/model/_deploy_client/snowservice/deploy.py +14 -2
- snowflake/ml/model/_deploy_client/utils/constants.py +1 -0
- snowflake/ml/model/_deploy_client/warehouse/deploy.py +2 -2
- snowflake/ml/model/_model_composer/model_composer.py +31 -9
- snowflake/ml/model/_model_composer/model_manifest/model_manifest.py +25 -10
- snowflake/ml/model/_model_composer/model_manifest/model_manifest_schema.py +2 -2
- snowflake/ml/model/_model_composer/model_method/infer_function.py_template +2 -1
- snowflake/ml/model/_model_composer/model_method/model_method.py +34 -3
- snowflake/ml/model/_model_composer/model_runtime/model_runtime.py +1 -1
- snowflake/ml/model/_packager/model_handlers/huggingface_pipeline.py +3 -1
- snowflake/ml/model/_packager/model_handlers/snowmlmodel.py +10 -28
- snowflake/ml/model/_packager/model_meta/model_meta.py +18 -16
- snowflake/ml/model/_signatures/snowpark_handler.py +1 -1
- snowflake/ml/model/model_signature.py +108 -53
- snowflake/ml/model/type_hints.py +1 -0
- snowflake/ml/modeling/_internal/distributed_hpo_trainer.py +554 -0
- snowflake/ml/modeling/_internal/estimator_protocols.py +1 -60
- snowflake/ml/modeling/_internal/model_specifications.py +146 -0
- snowflake/ml/modeling/_internal/model_trainer.py +13 -0
- snowflake/ml/modeling/_internal/model_trainer_builder.py +78 -0
- snowflake/ml/modeling/_internal/pandas_trainer.py +54 -0
- snowflake/ml/modeling/_internal/snowpark_handlers.py +6 -760
- snowflake/ml/modeling/_internal/snowpark_trainer.py +331 -0
- snowflake/ml/modeling/calibration/calibrated_classifier_cv.py +96 -124
- snowflake/ml/modeling/cluster/affinity_propagation.py +94 -124
- snowflake/ml/modeling/cluster/agglomerative_clustering.py +94 -124
- snowflake/ml/modeling/cluster/birch.py +94 -124
- snowflake/ml/modeling/cluster/bisecting_k_means.py +94 -124
- snowflake/ml/modeling/cluster/dbscan.py +94 -124
- snowflake/ml/modeling/cluster/feature_agglomeration.py +94 -124
- snowflake/ml/modeling/cluster/k_means.py +93 -124
- snowflake/ml/modeling/cluster/mean_shift.py +94 -124
- snowflake/ml/modeling/cluster/mini_batch_k_means.py +93 -124
- snowflake/ml/modeling/cluster/optics.py +94 -124
- snowflake/ml/modeling/cluster/spectral_biclustering.py +94 -124
- snowflake/ml/modeling/cluster/spectral_clustering.py +94 -124
- snowflake/ml/modeling/cluster/spectral_coclustering.py +94 -124
- snowflake/ml/modeling/compose/column_transformer.py +94 -124
- snowflake/ml/modeling/compose/transformed_target_regressor.py +96 -124
- snowflake/ml/modeling/covariance/elliptic_envelope.py +94 -124
- snowflake/ml/modeling/covariance/empirical_covariance.py +80 -110
- snowflake/ml/modeling/covariance/graphical_lasso.py +94 -124
- snowflake/ml/modeling/covariance/graphical_lasso_cv.py +94 -124
- snowflake/ml/modeling/covariance/ledoit_wolf.py +85 -115
- snowflake/ml/modeling/covariance/min_cov_det.py +94 -124
- snowflake/ml/modeling/covariance/oas.py +80 -110
- snowflake/ml/modeling/covariance/shrunk_covariance.py +84 -114
- snowflake/ml/modeling/decomposition/dictionary_learning.py +94 -124
- snowflake/ml/modeling/decomposition/factor_analysis.py +94 -124
- snowflake/ml/modeling/decomposition/fast_ica.py +94 -124
- snowflake/ml/modeling/decomposition/incremental_pca.py +94 -124
- snowflake/ml/modeling/decomposition/kernel_pca.py +94 -124
- snowflake/ml/modeling/decomposition/mini_batch_dictionary_learning.py +94 -124
- snowflake/ml/modeling/decomposition/mini_batch_sparse_pca.py +94 -124
- snowflake/ml/modeling/decomposition/pca.py +94 -124
- snowflake/ml/modeling/decomposition/sparse_pca.py +94 -124
- snowflake/ml/modeling/decomposition/truncated_svd.py +94 -124
- snowflake/ml/modeling/discriminant_analysis/linear_discriminant_analysis.py +96 -124
- snowflake/ml/modeling/discriminant_analysis/quadratic_discriminant_analysis.py +91 -119
- snowflake/ml/modeling/ensemble/ada_boost_classifier.py +96 -124
- snowflake/ml/modeling/ensemble/ada_boost_regressor.py +96 -124
- snowflake/ml/modeling/ensemble/bagging_classifier.py +96 -124
- snowflake/ml/modeling/ensemble/bagging_regressor.py +96 -124
- snowflake/ml/modeling/ensemble/extra_trees_classifier.py +96 -124
- snowflake/ml/modeling/ensemble/extra_trees_regressor.py +96 -124
- snowflake/ml/modeling/ensemble/gradient_boosting_classifier.py +96 -124
- snowflake/ml/modeling/ensemble/gradient_boosting_regressor.py +96 -124
- snowflake/ml/modeling/ensemble/hist_gradient_boosting_classifier.py +96 -124
- snowflake/ml/modeling/ensemble/hist_gradient_boosting_regressor.py +96 -124
- snowflake/ml/modeling/ensemble/isolation_forest.py +94 -124
- snowflake/ml/modeling/ensemble/random_forest_classifier.py +96 -124
- snowflake/ml/modeling/ensemble/random_forest_regressor.py +96 -124
- snowflake/ml/modeling/ensemble/stacking_regressor.py +96 -124
- snowflake/ml/modeling/ensemble/voting_classifier.py +96 -124
- snowflake/ml/modeling/ensemble/voting_regressor.py +91 -119
- snowflake/ml/modeling/feature_selection/generic_univariate_select.py +82 -110
- snowflake/ml/modeling/feature_selection/select_fdr.py +80 -108
- snowflake/ml/modeling/feature_selection/select_fpr.py +80 -108
- snowflake/ml/modeling/feature_selection/select_fwe.py +80 -108
- snowflake/ml/modeling/feature_selection/select_k_best.py +81 -109
- snowflake/ml/modeling/feature_selection/select_percentile.py +80 -108
- snowflake/ml/modeling/feature_selection/sequential_feature_selector.py +94 -124
- snowflake/ml/modeling/feature_selection/variance_threshold.py +76 -106
- snowflake/ml/modeling/framework/base.py +2 -2
- snowflake/ml/modeling/gaussian_process/gaussian_process_classifier.py +96 -124
- snowflake/ml/modeling/gaussian_process/gaussian_process_regressor.py +96 -124
- snowflake/ml/modeling/impute/iterative_imputer.py +94 -124
- snowflake/ml/modeling/impute/knn_imputer.py +94 -124
- snowflake/ml/modeling/impute/missing_indicator.py +94 -124
- snowflake/ml/modeling/impute/simple_imputer.py +1 -1
- snowflake/ml/modeling/kernel_approximation/additive_chi2_sampler.py +77 -107
- snowflake/ml/modeling/kernel_approximation/nystroem.py +94 -124
- snowflake/ml/modeling/kernel_approximation/polynomial_count_sketch.py +94 -124
- snowflake/ml/modeling/kernel_approximation/rbf_sampler.py +86 -116
- snowflake/ml/modeling/kernel_approximation/skewed_chi2_sampler.py +84 -114
- snowflake/ml/modeling/kernel_ridge/kernel_ridge.py +96 -124
- snowflake/ml/modeling/lightgbm/lgbm_classifier.py +71 -100
- snowflake/ml/modeling/lightgbm/lgbm_regressor.py +71 -100
- snowflake/ml/modeling/linear_model/ard_regression.py +96 -124
- snowflake/ml/modeling/linear_model/bayesian_ridge.py +96 -124
- snowflake/ml/modeling/linear_model/elastic_net.py +96 -124
- snowflake/ml/modeling/linear_model/elastic_net_cv.py +96 -124
- snowflake/ml/modeling/linear_model/gamma_regressor.py +96 -124
- snowflake/ml/modeling/linear_model/huber_regressor.py +96 -124
- snowflake/ml/modeling/linear_model/lars.py +96 -124
- snowflake/ml/modeling/linear_model/lars_cv.py +96 -124
- snowflake/ml/modeling/linear_model/lasso.py +96 -124
- snowflake/ml/modeling/linear_model/lasso_cv.py +96 -124
- snowflake/ml/modeling/linear_model/lasso_lars.py +96 -124
- snowflake/ml/modeling/linear_model/lasso_lars_cv.py +96 -124
- snowflake/ml/modeling/linear_model/lasso_lars_ic.py +96 -124
- snowflake/ml/modeling/linear_model/linear_regression.py +91 -119
- snowflake/ml/modeling/linear_model/logistic_regression.py +96 -124
- snowflake/ml/modeling/linear_model/logistic_regression_cv.py +96 -124
- snowflake/ml/modeling/linear_model/multi_task_elastic_net.py +96 -124
- snowflake/ml/modeling/linear_model/multi_task_elastic_net_cv.py +96 -124
- snowflake/ml/modeling/linear_model/multi_task_lasso.py +96 -124
- snowflake/ml/modeling/linear_model/multi_task_lasso_cv.py +96 -124
- snowflake/ml/modeling/linear_model/orthogonal_matching_pursuit.py +96 -124
- snowflake/ml/modeling/linear_model/passive_aggressive_classifier.py +96 -124
- snowflake/ml/modeling/linear_model/passive_aggressive_regressor.py +95 -124
- snowflake/ml/modeling/linear_model/perceptron.py +95 -124
- snowflake/ml/modeling/linear_model/poisson_regressor.py +96 -124
- snowflake/ml/modeling/linear_model/ransac_regressor.py +96 -124
- snowflake/ml/modeling/linear_model/ridge.py +96 -124
- snowflake/ml/modeling/linear_model/ridge_classifier.py +96 -124
- snowflake/ml/modeling/linear_model/ridge_classifier_cv.py +96 -124
- snowflake/ml/modeling/linear_model/ridge_cv.py +96 -124
- snowflake/ml/modeling/linear_model/sgd_classifier.py +96 -124
- snowflake/ml/modeling/linear_model/sgd_one_class_svm.py +94 -124
- snowflake/ml/modeling/linear_model/sgd_regressor.py +96 -124
- snowflake/ml/modeling/linear_model/theil_sen_regressor.py +96 -124
- snowflake/ml/modeling/linear_model/tweedie_regressor.py +96 -124
- snowflake/ml/modeling/manifold/isomap.py +94 -124
- snowflake/ml/modeling/manifold/mds.py +94 -124
- snowflake/ml/modeling/manifold/spectral_embedding.py +94 -124
- snowflake/ml/modeling/manifold/tsne.py +94 -124
- snowflake/ml/modeling/metrics/classification.py +187 -52
- snowflake/ml/modeling/metrics/correlation.py +4 -2
- snowflake/ml/modeling/metrics/covariance.py +7 -4
- snowflake/ml/modeling/metrics/ranking.py +32 -16
- snowflake/ml/modeling/metrics/regression.py +60 -32
- snowflake/ml/modeling/mixture/bayesian_gaussian_mixture.py +94 -124
- snowflake/ml/modeling/mixture/gaussian_mixture.py +94 -124
- snowflake/ml/modeling/model_selection/grid_search_cv.py +88 -138
- snowflake/ml/modeling/model_selection/randomized_search_cv.py +90 -144
- snowflake/ml/modeling/multiclass/one_vs_one_classifier.py +86 -114
- snowflake/ml/modeling/multiclass/one_vs_rest_classifier.py +93 -121
- snowflake/ml/modeling/multiclass/output_code_classifier.py +94 -122
- snowflake/ml/modeling/naive_bayes/bernoulli_nb.py +92 -120
- snowflake/ml/modeling/naive_bayes/categorical_nb.py +96 -124
- snowflake/ml/modeling/naive_bayes/complement_nb.py +92 -120
- snowflake/ml/modeling/naive_bayes/gaussian_nb.py +79 -107
- snowflake/ml/modeling/naive_bayes/multinomial_nb.py +88 -116
- snowflake/ml/modeling/neighbors/k_neighbors_classifier.py +96 -124
- snowflake/ml/modeling/neighbors/k_neighbors_regressor.py +96 -124
- snowflake/ml/modeling/neighbors/kernel_density.py +94 -124
- snowflake/ml/modeling/neighbors/local_outlier_factor.py +94 -124
- snowflake/ml/modeling/neighbors/nearest_centroid.py +89 -117
- snowflake/ml/modeling/neighbors/nearest_neighbors.py +94 -124
- snowflake/ml/modeling/neighbors/neighborhood_components_analysis.py +96 -124
- snowflake/ml/modeling/neighbors/radius_neighbors_classifier.py +96 -124
- snowflake/ml/modeling/neighbors/radius_neighbors_regressor.py +96 -124
- snowflake/ml/modeling/neural_network/bernoulli_rbm.py +94 -124
- snowflake/ml/modeling/neural_network/mlp_classifier.py +96 -124
- snowflake/ml/modeling/neural_network/mlp_regressor.py +96 -124
- snowflake/ml/modeling/parameters/disable_distributed_hpo.py +2 -6
- snowflake/ml/modeling/preprocessing/binarizer.py +14 -9
- snowflake/ml/modeling/preprocessing/k_bins_discretizer.py +0 -4
- snowflake/ml/modeling/preprocessing/label_encoder.py +21 -13
- snowflake/ml/modeling/preprocessing/max_abs_scaler.py +20 -14
- snowflake/ml/modeling/preprocessing/min_max_scaler.py +35 -19
- snowflake/ml/modeling/preprocessing/normalizer.py +6 -9
- snowflake/ml/modeling/preprocessing/one_hot_encoder.py +20 -13
- snowflake/ml/modeling/preprocessing/ordinal_encoder.py +25 -13
- snowflake/ml/modeling/preprocessing/polynomial_features.py +94 -124
- snowflake/ml/modeling/preprocessing/robust_scaler.py +28 -14
- snowflake/ml/modeling/preprocessing/standard_scaler.py +25 -13
- snowflake/ml/modeling/semi_supervised/label_propagation.py +96 -124
- snowflake/ml/modeling/semi_supervised/label_spreading.py +96 -124
- snowflake/ml/modeling/svm/linear_svc.py +96 -124
- snowflake/ml/modeling/svm/linear_svr.py +96 -124
- snowflake/ml/modeling/svm/nu_svc.py +96 -124
- snowflake/ml/modeling/svm/nu_svr.py +96 -124
- snowflake/ml/modeling/svm/svc.py +96 -124
- snowflake/ml/modeling/svm/svr.py +96 -124
- snowflake/ml/modeling/tree/decision_tree_classifier.py +96 -124
- snowflake/ml/modeling/tree/decision_tree_regressor.py +96 -124
- snowflake/ml/modeling/tree/extra_tree_classifier.py +96 -124
- snowflake/ml/modeling/tree/extra_tree_regressor.py +96 -124
- snowflake/ml/modeling/xgboost/xgb_classifier.py +96 -125
- snowflake/ml/modeling/xgboost/xgb_regressor.py +96 -125
- snowflake/ml/modeling/xgboost/xgbrf_classifier.py +96 -125
- snowflake/ml/modeling/xgboost/xgbrf_regressor.py +96 -125
- snowflake/ml/registry/model_registry.py +2 -0
- snowflake/ml/registry/registry.py +215 -0
- snowflake/ml/version.py +1 -1
- {snowflake_ml_python-1.1.1.dist-info → snowflake_ml_python-1.1.2.dist-info}/METADATA +21 -3
- snowflake_ml_python-1.1.2.dist-info/RECORD +347 -0
- snowflake_ml_python-1.1.1.dist-info/RECORD +0 -331
- {snowflake_ml_python-1.1.1.dist-info → snowflake_ml_python-1.1.2.dist-info}/WHEEL +0 -0
snowflake/ml/modeling/covariance/empirical_covariance.py

@@ -22,17 +22,19 @@ from sklearn.utils.metaestimators import available_if
 from snowflake.ml.modeling.framework.base import BaseTransformer, _process_cols
 from snowflake.ml._internal import telemetry
 from snowflake.ml._internal.exceptions import error_codes, exceptions, modeling_error_messages
+from snowflake.ml._internal.env_utils import SNOWML_SPROC_ENV
 from snowflake.ml._internal.utils import pkg_version_utils, identifier
-from snowflake.snowpark import DataFrame
+from snowflake.snowpark import DataFrame, Session
 from snowflake.snowpark._internal.type_utils import convert_sp_to_sf_type
 from snowflake.ml.modeling._internal.snowpark_handlers import SnowparkHandlers as HandlersImpl
+from snowflake.ml.modeling._internal.model_trainer_builder import ModelTrainerBuilder
+from snowflake.ml.modeling._internal.model_trainer import ModelTrainer
 from snowflake.ml.modeling._internal.estimator_utils import (
     gather_dependencies,
     original_estimator_has_callable,
     transform_snowml_obj_to_sklearn_obj,
     validate_sklearn_args,
 )
-from snowflake.ml.modeling._internal.snowpark_handlers import SklearnWrapperProvider
 from snowflake.ml.modeling._internal.estimator_protocols import FitPredictHandlers

 from snowflake.ml.model.model_signature import (
@@ -52,7 +54,6 @@ _PROJECT = "ModelDevelopment"
 _SUBPROJECT = "".join([s.capitalize() for s in "sklearn.covariance".replace("sklearn.", "").split("_")])


-
 class EmpiricalCovariance(BaseTransformer):
     r"""Maximum likelihood covariance estimator
     For more details on this class, see [sklearn.covariance.EmpiricalCovariance]
@@ -60,50 +61,57 @@ class EmpiricalCovariance(BaseTransformer):

     Parameters
     ----------
-    store_precision: bool, default=True
-        Specifies if the estimated precision is stored.
-
-    assume_centered: bool, default=False
-        If True, data are not centered before computation.
-        Useful when working with data whose mean is almost, but not exactly
-        zero.
-        If False (default), data are centered before computation.

     input_cols: Optional[Union[str, List[str]]]
         A string or list of strings representing column names that contain features.
         If this parameter is not specified, all columns in the input DataFrame except
         the columns specified by label_cols, sample_weight_col, and passthrough_cols
-        parameters are considered input columns.
-
+        parameters are considered input columns. Input columns can also be set after
+        initialization with the `set_input_cols` method.
+
     label_cols: Optional[Union[str, List[str]]]
-
-
-        columns. If this parameter is not specified, then object is fitted without
-        labels (like a transformer).
-
+        This parameter is optional and will be ignored during fit. It is present here for API consistency by convention.
+
     output_cols: Optional[Union[str, List[str]]]
         A string or list of strings representing column names that will store the
         output of predict and transform operations. The length of output_cols must
-        match the expected number of output columns from the specific
+        match the expected number of output columns from the specific predictor or
         transformer class used.
-        If this parameter
-
-
-        be set explicitly for transformers.
+        If you omit this parameter, output column names are derived by adding an
+        OUTPUT_ prefix to the label column names for supervised estimators, or
+        OUTPUT_<IDX>for unsupervised estimators. These inferred output column names
+        work for predictors, but output_cols must be set explicitly for transformers.
+        In general, explicitly specifying output column names is clearer, especially
+        if you don’t specify the input column names.
+        To transform in place, pass the same names for input_cols and output_cols.
+        be set explicitly for transformers. Output columns can also be set after
+        initialization with the `set_output_cols` method.

     sample_weight_col: Optional[str]
         A string representing the column name containing the sample weights.
-        This argument is only required when working with weighted datasets.
+        This argument is only required when working with weighted datasets. Sample
+        weight column can also be set after initialization with the
+        `set_sample_weight_col` method.

     passthrough_cols: Optional[Union[str, List[str]]]
         A string or a list of strings indicating column names to be excluded from any
         operations (such as train, transform, or inference). These specified column(s)
         will remain untouched throughout the process. This option is helpful in scenarios
         requiring automatic input_cols inference, but need to avoid using specific
-        columns, like index columns, during training or inference.
+        columns, like index columns, during training or inference. Passthrough columns
+        can also be set after initialization with the `set_passthrough_cols` method.

     drop_input_cols: Optional[bool], default=False
         If set, the response of predict(), transform() methods will not contain input columns.
+
+    store_precision: bool, default=True
+        Specifies if the estimated precision is stored.
+
+    assume_centered: bool, default=False
+        If True, data are not centered before computation.
+        Useful when working with data whose mean is almost, but not exactly
+        zero.
+        If False (default), data are centered before computation.
     """

     def __init__(  # type: ignore[no-untyped-def]
@@ -126,7 +134,7 @@ class EmpiricalCovariance(BaseTransformer):
         self.set_passthrough_cols(passthrough_cols)
         self.set_drop_input_cols(drop_input_cols)
         self.set_sample_weight_col(sample_weight_col)
-        deps = set(
+        deps: Set[str] = set([f'numpy=={np.__version__}', f'scikit-learn=={sklearn.__version__}', f'cloudpickle=={cp.__version__}'])

         self._deps = list(deps)

@@ -136,13 +144,14 @@ class EmpiricalCovariance(BaseTransformer):
             args=init_args,
             klass=sklearn.covariance.EmpiricalCovariance
         )
-        self._sklearn_object = sklearn.covariance.EmpiricalCovariance(
+        self._sklearn_object: Any = sklearn.covariance.EmpiricalCovariance(
             **cleaned_up_init_args,
         )
         self._model_signature_dict: Optional[Dict[str, ModelSignature]] = None
         # If user used snowpark dataframe during fit, here it stores the snowpark input_cols, otherwise the processed input_cols
         self._snowpark_cols: Optional[List[str]] = self.input_cols
-        self._handlers: FitPredictHandlers = HandlersImpl(class_name=EmpiricalCovariance.__class__.__name__, subproject=_SUBPROJECT, autogenerated=True
+        self._handlers: FitPredictHandlers = HandlersImpl(class_name=EmpiricalCovariance.__class__.__name__, subproject=_SUBPROJECT, autogenerated=True)
+        self._autogenerated = True

     def _get_rand_id(self) -> str:
         """
@@ -198,54 +207,48 @@ class EmpiricalCovariance(BaseTransformer):
             self
         """
         self._infer_input_output_cols(dataset)
-        if isinstance(dataset,
-
-
-
-
-
-        self.
-
-
-
-
-
-
-
-
-
+        if isinstance(dataset, DataFrame):
+            session = dataset._session
+            assert session is not None  # keep mypy happy
+            # Validate that key package version in user workspace are supported in snowflake conda channel
+            # If customer doesn't have package in conda channel, replace the ones have the closest versions
+            self._deps = pkg_version_utils.get_valid_pkg_versions_supported_in_snowflake_conda_channel(
+                pkg_versions=self._get_dependencies(), session=session, subproject=_SUBPROJECT)
+
+            # Specify input columns so column pruning will be enforced
+            selected_cols = self._get_active_columns()
+            if len(selected_cols) > 0:
+                dataset = dataset.select(selected_cols)
+
+            self._snowpark_cols = dataset.select(self.input_cols).columns
+
+            # If we are already in a stored procedure, no need to kick off another one.
+            if SNOWML_SPROC_ENV in os.environ:
+                statement_params = telemetry.get_function_usage_statement_params(
+                    project=_PROJECT,
+                    subproject=_SUBPROJECT,
+                    function_name=telemetry.get_statement_params_full_func_name(inspect.currentframe(), EmpiricalCovariance.__class__.__name__),
+                    api_calls=[Session.call],
+                    custom_tags=dict([("autogen", True)]) if self._autogenerated else None,
+                )
+                pd_df: pd.DataFrame = dataset.to_pandas(statement_params=statement_params)
+                pd_df.columns = dataset.columns
+                dataset = pd_df
+
+        model_trainer = ModelTrainerBuilder.build(
+            estimator=self._sklearn_object,
+            dataset=dataset,
+            input_cols=self.input_cols,
+            label_cols=self.label_cols,
+            sample_weight_col=self.sample_weight_col,
+            autogenerated=self._autogenerated,
+            subproject=_SUBPROJECT
+        )
+        self._sklearn_object = model_trainer.train()
         self._is_fitted = True
         self._get_model_signatures(dataset)
         return self

-    def _fit_snowpark(self, dataset: DataFrame) -> None:
-        session = dataset._session
-        assert session is not None  # keep mypy happy
-        # Validate that key package version in user workspace are supported in snowflake conda channel
-        # If customer doesn't have package in conda channel, replace the ones have the closest versions
-        self._deps = pkg_version_utils.get_valid_pkg_versions_supported_in_snowflake_conda_channel(
-            pkg_versions=self._get_dependencies(), session=session, subproject=_SUBPROJECT)
-
-        # Specify input columns so column pruning will be enforced
-        selected_cols = self._get_active_columns()
-        if len(selected_cols) > 0:
-            dataset = dataset.select(selected_cols)
-
-        estimator = self._sklearn_object
-        assert estimator is not None  # Keep mypy happy
-
-        self._snowpark_cols = dataset.select(self.input_cols).columns
-
-        self._sklearn_object = self._handlers.fit_snowpark(
-            dataset,
-            session,
-            estimator,
-            ["snowflake-snowpark-python"] + self._get_dependencies(),
-            self.input_cols,
-            self.label_cols,
-            self.sample_weight_col,
-        )
-
     def _get_pass_through_columns(self, dataset: DataFrame) -> List[str]:
         if self._drop_input_cols:
             return []
@@ -433,11 +436,6 @@ class EmpiricalCovariance(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def predict(self, dataset: Union[DataFrame, pd.DataFrame]) -> Union[DataFrame, pd.DataFrame]:
         """Method not supported for this class.

@@ -489,11 +487,6 @@ class EmpiricalCovariance(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def transform(self, dataset: Union[DataFrame, pd.DataFrame]) -> Union[DataFrame, pd.DataFrame]:
         """Method not supported for this class.

@@ -550,7 +543,8 @@ class EmpiricalCovariance(BaseTransformer):
         if False:
             self.fit(dataset)
             assert self._sklearn_object is not None
-
+            labels : npt.NDArray[Any] = self._sklearn_object.labels_
+            return labels
         else:
             raise NotImplementedError

@@ -586,6 +580,7 @@ class EmpiricalCovariance(BaseTransformer):
             output_cols = []

         # Make sure column names are valid snowflake identifiers.
+        assert output_cols is not None  # Make MyPy happy
         rv = [identifier.rename_to_valid_snowflake_identifier(c) for c in output_cols]

         return rv
@@ -596,11 +591,6 @@ class EmpiricalCovariance(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def predict_proba(
         self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "predict_proba_"
     ) -> Union[DataFrame, pd.DataFrame]:
@@ -641,11 +631,6 @@ class EmpiricalCovariance(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def predict_log_proba(
         self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "predict_log_proba_"
     ) -> Union[DataFrame, pd.DataFrame]:
@@ -682,16 +667,6 @@ class EmpiricalCovariance(BaseTransformer):
         return output_df

     @available_if(original_estimator_has_callable("decision_function"))  # type: ignore[misc]
-    @telemetry.send_api_usage_telemetry(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def decision_function(
         self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "decision_function_"
     ) -> Union[DataFrame, pd.DataFrame]:
@@ -792,11 +767,6 @@ class EmpiricalCovariance(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def kneighbors(
         self,
         dataset: Union[DataFrame, pd.DataFrame],
@@ -856,9 +826,9 @@ class EmpiricalCovariance(BaseTransformer):
         # For classifier, the type of predict is the same as the type of label
         if self._sklearn_object._estimator_type == 'classifier':
             # label columns is the desired type for output
-            outputs = _infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True)
+            outputs = list(_infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True))
             # rename the output columns
-            outputs = model_signature_utils.rename_features(outputs, self.output_cols)
+            outputs = list(model_signature_utils.rename_features(outputs, self.output_cols))
             self._model_signature_dict["predict"] = ModelSignature(inputs,
                                                 ([] if self._drop_input_cols else inputs)
                                                 + outputs)
snowflake/ml/modeling/covariance/graphical_lasso.py

@@ -22,17 +22,19 @@ from sklearn.utils.metaestimators import available_if
 from snowflake.ml.modeling.framework.base import BaseTransformer, _process_cols
 from snowflake.ml._internal import telemetry
 from snowflake.ml._internal.exceptions import error_codes, exceptions, modeling_error_messages
+from snowflake.ml._internal.env_utils import SNOWML_SPROC_ENV
 from snowflake.ml._internal.utils import pkg_version_utils, identifier
-from snowflake.snowpark import DataFrame
+from snowflake.snowpark import DataFrame, Session
 from snowflake.snowpark._internal.type_utils import convert_sp_to_sf_type
 from snowflake.ml.modeling._internal.snowpark_handlers import SnowparkHandlers as HandlersImpl
+from snowflake.ml.modeling._internal.model_trainer_builder import ModelTrainerBuilder
+from snowflake.ml.modeling._internal.model_trainer import ModelTrainer
 from snowflake.ml.modeling._internal.estimator_utils import (
     gather_dependencies,
     original_estimator_has_callable,
     transform_snowml_obj_to_sklearn_obj,
     validate_sklearn_args,
 )
-from snowflake.ml.modeling._internal.snowpark_handlers import SklearnWrapperProvider
 from snowflake.ml.modeling._internal.estimator_protocols import FitPredictHandlers

 from snowflake.ml.model.model_signature import (
@@ -52,7 +54,6 @@ _PROJECT = "ModelDevelopment"
 _SUBPROJECT = "".join([s.capitalize() for s in "sklearn.covariance".replace("sklearn.", "").split("_")])


-
 class GraphicalLasso(BaseTransformer):
     r"""Sparse inverse covariance estimation with an l1-penalized estimator
     For more details on this class, see [sklearn.covariance.GraphicalLasso]
@@ -60,6 +61,49 @@ class GraphicalLasso(BaseTransformer):

     Parameters
     ----------
+
+    input_cols: Optional[Union[str, List[str]]]
+        A string or list of strings representing column names that contain features.
+        If this parameter is not specified, all columns in the input DataFrame except
+        the columns specified by label_cols, sample_weight_col, and passthrough_cols
+        parameters are considered input columns. Input columns can also be set after
+        initialization with the `set_input_cols` method.
+
+    label_cols: Optional[Union[str, List[str]]]
+        This parameter is optional and will be ignored during fit. It is present here for API consistency by convention.
+
+    output_cols: Optional[Union[str, List[str]]]
+        A string or list of strings representing column names that will store the
+        output of predict and transform operations. The length of output_cols must
+        match the expected number of output columns from the specific predictor or
+        transformer class used.
+        If you omit this parameter, output column names are derived by adding an
+        OUTPUT_ prefix to the label column names for supervised estimators, or
+        OUTPUT_<IDX>for unsupervised estimators. These inferred output column names
+        work for predictors, but output_cols must be set explicitly for transformers.
+        In general, explicitly specifying output column names is clearer, especially
+        if you don’t specify the input column names.
+        To transform in place, pass the same names for input_cols and output_cols.
+        be set explicitly for transformers. Output columns can also be set after
+        initialization with the `set_output_cols` method.
+
+    sample_weight_col: Optional[str]
+        A string representing the column name containing the sample weights.
+        This argument is only required when working with weighted datasets. Sample
+        weight column can also be set after initialization with the
+        `set_sample_weight_col` method.
+
+    passthrough_cols: Optional[Union[str, List[str]]]
+        A string or a list of strings indicating column names to be excluded from any
+        operations (such as train, transform, or inference). These specified column(s)
+        will remain untouched throughout the process. This option is helpful in scenarios
+        requiring automatic input_cols inference, but need to avoid using specific
+        columns, like index columns, during training or inference. Passthrough columns
+        can also be set after initialization with the `set_passthrough_cols` method.
+
+    drop_input_cols: Optional[bool], default=False
+        If set, the response of predict(), transform() methods will not contain input columns.
+
     alpha: float, default=0.01
         The regularization parameter: the higher alpha, the more
         regularization, the sparser the inverse covariance.
@@ -102,42 +146,6 @@ class GraphicalLasso(BaseTransformer):
         Useful when working with data whose mean is almost, but not exactly
         zero.
         If False, data are centered before computation.
-
-    input_cols: Optional[Union[str, List[str]]]
-        A string or list of strings representing column names that contain features.
-        If this parameter is not specified, all columns in the input DataFrame except
-        the columns specified by label_cols, sample_weight_col, and passthrough_cols
-        parameters are considered input columns.
-
-    label_cols: Optional[Union[str, List[str]]]
-        A string or list of strings representing column names that contain labels.
-        This is a required param for estimators, as there is no way to infer these
-        columns. If this parameter is not specified, then object is fitted without
-        labels (like a transformer).
-
-    output_cols: Optional[Union[str, List[str]]]
-        A string or list of strings representing column names that will store the
-        output of predict and transform operations. The length of output_cols must
-        match the expected number of output columns from the specific estimator or
-        transformer class used.
-        If this parameter is not specified, output column names are derived by
-        adding an OUTPUT_ prefix to the label column names. These inferred output
-        column names work for estimator's predict() method, but output_cols must
-        be set explicitly for transformers.
-
-    sample_weight_col: Optional[str]
-        A string representing the column name containing the sample weights.
-        This argument is only required when working with weighted datasets.
-
-    passthrough_cols: Optional[Union[str, List[str]]]
-        A string or a list of strings indicating column names to be excluded from any
-        operations (such as train, transform, or inference). These specified column(s)
-        will remain untouched throughout the process. This option is helpful in scenarios
-        requiring automatic input_cols inference, but need to avoid using specific
-        columns, like index columns, during training or inference.
-
-    drop_input_cols: Optional[bool], default=False
-        If set, the response of predict(), transform() methods will not contain input columns.
     """

     def __init__(  # type: ignore[no-untyped-def]
@@ -167,7 +175,7 @@ class GraphicalLasso(BaseTransformer):
         self.set_passthrough_cols(passthrough_cols)
         self.set_drop_input_cols(drop_input_cols)
         self.set_sample_weight_col(sample_weight_col)
-        deps = set(
+        deps: Set[str] = set([f'numpy=={np.__version__}', f'scikit-learn=={sklearn.__version__}', f'cloudpickle=={cp.__version__}'])

         self._deps = list(deps)

@@ -184,13 +192,14 @@ class GraphicalLasso(BaseTransformer):
             args=init_args,
             klass=sklearn.covariance.GraphicalLasso
         )
-        self._sklearn_object = sklearn.covariance.GraphicalLasso(
+        self._sklearn_object: Any = sklearn.covariance.GraphicalLasso(
             **cleaned_up_init_args,
         )
         self._model_signature_dict: Optional[Dict[str, ModelSignature]] = None
         # If user used snowpark dataframe during fit, here it stores the snowpark input_cols, otherwise the processed input_cols
         self._snowpark_cols: Optional[List[str]] = self.input_cols
-        self._handlers: FitPredictHandlers = HandlersImpl(class_name=GraphicalLasso.__class__.__name__, subproject=_SUBPROJECT, autogenerated=True
+        self._handlers: FitPredictHandlers = HandlersImpl(class_name=GraphicalLasso.__class__.__name__, subproject=_SUBPROJECT, autogenerated=True)
+        self._autogenerated = True

     def _get_rand_id(self) -> str:
         """
@@ -246,54 +255,48 @@ class GraphicalLasso(BaseTransformer):
             self
         """
         self._infer_input_output_cols(dataset)
-        if isinstance(dataset,
-
-
-
-
-
-        self.
-
-
-
-
-
-
-
-
-
+        if isinstance(dataset, DataFrame):
+            session = dataset._session
+            assert session is not None  # keep mypy happy
+            # Validate that key package version in user workspace are supported in snowflake conda channel
+            # If customer doesn't have package in conda channel, replace the ones have the closest versions
+            self._deps = pkg_version_utils.get_valid_pkg_versions_supported_in_snowflake_conda_channel(
+                pkg_versions=self._get_dependencies(), session=session, subproject=_SUBPROJECT)
+
+            # Specify input columns so column pruning will be enforced
+            selected_cols = self._get_active_columns()
+            if len(selected_cols) > 0:
+                dataset = dataset.select(selected_cols)
+
+            self._snowpark_cols = dataset.select(self.input_cols).columns
+
+            # If we are already in a stored procedure, no need to kick off another one.
+            if SNOWML_SPROC_ENV in os.environ:
+                statement_params = telemetry.get_function_usage_statement_params(
+                    project=_PROJECT,
+                    subproject=_SUBPROJECT,
+                    function_name=telemetry.get_statement_params_full_func_name(inspect.currentframe(), GraphicalLasso.__class__.__name__),
+                    api_calls=[Session.call],
+                    custom_tags=dict([("autogen", True)]) if self._autogenerated else None,
+                )
+                pd_df: pd.DataFrame = dataset.to_pandas(statement_params=statement_params)
+                pd_df.columns = dataset.columns
+                dataset = pd_df
+
+        model_trainer = ModelTrainerBuilder.build(
+            estimator=self._sklearn_object,
+            dataset=dataset,
+            input_cols=self.input_cols,
+            label_cols=self.label_cols,
+            sample_weight_col=self.sample_weight_col,
+            autogenerated=self._autogenerated,
+            subproject=_SUBPROJECT
+        )
+        self._sklearn_object = model_trainer.train()
         self._is_fitted = True
         self._get_model_signatures(dataset)
         return self

-    def _fit_snowpark(self, dataset: DataFrame) -> None:
-        session = dataset._session
-        assert session is not None  # keep mypy happy
-        # Validate that key package version in user workspace are supported in snowflake conda channel
-        # If customer doesn't have package in conda channel, replace the ones have the closest versions
-        self._deps = pkg_version_utils.get_valid_pkg_versions_supported_in_snowflake_conda_channel(
-            pkg_versions=self._get_dependencies(), session=session, subproject=_SUBPROJECT)
-
-        # Specify input columns so column pruning will be enforced
-        selected_cols = self._get_active_columns()
-        if len(selected_cols) > 0:
-            dataset = dataset.select(selected_cols)
-
-        estimator = self._sklearn_object
-        assert estimator is not None  # Keep mypy happy
-
-        self._snowpark_cols = dataset.select(self.input_cols).columns
-
-        self._sklearn_object = self._handlers.fit_snowpark(
-            dataset,
-            session,
-            estimator,
-            ["snowflake-snowpark-python"] + self._get_dependencies(),
-            self.input_cols,
-            self.label_cols,
-            self.sample_weight_col,
-        )
-
     def _get_pass_through_columns(self, dataset: DataFrame) -> List[str]:
         if self._drop_input_cols:
             return []
@@ -481,11 +484,6 @@ class GraphicalLasso(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def predict(self, dataset: Union[DataFrame, pd.DataFrame]) -> Union[DataFrame, pd.DataFrame]:
         """Method not supported for this class.

@@ -537,11 +535,6 @@ class GraphicalLasso(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def transform(self, dataset: Union[DataFrame, pd.DataFrame]) -> Union[DataFrame, pd.DataFrame]:
         """Method not supported for this class.

@@ -598,7 +591,8 @@ class GraphicalLasso(BaseTransformer):
         if False:
             self.fit(dataset)
             assert self._sklearn_object is not None
-
+            labels : npt.NDArray[Any] = self._sklearn_object.labels_
+            return labels
         else:
             raise NotImplementedError

@@ -634,6 +628,7 @@ class GraphicalLasso(BaseTransformer):
             output_cols = []

         # Make sure column names are valid snowflake identifiers.
+        assert output_cols is not None  # Make MyPy happy
         rv = [identifier.rename_to_valid_snowflake_identifier(c) for c in output_cols]

         return rv
@@ -644,11 +639,6 @@ class GraphicalLasso(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def predict_proba(
         self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "predict_proba_"
     ) -> Union[DataFrame, pd.DataFrame]:
@@ -689,11 +679,6 @@ class GraphicalLasso(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def predict_log_proba(
         self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "predict_log_proba_"
     ) -> Union[DataFrame, pd.DataFrame]:
@@ -730,16 +715,6 @@ class GraphicalLasso(BaseTransformer):
         return output_df

     @available_if(original_estimator_has_callable("decision_function"))  # type: ignore[misc]
-    @telemetry.send_api_usage_telemetry(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def decision_function(
         self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "decision_function_"
     ) -> Union[DataFrame, pd.DataFrame]:
@@ -840,11 +815,6 @@ class GraphicalLasso(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def kneighbors(
         self,
         dataset: Union[DataFrame, pd.DataFrame],
@@ -904,9 +874,9 @@ class GraphicalLasso(BaseTransformer):
         # For classifier, the type of predict is the same as the type of label
         if self._sklearn_object._estimator_type == 'classifier':
             # label columns is the desired type for output
-            outputs = _infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True)
+            outputs = list(_infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True))
             # rename the output columns
-            outputs = model_signature_utils.rename_features(outputs, self.output_cols)
+            outputs = list(model_signature_utils.rename_features(outputs, self.output_cols))
             self._model_signature_dict["predict"] = ModelSignature(inputs,
                                                 ([] if self._drop_input_cols else inputs)
                                                 + outputs)
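
For context on the fit() rewrite shown in both file diffs above: the removed per-class _fit_snowpark helpers are replaced by a single dispatch through the new ModelTrainerBuilder / ModelTrainer layer (added in this release as modeling/_internal/model_trainer.py, model_trainer_builder.py, pandas_trainer.py, and snowpark_trainer.py, per the file list). The sketch below is only an illustration of that dispatch pattern, not the package's implementation; the PandasModelTrainer name, constructor signature, and fit logic here are assumptions inferred from the build(...) call site and the new file names.

```python
# Illustrative sketch only -- not the snowflake-ml-python source.
# It mirrors the call site in the diff: ModelTrainerBuilder.build(...) returns a
# ModelTrainer whose train() produces the fitted sklearn estimator.
from typing import Any, List, Optional, Protocol

import pandas as pd


class ModelTrainer(Protocol):
    """Interface implied by the call site: train() returns the fitted estimator."""

    def train(self) -> Any:
        ...


class PandasModelTrainer:
    """Hypothetical local trainer: fits the estimator on an in-memory DataFrame."""

    def __init__(
        self,
        estimator: Any,
        dataset: pd.DataFrame,
        input_cols: List[str],
        label_cols: List[str],
        sample_weight_col: Optional[str],
    ) -> None:
        self.estimator = estimator
        self.dataset = dataset
        self.input_cols = input_cols
        self.label_cols = label_cols
        self.sample_weight_col = sample_weight_col

    def train(self) -> Any:
        # Unsupervised estimators (like EmpiricalCovariance) pass no labels.
        kwargs = {}
        if self.label_cols:
            kwargs["y"] = self.dataset[self.label_cols].squeeze()
        if self.sample_weight_col:
            kwargs["sample_weight"] = self.dataset[self.sample_weight_col].squeeze()
        return self.estimator.fit(self.dataset[self.input_cols], **kwargs)


class ModelTrainerBuilder:
    """Hypothetical dispatcher mirroring the build(...) call shown in the diff."""

    @classmethod
    def build(
        cls,
        estimator: Any,
        dataset: Any,  # pandas DataFrame here; a Snowpark DataFrame in the real package
        input_cols: List[str],
        label_cols: List[str],
        sample_weight_col: Optional[str],
        autogenerated: bool = False,
        subproject: str = "",
    ) -> ModelTrainer:
        if isinstance(dataset, pd.DataFrame):
            return PandasModelTrainer(
                estimator, dataset, input_cols, label_cols, sample_weight_col
            )
        # A Snowpark DataFrame would instead be routed to a trainer that runs fit()
        # inside a stored procedure (cf. snowpark_trainer.py / distributed_hpo_trainer.py
        # in the file list above).
        raise NotImplementedError("Snowpark and distributed paths are omitted from this sketch.")
```

At the call sites in the diff, fit() simply assigns model_trainer.train() back to self._sklearn_object, so whether training runs locally on pandas or remotely in a Snowflake stored procedure is decided entirely by the builder rather than by each generated estimator class.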