snowflake-ml-python 1.3.1__py3-none-any.whl → 1.4.1__py3-none-any.whl
This diff compares publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
- snowflake/ml/_internal/env_utils.py +11 -1
- snowflake/ml/_internal/human_readable_id/adjectives.txt +128 -0
- snowflake/ml/_internal/human_readable_id/animals.txt +128 -0
- snowflake/ml/_internal/human_readable_id/hrid_generator.py +40 -0
- snowflake/ml/_internal/human_readable_id/hrid_generator_base.py +135 -0
- snowflake/ml/_internal/utils/formatting.py +1 -1
- snowflake/ml/_internal/utils/identifier.py +3 -1
- snowflake/ml/_internal/utils/sql_identifier.py +2 -6
- snowflake/ml/feature_store/feature_store.py +166 -184
- snowflake/ml/feature_store/feature_view.py +12 -24
- snowflake/ml/fileset/sfcfs.py +56 -50
- snowflake/ml/fileset/stage_fs.py +48 -13
- snowflake/ml/model/_client/model/model_version_impl.py +6 -49
- snowflake/ml/model/_client/ops/model_ops.py +78 -29
- snowflake/ml/model/_client/sql/model.py +23 -2
- snowflake/ml/model/_client/sql/model_version.py +22 -1
- snowflake/ml/model/_deploy_client/image_builds/server_image_builder.py +1 -3
- snowflake/ml/model/_deploy_client/snowservice/deploy.py +5 -2
- snowflake/ml/model/_model_composer/model_composer.py +7 -5
- snowflake/ml/model/_model_composer/model_manifest/model_manifest.py +19 -54
- snowflake/ml/model/_model_composer/model_manifest/model_manifest_schema.py +8 -1
- snowflake/ml/model/_model_composer/model_method/infer_table_function.py_template +1 -1
- snowflake/ml/model/_model_composer/model_method/model_method.py +6 -10
- snowflake/ml/model/_packager/model_handlers/catboost.py +206 -0
- snowflake/ml/model/_packager/model_handlers/lightgbm.py +218 -0
- snowflake/ml/model/_packager/model_handlers/sklearn.py +3 -0
- snowflake/ml/model/_packager/model_handlers/snowmlmodel.py +13 -1
- snowflake/ml/model/_packager/model_handlers/xgboost.py +1 -1
- snowflake/ml/model/_packager/model_meta/_core_requirements.py +1 -1
- snowflake/ml/model/_packager/model_meta/model_meta.py +36 -6
- snowflake/ml/model/_packager/model_meta/model_meta_schema.py +20 -1
- snowflake/ml/model/_packager/model_meta_migrator/migrator_plans.py +3 -1
- snowflake/ml/model/_packager/model_packager.py +2 -2
- snowflake/ml/model/{_model_composer/model_runtime/_runtime_requirements.py → _packager/model_runtime/_snowml_inference_alternative_requirements.py} +1 -1
- snowflake/ml/model/_packager/model_runtime/model_runtime.py +137 -0
- snowflake/ml/model/custom_model.py +3 -1
- snowflake/ml/model/type_hints.py +21 -2
- snowflake/ml/modeling/_internal/estimator_utils.py +16 -11
- snowflake/ml/modeling/_internal/local_implementations/pandas_handlers.py +4 -1
- snowflake/ml/modeling/_internal/model_specifications.py +3 -1
- snowflake/ml/modeling/_internal/snowpark_implementations/distributed_hpo_trainer.py +545 -0
- snowflake/ml/modeling/_internal/snowpark_implementations/snowpark_handlers.py +8 -5
- snowflake/ml/modeling/calibration/calibrated_classifier_cv.py +195 -123
- snowflake/ml/modeling/cluster/affinity_propagation.py +195 -123
- snowflake/ml/modeling/cluster/agglomerative_clustering.py +195 -123
- snowflake/ml/modeling/cluster/birch.py +195 -123
- snowflake/ml/modeling/cluster/bisecting_k_means.py +195 -123
- snowflake/ml/modeling/cluster/dbscan.py +195 -123
- snowflake/ml/modeling/cluster/feature_agglomeration.py +195 -123
- snowflake/ml/modeling/cluster/k_means.py +195 -123
- snowflake/ml/modeling/cluster/mean_shift.py +195 -123
- snowflake/ml/modeling/cluster/mini_batch_k_means.py +195 -123
- snowflake/ml/modeling/cluster/optics.py +195 -123
- snowflake/ml/modeling/cluster/spectral_biclustering.py +195 -123
- snowflake/ml/modeling/cluster/spectral_clustering.py +195 -123
- snowflake/ml/modeling/cluster/spectral_coclustering.py +195 -123
- snowflake/ml/modeling/compose/column_transformer.py +195 -123
- snowflake/ml/modeling/compose/transformed_target_regressor.py +195 -123
- snowflake/ml/modeling/covariance/elliptic_envelope.py +195 -123
- snowflake/ml/modeling/covariance/empirical_covariance.py +195 -123
- snowflake/ml/modeling/covariance/graphical_lasso.py +195 -123
- snowflake/ml/modeling/covariance/graphical_lasso_cv.py +195 -123
- snowflake/ml/modeling/covariance/ledoit_wolf.py +195 -123
- snowflake/ml/modeling/covariance/min_cov_det.py +195 -123
- snowflake/ml/modeling/covariance/oas.py +195 -123
- snowflake/ml/modeling/covariance/shrunk_covariance.py +195 -123
- snowflake/ml/modeling/decomposition/dictionary_learning.py +195 -123
- snowflake/ml/modeling/decomposition/factor_analysis.py +195 -123
- snowflake/ml/modeling/decomposition/fast_ica.py +195 -123
- snowflake/ml/modeling/decomposition/incremental_pca.py +195 -123
- snowflake/ml/modeling/decomposition/kernel_pca.py +195 -123
- snowflake/ml/modeling/decomposition/mini_batch_dictionary_learning.py +195 -123
- snowflake/ml/modeling/decomposition/mini_batch_sparse_pca.py +195 -123
- snowflake/ml/modeling/decomposition/pca.py +195 -123
- snowflake/ml/modeling/decomposition/sparse_pca.py +195 -123
- snowflake/ml/modeling/decomposition/truncated_svd.py +195 -123
- snowflake/ml/modeling/discriminant_analysis/linear_discriminant_analysis.py +195 -123
- snowflake/ml/modeling/discriminant_analysis/quadratic_discriminant_analysis.py +195 -123
- snowflake/ml/modeling/ensemble/ada_boost_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/ada_boost_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/bagging_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/bagging_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/extra_trees_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/extra_trees_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/gradient_boosting_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/gradient_boosting_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/hist_gradient_boosting_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/hist_gradient_boosting_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/isolation_forest.py +195 -123
- snowflake/ml/modeling/ensemble/random_forest_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/random_forest_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/stacking_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/voting_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/voting_regressor.py +195 -123
- snowflake/ml/modeling/feature_selection/generic_univariate_select.py +195 -123
- snowflake/ml/modeling/feature_selection/select_fdr.py +195 -123
- snowflake/ml/modeling/feature_selection/select_fpr.py +195 -123
- snowflake/ml/modeling/feature_selection/select_fwe.py +195 -123
- snowflake/ml/modeling/feature_selection/select_k_best.py +195 -123
- snowflake/ml/modeling/feature_selection/select_percentile.py +195 -123
- snowflake/ml/modeling/feature_selection/sequential_feature_selector.py +195 -123
- snowflake/ml/modeling/feature_selection/variance_threshold.py +195 -123
- snowflake/ml/modeling/framework/_utils.py +8 -1
- snowflake/ml/modeling/framework/base.py +24 -6
- snowflake/ml/modeling/gaussian_process/gaussian_process_classifier.py +195 -123
- snowflake/ml/modeling/gaussian_process/gaussian_process_regressor.py +195 -123
- snowflake/ml/modeling/impute/iterative_imputer.py +195 -123
- snowflake/ml/modeling/impute/knn_imputer.py +195 -123
- snowflake/ml/modeling/impute/missing_indicator.py +195 -123
- snowflake/ml/modeling/impute/simple_imputer.py +4 -15
- snowflake/ml/modeling/kernel_approximation/additive_chi2_sampler.py +195 -123
- snowflake/ml/modeling/kernel_approximation/nystroem.py +195 -123
- snowflake/ml/modeling/kernel_approximation/polynomial_count_sketch.py +195 -123
- snowflake/ml/modeling/kernel_approximation/rbf_sampler.py +195 -123
- snowflake/ml/modeling/kernel_approximation/skewed_chi2_sampler.py +195 -123
- snowflake/ml/modeling/kernel_ridge/kernel_ridge.py +195 -123
- snowflake/ml/modeling/lightgbm/lgbm_classifier.py +198 -125
- snowflake/ml/modeling/lightgbm/lgbm_regressor.py +198 -125
- snowflake/ml/modeling/linear_model/ard_regression.py +195 -123
- snowflake/ml/modeling/linear_model/bayesian_ridge.py +195 -123
- snowflake/ml/modeling/linear_model/elastic_net.py +195 -123
- snowflake/ml/modeling/linear_model/elastic_net_cv.py +195 -123
- snowflake/ml/modeling/linear_model/gamma_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/huber_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/lars.py +195 -123
- snowflake/ml/modeling/linear_model/lars_cv.py +195 -123
- snowflake/ml/modeling/linear_model/lasso.py +195 -123
- snowflake/ml/modeling/linear_model/lasso_cv.py +195 -123
- snowflake/ml/modeling/linear_model/lasso_lars.py +195 -123
- snowflake/ml/modeling/linear_model/lasso_lars_cv.py +195 -123
- snowflake/ml/modeling/linear_model/lasso_lars_ic.py +195 -123
- snowflake/ml/modeling/linear_model/linear_regression.py +195 -123
- snowflake/ml/modeling/linear_model/logistic_regression.py +195 -123
- snowflake/ml/modeling/linear_model/logistic_regression_cv.py +195 -123
- snowflake/ml/modeling/linear_model/multi_task_elastic_net.py +195 -123
- snowflake/ml/modeling/linear_model/multi_task_elastic_net_cv.py +195 -123
- snowflake/ml/modeling/linear_model/multi_task_lasso.py +195 -123
- snowflake/ml/modeling/linear_model/multi_task_lasso_cv.py +195 -123
- snowflake/ml/modeling/linear_model/orthogonal_matching_pursuit.py +195 -123
- snowflake/ml/modeling/linear_model/passive_aggressive_classifier.py +195 -123
- snowflake/ml/modeling/linear_model/passive_aggressive_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/perceptron.py +195 -123
- snowflake/ml/modeling/linear_model/poisson_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/ransac_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/ridge.py +195 -123
- snowflake/ml/modeling/linear_model/ridge_classifier.py +195 -123
- snowflake/ml/modeling/linear_model/ridge_classifier_cv.py +195 -123
- snowflake/ml/modeling/linear_model/ridge_cv.py +195 -123
- snowflake/ml/modeling/linear_model/sgd_classifier.py +195 -123
- snowflake/ml/modeling/linear_model/sgd_one_class_svm.py +195 -123
- snowflake/ml/modeling/linear_model/sgd_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/theil_sen_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/tweedie_regressor.py +195 -123
- snowflake/ml/modeling/manifold/isomap.py +195 -123
- snowflake/ml/modeling/manifold/mds.py +195 -123
- snowflake/ml/modeling/manifold/spectral_embedding.py +195 -123
- snowflake/ml/modeling/manifold/tsne.py +195 -123
- snowflake/ml/modeling/mixture/bayesian_gaussian_mixture.py +195 -123
- snowflake/ml/modeling/mixture/gaussian_mixture.py +195 -123
- snowflake/ml/modeling/model_selection/grid_search_cv.py +42 -18
- snowflake/ml/modeling/model_selection/randomized_search_cv.py +42 -18
- snowflake/ml/modeling/multiclass/one_vs_one_classifier.py +195 -123
- snowflake/ml/modeling/multiclass/one_vs_rest_classifier.py +195 -123
- snowflake/ml/modeling/multiclass/output_code_classifier.py +195 -123
- snowflake/ml/modeling/naive_bayes/bernoulli_nb.py +195 -123
- snowflake/ml/modeling/naive_bayes/categorical_nb.py +195 -123
- snowflake/ml/modeling/naive_bayes/complement_nb.py +195 -123
- snowflake/ml/modeling/naive_bayes/gaussian_nb.py +195 -123
- snowflake/ml/modeling/naive_bayes/multinomial_nb.py +195 -123
- snowflake/ml/modeling/neighbors/k_neighbors_classifier.py +195 -123
- snowflake/ml/modeling/neighbors/k_neighbors_regressor.py +195 -123
- snowflake/ml/modeling/neighbors/kernel_density.py +195 -123
- snowflake/ml/modeling/neighbors/local_outlier_factor.py +195 -123
- snowflake/ml/modeling/neighbors/nearest_centroid.py +195 -123
- snowflake/ml/modeling/neighbors/nearest_neighbors.py +195 -123
- snowflake/ml/modeling/neighbors/neighborhood_components_analysis.py +195 -123
- snowflake/ml/modeling/neighbors/radius_neighbors_classifier.py +195 -123
- snowflake/ml/modeling/neighbors/radius_neighbors_regressor.py +195 -123
- snowflake/ml/modeling/neural_network/bernoulli_rbm.py +195 -123
- snowflake/ml/modeling/neural_network/mlp_classifier.py +195 -123
- snowflake/ml/modeling/neural_network/mlp_regressor.py +195 -123
- snowflake/ml/modeling/pipeline/pipeline.py +4 -4
- snowflake/ml/modeling/preprocessing/binarizer.py +1 -5
- snowflake/ml/modeling/preprocessing/k_bins_discretizer.py +1 -5
- snowflake/ml/modeling/preprocessing/label_encoder.py +1 -5
- snowflake/ml/modeling/preprocessing/max_abs_scaler.py +1 -5
- snowflake/ml/modeling/preprocessing/min_max_scaler.py +10 -12
- snowflake/ml/modeling/preprocessing/normalizer.py +1 -5
- snowflake/ml/modeling/preprocessing/one_hot_encoder.py +1 -5
- snowflake/ml/modeling/preprocessing/ordinal_encoder.py +1 -5
- snowflake/ml/modeling/preprocessing/polynomial_features.py +195 -123
- snowflake/ml/modeling/preprocessing/robust_scaler.py +1 -5
- snowflake/ml/modeling/preprocessing/standard_scaler.py +11 -11
- snowflake/ml/modeling/semi_supervised/label_propagation.py +195 -123
- snowflake/ml/modeling/semi_supervised/label_spreading.py +195 -123
- snowflake/ml/modeling/svm/linear_svc.py +195 -123
- snowflake/ml/modeling/svm/linear_svr.py +195 -123
- snowflake/ml/modeling/svm/nu_svc.py +195 -123
- snowflake/ml/modeling/svm/nu_svr.py +195 -123
- snowflake/ml/modeling/svm/svc.py +195 -123
- snowflake/ml/modeling/svm/svr.py +195 -123
- snowflake/ml/modeling/tree/decision_tree_classifier.py +195 -123
- snowflake/ml/modeling/tree/decision_tree_regressor.py +195 -123
- snowflake/ml/modeling/tree/extra_tree_classifier.py +195 -123
- snowflake/ml/modeling/tree/extra_tree_regressor.py +195 -123
- snowflake/ml/modeling/xgboost/xgb_classifier.py +195 -123
- snowflake/ml/modeling/xgboost/xgb_regressor.py +195 -123
- snowflake/ml/modeling/xgboost/xgbrf_classifier.py +195 -123
- snowflake/ml/modeling/xgboost/xgbrf_regressor.py +195 -123
- snowflake/ml/registry/_manager/model_manager.py +5 -1
- snowflake/ml/registry/model_registry.py +99 -26
- snowflake/ml/registry/registry.py +3 -2
- snowflake/ml/version.py +1 -1
- {snowflake_ml_python-1.3.1.dist-info → snowflake_ml_python-1.4.1.dist-info}/METADATA +94 -55
- {snowflake_ml_python-1.3.1.dist-info → snowflake_ml_python-1.4.1.dist-info}/RECORD +218 -212
- snowflake/ml/model/_model_composer/model_runtime/model_runtime.py +0 -97
- {snowflake_ml_python-1.3.1.dist-info → snowflake_ml_python-1.4.1.dist-info}/LICENSE.txt +0 -0
- {snowflake_ml_python-1.3.1.dist-info → snowflake_ml_python-1.4.1.dist-info}/WHEEL +0 -0
- {snowflake_ml_python-1.3.1.dist-info → snowflake_ml_python-1.4.1.dist-info}/top_level.txt +0 -0
snowflake/ml/modeling/naive_bayes/gaussian_nb.py

@@ -33,6 +33,15 @@ from snowflake.ml.modeling._internal.transformer_protocols import (
     BatchInferenceKwargsTypedDict,
     ScoreKwargsTypedDict
 )
+from snowflake.ml.model._signatures import utils as model_signature_utils
+from snowflake.ml.model.model_signature import (
+    BaseFeatureSpec,
+    DataType,
+    FeatureSpec,
+    ModelSignature,
+    _infer_signature,
+    _rename_signature_with_snowflake_identifiers,
+)

 from snowflake.ml.modeling._internal.model_transformer_builder import ModelTransformerBuilder

@@ -43,16 +52,6 @@ from snowflake.ml.modeling._internal.estimator_utils import (
     validate_sklearn_args,
 )

-from snowflake.ml.model.model_signature import (
-    DataType,
-    FeatureSpec,
-    ModelSignature,
-    _infer_signature,
-    _rename_signature_with_snowflake_identifiers,
-    BaseFeatureSpec,
-)
-from snowflake.ml.model._signatures import utils as model_signature_utils
-
 _PROJECT = "ModelDevelopment"
 # Derive subproject from module name by removing "sklearn"
 # and converting module name from underscore to CamelCase
@@ -203,12 +202,7 @@ class GaussianNB(BaseTransformer):
         )
         return selected_cols

-    @telemetry.send_api_usage_telemetry(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
-    def fit(self, dataset: Union[DataFrame, pd.DataFrame]) -> "GaussianNB":
+    def _fit(self, dataset: Union[DataFrame, pd.DataFrame]) -> "GaussianNB":
         """Fit Gaussian Naive Bayes according to X, y
         For more details on this function, see [sklearn.naive_bayes.GaussianNB.fit]
         (https://scikit-learn.org/stable/modules/generated/sklearn.naive_bayes.GaussianNB.html#sklearn.naive_bayes.GaussianNB.fit)
@@ -235,12 +229,14 @@ class GaussianNB(BaseTransformer):

         self._snowpark_cols = dataset.select(self.input_cols).columns

-
+        # If we are already in a stored procedure, no need to kick off another one.
         if SNOWML_SPROC_ENV in os.environ:
             statement_params = telemetry.get_function_usage_statement_params(
                 project=_PROJECT,
                 subproject=_SUBPROJECT,
-                function_name=telemetry.get_statement_params_full_func_name(
+                function_name=telemetry.get_statement_params_full_func_name(
+                    inspect.currentframe(), GaussianNB.__class__.__name__
+                ),
                 api_calls=[Session.call],
                 custom_tags=dict([("autogen", True)]) if self._autogenerated else None,
             )
@@ -261,7 +257,7 @@ class GaussianNB(BaseTransformer):
         )
         self._sklearn_object = model_trainer.train()
         self._is_fitted = True
-        self.
+        self._generate_model_signatures(dataset)
         return self

     def _batch_inference_validate_snowpark(
@@ -337,7 +333,9 @@ class GaussianNB(BaseTransformer):
             # when it is classifier, infer the datatype from label columns
             if expected_type_inferred == "" and 'predict' in self.model_signatures:
                 # Batch inference takes a single expected output column type. Use the first columns type for now.
-                label_cols_signatures = [
+                label_cols_signatures = [
+                    row for row in self.model_signatures['predict'].outputs if row.name in self.output_cols
+                ]
                 if len(label_cols_signatures) == 0:
                     error_str = f"Output columns {self.output_cols} do not match model signatures {self.model_signatures['predict'].outputs}."
                     raise exceptions.SnowflakeMLException(
@@ -345,25 +343,22 @@ class GaussianNB(BaseTransformer):
                         original_exception=ValueError(error_str),
                     )

-                expected_type_inferred = convert_sp_to_sf_type(
-                    label_cols_signatures[0].as_snowpark_type()
-                )
+                expected_type_inferred = convert_sp_to_sf_type(label_cols_signatures[0].as_snowpark_type())

             self._deps = self._batch_inference_validate_snowpark(dataset=dataset, inference_method=inference_method)
-            assert isinstance(
+            assert isinstance(
+                dataset._session, Session
+            ) # mypy does not recognize the check in _batch_inference_validate_snowpark()

             transform_kwargs = dict(
-                session
-                dependencies
-                drop_input_cols
-                expected_output_cols_type
+                session=dataset._session,
+                dependencies=self._deps,
+                drop_input_cols=self._drop_input_cols,
+                expected_output_cols_type=expected_type_inferred,
             )

         elif isinstance(dataset, pd.DataFrame):
-            transform_kwargs = dict(
-                snowpark_input_cols = self._snowpark_cols,
-                drop_input_cols = self._drop_input_cols
-            )
+            transform_kwargs = dict(snowpark_input_cols=self._snowpark_cols, drop_input_cols=self._drop_input_cols)

         transform_handlers = ModelTransformerBuilder.build(
             dataset=dataset,
@@ -403,7 +398,7 @@ class GaussianNB(BaseTransformer):
             Transformed dataset.
         """
         super()._check_dataset_type(dataset)
-        inference_method="transform"
+        inference_method = "transform"

         # This dictionary contains optional kwargs for batch inference. These kwargs
         # are specific to the type of dataset used.
@@ -440,17 +435,14 @@ class GaussianNB(BaseTransformer):
             assert isinstance(dataset._session, Session) # mypy does not recognize the check in _batch_inference_validate_snowpark()

             transform_kwargs = dict(
-                session
-                dependencies
-                drop_input_cols
-                expected_output_cols_type
+                session=dataset._session,
+                dependencies=self._deps,
+                drop_input_cols=self._drop_input_cols,
+                expected_output_cols_type=expected_dtype,
             )

         elif isinstance(dataset, pd.DataFrame):
-            transform_kwargs = dict(
-                snowpark_input_cols = self._snowpark_cols,
-                drop_input_cols = self._drop_input_cols
-            )
+            transform_kwargs = dict(snowpark_input_cols=self._snowpark_cols, drop_input_cols=self._drop_input_cols)

         transform_handlers = ModelTransformerBuilder.build(
             dataset=dataset,
@@ -469,7 +461,11 @@ class GaussianNB(BaseTransformer):
         return output_df

     @available_if(original_estimator_has_callable("fit_predict")) # type: ignore[misc]
-    def fit_predict(
+    def fit_predict(
+        self,
+        dataset: Union[DataFrame, pd.DataFrame],
+        output_cols_prefix: str = "fit_predict_",
+    ) -> Union[DataFrame, pd.DataFrame]:
         """ Method not supported for this class.


@@ -494,7 +490,9 @@ class GaussianNB(BaseTransformer):
         )
         output_result, fitted_estimator = model_trainer.train_fit_predict(
             drop_input_cols=self._drop_input_cols,
-            expected_output_cols_list=
+            expected_output_cols_list=(
+                self.output_cols if self.output_cols else self._get_output_column_names(output_cols_prefix)
+            ),
         )
         self._sklearn_object = fitted_estimator
         self._is_fitted = True
@@ -511,6 +509,62 @@ class GaussianNB(BaseTransformer):
         assert self._sklearn_object is not None
         return self._sklearn_object.embedding_

+
+    def _get_output_column_names(self, output_cols_prefix: str, output_cols: Optional[List[str]] = None) -> List[str]:
+        """ Returns the list of output columns for predict_proba(), decision_function(), etc.. functions.
+        Returns a list with output_cols_prefix as the only element if the estimator is not a classifier.
+        """
+        output_cols_prefix = identifier.resolve_identifier(output_cols_prefix)
+        # The following condition is introduced for kneighbors methods, and not used in other methods
+        if output_cols:
+            output_cols = [
+                identifier.concat_names([output_cols_prefix, identifier.resolve_identifier(c)])
+                for c in output_cols
+            ]
+        elif getattr(self._sklearn_object, "classes_", None) is None:
+            output_cols = [output_cols_prefix]
+        elif self._sklearn_object is not None:
+            classes = self._sklearn_object.classes_
+            if isinstance(classes, numpy.ndarray):
+                output_cols = [f'{output_cols_prefix}{str(c)}' for c in classes.tolist()]
+            elif isinstance(classes, list) and len(classes) > 0 and isinstance(classes[0], numpy.ndarray):
+                # If the estimator is a multioutput estimator, classes_ will be a list of ndarrays.
+                output_cols = []
+                for i, cl in enumerate(classes):
+                    # For binary classification, there is only one output column for each class
+                    # ndarray as the two classes are complementary.
+                    if len(cl) == 2:
+                        output_cols.append(f'{output_cols_prefix}{i}_{cl[0]}')
+                    else:
+                        output_cols.extend([
+                            f'{output_cols_prefix}{i}_{c}' for c in cl.tolist()
+                        ])
+        else:
+            output_cols = []
+
+        # Make sure column names are valid snowflake identifiers.
+        assert output_cols is not None # Make MyPy happy
+        rv = [identifier.rename_to_valid_snowflake_identifier(c) for c in output_cols]
+
+        return rv
+
+    def _align_expected_output_names(
+        self, method: str, dataset: DataFrame, expected_output_cols_list: List[str], output_cols_prefix: str
+    ) -> List[str]:
+        # in case the inferred output column names dimension is different
+        # we use one line of snowpark dataframe and put it into sklearn estimator using pandas
+        output_df_pd = getattr(self, method)(dataset.limit(1).to_pandas(), output_cols_prefix)
+        output_df_columns = list(output_df_pd.columns)
+        output_df_columns_set: Set[str] = set(output_df_columns) - set(dataset.columns)
+        if self.sample_weight_col:
+            output_df_columns_set -= set(self.sample_weight_col)
+        # if the dimension of inferred output column names is correct; use it
+        if len(expected_output_cols_list) == len(output_df_columns_set):
+            return expected_output_cols_list
+        # otherwise, use the sklearn estimator's output
+        else:
+            return sorted(list(output_df_columns_set), key=lambda x: output_df_columns.index(x))
+
     @available_if(original_estimator_has_callable("predict_proba")) # type: ignore[misc]
     @telemetry.send_api_usage_telemetry(
         project=_PROJECT,
@@ -543,24 +597,28 @@ class GaussianNB(BaseTransformer):
         # are specific to the type of dataset used.
         transform_kwargs: BatchInferenceKwargsTypedDict = dict()

+        expected_output_cols = self._get_output_column_names(output_cols_prefix)
+
         if isinstance(dataset, DataFrame):
             self._deps = self._batch_inference_validate_snowpark(
                 dataset=dataset,
                 inference_method=inference_method,
             )
-            assert isinstance(
+            assert isinstance(
+                dataset._session, Session
+            ) # mypy does not recognize the check in _batch_inference_validate_snowpark()
             transform_kwargs = dict(
                 session=dataset._session,
                 dependencies=self._deps,
-                drop_input_cols
+                drop_input_cols=self._drop_input_cols,
                 expected_output_cols_type="float",
             )
+            expected_output_cols = self._align_expected_output_names(
+                inference_method, dataset, expected_output_cols, output_cols_prefix
+            )

         elif isinstance(dataset, pd.DataFrame):
-            transform_kwargs = dict(
-                snowpark_input_cols = self._snowpark_cols,
-                drop_input_cols = self._drop_input_cols
-            )
+            transform_kwargs = dict(snowpark_input_cols=self._snowpark_cols, drop_input_cols=self._drop_input_cols)

         transform_handlers = ModelTransformerBuilder.build(
             dataset=dataset,
@@ -572,7 +630,7 @@ class GaussianNB(BaseTransformer):
         output_df: DATAFRAME_TYPE = transform_handlers.batch_inference(
             inference_method=inference_method,
             input_cols=self.input_cols,
-            expected_output_cols=
+            expected_output_cols=expected_output_cols,
             **transform_kwargs
         )
         return output_df
@@ -604,7 +662,8 @@ class GaussianNB(BaseTransformer):
             Output dataset with log probability of the sample for each class in the model.
         """
         super()._check_dataset_type(dataset)
-        inference_method="predict_log_proba"
+        inference_method = "predict_log_proba"
+        expected_output_cols = self._get_output_column_names(output_cols_prefix)

         # This dictionary contains optional kwargs for batch inference. These kwargs
         # are specific to the type of dataset used.
@@ -615,18 +674,20 @@ class GaussianNB(BaseTransformer):
                 dataset=dataset,
                 inference_method=inference_method,
             )
-            assert isinstance(
+            assert isinstance(
+                dataset._session, Session
+            ) # mypy does not recognize the check in _batch_inference_validate_snowpark()
             transform_kwargs = dict(
                 session=dataset._session,
                 dependencies=self._deps,
-                drop_input_cols
+                drop_input_cols=self._drop_input_cols,
                 expected_output_cols_type="float",
             )
+            expected_output_cols = self._align_expected_output_names(
+                inference_method, dataset, expected_output_cols, output_cols_prefix
+            )
         elif isinstance(dataset, pd.DataFrame):
-            transform_kwargs = dict(
-                snowpark_input_cols = self._snowpark_cols,
-                drop_input_cols = self._drop_input_cols
-            )
+            transform_kwargs = dict(snowpark_input_cols=self._snowpark_cols, drop_input_cols=self._drop_input_cols)

         transform_handlers = ModelTransformerBuilder.build(
             dataset=dataset,
@@ -639,7 +700,7 @@ class GaussianNB(BaseTransformer):
         output_df: DATAFRAME_TYPE = transform_handlers.batch_inference(
             inference_method=inference_method,
             input_cols=self.input_cols,
-            expected_output_cols=
+            expected_output_cols=expected_output_cols,
             **transform_kwargs
         )
         return output_df
@@ -665,30 +726,34 @@ class GaussianNB(BaseTransformer):
             Output dataset with results of the decision function for the samples in input dataset.
         """
         super()._check_dataset_type(dataset)
-        inference_method="decision_function"
+        inference_method = "decision_function"

         # This dictionary contains optional kwargs for batch inference. These kwargs
         # are specific to the type of dataset used.
         transform_kwargs: BatchInferenceKwargsTypedDict = dict()

+        expected_output_cols = self._get_output_column_names(output_cols_prefix)
+
         if isinstance(dataset, DataFrame):
             self._deps = self._batch_inference_validate_snowpark(
                 dataset=dataset,
                 inference_method=inference_method,
             )
-            assert isinstance(
+            assert isinstance(
+                dataset._session, Session
+            ) # mypy does not recognize the check in _batch_inference_validate_snowpark()
             transform_kwargs = dict(
                 session=dataset._session,
                 dependencies=self._deps,
-                drop_input_cols
+                drop_input_cols=self._drop_input_cols,
                 expected_output_cols_type="float",
            )
+            expected_output_cols = self._align_expected_output_names(
+                inference_method, dataset, expected_output_cols, output_cols_prefix
+            )

         elif isinstance(dataset, pd.DataFrame):
-            transform_kwargs = dict(
-                snowpark_input_cols = self._snowpark_cols,
-                drop_input_cols = self._drop_input_cols
-            )
+            transform_kwargs = dict(snowpark_input_cols=self._snowpark_cols, drop_input_cols=self._drop_input_cols)

         transform_handlers = ModelTransformerBuilder.build(
             dataset=dataset,
@@ -701,7 +766,7 @@ class GaussianNB(BaseTransformer):
         output_df: DATAFRAME_TYPE = transform_handlers.batch_inference(
             inference_method=inference_method,
             input_cols=self.input_cols,
-            expected_output_cols=
+            expected_output_cols=expected_output_cols,
             **transform_kwargs
         )
         return output_df
@@ -730,12 +795,14 @@ class GaussianNB(BaseTransformer):
             Output dataset with probability of the sample for each class in the model.
         """
         super()._check_dataset_type(dataset)
-        inference_method="score_samples"
+        inference_method = "score_samples"

         # This dictionary contains optional kwargs for batch inference. These kwargs
         # are specific to the type of dataset used.
         transform_kwargs: BatchInferenceKwargsTypedDict = dict()

+        expected_output_cols = self._get_output_column_names(output_cols_prefix)
+
         if isinstance(dataset, DataFrame):
             self._deps = self._batch_inference_validate_snowpark(
                 dataset=dataset,
@@ -748,6 +815,9 @@ class GaussianNB(BaseTransformer):
                 drop_input_cols = self._drop_input_cols,
                 expected_output_cols_type="float",
             )
+            expected_output_cols = self._align_expected_output_names(
+                inference_method, dataset, expected_output_cols, output_cols_prefix
+            )

         elif isinstance(dataset, pd.DataFrame):
             transform_kwargs = dict(
@@ -766,7 +836,7 @@ class GaussianNB(BaseTransformer):
         output_df: DATAFRAME_TYPE = transform_handlers.batch_inference(
             inference_method=inference_method,
             input_cols=self.input_cols,
-            expected_output_cols=
+            expected_output_cols=expected_output_cols,
             **transform_kwargs
         )
         return output_df
@@ -913,50 +983,84 @@ class GaussianNB(BaseTransformer):
         )
         return output_df

+
+
+    def to_sklearn(self) -> Any:
+        """Get sklearn.naive_bayes.GaussianNB object.
+        """
+        if self._sklearn_object is None:
+            self._sklearn_object = self._create_sklearn_object()
+        return self._sklearn_object
+
+    def to_xgboost(self) -> Any:
+        raise exceptions.SnowflakeMLException(
+            error_code=error_codes.METHOD_NOT_ALLOWED,
+            original_exception=AttributeError(
+                modeling_error_messages.UNSUPPORTED_MODEL_CONVERSION.format(
+                    "to_xgboost()",
+                    "to_sklearn()"
+                )
+            ),
+        )
+
+    def to_lightgbm(self) -> Any:
+        raise exceptions.SnowflakeMLException(
+            error_code=error_codes.METHOD_NOT_ALLOWED,
+            original_exception=AttributeError(
+                modeling_error_messages.UNSUPPORTED_MODEL_CONVERSION.format(
+                    "to_lightgbm()",
+                    "to_sklearn()"
+                )
+            ),
+        )

-    def
+    def _get_dependencies(self) -> List[str]:
+        return self._deps
+
+
+    def _generate_model_signatures(self, dataset: Union[DataFrame, pd.DataFrame]) -> None:
         self._model_signature_dict = dict()

         PROB_FUNCTIONS = ["predict_log_proba", "predict_proba", "decision_function"]

-        inputs = list(_infer_signature(dataset[self.input_cols], "input"))
+        inputs = list(_infer_signature(dataset[self.input_cols], "input", use_snowflake_identifiers=True))
         outputs: List[BaseFeatureSpec] = []
         if hasattr(self, "predict"):
             # keep mypy happy
-            assert self._sklearn_object is not None and hasattr(self._sklearn_object, "_estimator_type")
+            assert self._sklearn_object is not None and hasattr(self._sklearn_object, "_estimator_type")
             # For classifier, the type of predict is the same as the type of label
-            if self._sklearn_object._estimator_type ==
-
+            if self._sklearn_object._estimator_type == "classifier":
+                # label columns is the desired type for output
                 outputs = list(_infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True))
                 # rename the output columns
                 outputs = list(model_signature_utils.rename_features(outputs, self.output_cols))
-                self._model_signature_dict["predict"] = ModelSignature(
-
-
+                self._model_signature_dict["predict"] = ModelSignature(
+                    inputs, ([] if self._drop_input_cols else inputs) + outputs
+                )
             # For mixture models that use the density mixin, `predict` returns the argmax of the log prob.
             # For outlier models, returns -1 for outliers and 1 for inliers.
-            # Clusterer returns int64 cluster labels.
+            # Clusterer returns int64 cluster labels.
             elif self._sklearn_object._estimator_type in ["DensityEstimator", "clusterer", "outlier_detector"]:
                 outputs = [FeatureSpec(dtype=DataType.INT64, name=c) for c in self.output_cols]
-                self._model_signature_dict["predict"] = ModelSignature(
-
-
-
+                self._model_signature_dict["predict"] = ModelSignature(
+                    inputs, ([] if self._drop_input_cols else inputs) + outputs
+                )
+
             # For regressor, the type of predict is float64
-            elif self._sklearn_object._estimator_type ==
+            elif self._sklearn_object._estimator_type == "regressor":
                 outputs = [FeatureSpec(dtype=DataType.DOUBLE, name=c) for c in self.output_cols]
-                self._model_signature_dict["predict"] = ModelSignature(
-
-
-
+                self._model_signature_dict["predict"] = ModelSignature(
+                    inputs, ([] if self._drop_input_cols else inputs) + outputs
+                )
+
         for prob_func in PROB_FUNCTIONS:
             if hasattr(self, prob_func):
                 output_cols_prefix: str = f"{prob_func}_"
                 output_column_names = self._get_output_column_names(output_cols_prefix)
                 outputs = [FeatureSpec(dtype=DataType.DOUBLE, name=c) for c in output_column_names]
-                self._model_signature_dict[prob_func] = ModelSignature(
-
-
+                self._model_signature_dict[prob_func] = ModelSignature(
+                    inputs, ([] if self._drop_input_cols else inputs) + outputs
+                )

         # Output signature names may still need to be renamed, since they were not created with `_infer_signature`.
         items = list(self._model_signature_dict.items())
@@ -969,10 +1073,10 @@ class GaussianNB(BaseTransformer):
         """Returns model signature of current class.

         Raises:
-
+            SnowflakeMLException: If estimator is not fitted, then model signature cannot be inferred

         Returns:
-            Dict
+            Dict with each method and its input output signature
         """
         if self._model_signature_dict is None:
             raise exceptions.SnowflakeMLException(
@@ -980,35 +1084,3 @@ class GaussianNB(BaseTransformer):
                 original_exception=RuntimeError("Estimator not fitted before accessing property model_signatures!"),
             )
         return self._model_signature_dict
-
-    def to_sklearn(self) -> Any:
-        """Get sklearn.naive_bayes.GaussianNB object.
-        """
-        if self._sklearn_object is None:
-            self._sklearn_object = self._create_sklearn_object()
-        return self._sklearn_object
-
-    def to_xgboost(self) -> Any:
-        raise exceptions.SnowflakeMLException(
-            error_code=error_codes.METHOD_NOT_ALLOWED,
-            original_exception=AttributeError(
-                modeling_error_messages.UNSUPPORTED_MODEL_CONVERSION.format(
-                    "to_xgboost()",
-                    "to_sklearn()"
-                )
-            ),
-        )
-
-    def to_lightgbm(self) -> Any:
-        raise exceptions.SnowflakeMLException(
-            error_code=error_codes.METHOD_NOT_ALLOWED,
-            original_exception=AttributeError(
-                modeling_error_messages.UNSUPPORTED_MODEL_CONVERSION.format(
-                    "to_lightgbm()",
-                    "to_sklearn()"
-                )
-            ),
-        )
-
-    def _get_dependencies(self) -> List[str]:
-        return self._deps