snowflake-ml-python 1.3.1__py3-none-any.whl → 1.4.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- snowflake/ml/_internal/env_utils.py +11 -1
- snowflake/ml/_internal/human_readable_id/adjectives.txt +128 -0
- snowflake/ml/_internal/human_readable_id/animals.txt +128 -0
- snowflake/ml/_internal/human_readable_id/hrid_generator.py +40 -0
- snowflake/ml/_internal/human_readable_id/hrid_generator_base.py +135 -0
- snowflake/ml/_internal/utils/formatting.py +1 -1
- snowflake/ml/_internal/utils/identifier.py +3 -1
- snowflake/ml/_internal/utils/sql_identifier.py +2 -6
- snowflake/ml/feature_store/feature_store.py +166 -184
- snowflake/ml/feature_store/feature_view.py +12 -24
- snowflake/ml/fileset/sfcfs.py +56 -50
- snowflake/ml/fileset/stage_fs.py +48 -13
- snowflake/ml/model/_client/model/model_version_impl.py +6 -49
- snowflake/ml/model/_client/ops/model_ops.py +78 -29
- snowflake/ml/model/_client/sql/model.py +23 -2
- snowflake/ml/model/_client/sql/model_version.py +22 -1
- snowflake/ml/model/_deploy_client/image_builds/server_image_builder.py +1 -3
- snowflake/ml/model/_deploy_client/snowservice/deploy.py +5 -2
- snowflake/ml/model/_model_composer/model_composer.py +7 -5
- snowflake/ml/model/_model_composer/model_manifest/model_manifest.py +19 -54
- snowflake/ml/model/_model_composer/model_manifest/model_manifest_schema.py +8 -1
- snowflake/ml/model/_model_composer/model_method/infer_table_function.py_template +1 -1
- snowflake/ml/model/_model_composer/model_method/model_method.py +6 -10
- snowflake/ml/model/_packager/model_handlers/catboost.py +206 -0
- snowflake/ml/model/_packager/model_handlers/lightgbm.py +218 -0
- snowflake/ml/model/_packager/model_handlers/sklearn.py +3 -0
- snowflake/ml/model/_packager/model_handlers/snowmlmodel.py +13 -1
- snowflake/ml/model/_packager/model_handlers/xgboost.py +1 -1
- snowflake/ml/model/_packager/model_meta/_core_requirements.py +1 -1
- snowflake/ml/model/_packager/model_meta/model_meta.py +36 -6
- snowflake/ml/model/_packager/model_meta/model_meta_schema.py +20 -1
- snowflake/ml/model/_packager/model_meta_migrator/migrator_plans.py +3 -1
- snowflake/ml/model/_packager/model_packager.py +2 -2
- snowflake/ml/model/{_model_composer/model_runtime/_runtime_requirements.py → _packager/model_runtime/_snowml_inference_alternative_requirements.py} +1 -1
- snowflake/ml/model/_packager/model_runtime/model_runtime.py +137 -0
- snowflake/ml/model/custom_model.py +3 -1
- snowflake/ml/model/type_hints.py +21 -2
- snowflake/ml/modeling/_internal/estimator_utils.py +16 -11
- snowflake/ml/modeling/_internal/local_implementations/pandas_handlers.py +4 -1
- snowflake/ml/modeling/_internal/model_specifications.py +3 -1
- snowflake/ml/modeling/_internal/snowpark_implementations/distributed_hpo_trainer.py +545 -0
- snowflake/ml/modeling/_internal/snowpark_implementations/snowpark_handlers.py +8 -5
- snowflake/ml/modeling/calibration/calibrated_classifier_cv.py +195 -123
- snowflake/ml/modeling/cluster/affinity_propagation.py +195 -123
- snowflake/ml/modeling/cluster/agglomerative_clustering.py +195 -123
- snowflake/ml/modeling/cluster/birch.py +195 -123
- snowflake/ml/modeling/cluster/bisecting_k_means.py +195 -123
- snowflake/ml/modeling/cluster/dbscan.py +195 -123
- snowflake/ml/modeling/cluster/feature_agglomeration.py +195 -123
- snowflake/ml/modeling/cluster/k_means.py +195 -123
- snowflake/ml/modeling/cluster/mean_shift.py +195 -123
- snowflake/ml/modeling/cluster/mini_batch_k_means.py +195 -123
- snowflake/ml/modeling/cluster/optics.py +195 -123
- snowflake/ml/modeling/cluster/spectral_biclustering.py +195 -123
- snowflake/ml/modeling/cluster/spectral_clustering.py +195 -123
- snowflake/ml/modeling/cluster/spectral_coclustering.py +195 -123
- snowflake/ml/modeling/compose/column_transformer.py +195 -123
- snowflake/ml/modeling/compose/transformed_target_regressor.py +195 -123
- snowflake/ml/modeling/covariance/elliptic_envelope.py +195 -123
- snowflake/ml/modeling/covariance/empirical_covariance.py +195 -123
- snowflake/ml/modeling/covariance/graphical_lasso.py +195 -123
- snowflake/ml/modeling/covariance/graphical_lasso_cv.py +195 -123
- snowflake/ml/modeling/covariance/ledoit_wolf.py +195 -123
- snowflake/ml/modeling/covariance/min_cov_det.py +195 -123
- snowflake/ml/modeling/covariance/oas.py +195 -123
- snowflake/ml/modeling/covariance/shrunk_covariance.py +195 -123
- snowflake/ml/modeling/decomposition/dictionary_learning.py +195 -123
- snowflake/ml/modeling/decomposition/factor_analysis.py +195 -123
- snowflake/ml/modeling/decomposition/fast_ica.py +195 -123
- snowflake/ml/modeling/decomposition/incremental_pca.py +195 -123
- snowflake/ml/modeling/decomposition/kernel_pca.py +195 -123
- snowflake/ml/modeling/decomposition/mini_batch_dictionary_learning.py +195 -123
- snowflake/ml/modeling/decomposition/mini_batch_sparse_pca.py +195 -123
- snowflake/ml/modeling/decomposition/pca.py +195 -123
- snowflake/ml/modeling/decomposition/sparse_pca.py +195 -123
- snowflake/ml/modeling/decomposition/truncated_svd.py +195 -123
- snowflake/ml/modeling/discriminant_analysis/linear_discriminant_analysis.py +195 -123
- snowflake/ml/modeling/discriminant_analysis/quadratic_discriminant_analysis.py +195 -123
- snowflake/ml/modeling/ensemble/ada_boost_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/ada_boost_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/bagging_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/bagging_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/extra_trees_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/extra_trees_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/gradient_boosting_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/gradient_boosting_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/hist_gradient_boosting_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/hist_gradient_boosting_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/isolation_forest.py +195 -123
- snowflake/ml/modeling/ensemble/random_forest_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/random_forest_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/stacking_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/voting_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/voting_regressor.py +195 -123
- snowflake/ml/modeling/feature_selection/generic_univariate_select.py +195 -123
- snowflake/ml/modeling/feature_selection/select_fdr.py +195 -123
- snowflake/ml/modeling/feature_selection/select_fpr.py +195 -123
- snowflake/ml/modeling/feature_selection/select_fwe.py +195 -123
- snowflake/ml/modeling/feature_selection/select_k_best.py +195 -123
- snowflake/ml/modeling/feature_selection/select_percentile.py +195 -123
- snowflake/ml/modeling/feature_selection/sequential_feature_selector.py +195 -123
- snowflake/ml/modeling/feature_selection/variance_threshold.py +195 -123
- snowflake/ml/modeling/framework/_utils.py +8 -1
- snowflake/ml/modeling/framework/base.py +24 -6
- snowflake/ml/modeling/gaussian_process/gaussian_process_classifier.py +195 -123
- snowflake/ml/modeling/gaussian_process/gaussian_process_regressor.py +195 -123
- snowflake/ml/modeling/impute/iterative_imputer.py +195 -123
- snowflake/ml/modeling/impute/knn_imputer.py +195 -123
- snowflake/ml/modeling/impute/missing_indicator.py +195 -123
- snowflake/ml/modeling/impute/simple_imputer.py +4 -15
- snowflake/ml/modeling/kernel_approximation/additive_chi2_sampler.py +195 -123
- snowflake/ml/modeling/kernel_approximation/nystroem.py +195 -123
- snowflake/ml/modeling/kernel_approximation/polynomial_count_sketch.py +195 -123
- snowflake/ml/modeling/kernel_approximation/rbf_sampler.py +195 -123
- snowflake/ml/modeling/kernel_approximation/skewed_chi2_sampler.py +195 -123
- snowflake/ml/modeling/kernel_ridge/kernel_ridge.py +195 -123
- snowflake/ml/modeling/lightgbm/lgbm_classifier.py +198 -125
- snowflake/ml/modeling/lightgbm/lgbm_regressor.py +198 -125
- snowflake/ml/modeling/linear_model/ard_regression.py +195 -123
- snowflake/ml/modeling/linear_model/bayesian_ridge.py +195 -123
- snowflake/ml/modeling/linear_model/elastic_net.py +195 -123
- snowflake/ml/modeling/linear_model/elastic_net_cv.py +195 -123
- snowflake/ml/modeling/linear_model/gamma_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/huber_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/lars.py +195 -123
- snowflake/ml/modeling/linear_model/lars_cv.py +195 -123
- snowflake/ml/modeling/linear_model/lasso.py +195 -123
- snowflake/ml/modeling/linear_model/lasso_cv.py +195 -123
- snowflake/ml/modeling/linear_model/lasso_lars.py +195 -123
- snowflake/ml/modeling/linear_model/lasso_lars_cv.py +195 -123
- snowflake/ml/modeling/linear_model/lasso_lars_ic.py +195 -123
- snowflake/ml/modeling/linear_model/linear_regression.py +195 -123
- snowflake/ml/modeling/linear_model/logistic_regression.py +195 -123
- snowflake/ml/modeling/linear_model/logistic_regression_cv.py +195 -123
- snowflake/ml/modeling/linear_model/multi_task_elastic_net.py +195 -123
- snowflake/ml/modeling/linear_model/multi_task_elastic_net_cv.py +195 -123
- snowflake/ml/modeling/linear_model/multi_task_lasso.py +195 -123
- snowflake/ml/modeling/linear_model/multi_task_lasso_cv.py +195 -123
- snowflake/ml/modeling/linear_model/orthogonal_matching_pursuit.py +195 -123
- snowflake/ml/modeling/linear_model/passive_aggressive_classifier.py +195 -123
- snowflake/ml/modeling/linear_model/passive_aggressive_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/perceptron.py +195 -123
- snowflake/ml/modeling/linear_model/poisson_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/ransac_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/ridge.py +195 -123
- snowflake/ml/modeling/linear_model/ridge_classifier.py +195 -123
- snowflake/ml/modeling/linear_model/ridge_classifier_cv.py +195 -123
- snowflake/ml/modeling/linear_model/ridge_cv.py +195 -123
- snowflake/ml/modeling/linear_model/sgd_classifier.py +195 -123
- snowflake/ml/modeling/linear_model/sgd_one_class_svm.py +195 -123
- snowflake/ml/modeling/linear_model/sgd_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/theil_sen_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/tweedie_regressor.py +195 -123
- snowflake/ml/modeling/manifold/isomap.py +195 -123
- snowflake/ml/modeling/manifold/mds.py +195 -123
- snowflake/ml/modeling/manifold/spectral_embedding.py +195 -123
- snowflake/ml/modeling/manifold/tsne.py +195 -123
- snowflake/ml/modeling/mixture/bayesian_gaussian_mixture.py +195 -123
- snowflake/ml/modeling/mixture/gaussian_mixture.py +195 -123
- snowflake/ml/modeling/model_selection/grid_search_cv.py +42 -18
- snowflake/ml/modeling/model_selection/randomized_search_cv.py +42 -18
- snowflake/ml/modeling/multiclass/one_vs_one_classifier.py +195 -123
- snowflake/ml/modeling/multiclass/one_vs_rest_classifier.py +195 -123
- snowflake/ml/modeling/multiclass/output_code_classifier.py +195 -123
- snowflake/ml/modeling/naive_bayes/bernoulli_nb.py +195 -123
- snowflake/ml/modeling/naive_bayes/categorical_nb.py +195 -123
- snowflake/ml/modeling/naive_bayes/complement_nb.py +195 -123
- snowflake/ml/modeling/naive_bayes/gaussian_nb.py +195 -123
- snowflake/ml/modeling/naive_bayes/multinomial_nb.py +195 -123
- snowflake/ml/modeling/neighbors/k_neighbors_classifier.py +195 -123
- snowflake/ml/modeling/neighbors/k_neighbors_regressor.py +195 -123
- snowflake/ml/modeling/neighbors/kernel_density.py +195 -123
- snowflake/ml/modeling/neighbors/local_outlier_factor.py +195 -123
- snowflake/ml/modeling/neighbors/nearest_centroid.py +195 -123
- snowflake/ml/modeling/neighbors/nearest_neighbors.py +195 -123
- snowflake/ml/modeling/neighbors/neighborhood_components_analysis.py +195 -123
- snowflake/ml/modeling/neighbors/radius_neighbors_classifier.py +195 -123
- snowflake/ml/modeling/neighbors/radius_neighbors_regressor.py +195 -123
- snowflake/ml/modeling/neural_network/bernoulli_rbm.py +195 -123
- snowflake/ml/modeling/neural_network/mlp_classifier.py +195 -123
- snowflake/ml/modeling/neural_network/mlp_regressor.py +195 -123
- snowflake/ml/modeling/pipeline/pipeline.py +4 -4
- snowflake/ml/modeling/preprocessing/binarizer.py +1 -5
- snowflake/ml/modeling/preprocessing/k_bins_discretizer.py +1 -5
- snowflake/ml/modeling/preprocessing/label_encoder.py +1 -5
- snowflake/ml/modeling/preprocessing/max_abs_scaler.py +1 -5
- snowflake/ml/modeling/preprocessing/min_max_scaler.py +10 -12
- snowflake/ml/modeling/preprocessing/normalizer.py +1 -5
- snowflake/ml/modeling/preprocessing/one_hot_encoder.py +1 -5
- snowflake/ml/modeling/preprocessing/ordinal_encoder.py +1 -5
- snowflake/ml/modeling/preprocessing/polynomial_features.py +195 -123
- snowflake/ml/modeling/preprocessing/robust_scaler.py +1 -5
- snowflake/ml/modeling/preprocessing/standard_scaler.py +11 -11
- snowflake/ml/modeling/semi_supervised/label_propagation.py +195 -123
- snowflake/ml/modeling/semi_supervised/label_spreading.py +195 -123
- snowflake/ml/modeling/svm/linear_svc.py +195 -123
- snowflake/ml/modeling/svm/linear_svr.py +195 -123
- snowflake/ml/modeling/svm/nu_svc.py +195 -123
- snowflake/ml/modeling/svm/nu_svr.py +195 -123
- snowflake/ml/modeling/svm/svc.py +195 -123
- snowflake/ml/modeling/svm/svr.py +195 -123
- snowflake/ml/modeling/tree/decision_tree_classifier.py +195 -123
- snowflake/ml/modeling/tree/decision_tree_regressor.py +195 -123
- snowflake/ml/modeling/tree/extra_tree_classifier.py +195 -123
- snowflake/ml/modeling/tree/extra_tree_regressor.py +195 -123
- snowflake/ml/modeling/xgboost/xgb_classifier.py +195 -123
- snowflake/ml/modeling/xgboost/xgb_regressor.py +195 -123
- snowflake/ml/modeling/xgboost/xgbrf_classifier.py +195 -123
- snowflake/ml/modeling/xgboost/xgbrf_regressor.py +195 -123
- snowflake/ml/registry/_manager/model_manager.py +5 -1
- snowflake/ml/registry/model_registry.py +99 -26
- snowflake/ml/registry/registry.py +3 -2
- snowflake/ml/version.py +1 -1
- {snowflake_ml_python-1.3.1.dist-info → snowflake_ml_python-1.4.1.dist-info}/METADATA +94 -55
- {snowflake_ml_python-1.3.1.dist-info → snowflake_ml_python-1.4.1.dist-info}/RECORD +218 -212
- snowflake/ml/model/_model_composer/model_runtime/model_runtime.py +0 -97
- {snowflake_ml_python-1.3.1.dist-info → snowflake_ml_python-1.4.1.dist-info}/LICENSE.txt +0 -0
- {snowflake_ml_python-1.3.1.dist-info → snowflake_ml_python-1.4.1.dist-info}/WHEEL +0 -0
- {snowflake_ml_python-1.3.1.dist-info → snowflake_ml_python-1.4.1.dist-info}/top_level.txt +0 -0
--- a/snowflake/ml/modeling/neighbors/nearest_centroid.py
+++ b/snowflake/ml/modeling/neighbors/nearest_centroid.py
@@ -33,6 +33,15 @@ from snowflake.ml.modeling._internal.transformer_protocols import (
     BatchInferenceKwargsTypedDict,
     ScoreKwargsTypedDict
 )
+from snowflake.ml.model._signatures import utils as model_signature_utils
+from snowflake.ml.model.model_signature import (
+    BaseFeatureSpec,
+    DataType,
+    FeatureSpec,
+    ModelSignature,
+    _infer_signature,
+    _rename_signature_with_snowflake_identifiers,
+)

 from snowflake.ml.modeling._internal.model_transformer_builder import ModelTransformerBuilder

@@ -43,16 +52,6 @@ from snowflake.ml.modeling._internal.estimator_utils import (
     validate_sklearn_args,
 )

-from snowflake.ml.model.model_signature import (
-    DataType,
-    FeatureSpec,
-    ModelSignature,
-    _infer_signature,
-    _rename_signature_with_snowflake_identifiers,
-    BaseFeatureSpec,
-)
-from snowflake.ml.model._signatures import utils as model_signature_utils
-
 _PROJECT = "ModelDevelopment"
 # Derive subproject from module name by removing "sklearn"
 # and converting module name from underscore to CamelCase
@@ -213,12 +212,7 @@ class NearestCentroid(BaseTransformer):
         )
         return selected_cols

-    @telemetry.send_api_usage_telemetry(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
-    def fit(self, dataset: Union[DataFrame, pd.DataFrame]) -> "NearestCentroid":
+    def _fit(self, dataset: Union[DataFrame, pd.DataFrame]) -> "NearestCentroid":
         """Fit the NearestCentroid model according to the given training data
         For more details on this function, see [sklearn.neighbors.NearestCentroid.fit]
         (https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.NearestCentroid.html#sklearn.neighbors.NearestCentroid.fit)
@@ -245,12 +239,14 @@ class NearestCentroid(BaseTransformer):

         self._snowpark_cols = dataset.select(self.input_cols).columns

-
+        # If we are already in a stored procedure, no need to kick off another one.
         if SNOWML_SPROC_ENV in os.environ:
             statement_params = telemetry.get_function_usage_statement_params(
                 project=_PROJECT,
                 subproject=_SUBPROJECT,
-                function_name=telemetry.get_statement_params_full_func_name(
+                function_name=telemetry.get_statement_params_full_func_name(
+                    inspect.currentframe(), NearestCentroid.__class__.__name__
+                ),
                 api_calls=[Session.call],
                 custom_tags=dict([("autogen", True)]) if self._autogenerated else None,
             )
@@ -271,7 +267,7 @@ class NearestCentroid(BaseTransformer):
         )
         self._sklearn_object = model_trainer.train()
         self._is_fitted = True
-        self.
+        self._generate_model_signatures(dataset)
         return self

     def _batch_inference_validate_snowpark(
@@ -347,7 +343,9 @@ class NearestCentroid(BaseTransformer):
             # when it is classifier, infer the datatype from label columns
             if expected_type_inferred == "" and 'predict' in self.model_signatures:
                 # Batch inference takes a single expected output column type. Use the first columns type for now.
-                label_cols_signatures = [
+                label_cols_signatures = [
+                    row for row in self.model_signatures['predict'].outputs if row.name in self.output_cols
+                ]
                 if len(label_cols_signatures) == 0:
                     error_str = f"Output columns {self.output_cols} do not match model signatures {self.model_signatures['predict'].outputs}."
                     raise exceptions.SnowflakeMLException(
@@ -355,25 +353,22 @@ class NearestCentroid(BaseTransformer):
                         original_exception=ValueError(error_str),
                     )

-                expected_type_inferred = convert_sp_to_sf_type(
-                    label_cols_signatures[0].as_snowpark_type()
-                )
+                expected_type_inferred = convert_sp_to_sf_type(label_cols_signatures[0].as_snowpark_type())

             self._deps = self._batch_inference_validate_snowpark(dataset=dataset, inference_method=inference_method)
-            assert isinstance(
+            assert isinstance(
+                dataset._session, Session
+            ) # mypy does not recognize the check in _batch_inference_validate_snowpark()

             transform_kwargs = dict(
-                session
-                dependencies
-                drop_input_cols
-                expected_output_cols_type
+                session=dataset._session,
+                dependencies=self._deps,
+                drop_input_cols=self._drop_input_cols,
+                expected_output_cols_type=expected_type_inferred,
             )

         elif isinstance(dataset, pd.DataFrame):
-            transform_kwargs = dict(
-                snowpark_input_cols = self._snowpark_cols,
-                drop_input_cols = self._drop_input_cols
-            )
+            transform_kwargs = dict(snowpark_input_cols=self._snowpark_cols, drop_input_cols=self._drop_input_cols)

         transform_handlers = ModelTransformerBuilder.build(
             dataset=dataset,
@@ -413,7 +408,7 @@ class NearestCentroid(BaseTransformer):
             Transformed dataset.
         """
         super()._check_dataset_type(dataset)
-        inference_method="transform"
+        inference_method = "transform"

         # This dictionary contains optional kwargs for batch inference. These kwargs
         # are specific to the type of dataset used.
@@ -450,17 +445,14 @@ class NearestCentroid(BaseTransformer):
             assert isinstance(dataset._session, Session) # mypy does not recognize the check in _batch_inference_validate_snowpark()

             transform_kwargs = dict(
-                session
-                dependencies
-                drop_input_cols
-                expected_output_cols_type
+                session=dataset._session,
+                dependencies=self._deps,
+                drop_input_cols=self._drop_input_cols,
+                expected_output_cols_type=expected_dtype,
             )

         elif isinstance(dataset, pd.DataFrame):
-            transform_kwargs = dict(
-                snowpark_input_cols = self._snowpark_cols,
-                drop_input_cols = self._drop_input_cols
-            )
+            transform_kwargs = dict(snowpark_input_cols=self._snowpark_cols, drop_input_cols=self._drop_input_cols)

         transform_handlers = ModelTransformerBuilder.build(
             dataset=dataset,
@@ -479,7 +471,11 @@ class NearestCentroid(BaseTransformer):
         return output_df

     @available_if(original_estimator_has_callable("fit_predict")) # type: ignore[misc]
-    def fit_predict(
+    def fit_predict(
+        self,
+        dataset: Union[DataFrame, pd.DataFrame],
+        output_cols_prefix: str = "fit_predict_",
+    ) -> Union[DataFrame, pd.DataFrame]:
         """ Method not supported for this class.


@@ -504,7 +500,9 @@ class NearestCentroid(BaseTransformer):
         )
         output_result, fitted_estimator = model_trainer.train_fit_predict(
             drop_input_cols=self._drop_input_cols,
-            expected_output_cols_list=
+            expected_output_cols_list=(
+                self.output_cols if self.output_cols else self._get_output_column_names(output_cols_prefix)
+            ),
         )
         self._sklearn_object = fitted_estimator
         self._is_fitted = True
@@ -521,6 +519,62 @@ class NearestCentroid(BaseTransformer):
         assert self._sklearn_object is not None
         return self._sklearn_object.embedding_

+
+    def _get_output_column_names(self, output_cols_prefix: str, output_cols: Optional[List[str]] = None) -> List[str]:
+        """ Returns the list of output columns for predict_proba(), decision_function(), etc.. functions.
+        Returns a list with output_cols_prefix as the only element if the estimator is not a classifier.
+        """
+        output_cols_prefix = identifier.resolve_identifier(output_cols_prefix)
+        # The following condition is introduced for kneighbors methods, and not used in other methods
+        if output_cols:
+            output_cols = [
+                identifier.concat_names([output_cols_prefix, identifier.resolve_identifier(c)])
+                for c in output_cols
+            ]
+        elif getattr(self._sklearn_object, "classes_", None) is None:
+            output_cols = [output_cols_prefix]
+        elif self._sklearn_object is not None:
+            classes = self._sklearn_object.classes_
+            if isinstance(classes, numpy.ndarray):
+                output_cols = [f'{output_cols_prefix}{str(c)}' for c in classes.tolist()]
+            elif isinstance(classes, list) and len(classes) > 0 and isinstance(classes[0], numpy.ndarray):
+                # If the estimator is a multioutput estimator, classes_ will be a list of ndarrays.
+                output_cols = []
+                for i, cl in enumerate(classes):
+                    # For binary classification, there is only one output column for each class
+                    # ndarray as the two classes are complementary.
+                    if len(cl) == 2:
+                        output_cols.append(f'{output_cols_prefix}{i}_{cl[0]}')
+                    else:
+                        output_cols.extend([
+                            f'{output_cols_prefix}{i}_{c}' for c in cl.tolist()
+                        ])
+        else:
+            output_cols = []
+
+        # Make sure column names are valid snowflake identifiers.
+        assert output_cols is not None # Make MyPy happy
+        rv = [identifier.rename_to_valid_snowflake_identifier(c) for c in output_cols]
+
+        return rv
+
+    def _align_expected_output_names(
+        self, method: str, dataset: DataFrame, expected_output_cols_list: List[str], output_cols_prefix: str
+    ) -> List[str]:
+        # in case the inferred output column names dimension is different
+        # we use one line of snowpark dataframe and put it into sklearn estimator using pandas
+        output_df_pd = getattr(self, method)(dataset.limit(1).to_pandas(), output_cols_prefix)
+        output_df_columns = list(output_df_pd.columns)
+        output_df_columns_set: Set[str] = set(output_df_columns) - set(dataset.columns)
+        if self.sample_weight_col:
+            output_df_columns_set -= set(self.sample_weight_col)
+        # if the dimension of inferred output column names is correct; use it
+        if len(expected_output_cols_list) == len(output_df_columns_set):
+            return expected_output_cols_list
+        # otherwise, use the sklearn estimator's output
+        else:
+            return sorted(list(output_df_columns_set), key=lambda x: output_df_columns.index(x))
+
     @available_if(original_estimator_has_callable("predict_proba")) # type: ignore[misc]
     @telemetry.send_api_usage_telemetry(
         project=_PROJECT,
@@ -551,24 +605,28 @@ class NearestCentroid(BaseTransformer):
         # are specific to the type of dataset used.
         transform_kwargs: BatchInferenceKwargsTypedDict = dict()

+        expected_output_cols = self._get_output_column_names(output_cols_prefix)
+
         if isinstance(dataset, DataFrame):
             self._deps = self._batch_inference_validate_snowpark(
                 dataset=dataset,
                 inference_method=inference_method,
             )
-            assert isinstance(
+            assert isinstance(
+                dataset._session, Session
+            ) # mypy does not recognize the check in _batch_inference_validate_snowpark()
             transform_kwargs = dict(
                 session=dataset._session,
                 dependencies=self._deps,
-                drop_input_cols
+                drop_input_cols=self._drop_input_cols,
                 expected_output_cols_type="float",
             )
+            expected_output_cols = self._align_expected_output_names(
+                inference_method, dataset, expected_output_cols, output_cols_prefix
+            )

         elif isinstance(dataset, pd.DataFrame):
-            transform_kwargs = dict(
-                snowpark_input_cols = self._snowpark_cols,
-                drop_input_cols = self._drop_input_cols
-            )
+            transform_kwargs = dict(snowpark_input_cols=self._snowpark_cols, drop_input_cols=self._drop_input_cols)

         transform_handlers = ModelTransformerBuilder.build(
             dataset=dataset,
@@ -580,7 +638,7 @@ class NearestCentroid(BaseTransformer):
         output_df: DATAFRAME_TYPE = transform_handlers.batch_inference(
             inference_method=inference_method,
             input_cols=self.input_cols,
-            expected_output_cols=
+            expected_output_cols=expected_output_cols,
             **transform_kwargs
         )
         return output_df
@@ -610,7 +668,8 @@ class NearestCentroid(BaseTransformer):
             Output dataset with log probability of the sample for each class in the model.
         """
         super()._check_dataset_type(dataset)
-        inference_method="predict_log_proba"
+        inference_method = "predict_log_proba"
+        expected_output_cols = self._get_output_column_names(output_cols_prefix)

         # This dictionary contains optional kwargs for batch inference. These kwargs
         # are specific to the type of dataset used.
@@ -621,18 +680,20 @@ class NearestCentroid(BaseTransformer):
                 dataset=dataset,
                 inference_method=inference_method,
             )
-            assert isinstance(
+            assert isinstance(
+                dataset._session, Session
+            ) # mypy does not recognize the check in _batch_inference_validate_snowpark()
             transform_kwargs = dict(
                 session=dataset._session,
                 dependencies=self._deps,
-                drop_input_cols
+                drop_input_cols=self._drop_input_cols,
                 expected_output_cols_type="float",
             )
+            expected_output_cols = self._align_expected_output_names(
+                inference_method, dataset, expected_output_cols, output_cols_prefix
+            )
         elif isinstance(dataset, pd.DataFrame):
-            transform_kwargs = dict(
-                snowpark_input_cols = self._snowpark_cols,
-                drop_input_cols = self._drop_input_cols
-            )
+            transform_kwargs = dict(snowpark_input_cols=self._snowpark_cols, drop_input_cols=self._drop_input_cols)

         transform_handlers = ModelTransformerBuilder.build(
             dataset=dataset,
@@ -645,7 +706,7 @@ class NearestCentroid(BaseTransformer):
         output_df: DATAFRAME_TYPE = transform_handlers.batch_inference(
             inference_method=inference_method,
             input_cols=self.input_cols,
-            expected_output_cols=
+            expected_output_cols=expected_output_cols,
             **transform_kwargs
         )
         return output_df
@@ -671,30 +732,34 @@ class NearestCentroid(BaseTransformer):
             Output dataset with results of the decision function for the samples in input dataset.
         """
         super()._check_dataset_type(dataset)
-        inference_method="decision_function"
+        inference_method = "decision_function"

         # This dictionary contains optional kwargs for batch inference. These kwargs
         # are specific to the type of dataset used.
         transform_kwargs: BatchInferenceKwargsTypedDict = dict()

+        expected_output_cols = self._get_output_column_names(output_cols_prefix)
+
         if isinstance(dataset, DataFrame):
             self._deps = self._batch_inference_validate_snowpark(
                 dataset=dataset,
                 inference_method=inference_method,
             )
-            assert isinstance(
+            assert isinstance(
+                dataset._session, Session
+            ) # mypy does not recognize the check in _batch_inference_validate_snowpark()
             transform_kwargs = dict(
                 session=dataset._session,
                 dependencies=self._deps,
-                drop_input_cols
+                drop_input_cols=self._drop_input_cols,
                 expected_output_cols_type="float",
             )
+            expected_output_cols = self._align_expected_output_names(
+                inference_method, dataset, expected_output_cols, output_cols_prefix
+            )

         elif isinstance(dataset, pd.DataFrame):
-            transform_kwargs = dict(
-                snowpark_input_cols = self._snowpark_cols,
-                drop_input_cols = self._drop_input_cols
-            )
+            transform_kwargs = dict(snowpark_input_cols=self._snowpark_cols, drop_input_cols=self._drop_input_cols)

         transform_handlers = ModelTransformerBuilder.build(
             dataset=dataset,
@@ -707,7 +772,7 @@ class NearestCentroid(BaseTransformer):
         output_df: DATAFRAME_TYPE = transform_handlers.batch_inference(
             inference_method=inference_method,
             input_cols=self.input_cols,
-            expected_output_cols=
+            expected_output_cols=expected_output_cols,
             **transform_kwargs
         )
         return output_df
@@ -736,12 +801,14 @@ class NearestCentroid(BaseTransformer):
             Output dataset with probability of the sample for each class in the model.
         """
         super()._check_dataset_type(dataset)
-        inference_method="score_samples"
+        inference_method = "score_samples"

         # This dictionary contains optional kwargs for batch inference. These kwargs
         # are specific to the type of dataset used.
         transform_kwargs: BatchInferenceKwargsTypedDict = dict()

+        expected_output_cols = self._get_output_column_names(output_cols_prefix)
+
         if isinstance(dataset, DataFrame):
             self._deps = self._batch_inference_validate_snowpark(
                 dataset=dataset,
@@ -754,6 +821,9 @@ class NearestCentroid(BaseTransformer):
                 drop_input_cols = self._drop_input_cols,
                 expected_output_cols_type="float",
             )
+            expected_output_cols = self._align_expected_output_names(
+                inference_method, dataset, expected_output_cols, output_cols_prefix
+            )

         elif isinstance(dataset, pd.DataFrame):
             transform_kwargs = dict(
@@ -772,7 +842,7 @@ class NearestCentroid(BaseTransformer):
         output_df: DATAFRAME_TYPE = transform_handlers.batch_inference(
             inference_method=inference_method,
             input_cols=self.input_cols,
-            expected_output_cols=
+            expected_output_cols=expected_output_cols,
             **transform_kwargs
         )
         return output_df
@@ -919,50 +989,84 @@ class NearestCentroid(BaseTransformer):
         )
         return output_df

+
+
+    def to_sklearn(self) -> Any:
+        """Get sklearn.neighbors.NearestCentroid object.
+        """
+        if self._sklearn_object is None:
+            self._sklearn_object = self._create_sklearn_object()
+        return self._sklearn_object
+
+    def to_xgboost(self) -> Any:
+        raise exceptions.SnowflakeMLException(
+            error_code=error_codes.METHOD_NOT_ALLOWED,
+            original_exception=AttributeError(
+                modeling_error_messages.UNSUPPORTED_MODEL_CONVERSION.format(
+                    "to_xgboost()",
+                    "to_sklearn()"
+                )
+            ),
+        )
+
+    def to_lightgbm(self) -> Any:
+        raise exceptions.SnowflakeMLException(
+            error_code=error_codes.METHOD_NOT_ALLOWED,
+            original_exception=AttributeError(
+                modeling_error_messages.UNSUPPORTED_MODEL_CONVERSION.format(
+                    "to_lightgbm()",
+                    "to_sklearn()"
+                )
+            ),
+        )

-    def
+    def _get_dependencies(self) -> List[str]:
+        return self._deps
+
+
+    def _generate_model_signatures(self, dataset: Union[DataFrame, pd.DataFrame]) -> None:
         self._model_signature_dict = dict()

         PROB_FUNCTIONS = ["predict_log_proba", "predict_proba", "decision_function"]

-        inputs = list(_infer_signature(dataset[self.input_cols], "input"))
+        inputs = list(_infer_signature(dataset[self.input_cols], "input", use_snowflake_identifiers=True))
         outputs: List[BaseFeatureSpec] = []
         if hasattr(self, "predict"):
             # keep mypy happy
-            assert self._sklearn_object is not None and hasattr(self._sklearn_object, "_estimator_type")
+            assert self._sklearn_object is not None and hasattr(self._sklearn_object, "_estimator_type")
             # For classifier, the type of predict is the same as the type of label
-            if self._sklearn_object._estimator_type ==
-
+            if self._sklearn_object._estimator_type == "classifier":
+                # label columns is the desired type for output
                 outputs = list(_infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True))
                 # rename the output columns
                 outputs = list(model_signature_utils.rename_features(outputs, self.output_cols))
-                self._model_signature_dict["predict"] = ModelSignature(
-
-
+                self._model_signature_dict["predict"] = ModelSignature(
+                    inputs, ([] if self._drop_input_cols else inputs) + outputs
+                )
             # For mixture models that use the density mixin, `predict` returns the argmax of the log prob.
             # For outlier models, returns -1 for outliers and 1 for inliers.
-            # Clusterer returns int64 cluster labels.
+            # Clusterer returns int64 cluster labels.
             elif self._sklearn_object._estimator_type in ["DensityEstimator", "clusterer", "outlier_detector"]:
                 outputs = [FeatureSpec(dtype=DataType.INT64, name=c) for c in self.output_cols]
-                self._model_signature_dict["predict"] = ModelSignature(
-
-
-
+                self._model_signature_dict["predict"] = ModelSignature(
+                    inputs, ([] if self._drop_input_cols else inputs) + outputs
+                )
+
             # For regressor, the type of predict is float64
-            elif self._sklearn_object._estimator_type ==
+            elif self._sklearn_object._estimator_type == "regressor":
                 outputs = [FeatureSpec(dtype=DataType.DOUBLE, name=c) for c in self.output_cols]
-                self._model_signature_dict["predict"] = ModelSignature(
-
-
-
+                self._model_signature_dict["predict"] = ModelSignature(
+                    inputs, ([] if self._drop_input_cols else inputs) + outputs
+                )
+
         for prob_func in PROB_FUNCTIONS:
             if hasattr(self, prob_func):
                 output_cols_prefix: str = f"{prob_func}_"
                 output_column_names = self._get_output_column_names(output_cols_prefix)
                 outputs = [FeatureSpec(dtype=DataType.DOUBLE, name=c) for c in output_column_names]
-                self._model_signature_dict[prob_func] = ModelSignature(
-
-
+                self._model_signature_dict[prob_func] = ModelSignature(
+                    inputs, ([] if self._drop_input_cols else inputs) + outputs
+                )

         # Output signature names may still need to be renamed, since they were not created with `_infer_signature`.
         items = list(self._model_signature_dict.items())
@@ -975,10 +1079,10 @@ class NearestCentroid(BaseTransformer):
         """Returns model signature of current class.

         Raises:
-
+            SnowflakeMLException: If estimator is not fitted, then model signature cannot be inferred

         Returns:
-            Dict
+            Dict with each method and its input output signature
         """
         if self._model_signature_dict is None:
             raise exceptions.SnowflakeMLException(
@@ -986,35 +1090,3 @@ class NearestCentroid(BaseTransformer):
                 original_exception=RuntimeError("Estimator not fitted before accessing property model_signatures!"),
             )
         return self._model_signature_dict
-
-    def to_sklearn(self) -> Any:
-        """Get sklearn.neighbors.NearestCentroid object.
-        """
-        if self._sklearn_object is None:
-            self._sklearn_object = self._create_sklearn_object()
-        return self._sklearn_object
-
-    def to_xgboost(self) -> Any:
-        raise exceptions.SnowflakeMLException(
-            error_code=error_codes.METHOD_NOT_ALLOWED,
-            original_exception=AttributeError(
-                modeling_error_messages.UNSUPPORTED_MODEL_CONVERSION.format(
-                    "to_xgboost()",
-                    "to_sklearn()"
-                )
-            ),
-        )
-
-    def to_lightgbm(self) -> Any:
-        raise exceptions.SnowflakeMLException(
-            error_code=error_codes.METHOD_NOT_ALLOWED,
-            original_exception=AttributeError(
-                modeling_error_messages.UNSUPPORTED_MODEL_CONVERSION.format(
-                    "to_lightgbm()",
-                    "to_sklearn()"
-                )
-            ),
-        )
-
-    def _get_dependencies(self) -> List[str]:
-        return self._deps