snowflake-ml-python 1.3.1__py3-none-any.whl → 1.4.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- snowflake/ml/_internal/env_utils.py +11 -1
- snowflake/ml/_internal/human_readable_id/adjectives.txt +128 -0
- snowflake/ml/_internal/human_readable_id/animals.txt +128 -0
- snowflake/ml/_internal/human_readable_id/hrid_generator.py +40 -0
- snowflake/ml/_internal/human_readable_id/hrid_generator_base.py +135 -0
- snowflake/ml/_internal/utils/formatting.py +1 -1
- snowflake/ml/_internal/utils/identifier.py +3 -1
- snowflake/ml/_internal/utils/sql_identifier.py +2 -6
- snowflake/ml/feature_store/feature_store.py +166 -184
- snowflake/ml/feature_store/feature_view.py +12 -24
- snowflake/ml/fileset/sfcfs.py +56 -50
- snowflake/ml/fileset/stage_fs.py +48 -13
- snowflake/ml/model/_client/model/model_version_impl.py +6 -49
- snowflake/ml/model/_client/ops/model_ops.py +78 -29
- snowflake/ml/model/_client/sql/model.py +23 -2
- snowflake/ml/model/_client/sql/model_version.py +22 -1
- snowflake/ml/model/_deploy_client/image_builds/server_image_builder.py +1 -3
- snowflake/ml/model/_deploy_client/snowservice/deploy.py +5 -2
- snowflake/ml/model/_model_composer/model_composer.py +7 -5
- snowflake/ml/model/_model_composer/model_manifest/model_manifest.py +19 -54
- snowflake/ml/model/_model_composer/model_manifest/model_manifest_schema.py +8 -1
- snowflake/ml/model/_model_composer/model_method/infer_table_function.py_template +1 -1
- snowflake/ml/model/_model_composer/model_method/model_method.py +6 -10
- snowflake/ml/model/_packager/model_handlers/catboost.py +206 -0
- snowflake/ml/model/_packager/model_handlers/lightgbm.py +218 -0
- snowflake/ml/model/_packager/model_handlers/sklearn.py +3 -0
- snowflake/ml/model/_packager/model_handlers/snowmlmodel.py +13 -1
- snowflake/ml/model/_packager/model_handlers/xgboost.py +1 -1
- snowflake/ml/model/_packager/model_meta/_core_requirements.py +1 -1
- snowflake/ml/model/_packager/model_meta/model_meta.py +36 -6
- snowflake/ml/model/_packager/model_meta/model_meta_schema.py +20 -1
- snowflake/ml/model/_packager/model_meta_migrator/migrator_plans.py +3 -1
- snowflake/ml/model/_packager/model_packager.py +2 -2
- snowflake/ml/model/{_model_composer/model_runtime/_runtime_requirements.py → _packager/model_runtime/_snowml_inference_alternative_requirements.py} +1 -1
- snowflake/ml/model/_packager/model_runtime/model_runtime.py +137 -0
- snowflake/ml/model/custom_model.py +3 -1
- snowflake/ml/model/type_hints.py +21 -2
- snowflake/ml/modeling/_internal/estimator_utils.py +16 -11
- snowflake/ml/modeling/_internal/local_implementations/pandas_handlers.py +4 -1
- snowflake/ml/modeling/_internal/model_specifications.py +3 -1
- snowflake/ml/modeling/_internal/snowpark_implementations/distributed_hpo_trainer.py +545 -0
- snowflake/ml/modeling/_internal/snowpark_implementations/snowpark_handlers.py +8 -5
- snowflake/ml/modeling/calibration/calibrated_classifier_cv.py +195 -123
- snowflake/ml/modeling/cluster/affinity_propagation.py +195 -123
- snowflake/ml/modeling/cluster/agglomerative_clustering.py +195 -123
- snowflake/ml/modeling/cluster/birch.py +195 -123
- snowflake/ml/modeling/cluster/bisecting_k_means.py +195 -123
- snowflake/ml/modeling/cluster/dbscan.py +195 -123
- snowflake/ml/modeling/cluster/feature_agglomeration.py +195 -123
- snowflake/ml/modeling/cluster/k_means.py +195 -123
- snowflake/ml/modeling/cluster/mean_shift.py +195 -123
- snowflake/ml/modeling/cluster/mini_batch_k_means.py +195 -123
- snowflake/ml/modeling/cluster/optics.py +195 -123
- snowflake/ml/modeling/cluster/spectral_biclustering.py +195 -123
- snowflake/ml/modeling/cluster/spectral_clustering.py +195 -123
- snowflake/ml/modeling/cluster/spectral_coclustering.py +195 -123
- snowflake/ml/modeling/compose/column_transformer.py +195 -123
- snowflake/ml/modeling/compose/transformed_target_regressor.py +195 -123
- snowflake/ml/modeling/covariance/elliptic_envelope.py +195 -123
- snowflake/ml/modeling/covariance/empirical_covariance.py +195 -123
- snowflake/ml/modeling/covariance/graphical_lasso.py +195 -123
- snowflake/ml/modeling/covariance/graphical_lasso_cv.py +195 -123
- snowflake/ml/modeling/covariance/ledoit_wolf.py +195 -123
- snowflake/ml/modeling/covariance/min_cov_det.py +195 -123
- snowflake/ml/modeling/covariance/oas.py +195 -123
- snowflake/ml/modeling/covariance/shrunk_covariance.py +195 -123
- snowflake/ml/modeling/decomposition/dictionary_learning.py +195 -123
- snowflake/ml/modeling/decomposition/factor_analysis.py +195 -123
- snowflake/ml/modeling/decomposition/fast_ica.py +195 -123
- snowflake/ml/modeling/decomposition/incremental_pca.py +195 -123
- snowflake/ml/modeling/decomposition/kernel_pca.py +195 -123
- snowflake/ml/modeling/decomposition/mini_batch_dictionary_learning.py +195 -123
- snowflake/ml/modeling/decomposition/mini_batch_sparse_pca.py +195 -123
- snowflake/ml/modeling/decomposition/pca.py +195 -123
- snowflake/ml/modeling/decomposition/sparse_pca.py +195 -123
- snowflake/ml/modeling/decomposition/truncated_svd.py +195 -123
- snowflake/ml/modeling/discriminant_analysis/linear_discriminant_analysis.py +195 -123
- snowflake/ml/modeling/discriminant_analysis/quadratic_discriminant_analysis.py +195 -123
- snowflake/ml/modeling/ensemble/ada_boost_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/ada_boost_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/bagging_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/bagging_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/extra_trees_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/extra_trees_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/gradient_boosting_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/gradient_boosting_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/hist_gradient_boosting_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/hist_gradient_boosting_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/isolation_forest.py +195 -123
- snowflake/ml/modeling/ensemble/random_forest_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/random_forest_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/stacking_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/voting_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/voting_regressor.py +195 -123
- snowflake/ml/modeling/feature_selection/generic_univariate_select.py +195 -123
- snowflake/ml/modeling/feature_selection/select_fdr.py +195 -123
- snowflake/ml/modeling/feature_selection/select_fpr.py +195 -123
- snowflake/ml/modeling/feature_selection/select_fwe.py +195 -123
- snowflake/ml/modeling/feature_selection/select_k_best.py +195 -123
- snowflake/ml/modeling/feature_selection/select_percentile.py +195 -123
- snowflake/ml/modeling/feature_selection/sequential_feature_selector.py +195 -123
- snowflake/ml/modeling/feature_selection/variance_threshold.py +195 -123
- snowflake/ml/modeling/framework/_utils.py +8 -1
- snowflake/ml/modeling/framework/base.py +24 -6
- snowflake/ml/modeling/gaussian_process/gaussian_process_classifier.py +195 -123
- snowflake/ml/modeling/gaussian_process/gaussian_process_regressor.py +195 -123
- snowflake/ml/modeling/impute/iterative_imputer.py +195 -123
- snowflake/ml/modeling/impute/knn_imputer.py +195 -123
- snowflake/ml/modeling/impute/missing_indicator.py +195 -123
- snowflake/ml/modeling/impute/simple_imputer.py +4 -15
- snowflake/ml/modeling/kernel_approximation/additive_chi2_sampler.py +195 -123
- snowflake/ml/modeling/kernel_approximation/nystroem.py +195 -123
- snowflake/ml/modeling/kernel_approximation/polynomial_count_sketch.py +195 -123
- snowflake/ml/modeling/kernel_approximation/rbf_sampler.py +195 -123
- snowflake/ml/modeling/kernel_approximation/skewed_chi2_sampler.py +195 -123
- snowflake/ml/modeling/kernel_ridge/kernel_ridge.py +195 -123
- snowflake/ml/modeling/lightgbm/lgbm_classifier.py +198 -125
- snowflake/ml/modeling/lightgbm/lgbm_regressor.py +198 -125
- snowflake/ml/modeling/linear_model/ard_regression.py +195 -123
- snowflake/ml/modeling/linear_model/bayesian_ridge.py +195 -123
- snowflake/ml/modeling/linear_model/elastic_net.py +195 -123
- snowflake/ml/modeling/linear_model/elastic_net_cv.py +195 -123
- snowflake/ml/modeling/linear_model/gamma_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/huber_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/lars.py +195 -123
- snowflake/ml/modeling/linear_model/lars_cv.py +195 -123
- snowflake/ml/modeling/linear_model/lasso.py +195 -123
- snowflake/ml/modeling/linear_model/lasso_cv.py +195 -123
- snowflake/ml/modeling/linear_model/lasso_lars.py +195 -123
- snowflake/ml/modeling/linear_model/lasso_lars_cv.py +195 -123
- snowflake/ml/modeling/linear_model/lasso_lars_ic.py +195 -123
- snowflake/ml/modeling/linear_model/linear_regression.py +195 -123
- snowflake/ml/modeling/linear_model/logistic_regression.py +195 -123
- snowflake/ml/modeling/linear_model/logistic_regression_cv.py +195 -123
- snowflake/ml/modeling/linear_model/multi_task_elastic_net.py +195 -123
- snowflake/ml/modeling/linear_model/multi_task_elastic_net_cv.py +195 -123
- snowflake/ml/modeling/linear_model/multi_task_lasso.py +195 -123
- snowflake/ml/modeling/linear_model/multi_task_lasso_cv.py +195 -123
- snowflake/ml/modeling/linear_model/orthogonal_matching_pursuit.py +195 -123
- snowflake/ml/modeling/linear_model/passive_aggressive_classifier.py +195 -123
- snowflake/ml/modeling/linear_model/passive_aggressive_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/perceptron.py +195 -123
- snowflake/ml/modeling/linear_model/poisson_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/ransac_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/ridge.py +195 -123
- snowflake/ml/modeling/linear_model/ridge_classifier.py +195 -123
- snowflake/ml/modeling/linear_model/ridge_classifier_cv.py +195 -123
- snowflake/ml/modeling/linear_model/ridge_cv.py +195 -123
- snowflake/ml/modeling/linear_model/sgd_classifier.py +195 -123
- snowflake/ml/modeling/linear_model/sgd_one_class_svm.py +195 -123
- snowflake/ml/modeling/linear_model/sgd_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/theil_sen_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/tweedie_regressor.py +195 -123
- snowflake/ml/modeling/manifold/isomap.py +195 -123
- snowflake/ml/modeling/manifold/mds.py +195 -123
- snowflake/ml/modeling/manifold/spectral_embedding.py +195 -123
- snowflake/ml/modeling/manifold/tsne.py +195 -123
- snowflake/ml/modeling/mixture/bayesian_gaussian_mixture.py +195 -123
- snowflake/ml/modeling/mixture/gaussian_mixture.py +195 -123
- snowflake/ml/modeling/model_selection/grid_search_cv.py +42 -18
- snowflake/ml/modeling/model_selection/randomized_search_cv.py +42 -18
- snowflake/ml/modeling/multiclass/one_vs_one_classifier.py +195 -123
- snowflake/ml/modeling/multiclass/one_vs_rest_classifier.py +195 -123
- snowflake/ml/modeling/multiclass/output_code_classifier.py +195 -123
- snowflake/ml/modeling/naive_bayes/bernoulli_nb.py +195 -123
- snowflake/ml/modeling/naive_bayes/categorical_nb.py +195 -123
- snowflake/ml/modeling/naive_bayes/complement_nb.py +195 -123
- snowflake/ml/modeling/naive_bayes/gaussian_nb.py +195 -123
- snowflake/ml/modeling/naive_bayes/multinomial_nb.py +195 -123
- snowflake/ml/modeling/neighbors/k_neighbors_classifier.py +195 -123
- snowflake/ml/modeling/neighbors/k_neighbors_regressor.py +195 -123
- snowflake/ml/modeling/neighbors/kernel_density.py +195 -123
- snowflake/ml/modeling/neighbors/local_outlier_factor.py +195 -123
- snowflake/ml/modeling/neighbors/nearest_centroid.py +195 -123
- snowflake/ml/modeling/neighbors/nearest_neighbors.py +195 -123
- snowflake/ml/modeling/neighbors/neighborhood_components_analysis.py +195 -123
- snowflake/ml/modeling/neighbors/radius_neighbors_classifier.py +195 -123
- snowflake/ml/modeling/neighbors/radius_neighbors_regressor.py +195 -123
- snowflake/ml/modeling/neural_network/bernoulli_rbm.py +195 -123
- snowflake/ml/modeling/neural_network/mlp_classifier.py +195 -123
- snowflake/ml/modeling/neural_network/mlp_regressor.py +195 -123
- snowflake/ml/modeling/pipeline/pipeline.py +4 -4
- snowflake/ml/modeling/preprocessing/binarizer.py +1 -5
- snowflake/ml/modeling/preprocessing/k_bins_discretizer.py +1 -5
- snowflake/ml/modeling/preprocessing/label_encoder.py +1 -5
- snowflake/ml/modeling/preprocessing/max_abs_scaler.py +1 -5
- snowflake/ml/modeling/preprocessing/min_max_scaler.py +10 -12
- snowflake/ml/modeling/preprocessing/normalizer.py +1 -5
- snowflake/ml/modeling/preprocessing/one_hot_encoder.py +1 -5
- snowflake/ml/modeling/preprocessing/ordinal_encoder.py +1 -5
- snowflake/ml/modeling/preprocessing/polynomial_features.py +195 -123
- snowflake/ml/modeling/preprocessing/robust_scaler.py +1 -5
- snowflake/ml/modeling/preprocessing/standard_scaler.py +11 -11
- snowflake/ml/modeling/semi_supervised/label_propagation.py +195 -123
- snowflake/ml/modeling/semi_supervised/label_spreading.py +195 -123
- snowflake/ml/modeling/svm/linear_svc.py +195 -123
- snowflake/ml/modeling/svm/linear_svr.py +195 -123
- snowflake/ml/modeling/svm/nu_svc.py +195 -123
- snowflake/ml/modeling/svm/nu_svr.py +195 -123
- snowflake/ml/modeling/svm/svc.py +195 -123
- snowflake/ml/modeling/svm/svr.py +195 -123
- snowflake/ml/modeling/tree/decision_tree_classifier.py +195 -123
- snowflake/ml/modeling/tree/decision_tree_regressor.py +195 -123
- snowflake/ml/modeling/tree/extra_tree_classifier.py +195 -123
- snowflake/ml/modeling/tree/extra_tree_regressor.py +195 -123
- snowflake/ml/modeling/xgboost/xgb_classifier.py +195 -123
- snowflake/ml/modeling/xgboost/xgb_regressor.py +195 -123
- snowflake/ml/modeling/xgboost/xgbrf_classifier.py +195 -123
- snowflake/ml/modeling/xgboost/xgbrf_regressor.py +195 -123
- snowflake/ml/registry/_manager/model_manager.py +5 -1
- snowflake/ml/registry/model_registry.py +99 -26
- snowflake/ml/registry/registry.py +3 -2
- snowflake/ml/version.py +1 -1
- {snowflake_ml_python-1.3.1.dist-info → snowflake_ml_python-1.4.1.dist-info}/METADATA +94 -55
- {snowflake_ml_python-1.3.1.dist-info → snowflake_ml_python-1.4.1.dist-info}/RECORD +218 -212
- snowflake/ml/model/_model_composer/model_runtime/model_runtime.py +0 -97
- {snowflake_ml_python-1.3.1.dist-info → snowflake_ml_python-1.4.1.dist-info}/LICENSE.txt +0 -0
- {snowflake_ml_python-1.3.1.dist-info → snowflake_ml_python-1.4.1.dist-info}/WHEEL +0 -0
- {snowflake_ml_python-1.3.1.dist-info → snowflake_ml_python-1.4.1.dist-info}/top_level.txt +0 -0
@@ -33,6 +33,15 @@ from snowflake.ml.modeling._internal.transformer_protocols import (
     BatchInferenceKwargsTypedDict,
     ScoreKwargsTypedDict
 )
+from snowflake.ml.model._signatures import utils as model_signature_utils
+from snowflake.ml.model.model_signature import (
+    BaseFeatureSpec,
+    DataType,
+    FeatureSpec,
+    ModelSignature,
+    _infer_signature,
+    _rename_signature_with_snowflake_identifiers,
+)
 
 from snowflake.ml.modeling._internal.model_transformer_builder import ModelTransformerBuilder
 
@@ -43,16 +52,6 @@ from snowflake.ml.modeling._internal.estimator_utils import (
     validate_sklearn_args,
 )
 
-from snowflake.ml.model.model_signature import (
-    DataType,
-    FeatureSpec,
-    ModelSignature,
-    _infer_signature,
-    _rename_signature_with_snowflake_identifiers,
-    BaseFeatureSpec,
-)
-from snowflake.ml.model._signatures import utils as model_signature_utils
-
 _PROJECT = "ModelDevelopment"
 # Derive subproject from module name by removing "sklearn"
 # and converting module name from underscore to CamelCase
@@ -322,12 +321,7 @@ class TSNE(BaseTransformer):
         )
         return selected_cols
 
-    @telemetry.send_api_usage_telemetry(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
-    def fit(self, dataset: Union[DataFrame, pd.DataFrame]) -> "TSNE":
+    def _fit(self, dataset: Union[DataFrame, pd.DataFrame]) -> "TSNE":
         """Fit X into an embedded space
         For more details on this function, see [sklearn.manifold.TSNE.fit]
         (https://scikit-learn.org/stable/modules/generated/sklearn.manifold.TSNE.html#sklearn.manifold.TSNE.fit)
@@ -354,12 +348,14 @@ class TSNE(BaseTransformer):
 
         self._snowpark_cols = dataset.select(self.input_cols).columns
 
-
+        # If we are already in a stored procedure, no need to kick off another one.
         if SNOWML_SPROC_ENV in os.environ:
             statement_params = telemetry.get_function_usage_statement_params(
                 project=_PROJECT,
                 subproject=_SUBPROJECT,
-                function_name=telemetry.get_statement_params_full_func_name(inspect.currentframe(), TSNE.__class__.__name__),
+                function_name=telemetry.get_statement_params_full_func_name(
+                    inspect.currentframe(), TSNE.__class__.__name__
+                ),
                 api_calls=[Session.call],
                 custom_tags=dict([("autogen", True)]) if self._autogenerated else None,
             )
@@ -380,7 +376,7 @@ class TSNE(BaseTransformer):
         )
         self._sklearn_object = model_trainer.train()
         self._is_fitted = True
-        self._get_model_signatures(dataset)
+        self._generate_model_signatures(dataset)
         return self
 
     def _batch_inference_validate_snowpark(
@@ -454,7 +450,9 @@ class TSNE(BaseTransformer):
             # when it is classifier, infer the datatype from label columns
             if expected_type_inferred == "" and 'predict' in self.model_signatures:
                 # Batch inference takes a single expected output column type. Use the first columns type for now.
-                label_cols_signatures = [row for row in self.model_signatures['predict'].outputs if row.name in self.output_cols]
+                label_cols_signatures = [
+                    row for row in self.model_signatures['predict'].outputs if row.name in self.output_cols
+                ]
                 if len(label_cols_signatures) == 0:
                     error_str = f"Output columns {self.output_cols} do not match model signatures {self.model_signatures['predict'].outputs}."
                     raise exceptions.SnowflakeMLException(
@@ -462,25 +460,22 @@ class TSNE(BaseTransformer):
                         original_exception=ValueError(error_str),
                     )
 
-                expected_type_inferred = convert_sp_to_sf_type(
-                    label_cols_signatures[0].as_snowpark_type()
-                )
+                expected_type_inferred = convert_sp_to_sf_type(label_cols_signatures[0].as_snowpark_type())
 
             self._deps = self._batch_inference_validate_snowpark(dataset=dataset, inference_method=inference_method)
-            assert isinstance(dataset._session, Session) # mypy does not recognize the check in _batch_inference_validate_snowpark()
+            assert isinstance(
+                dataset._session, Session
+            ) # mypy does not recognize the check in _batch_inference_validate_snowpark()
 
             transform_kwargs = dict(
-                session = dataset._session,
-                dependencies = self._deps,
-                drop_input_cols = self._drop_input_cols,
-                expected_output_cols_type = expected_type_inferred,
+                session=dataset._session,
+                dependencies=self._deps,
+                drop_input_cols=self._drop_input_cols,
+                expected_output_cols_type=expected_type_inferred,
             )
 
         elif isinstance(dataset, pd.DataFrame):
-            transform_kwargs = dict(
-                snowpark_input_cols = self._snowpark_cols,
-                drop_input_cols = self._drop_input_cols
-            )
+            transform_kwargs = dict(snowpark_input_cols=self._snowpark_cols, drop_input_cols=self._drop_input_cols)
 
         transform_handlers = ModelTransformerBuilder.build(
             dataset=dataset,
@@ -520,7 +515,7 @@ class TSNE(BaseTransformer):
             Transformed dataset.
         """
         super()._check_dataset_type(dataset)
-        inference_method="transform"
+        inference_method = "transform"
 
         # This dictionary contains optional kwargs for batch inference. These kwargs
         # are specific to the type of dataset used.
@@ -557,17 +552,14 @@ class TSNE(BaseTransformer):
             assert isinstance(dataset._session, Session) # mypy does not recognize the check in _batch_inference_validate_snowpark()
 
             transform_kwargs = dict(
-                session = dataset._session,
-                dependencies = self._deps,
-                drop_input_cols = self._drop_input_cols,
-                expected_output_cols_type = expected_dtype,
+                session=dataset._session,
+                dependencies=self._deps,
+                drop_input_cols=self._drop_input_cols,
+                expected_output_cols_type=expected_dtype,
             )
 
         elif isinstance(dataset, pd.DataFrame):
-            transform_kwargs = dict(
-                snowpark_input_cols = self._snowpark_cols,
-                drop_input_cols = self._drop_input_cols
-            )
+            transform_kwargs = dict(snowpark_input_cols=self._snowpark_cols, drop_input_cols=self._drop_input_cols)
 
         transform_handlers = ModelTransformerBuilder.build(
             dataset=dataset,
@@ -586,7 +578,11 @@ class TSNE(BaseTransformer):
         return output_df
 
     @available_if(original_estimator_has_callable("fit_predict")) # type: ignore[misc]
-    def fit_predict(self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "fit_predict_",) -> Union[DataFrame, pd.DataFrame]:
+    def fit_predict(
+        self,
+        dataset: Union[DataFrame, pd.DataFrame],
+        output_cols_prefix: str = "fit_predict_",
+    ) -> Union[DataFrame, pd.DataFrame]:
         """ Method not supported for this class.
 
 
@@ -611,7 +607,9 @@ class TSNE(BaseTransformer):
         )
         output_result, fitted_estimator = model_trainer.train_fit_predict(
             drop_input_cols=self._drop_input_cols,
-            expected_output_cols_list=self.output_cols if self.output_cols else self._get_output_column_names(output_cols_prefix),
+            expected_output_cols_list=(
+                self.output_cols if self.output_cols else self._get_output_column_names(output_cols_prefix)
+            ),
         )
         self._sklearn_object = fitted_estimator
         self._is_fitted = True
@@ -628,6 +626,62 @@ class TSNE(BaseTransformer):
         assert self._sklearn_object is not None
         return self._sklearn_object.embedding_
 
+
+    def _get_output_column_names(self, output_cols_prefix: str, output_cols: Optional[List[str]] = None) -> List[str]:
+        """ Returns the list of output columns for predict_proba(), decision_function(), etc.. functions.
+        Returns a list with output_cols_prefix as the only element if the estimator is not a classifier.
+        """
+        output_cols_prefix = identifier.resolve_identifier(output_cols_prefix)
+        # The following condition is introduced for kneighbors methods, and not used in other methods
+        if output_cols:
+            output_cols = [
+                identifier.concat_names([output_cols_prefix, identifier.resolve_identifier(c)])
+                for c in output_cols
+            ]
+        elif getattr(self._sklearn_object, "classes_", None) is None:
+            output_cols = [output_cols_prefix]
+        elif self._sklearn_object is not None:
+            classes = self._sklearn_object.classes_
+            if isinstance(classes, numpy.ndarray):
+                output_cols = [f'{output_cols_prefix}{str(c)}' for c in classes.tolist()]
+            elif isinstance(classes, list) and len(classes) > 0 and isinstance(classes[0], numpy.ndarray):
+                # If the estimator is a multioutput estimator, classes_ will be a list of ndarrays.
+                output_cols = []
+                for i, cl in enumerate(classes):
+                    # For binary classification, there is only one output column for each class
+                    # ndarray as the two classes are complementary.
+                    if len(cl) == 2:
+                        output_cols.append(f'{output_cols_prefix}{i}_{cl[0]}')
+                    else:
+                        output_cols.extend([
+                            f'{output_cols_prefix}{i}_{c}' for c in cl.tolist()
+                        ])
+        else:
+            output_cols = []
+
+        # Make sure column names are valid snowflake identifiers.
+        assert output_cols is not None # Make MyPy happy
+        rv = [identifier.rename_to_valid_snowflake_identifier(c) for c in output_cols]
+
+        return rv
+
+    def _align_expected_output_names(
+        self, method: str, dataset: DataFrame, expected_output_cols_list: List[str], output_cols_prefix: str
+    ) -> List[str]:
+        # in case the inferred output column names dimension is different
+        # we use one line of snowpark dataframe and put it into sklearn estimator using pandas
+        output_df_pd = getattr(self, method)(dataset.limit(1).to_pandas(), output_cols_prefix)
+        output_df_columns = list(output_df_pd.columns)
+        output_df_columns_set: Set[str] = set(output_df_columns) - set(dataset.columns)
+        if self.sample_weight_col:
+            output_df_columns_set -= set(self.sample_weight_col)
+        # if the dimension of inferred output column names is correct; use it
+        if len(expected_output_cols_list) == len(output_df_columns_set):
+            return expected_output_cols_list
+        # otherwise, use the sklearn estimator's output
+        else:
+            return sorted(list(output_df_columns_set), key=lambda x: output_df_columns.index(x))
+
     @available_if(original_estimator_has_callable("predict_proba")) # type: ignore[misc]
     @telemetry.send_api_usage_telemetry(
         project=_PROJECT,
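Note: the `_get_output_column_names` helper added in the hunk above drives the per-class output column naming used by `predict_proba`, `predict_log_proba` and `decision_function` across all of the regenerated estimators. A minimal standalone sketch of that naming rule, with the Snowflake identifier resolution stripped out and purely illustrative class values (not taken from the package):

    import numpy as np

    def output_column_names(prefix, classes=None):
        # Simplified mirror of _get_output_column_names:
        # no classes_  -> a single column named after the prefix;
        # 1-D classes_ -> one column per class value;
        # list of arrays (multioutput) -> prefix + output index + class value.
        if classes is None:
            return [prefix]
        if isinstance(classes, np.ndarray):
            return [f"{prefix}{c}" for c in classes.tolist()]
        cols = []
        for i, cl in enumerate(classes):
            if len(cl) == 2:
                cols.append(f"{prefix}{i}_{cl[0]}")
            else:
                cols.extend(f"{prefix}{i}_{c}" for c in cl.tolist())
        return cols

    print(output_column_names("predict_proba_", np.array([0, 1])))
    # ['predict_proba_0', 'predict_proba_1']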
@@ -658,24 +712,28 @@ class TSNE(BaseTransformer):
         # are specific to the type of dataset used.
         transform_kwargs: BatchInferenceKwargsTypedDict = dict()
 
+        expected_output_cols = self._get_output_column_names(output_cols_prefix)
+
         if isinstance(dataset, DataFrame):
             self._deps = self._batch_inference_validate_snowpark(
                 dataset=dataset,
                 inference_method=inference_method,
             )
-            assert isinstance(dataset._session, Session) # mypy does not recognize the check in _batch_inference_validate_snowpark()
+            assert isinstance(
+                dataset._session, Session
+            ) # mypy does not recognize the check in _batch_inference_validate_snowpark()
             transform_kwargs = dict(
                 session=dataset._session,
                 dependencies=self._deps,
-                drop_input_cols = self._drop_input_cols,
+                drop_input_cols=self._drop_input_cols,
                 expected_output_cols_type="float",
             )
+            expected_output_cols = self._align_expected_output_names(
+                inference_method, dataset, expected_output_cols, output_cols_prefix
+            )
 
         elif isinstance(dataset, pd.DataFrame):
-            transform_kwargs = dict(
-                snowpark_input_cols = self._snowpark_cols,
-                drop_input_cols = self._drop_input_cols
-            )
+            transform_kwargs = dict(snowpark_input_cols=self._snowpark_cols, drop_input_cols=self._drop_input_cols)
 
         transform_handlers = ModelTransformerBuilder.build(
             dataset=dataset,
@@ -687,7 +745,7 @@ class TSNE(BaseTransformer):
         output_df: DATAFRAME_TYPE = transform_handlers.batch_inference(
             inference_method=inference_method,
             input_cols=self.input_cols,
-            expected_output_cols=self._get_output_column_names(output_cols_prefix),
+            expected_output_cols=expected_output_cols,
             **transform_kwargs
         )
         return output_df
@@ -717,7 +775,8 @@ class TSNE(BaseTransformer):
             Output dataset with log probability of the sample for each class in the model.
         """
         super()._check_dataset_type(dataset)
-        inference_method="predict_log_proba"
+        inference_method = "predict_log_proba"
+        expected_output_cols = self._get_output_column_names(output_cols_prefix)
 
         # This dictionary contains optional kwargs for batch inference. These kwargs
         # are specific to the type of dataset used.
@@ -728,18 +787,20 @@ class TSNE(BaseTransformer):
                 dataset=dataset,
                 inference_method=inference_method,
             )
-            assert isinstance(dataset._session, Session) # mypy does not recognize the check in _batch_inference_validate_snowpark()
+            assert isinstance(
+                dataset._session, Session
+            ) # mypy does not recognize the check in _batch_inference_validate_snowpark()
             transform_kwargs = dict(
                 session=dataset._session,
                 dependencies=self._deps,
-                drop_input_cols = self._drop_input_cols,
+                drop_input_cols=self._drop_input_cols,
                 expected_output_cols_type="float",
             )
+            expected_output_cols = self._align_expected_output_names(
+                inference_method, dataset, expected_output_cols, output_cols_prefix
+            )
         elif isinstance(dataset, pd.DataFrame):
-            transform_kwargs = dict(
-                snowpark_input_cols = self._snowpark_cols,
-                drop_input_cols = self._drop_input_cols
-            )
+            transform_kwargs = dict(snowpark_input_cols=self._snowpark_cols, drop_input_cols=self._drop_input_cols)
 
         transform_handlers = ModelTransformerBuilder.build(
             dataset=dataset,
@@ -752,7 +813,7 @@ class TSNE(BaseTransformer):
         output_df: DATAFRAME_TYPE = transform_handlers.batch_inference(
             inference_method=inference_method,
             input_cols=self.input_cols,
-            expected_output_cols=self._get_output_column_names(output_cols_prefix),
+            expected_output_cols=expected_output_cols,
             **transform_kwargs
         )
         return output_df
@@ -778,30 +839,34 @@ class TSNE(BaseTransformer):
             Output dataset with results of the decision function for the samples in input dataset.
         """
         super()._check_dataset_type(dataset)
-        inference_method="decision_function"
+        inference_method = "decision_function"
 
         # This dictionary contains optional kwargs for batch inference. These kwargs
         # are specific to the type of dataset used.
         transform_kwargs: BatchInferenceKwargsTypedDict = dict()
 
+        expected_output_cols = self._get_output_column_names(output_cols_prefix)
+
         if isinstance(dataset, DataFrame):
             self._deps = self._batch_inference_validate_snowpark(
                 dataset=dataset,
                 inference_method=inference_method,
             )
-            assert isinstance(dataset._session, Session) # mypy does not recognize the check in _batch_inference_validate_snowpark()
+            assert isinstance(
+                dataset._session, Session
+            ) # mypy does not recognize the check in _batch_inference_validate_snowpark()
             transform_kwargs = dict(
                 session=dataset._session,
                 dependencies=self._deps,
-                drop_input_cols = self._drop_input_cols,
+                drop_input_cols=self._drop_input_cols,
                 expected_output_cols_type="float",
             )
+            expected_output_cols = self._align_expected_output_names(
+                inference_method, dataset, expected_output_cols, output_cols_prefix
+            )
 
         elif isinstance(dataset, pd.DataFrame):
-            transform_kwargs = dict(
-                snowpark_input_cols = self._snowpark_cols,
-                drop_input_cols = self._drop_input_cols
-            )
+            transform_kwargs = dict(snowpark_input_cols=self._snowpark_cols, drop_input_cols=self._drop_input_cols)
 
         transform_handlers = ModelTransformerBuilder.build(
             dataset=dataset,
@@ -814,7 +879,7 @@ class TSNE(BaseTransformer):
         output_df: DATAFRAME_TYPE = transform_handlers.batch_inference(
             inference_method=inference_method,
             input_cols=self.input_cols,
-            expected_output_cols=self._get_output_column_names(output_cols_prefix),
+            expected_output_cols=expected_output_cols,
             **transform_kwargs
         )
         return output_df
@@ -843,12 +908,14 @@ class TSNE(BaseTransformer):
             Output dataset with probability of the sample for each class in the model.
         """
         super()._check_dataset_type(dataset)
-        inference_method="score_samples"
+        inference_method = "score_samples"
 
         # This dictionary contains optional kwargs for batch inference. These kwargs
         # are specific to the type of dataset used.
         transform_kwargs: BatchInferenceKwargsTypedDict = dict()
 
+        expected_output_cols = self._get_output_column_names(output_cols_prefix)
+
         if isinstance(dataset, DataFrame):
             self._deps = self._batch_inference_validate_snowpark(
                 dataset=dataset,
@@ -861,6 +928,9 @@ class TSNE(BaseTransformer):
                 drop_input_cols = self._drop_input_cols,
                 expected_output_cols_type="float",
             )
+            expected_output_cols = self._align_expected_output_names(
+                inference_method, dataset, expected_output_cols, output_cols_prefix
+            )
 
         elif isinstance(dataset, pd.DataFrame):
             transform_kwargs = dict(
@@ -879,7 +949,7 @@ class TSNE(BaseTransformer):
         output_df: DATAFRAME_TYPE = transform_handlers.batch_inference(
             inference_method=inference_method,
             input_cols=self.input_cols,
-            expected_output_cols=self._get_output_column_names(output_cols_prefix),
+            expected_output_cols=expected_output_cols,
             **transform_kwargs
         )
         return output_df
@@ -1024,50 +1094,84 @@ class TSNE(BaseTransformer):
         )
         return output_df
 
+
+
+    def to_sklearn(self) -> Any:
+        """Get sklearn.manifold.TSNE object.
+        """
+        if self._sklearn_object is None:
+            self._sklearn_object = self._create_sklearn_object()
+        return self._sklearn_object
+
+    def to_xgboost(self) -> Any:
+        raise exceptions.SnowflakeMLException(
+            error_code=error_codes.METHOD_NOT_ALLOWED,
+            original_exception=AttributeError(
+                modeling_error_messages.UNSUPPORTED_MODEL_CONVERSION.format(
+                    "to_xgboost()",
+                    "to_sklearn()"
+                )
+            ),
+        )
+
+    def to_lightgbm(self) -> Any:
+        raise exceptions.SnowflakeMLException(
+            error_code=error_codes.METHOD_NOT_ALLOWED,
+            original_exception=AttributeError(
+                modeling_error_messages.UNSUPPORTED_MODEL_CONVERSION.format(
+                    "to_lightgbm()",
+                    "to_sklearn()"
+                )
+            ),
+        )
 
-    def _get_model_signatures(self, dataset: Union[DataFrame, pd.DataFrame]) -> None:
+    def _get_dependencies(self) -> List[str]:
+        return self._deps
+
+
+    def _generate_model_signatures(self, dataset: Union[DataFrame, pd.DataFrame]) -> None:
         self._model_signature_dict = dict()
 
         PROB_FUNCTIONS = ["predict_log_proba", "predict_proba", "decision_function"]
 
-        inputs = list(_infer_signature(dataset[self.input_cols], "input"))
+        inputs = list(_infer_signature(dataset[self.input_cols], "input", use_snowflake_identifiers=True))
         outputs: List[BaseFeatureSpec] = []
         if hasattr(self, "predict"):
             # keep mypy happy
-            assert self._sklearn_object is not None and hasattr(self._sklearn_object, "_estimator_type")
+            assert self._sklearn_object is not None and hasattr(self._sklearn_object, "_estimator_type")
             # For classifier, the type of predict is the same as the type of label
-            if self._sklearn_object._estimator_type == 'classifier':
-                # label columns is the desired type for output
+            if self._sklearn_object._estimator_type == "classifier":
+                # label columns is the desired type for output
                 outputs = list(_infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True))
                 # rename the output columns
                 outputs = list(model_signature_utils.rename_features(outputs, self.output_cols))
-                self._model_signature_dict["predict"] = ModelSignature(inputs,
-                                                                       ([] if self._drop_input_cols else inputs)
-                                                                       + outputs)
+                self._model_signature_dict["predict"] = ModelSignature(
+                    inputs, ([] if self._drop_input_cols else inputs) + outputs
+                )
             # For mixture models that use the density mixin, `predict` returns the argmax of the log prob.
             # For outlier models, returns -1 for outliers and 1 for inliers.
-            # Clusterer returns int64 cluster labels.
+            # Clusterer returns int64 cluster labels.
             elif self._sklearn_object._estimator_type in ["DensityEstimator", "clusterer", "outlier_detector"]:
                 outputs = [FeatureSpec(dtype=DataType.INT64, name=c) for c in self.output_cols]
-                self._model_signature_dict["predict"] = ModelSignature(inputs,
-                                                                       ([] if self._drop_input_cols else inputs)
-                                                                       + outputs)
-
+                self._model_signature_dict["predict"] = ModelSignature(
+                    inputs, ([] if self._drop_input_cols else inputs) + outputs
+                )
+
             # For regressor, the type of predict is float64
-            elif self._sklearn_object._estimator_type == 'regressor':
+            elif self._sklearn_object._estimator_type == "regressor":
                 outputs = [FeatureSpec(dtype=DataType.DOUBLE, name=c) for c in self.output_cols]
-                self._model_signature_dict["predict"] = ModelSignature(inputs,
-                                                                       ([] if self._drop_input_cols else inputs)
-                                                                       + outputs)
-
+                self._model_signature_dict["predict"] = ModelSignature(
+                    inputs, ([] if self._drop_input_cols else inputs) + outputs
+                )
+
         for prob_func in PROB_FUNCTIONS:
             if hasattr(self, prob_func):
                 output_cols_prefix: str = f"{prob_func}_"
                 output_column_names = self._get_output_column_names(output_cols_prefix)
                 outputs = [FeatureSpec(dtype=DataType.DOUBLE, name=c) for c in output_column_names]
-                self._model_signature_dict[prob_func] = ModelSignature(inputs,
-                                                                       ([] if self._drop_input_cols else inputs)
-                                                                       + outputs)
+                self._model_signature_dict[prob_func] = ModelSignature(
+                    inputs, ([] if self._drop_input_cols else inputs) + outputs
+                )
 
         # Output signature names may still need to be renamed, since they were not created with `_infer_signature`.
         items = list(self._model_signature_dict.items())
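Note: `_generate_model_signatures` (the rename of `_get_model_signatures` in the hunk above) records one `ModelSignature` per inference method on the estimator. A rough sketch of the signature shape it produces for a regressor, built with the same public `model_signature` types the file now imports; the column names here are made up for illustration:

    from snowflake.ml.model.model_signature import DataType, FeatureSpec, ModelSignature

    # Hypothetical input/output columns, mirroring the "regressor" branch above.
    inputs = [
        FeatureSpec(dtype=DataType.DOUBLE, name="FEATURE_0"),
        FeatureSpec(dtype=DataType.DOUBLE, name="FEATURE_1"),
    ]
    outputs = [FeatureSpec(dtype=DataType.DOUBLE, name="OUTPUT_0")]

    # Same positional call shape as in the diff: inputs, then inputs + outputs
    # (only outputs when drop_input_cols is set).
    signature = ModelSignature(inputs, inputs + outputs)
    print(signature)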
@@ -1080,10 +1184,10 @@ class TSNE(BaseTransformer):
         """Returns model signature of current class.
 
         Raises:
-
+            SnowflakeMLException: If estimator is not fitted, then model signature cannot be inferred
 
         Returns:
-            Dict
+            Dict with each method and its input output signature
         """
         if self._model_signature_dict is None:
             raise exceptions.SnowflakeMLException(
@@ -1091,35 +1195,3 @@ class TSNE(BaseTransformer):
                 original_exception=RuntimeError("Estimator not fitted before accessing property model_signatures!"),
             )
         return self._model_signature_dict
-
-    def to_sklearn(self) -> Any:
-        """Get sklearn.manifold.TSNE object.
-        """
-        if self._sklearn_object is None:
-            self._sklearn_object = self._create_sklearn_object()
-        return self._sklearn_object
-
-    def to_xgboost(self) -> Any:
-        raise exceptions.SnowflakeMLException(
-            error_code=error_codes.METHOD_NOT_ALLOWED,
-            original_exception=AttributeError(
-                modeling_error_messages.UNSUPPORTED_MODEL_CONVERSION.format(
-                    "to_xgboost()",
-                    "to_sklearn()"
-                )
-            ),
-        )
-
-    def to_lightgbm(self) -> Any:
-        raise exceptions.SnowflakeMLException(
-            error_code=error_codes.METHOD_NOT_ALLOWED,
-            original_exception=AttributeError(
-                modeling_error_messages.UNSUPPORTED_MODEL_CONVERSION.format(
-                    "to_lightgbm()",
-                    "to_sklearn()"
-                )
-            ),
-        )
-
-    def _get_dependencies(self) -> List[str]:
-        return self._deps