snowflake-ml-python 1.3.1__py3-none-any.whl → 1.4.1__py3-none-any.whl
This diff shows the changes between publicly released versions of the package as they appear in their respective public registries; it is provided for informational purposes only.
- snowflake/ml/_internal/env_utils.py +11 -1
- snowflake/ml/_internal/human_readable_id/adjectives.txt +128 -0
- snowflake/ml/_internal/human_readable_id/animals.txt +128 -0
- snowflake/ml/_internal/human_readable_id/hrid_generator.py +40 -0
- snowflake/ml/_internal/human_readable_id/hrid_generator_base.py +135 -0
- snowflake/ml/_internal/utils/formatting.py +1 -1
- snowflake/ml/_internal/utils/identifier.py +3 -1
- snowflake/ml/_internal/utils/sql_identifier.py +2 -6
- snowflake/ml/feature_store/feature_store.py +166 -184
- snowflake/ml/feature_store/feature_view.py +12 -24
- snowflake/ml/fileset/sfcfs.py +56 -50
- snowflake/ml/fileset/stage_fs.py +48 -13
- snowflake/ml/model/_client/model/model_version_impl.py +6 -49
- snowflake/ml/model/_client/ops/model_ops.py +78 -29
- snowflake/ml/model/_client/sql/model.py +23 -2
- snowflake/ml/model/_client/sql/model_version.py +22 -1
- snowflake/ml/model/_deploy_client/image_builds/server_image_builder.py +1 -3
- snowflake/ml/model/_deploy_client/snowservice/deploy.py +5 -2
- snowflake/ml/model/_model_composer/model_composer.py +7 -5
- snowflake/ml/model/_model_composer/model_manifest/model_manifest.py +19 -54
- snowflake/ml/model/_model_composer/model_manifest/model_manifest_schema.py +8 -1
- snowflake/ml/model/_model_composer/model_method/infer_table_function.py_template +1 -1
- snowflake/ml/model/_model_composer/model_method/model_method.py +6 -10
- snowflake/ml/model/_packager/model_handlers/catboost.py +206 -0
- snowflake/ml/model/_packager/model_handlers/lightgbm.py +218 -0
- snowflake/ml/model/_packager/model_handlers/sklearn.py +3 -0
- snowflake/ml/model/_packager/model_handlers/snowmlmodel.py +13 -1
- snowflake/ml/model/_packager/model_handlers/xgboost.py +1 -1
- snowflake/ml/model/_packager/model_meta/_core_requirements.py +1 -1
- snowflake/ml/model/_packager/model_meta/model_meta.py +36 -6
- snowflake/ml/model/_packager/model_meta/model_meta_schema.py +20 -1
- snowflake/ml/model/_packager/model_meta_migrator/migrator_plans.py +3 -1
- snowflake/ml/model/_packager/model_packager.py +2 -2
- snowflake/ml/model/{_model_composer/model_runtime/_runtime_requirements.py → _packager/model_runtime/_snowml_inference_alternative_requirements.py} +1 -1
- snowflake/ml/model/_packager/model_runtime/model_runtime.py +137 -0
- snowflake/ml/model/custom_model.py +3 -1
- snowflake/ml/model/type_hints.py +21 -2
- snowflake/ml/modeling/_internal/estimator_utils.py +16 -11
- snowflake/ml/modeling/_internal/local_implementations/pandas_handlers.py +4 -1
- snowflake/ml/modeling/_internal/model_specifications.py +3 -1
- snowflake/ml/modeling/_internal/snowpark_implementations/distributed_hpo_trainer.py +545 -0
- snowflake/ml/modeling/_internal/snowpark_implementations/snowpark_handlers.py +8 -5
- snowflake/ml/modeling/calibration/calibrated_classifier_cv.py +195 -123
- snowflake/ml/modeling/cluster/affinity_propagation.py +195 -123
- snowflake/ml/modeling/cluster/agglomerative_clustering.py +195 -123
- snowflake/ml/modeling/cluster/birch.py +195 -123
- snowflake/ml/modeling/cluster/bisecting_k_means.py +195 -123
- snowflake/ml/modeling/cluster/dbscan.py +195 -123
- snowflake/ml/modeling/cluster/feature_agglomeration.py +195 -123
- snowflake/ml/modeling/cluster/k_means.py +195 -123
- snowflake/ml/modeling/cluster/mean_shift.py +195 -123
- snowflake/ml/modeling/cluster/mini_batch_k_means.py +195 -123
- snowflake/ml/modeling/cluster/optics.py +195 -123
- snowflake/ml/modeling/cluster/spectral_biclustering.py +195 -123
- snowflake/ml/modeling/cluster/spectral_clustering.py +195 -123
- snowflake/ml/modeling/cluster/spectral_coclustering.py +195 -123
- snowflake/ml/modeling/compose/column_transformer.py +195 -123
- snowflake/ml/modeling/compose/transformed_target_regressor.py +195 -123
- snowflake/ml/modeling/covariance/elliptic_envelope.py +195 -123
- snowflake/ml/modeling/covariance/empirical_covariance.py +195 -123
- snowflake/ml/modeling/covariance/graphical_lasso.py +195 -123
- snowflake/ml/modeling/covariance/graphical_lasso_cv.py +195 -123
- snowflake/ml/modeling/covariance/ledoit_wolf.py +195 -123
- snowflake/ml/modeling/covariance/min_cov_det.py +195 -123
- snowflake/ml/modeling/covariance/oas.py +195 -123
- snowflake/ml/modeling/covariance/shrunk_covariance.py +195 -123
- snowflake/ml/modeling/decomposition/dictionary_learning.py +195 -123
- snowflake/ml/modeling/decomposition/factor_analysis.py +195 -123
- snowflake/ml/modeling/decomposition/fast_ica.py +195 -123
- snowflake/ml/modeling/decomposition/incremental_pca.py +195 -123
- snowflake/ml/modeling/decomposition/kernel_pca.py +195 -123
- snowflake/ml/modeling/decomposition/mini_batch_dictionary_learning.py +195 -123
- snowflake/ml/modeling/decomposition/mini_batch_sparse_pca.py +195 -123
- snowflake/ml/modeling/decomposition/pca.py +195 -123
- snowflake/ml/modeling/decomposition/sparse_pca.py +195 -123
- snowflake/ml/modeling/decomposition/truncated_svd.py +195 -123
- snowflake/ml/modeling/discriminant_analysis/linear_discriminant_analysis.py +195 -123
- snowflake/ml/modeling/discriminant_analysis/quadratic_discriminant_analysis.py +195 -123
- snowflake/ml/modeling/ensemble/ada_boost_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/ada_boost_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/bagging_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/bagging_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/extra_trees_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/extra_trees_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/gradient_boosting_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/gradient_boosting_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/hist_gradient_boosting_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/hist_gradient_boosting_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/isolation_forest.py +195 -123
- snowflake/ml/modeling/ensemble/random_forest_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/random_forest_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/stacking_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/voting_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/voting_regressor.py +195 -123
- snowflake/ml/modeling/feature_selection/generic_univariate_select.py +195 -123
- snowflake/ml/modeling/feature_selection/select_fdr.py +195 -123
- snowflake/ml/modeling/feature_selection/select_fpr.py +195 -123
- snowflake/ml/modeling/feature_selection/select_fwe.py +195 -123
- snowflake/ml/modeling/feature_selection/select_k_best.py +195 -123
- snowflake/ml/modeling/feature_selection/select_percentile.py +195 -123
- snowflake/ml/modeling/feature_selection/sequential_feature_selector.py +195 -123
- snowflake/ml/modeling/feature_selection/variance_threshold.py +195 -123
- snowflake/ml/modeling/framework/_utils.py +8 -1
- snowflake/ml/modeling/framework/base.py +24 -6
- snowflake/ml/modeling/gaussian_process/gaussian_process_classifier.py +195 -123
- snowflake/ml/modeling/gaussian_process/gaussian_process_regressor.py +195 -123
- snowflake/ml/modeling/impute/iterative_imputer.py +195 -123
- snowflake/ml/modeling/impute/knn_imputer.py +195 -123
- snowflake/ml/modeling/impute/missing_indicator.py +195 -123
- snowflake/ml/modeling/impute/simple_imputer.py +4 -15
- snowflake/ml/modeling/kernel_approximation/additive_chi2_sampler.py +195 -123
- snowflake/ml/modeling/kernel_approximation/nystroem.py +195 -123
- snowflake/ml/modeling/kernel_approximation/polynomial_count_sketch.py +195 -123
- snowflake/ml/modeling/kernel_approximation/rbf_sampler.py +195 -123
- snowflake/ml/modeling/kernel_approximation/skewed_chi2_sampler.py +195 -123
- snowflake/ml/modeling/kernel_ridge/kernel_ridge.py +195 -123
- snowflake/ml/modeling/lightgbm/lgbm_classifier.py +198 -125
- snowflake/ml/modeling/lightgbm/lgbm_regressor.py +198 -125
- snowflake/ml/modeling/linear_model/ard_regression.py +195 -123
- snowflake/ml/modeling/linear_model/bayesian_ridge.py +195 -123
- snowflake/ml/modeling/linear_model/elastic_net.py +195 -123
- snowflake/ml/modeling/linear_model/elastic_net_cv.py +195 -123
- snowflake/ml/modeling/linear_model/gamma_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/huber_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/lars.py +195 -123
- snowflake/ml/modeling/linear_model/lars_cv.py +195 -123
- snowflake/ml/modeling/linear_model/lasso.py +195 -123
- snowflake/ml/modeling/linear_model/lasso_cv.py +195 -123
- snowflake/ml/modeling/linear_model/lasso_lars.py +195 -123
- snowflake/ml/modeling/linear_model/lasso_lars_cv.py +195 -123
- snowflake/ml/modeling/linear_model/lasso_lars_ic.py +195 -123
- snowflake/ml/modeling/linear_model/linear_regression.py +195 -123
- snowflake/ml/modeling/linear_model/logistic_regression.py +195 -123
- snowflake/ml/modeling/linear_model/logistic_regression_cv.py +195 -123
- snowflake/ml/modeling/linear_model/multi_task_elastic_net.py +195 -123
- snowflake/ml/modeling/linear_model/multi_task_elastic_net_cv.py +195 -123
- snowflake/ml/modeling/linear_model/multi_task_lasso.py +195 -123
- snowflake/ml/modeling/linear_model/multi_task_lasso_cv.py +195 -123
- snowflake/ml/modeling/linear_model/orthogonal_matching_pursuit.py +195 -123
- snowflake/ml/modeling/linear_model/passive_aggressive_classifier.py +195 -123
- snowflake/ml/modeling/linear_model/passive_aggressive_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/perceptron.py +195 -123
- snowflake/ml/modeling/linear_model/poisson_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/ransac_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/ridge.py +195 -123
- snowflake/ml/modeling/linear_model/ridge_classifier.py +195 -123
- snowflake/ml/modeling/linear_model/ridge_classifier_cv.py +195 -123
- snowflake/ml/modeling/linear_model/ridge_cv.py +195 -123
- snowflake/ml/modeling/linear_model/sgd_classifier.py +195 -123
- snowflake/ml/modeling/linear_model/sgd_one_class_svm.py +195 -123
- snowflake/ml/modeling/linear_model/sgd_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/theil_sen_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/tweedie_regressor.py +195 -123
- snowflake/ml/modeling/manifold/isomap.py +195 -123
- snowflake/ml/modeling/manifold/mds.py +195 -123
- snowflake/ml/modeling/manifold/spectral_embedding.py +195 -123
- snowflake/ml/modeling/manifold/tsne.py +195 -123
- snowflake/ml/modeling/mixture/bayesian_gaussian_mixture.py +195 -123
- snowflake/ml/modeling/mixture/gaussian_mixture.py +195 -123
- snowflake/ml/modeling/model_selection/grid_search_cv.py +42 -18
- snowflake/ml/modeling/model_selection/randomized_search_cv.py +42 -18
- snowflake/ml/modeling/multiclass/one_vs_one_classifier.py +195 -123
- snowflake/ml/modeling/multiclass/one_vs_rest_classifier.py +195 -123
- snowflake/ml/modeling/multiclass/output_code_classifier.py +195 -123
- snowflake/ml/modeling/naive_bayes/bernoulli_nb.py +195 -123
- snowflake/ml/modeling/naive_bayes/categorical_nb.py +195 -123
- snowflake/ml/modeling/naive_bayes/complement_nb.py +195 -123
- snowflake/ml/modeling/naive_bayes/gaussian_nb.py +195 -123
- snowflake/ml/modeling/naive_bayes/multinomial_nb.py +195 -123
- snowflake/ml/modeling/neighbors/k_neighbors_classifier.py +195 -123
- snowflake/ml/modeling/neighbors/k_neighbors_regressor.py +195 -123
- snowflake/ml/modeling/neighbors/kernel_density.py +195 -123
- snowflake/ml/modeling/neighbors/local_outlier_factor.py +195 -123
- snowflake/ml/modeling/neighbors/nearest_centroid.py +195 -123
- snowflake/ml/modeling/neighbors/nearest_neighbors.py +195 -123
- snowflake/ml/modeling/neighbors/neighborhood_components_analysis.py +195 -123
- snowflake/ml/modeling/neighbors/radius_neighbors_classifier.py +195 -123
- snowflake/ml/modeling/neighbors/radius_neighbors_regressor.py +195 -123
- snowflake/ml/modeling/neural_network/bernoulli_rbm.py +195 -123
- snowflake/ml/modeling/neural_network/mlp_classifier.py +195 -123
- snowflake/ml/modeling/neural_network/mlp_regressor.py +195 -123
- snowflake/ml/modeling/pipeline/pipeline.py +4 -4
- snowflake/ml/modeling/preprocessing/binarizer.py +1 -5
- snowflake/ml/modeling/preprocessing/k_bins_discretizer.py +1 -5
- snowflake/ml/modeling/preprocessing/label_encoder.py +1 -5
- snowflake/ml/modeling/preprocessing/max_abs_scaler.py +1 -5
- snowflake/ml/modeling/preprocessing/min_max_scaler.py +10 -12
- snowflake/ml/modeling/preprocessing/normalizer.py +1 -5
- snowflake/ml/modeling/preprocessing/one_hot_encoder.py +1 -5
- snowflake/ml/modeling/preprocessing/ordinal_encoder.py +1 -5
- snowflake/ml/modeling/preprocessing/polynomial_features.py +195 -123
- snowflake/ml/modeling/preprocessing/robust_scaler.py +1 -5
- snowflake/ml/modeling/preprocessing/standard_scaler.py +11 -11
- snowflake/ml/modeling/semi_supervised/label_propagation.py +195 -123
- snowflake/ml/modeling/semi_supervised/label_spreading.py +195 -123
- snowflake/ml/modeling/svm/linear_svc.py +195 -123
- snowflake/ml/modeling/svm/linear_svr.py +195 -123
- snowflake/ml/modeling/svm/nu_svc.py +195 -123
- snowflake/ml/modeling/svm/nu_svr.py +195 -123
- snowflake/ml/modeling/svm/svc.py +195 -123
- snowflake/ml/modeling/svm/svr.py +195 -123
- snowflake/ml/modeling/tree/decision_tree_classifier.py +195 -123
- snowflake/ml/modeling/tree/decision_tree_regressor.py +195 -123
- snowflake/ml/modeling/tree/extra_tree_classifier.py +195 -123
- snowflake/ml/modeling/tree/extra_tree_regressor.py +195 -123
- snowflake/ml/modeling/xgboost/xgb_classifier.py +195 -123
- snowflake/ml/modeling/xgboost/xgb_regressor.py +195 -123
- snowflake/ml/modeling/xgboost/xgbrf_classifier.py +195 -123
- snowflake/ml/modeling/xgboost/xgbrf_regressor.py +195 -123
- snowflake/ml/registry/_manager/model_manager.py +5 -1
- snowflake/ml/registry/model_registry.py +99 -26
- snowflake/ml/registry/registry.py +3 -2
- snowflake/ml/version.py +1 -1
- {snowflake_ml_python-1.3.1.dist-info → snowflake_ml_python-1.4.1.dist-info}/METADATA +94 -55
- {snowflake_ml_python-1.3.1.dist-info → snowflake_ml_python-1.4.1.dist-info}/RECORD +218 -212
- snowflake/ml/model/_model_composer/model_runtime/model_runtime.py +0 -97
- {snowflake_ml_python-1.3.1.dist-info → snowflake_ml_python-1.4.1.dist-info}/LICENSE.txt +0 -0
- {snowflake_ml_python-1.3.1.dist-info → snowflake_ml_python-1.4.1.dist-info}/WHEEL +0 -0
- {snowflake_ml_python-1.3.1.dist-info → snowflake_ml_python-1.4.1.dist-info}/top_level.txt +0 -0
snowflake/ml/modeling/linear_model/elastic_net.py

@@ -33,6 +33,15 @@ from snowflake.ml.modeling._internal.transformer_protocols import (
     BatchInferenceKwargsTypedDict,
     ScoreKwargsTypedDict
 )
+from snowflake.ml.model._signatures import utils as model_signature_utils
+from snowflake.ml.model.model_signature import (
+    BaseFeatureSpec,
+    DataType,
+    FeatureSpec,
+    ModelSignature,
+    _infer_signature,
+    _rename_signature_with_snowflake_identifiers,
+)
 
 from snowflake.ml.modeling._internal.model_transformer_builder import ModelTransformerBuilder
 
@@ -43,16 +52,6 @@ from snowflake.ml.modeling._internal.estimator_utils import (
     validate_sklearn_args,
 )
 
-from snowflake.ml.model.model_signature import (
-    DataType,
-    FeatureSpec,
-    ModelSignature,
-    _infer_signature,
-    _rename_signature_with_snowflake_identifiers,
-    BaseFeatureSpec,
-)
-from snowflake.ml.model._signatures import utils as model_signature_utils
-
 _PROJECT = "ModelDevelopment"
 # Derive subproject from module name by removing "sklearn"
 # and converting module name from underscore to CamelCase
@@ -268,12 +267,7 @@ class ElasticNet(BaseTransformer):
         )
         return selected_cols
 
-    @telemetry.send_api_usage_telemetry(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
-    def fit(self, dataset: Union[DataFrame, pd.DataFrame]) -> "ElasticNet":
+    def _fit(self, dataset: Union[DataFrame, pd.DataFrame]) -> "ElasticNet":
         """Fit model with coordinate descent
         For more details on this function, see [sklearn.linear_model.ElasticNet.fit]
         (https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.ElasticNet.html#sklearn.linear_model.ElasticNet.fit)
@@ -300,12 +294,14 @@ class ElasticNet(BaseTransformer):
 
         self._snowpark_cols = dataset.select(self.input_cols).columns
 
-
+        # If we are already in a stored procedure, no need to kick off another one.
         if SNOWML_SPROC_ENV in os.environ:
             statement_params = telemetry.get_function_usage_statement_params(
                 project=_PROJECT,
                 subproject=_SUBPROJECT,
-                function_name=telemetry.get_statement_params_full_func_name(
+                function_name=telemetry.get_statement_params_full_func_name(
+                    inspect.currentframe(), ElasticNet.__class__.__name__
+                ),
                 api_calls=[Session.call],
                 custom_tags=dict([("autogen", True)]) if self._autogenerated else None,
             )
@@ -326,7 +322,7 @@ class ElasticNet(BaseTransformer):
         )
         self._sklearn_object = model_trainer.train()
         self._is_fitted = True
-        self.
+        self._generate_model_signatures(dataset)
         return self
 
     def _batch_inference_validate_snowpark(
@@ -402,7 +398,9 @@ class ElasticNet(BaseTransformer):
             # when it is classifier, infer the datatype from label columns
             if expected_type_inferred == "" and 'predict' in self.model_signatures:
                 # Batch inference takes a single expected output column type. Use the first columns type for now.
-                label_cols_signatures = [
+                label_cols_signatures = [
+                    row for row in self.model_signatures['predict'].outputs if row.name in self.output_cols
+                ]
                 if len(label_cols_signatures) == 0:
                     error_str = f"Output columns {self.output_cols} do not match model signatures {self.model_signatures['predict'].outputs}."
                     raise exceptions.SnowflakeMLException(
@@ -410,25 +408,22 @@ class ElasticNet(BaseTransformer):
                         original_exception=ValueError(error_str),
                     )
 
-                expected_type_inferred = convert_sp_to_sf_type(
-                    label_cols_signatures[0].as_snowpark_type()
-                )
+                expected_type_inferred = convert_sp_to_sf_type(label_cols_signatures[0].as_snowpark_type())
 
             self._deps = self._batch_inference_validate_snowpark(dataset=dataset, inference_method=inference_method)
-            assert isinstance(
+            assert isinstance(
+                dataset._session, Session
+            ) # mypy does not recognize the check in _batch_inference_validate_snowpark()
 
             transform_kwargs = dict(
-                session
-                dependencies
-                drop_input_cols
-                expected_output_cols_type
+                session=dataset._session,
+                dependencies=self._deps,
+                drop_input_cols=self._drop_input_cols,
+                expected_output_cols_type=expected_type_inferred,
             )
 
         elif isinstance(dataset, pd.DataFrame):
-            transform_kwargs = dict(
-                snowpark_input_cols = self._snowpark_cols,
-                drop_input_cols = self._drop_input_cols
-            )
+            transform_kwargs = dict(snowpark_input_cols=self._snowpark_cols, drop_input_cols=self._drop_input_cols)
 
         transform_handlers = ModelTransformerBuilder.build(
             dataset=dataset,
@@ -468,7 +463,7 @@ class ElasticNet(BaseTransformer):
             Transformed dataset.
         """
         super()._check_dataset_type(dataset)
-        inference_method="transform"
+        inference_method = "transform"
 
         # This dictionary contains optional kwargs for batch inference. These kwargs
         # are specific to the type of dataset used.
@@ -505,17 +500,14 @@ class ElasticNet(BaseTransformer):
             assert isinstance(dataset._session, Session) # mypy does not recognize the check in _batch_inference_validate_snowpark()
 
             transform_kwargs = dict(
-                session
-                dependencies
-                drop_input_cols
-                expected_output_cols_type
+                session=dataset._session,
+                dependencies=self._deps,
+                drop_input_cols=self._drop_input_cols,
+                expected_output_cols_type=expected_dtype,
             )
 
         elif isinstance(dataset, pd.DataFrame):
-            transform_kwargs = dict(
-                snowpark_input_cols = self._snowpark_cols,
-                drop_input_cols = self._drop_input_cols
-            )
+            transform_kwargs = dict(snowpark_input_cols=self._snowpark_cols, drop_input_cols=self._drop_input_cols)
 
         transform_handlers = ModelTransformerBuilder.build(
             dataset=dataset,
@@ -534,7 +526,11 @@ class ElasticNet(BaseTransformer):
         return output_df
 
     @available_if(original_estimator_has_callable("fit_predict")) # type: ignore[misc]
-    def fit_predict(
+    def fit_predict(
+        self,
+        dataset: Union[DataFrame, pd.DataFrame],
+        output_cols_prefix: str = "fit_predict_",
+    ) -> Union[DataFrame, pd.DataFrame]:
         """ Method not supported for this class.
 
 
@@ -559,7 +555,9 @@ class ElasticNet(BaseTransformer):
         )
         output_result, fitted_estimator = model_trainer.train_fit_predict(
             drop_input_cols=self._drop_input_cols,
-            expected_output_cols_list=
+            expected_output_cols_list=(
+                self.output_cols if self.output_cols else self._get_output_column_names(output_cols_prefix)
+            ),
         )
         self._sklearn_object = fitted_estimator
         self._is_fitted = True
@@ -576,6 +574,62 @@ class ElasticNet(BaseTransformer):
         assert self._sklearn_object is not None
         return self._sklearn_object.embedding_
 
+
+    def _get_output_column_names(self, output_cols_prefix: str, output_cols: Optional[List[str]] = None) -> List[str]:
+        """ Returns the list of output columns for predict_proba(), decision_function(), etc.. functions.
+        Returns a list with output_cols_prefix as the only element if the estimator is not a classifier.
+        """
+        output_cols_prefix = identifier.resolve_identifier(output_cols_prefix)
+        # The following condition is introduced for kneighbors methods, and not used in other methods
+        if output_cols:
+            output_cols = [
+                identifier.concat_names([output_cols_prefix, identifier.resolve_identifier(c)])
+                for c in output_cols
+            ]
+        elif getattr(self._sklearn_object, "classes_", None) is None:
+            output_cols = [output_cols_prefix]
+        elif self._sklearn_object is not None:
+            classes = self._sklearn_object.classes_
+            if isinstance(classes, numpy.ndarray):
+                output_cols = [f'{output_cols_prefix}{str(c)}' for c in classes.tolist()]
+            elif isinstance(classes, list) and len(classes) > 0 and isinstance(classes[0], numpy.ndarray):
+                # If the estimator is a multioutput estimator, classes_ will be a list of ndarrays.
+                output_cols = []
+                for i, cl in enumerate(classes):
+                    # For binary classification, there is only one output column for each class
+                    # ndarray as the two classes are complementary.
+                    if len(cl) == 2:
+                        output_cols.append(f'{output_cols_prefix}{i}_{cl[0]}')
+                    else:
+                        output_cols.extend([
+                            f'{output_cols_prefix}{i}_{c}' for c in cl.tolist()
+                        ])
+            else:
+                output_cols = []
+
+        # Make sure column names are valid snowflake identifiers.
+        assert output_cols is not None # Make MyPy happy
+        rv = [identifier.rename_to_valid_snowflake_identifier(c) for c in output_cols]
+
+        return rv
+
+    def _align_expected_output_names(
+        self, method: str, dataset: DataFrame, expected_output_cols_list: List[str], output_cols_prefix: str
+    ) -> List[str]:
+        # in case the inferred output column names dimension is different
+        # we use one line of snowpark dataframe and put it into sklearn estimator using pandas
+        output_df_pd = getattr(self, method)(dataset.limit(1).to_pandas(), output_cols_prefix)
+        output_df_columns = list(output_df_pd.columns)
+        output_df_columns_set: Set[str] = set(output_df_columns) - set(dataset.columns)
+        if self.sample_weight_col:
+            output_df_columns_set -= set(self.sample_weight_col)
+        # if the dimension of inferred output column names is correct; use it
+        if len(expected_output_cols_list) == len(output_df_columns_set):
+            return expected_output_cols_list
+        # otherwise, use the sklearn estimator's output
+        else:
+            return sorted(list(output_df_columns_set), key=lambda x: output_df_columns.index(x))
+
     @available_if(original_estimator_has_callable("predict_proba")) # type: ignore[misc]
     @telemetry.send_api_usage_telemetry(
         project=_PROJECT,
@@ -606,24 +660,28 @@ class ElasticNet(BaseTransformer):
         # are specific to the type of dataset used.
         transform_kwargs: BatchInferenceKwargsTypedDict = dict()
 
+        expected_output_cols = self._get_output_column_names(output_cols_prefix)
+
         if isinstance(dataset, DataFrame):
             self._deps = self._batch_inference_validate_snowpark(
                 dataset=dataset,
                 inference_method=inference_method,
             )
-            assert isinstance(
+            assert isinstance(
+                dataset._session, Session
+            ) # mypy does not recognize the check in _batch_inference_validate_snowpark()
             transform_kwargs = dict(
                 session=dataset._session,
                 dependencies=self._deps,
-                drop_input_cols
+                drop_input_cols=self._drop_input_cols,
                 expected_output_cols_type="float",
            )
+            expected_output_cols = self._align_expected_output_names(
+                inference_method, dataset, expected_output_cols, output_cols_prefix
+            )
 
         elif isinstance(dataset, pd.DataFrame):
-            transform_kwargs = dict(
-                snowpark_input_cols = self._snowpark_cols,
-                drop_input_cols = self._drop_input_cols
-            )
+            transform_kwargs = dict(snowpark_input_cols=self._snowpark_cols, drop_input_cols=self._drop_input_cols)
 
         transform_handlers = ModelTransformerBuilder.build(
             dataset=dataset,
@@ -635,7 +693,7 @@ class ElasticNet(BaseTransformer):
         output_df: DATAFRAME_TYPE = transform_handlers.batch_inference(
             inference_method=inference_method,
             input_cols=self.input_cols,
-            expected_output_cols=
+            expected_output_cols=expected_output_cols,
             **transform_kwargs
         )
         return output_df
@@ -665,7 +723,8 @@ class ElasticNet(BaseTransformer):
             Output dataset with log probability of the sample for each class in the model.
         """
         super()._check_dataset_type(dataset)
-        inference_method="predict_log_proba"
+        inference_method = "predict_log_proba"
+        expected_output_cols = self._get_output_column_names(output_cols_prefix)
 
         # This dictionary contains optional kwargs for batch inference. These kwargs
         # are specific to the type of dataset used.
@@ -676,18 +735,20 @@ class ElasticNet(BaseTransformer):
                 dataset=dataset,
                 inference_method=inference_method,
             )
-            assert isinstance(
+            assert isinstance(
+                dataset._session, Session
+            ) # mypy does not recognize the check in _batch_inference_validate_snowpark()
             transform_kwargs = dict(
                 session=dataset._session,
                 dependencies=self._deps,
-                drop_input_cols
+                drop_input_cols=self._drop_input_cols,
                 expected_output_cols_type="float",
            )
+            expected_output_cols = self._align_expected_output_names(
+                inference_method, dataset, expected_output_cols, output_cols_prefix
+            )
         elif isinstance(dataset, pd.DataFrame):
-            transform_kwargs = dict(
-                snowpark_input_cols = self._snowpark_cols,
-                drop_input_cols = self._drop_input_cols
-            )
+            transform_kwargs = dict(snowpark_input_cols=self._snowpark_cols, drop_input_cols=self._drop_input_cols)
 
         transform_handlers = ModelTransformerBuilder.build(
             dataset=dataset,
@@ -700,7 +761,7 @@ class ElasticNet(BaseTransformer):
         output_df: DATAFRAME_TYPE = transform_handlers.batch_inference(
             inference_method=inference_method,
             input_cols=self.input_cols,
-            expected_output_cols=
+            expected_output_cols=expected_output_cols,
             **transform_kwargs
        )
         return output_df
@@ -726,30 +787,34 @@ class ElasticNet(BaseTransformer):
            Output dataset with results of the decision function for the samples in input dataset.
         """
         super()._check_dataset_type(dataset)
-        inference_method="decision_function"
+        inference_method = "decision_function"
 
         # This dictionary contains optional kwargs for batch inference. These kwargs
         # are specific to the type of dataset used.
         transform_kwargs: BatchInferenceKwargsTypedDict = dict()
 
+        expected_output_cols = self._get_output_column_names(output_cols_prefix)
+
         if isinstance(dataset, DataFrame):
             self._deps = self._batch_inference_validate_snowpark(
                 dataset=dataset,
                 inference_method=inference_method,
             )
-            assert isinstance(
+            assert isinstance(
+                dataset._session, Session
+            ) # mypy does not recognize the check in _batch_inference_validate_snowpark()
             transform_kwargs = dict(
                 session=dataset._session,
                 dependencies=self._deps,
-                drop_input_cols
+                drop_input_cols=self._drop_input_cols,
                 expected_output_cols_type="float",
            )
+            expected_output_cols = self._align_expected_output_names(
+                inference_method, dataset, expected_output_cols, output_cols_prefix
+            )
 
         elif isinstance(dataset, pd.DataFrame):
-            transform_kwargs = dict(
-                snowpark_input_cols = self._snowpark_cols,
-                drop_input_cols = self._drop_input_cols
-            )
+            transform_kwargs = dict(snowpark_input_cols=self._snowpark_cols, drop_input_cols=self._drop_input_cols)
 
         transform_handlers = ModelTransformerBuilder.build(
             dataset=dataset,
@@ -762,7 +827,7 @@ class ElasticNet(BaseTransformer):
         output_df: DATAFRAME_TYPE = transform_handlers.batch_inference(
             inference_method=inference_method,
             input_cols=self.input_cols,
-            expected_output_cols=
+            expected_output_cols=expected_output_cols,
             **transform_kwargs
        )
         return output_df
@@ -791,12 +856,14 @@ class ElasticNet(BaseTransformer):
            Output dataset with probability of the sample for each class in the model.
         """
         super()._check_dataset_type(dataset)
-        inference_method="score_samples"
+        inference_method = "score_samples"
 
         # This dictionary contains optional kwargs for batch inference. These kwargs
         # are specific to the type of dataset used.
         transform_kwargs: BatchInferenceKwargsTypedDict = dict()
 
+        expected_output_cols = self._get_output_column_names(output_cols_prefix)
+
         if isinstance(dataset, DataFrame):
             self._deps = self._batch_inference_validate_snowpark(
                 dataset=dataset,
@@ -809,6 +876,9 @@ class ElasticNet(BaseTransformer):
                 drop_input_cols = self._drop_input_cols,
                 expected_output_cols_type="float",
            )
+            expected_output_cols = self._align_expected_output_names(
+                inference_method, dataset, expected_output_cols, output_cols_prefix
+            )
 
         elif isinstance(dataset, pd.DataFrame):
             transform_kwargs = dict(
@@ -827,7 +897,7 @@ class ElasticNet(BaseTransformer):
         output_df: DATAFRAME_TYPE = transform_handlers.batch_inference(
             inference_method=inference_method,
             input_cols=self.input_cols,
-            expected_output_cols=
+            expected_output_cols=expected_output_cols,
             **transform_kwargs
        )
         return output_df
@@ -974,50 +1044,84 @@ class ElasticNet(BaseTransformer):
        )
         return output_df
 
+
+
+    def to_sklearn(self) -> Any:
+        """Get sklearn.linear_model.ElasticNet object.
+        """
+        if self._sklearn_object is None:
+            self._sklearn_object = self._create_sklearn_object()
+        return self._sklearn_object
+
+    def to_xgboost(self) -> Any:
+        raise exceptions.SnowflakeMLException(
+            error_code=error_codes.METHOD_NOT_ALLOWED,
+            original_exception=AttributeError(
+                modeling_error_messages.UNSUPPORTED_MODEL_CONVERSION.format(
+                    "to_xgboost()",
+                    "to_sklearn()"
+                )
+            ),
+        )
+
+    def to_lightgbm(self) -> Any:
+        raise exceptions.SnowflakeMLException(
+            error_code=error_codes.METHOD_NOT_ALLOWED,
+            original_exception=AttributeError(
+                modeling_error_messages.UNSUPPORTED_MODEL_CONVERSION.format(
+                    "to_lightgbm()",
+                    "to_sklearn()"
+                )
+            ),
+        )
 
-    def
+    def _get_dependencies(self) -> List[str]:
+        return self._deps
+
+
+    def _generate_model_signatures(self, dataset: Union[DataFrame, pd.DataFrame]) -> None:
         self._model_signature_dict = dict()
 
         PROB_FUNCTIONS = ["predict_log_proba", "predict_proba", "decision_function"]
 
-        inputs = list(_infer_signature(dataset[self.input_cols], "input"))
+        inputs = list(_infer_signature(dataset[self.input_cols], "input", use_snowflake_identifiers=True))
         outputs: List[BaseFeatureSpec] = []
         if hasattr(self, "predict"):
             # keep mypy happy
-            assert self._sklearn_object is not None and hasattr(self._sklearn_object, "_estimator_type")
+            assert self._sklearn_object is not None and hasattr(self._sklearn_object, "_estimator_type")
             # For classifier, the type of predict is the same as the type of label
-            if self._sklearn_object._estimator_type ==
-
+            if self._sklearn_object._estimator_type == "classifier":
+                # label columns is the desired type for output
                 outputs = list(_infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True))
                 # rename the output columns
                 outputs = list(model_signature_utils.rename_features(outputs, self.output_cols))
-                self._model_signature_dict["predict"] = ModelSignature(
-
-
+                self._model_signature_dict["predict"] = ModelSignature(
+                    inputs, ([] if self._drop_input_cols else inputs) + outputs
+                )
             # For mixture models that use the density mixin, `predict` returns the argmax of the log prob.
             # For outlier models, returns -1 for outliers and 1 for inliers.
-            # Clusterer returns int64 cluster labels.
+            # Clusterer returns int64 cluster labels.
             elif self._sklearn_object._estimator_type in ["DensityEstimator", "clusterer", "outlier_detector"]:
                 outputs = [FeatureSpec(dtype=DataType.INT64, name=c) for c in self.output_cols]
-                self._model_signature_dict["predict"] = ModelSignature(
-
-
-
+                self._model_signature_dict["predict"] = ModelSignature(
+                    inputs, ([] if self._drop_input_cols else inputs) + outputs
+                )
+
             # For regressor, the type of predict is float64
-            elif self._sklearn_object._estimator_type ==
+            elif self._sklearn_object._estimator_type == "regressor":
                 outputs = [FeatureSpec(dtype=DataType.DOUBLE, name=c) for c in self.output_cols]
-                self._model_signature_dict["predict"] = ModelSignature(
-
-
-
+                self._model_signature_dict["predict"] = ModelSignature(
+                    inputs, ([] if self._drop_input_cols else inputs) + outputs
+                )
+
         for prob_func in PROB_FUNCTIONS:
             if hasattr(self, prob_func):
                 output_cols_prefix: str = f"{prob_func}_"
                 output_column_names = self._get_output_column_names(output_cols_prefix)
                 outputs = [FeatureSpec(dtype=DataType.DOUBLE, name=c) for c in output_column_names]
-                self._model_signature_dict[prob_func] = ModelSignature(
-
-
+                self._model_signature_dict[prob_func] = ModelSignature(
+                    inputs, ([] if self._drop_input_cols else inputs) + outputs
+                )
 
         # Output signature names may still need to be renamed, since they were not created with `_infer_signature`.
         items = list(self._model_signature_dict.items())
@@ -1030,10 +1134,10 @@ class ElasticNet(BaseTransformer):
         """Returns model signature of current class.
 
         Raises:
-
+            SnowflakeMLException: If estimator is not fitted, then model signature cannot be inferred
 
         Returns:
-            Dict
+            Dict with each method and its input output signature
         """
         if self._model_signature_dict is None:
             raise exceptions.SnowflakeMLException(
@@ -1041,35 +1145,3 @@ class ElasticNet(BaseTransformer):
                 original_exception=RuntimeError("Estimator not fitted before accessing property model_signatures!"),
             )
         return self._model_signature_dict
-
-    def to_sklearn(self) -> Any:
-        """Get sklearn.linear_model.ElasticNet object.
-        """
-        if self._sklearn_object is None:
-            self._sklearn_object = self._create_sklearn_object()
-        return self._sklearn_object
-
-    def to_xgboost(self) -> Any:
-        raise exceptions.SnowflakeMLException(
-            error_code=error_codes.METHOD_NOT_ALLOWED,
-            original_exception=AttributeError(
-                modeling_error_messages.UNSUPPORTED_MODEL_CONVERSION.format(
-                    "to_xgboost()",
-                    "to_sklearn()"
-                )
-            ),
-        )
-
-    def to_lightgbm(self) -> Any:
-        raise exceptions.SnowflakeMLException(
-            error_code=error_codes.METHOD_NOT_ALLOWED,
-            original_exception=AttributeError(
-                modeling_error_messages.UNSUPPORTED_MODEL_CONVERSION.format(
-                    "to_lightgbm()",
-                    "to_sklearn()"
-                )
-            ),
-        )
-
-    def _get_dependencies(self) -> List[str]:
-        return self._deps