snowflake-ml-python 1.3.1__py3-none-any.whl → 1.4.1__py3-none-any.whl
This diff shows the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- snowflake/ml/_internal/env_utils.py +11 -1
- snowflake/ml/_internal/human_readable_id/adjectives.txt +128 -0
- snowflake/ml/_internal/human_readable_id/animals.txt +128 -0
- snowflake/ml/_internal/human_readable_id/hrid_generator.py +40 -0
- snowflake/ml/_internal/human_readable_id/hrid_generator_base.py +135 -0
- snowflake/ml/_internal/utils/formatting.py +1 -1
- snowflake/ml/_internal/utils/identifier.py +3 -1
- snowflake/ml/_internal/utils/sql_identifier.py +2 -6
- snowflake/ml/feature_store/feature_store.py +166 -184
- snowflake/ml/feature_store/feature_view.py +12 -24
- snowflake/ml/fileset/sfcfs.py +56 -50
- snowflake/ml/fileset/stage_fs.py +48 -13
- snowflake/ml/model/_client/model/model_version_impl.py +6 -49
- snowflake/ml/model/_client/ops/model_ops.py +78 -29
- snowflake/ml/model/_client/sql/model.py +23 -2
- snowflake/ml/model/_client/sql/model_version.py +22 -1
- snowflake/ml/model/_deploy_client/image_builds/server_image_builder.py +1 -3
- snowflake/ml/model/_deploy_client/snowservice/deploy.py +5 -2
- snowflake/ml/model/_model_composer/model_composer.py +7 -5
- snowflake/ml/model/_model_composer/model_manifest/model_manifest.py +19 -54
- snowflake/ml/model/_model_composer/model_manifest/model_manifest_schema.py +8 -1
- snowflake/ml/model/_model_composer/model_method/infer_table_function.py_template +1 -1
- snowflake/ml/model/_model_composer/model_method/model_method.py +6 -10
- snowflake/ml/model/_packager/model_handlers/catboost.py +206 -0
- snowflake/ml/model/_packager/model_handlers/lightgbm.py +218 -0
- snowflake/ml/model/_packager/model_handlers/sklearn.py +3 -0
- snowflake/ml/model/_packager/model_handlers/snowmlmodel.py +13 -1
- snowflake/ml/model/_packager/model_handlers/xgboost.py +1 -1
- snowflake/ml/model/_packager/model_meta/_core_requirements.py +1 -1
- snowflake/ml/model/_packager/model_meta/model_meta.py +36 -6
- snowflake/ml/model/_packager/model_meta/model_meta_schema.py +20 -1
- snowflake/ml/model/_packager/model_meta_migrator/migrator_plans.py +3 -1
- snowflake/ml/model/_packager/model_packager.py +2 -2
- snowflake/ml/model/{_model_composer/model_runtime/_runtime_requirements.py → _packager/model_runtime/_snowml_inference_alternative_requirements.py} +1 -1
- snowflake/ml/model/_packager/model_runtime/model_runtime.py +137 -0
- snowflake/ml/model/custom_model.py +3 -1
- snowflake/ml/model/type_hints.py +21 -2
- snowflake/ml/modeling/_internal/estimator_utils.py +16 -11
- snowflake/ml/modeling/_internal/local_implementations/pandas_handlers.py +4 -1
- snowflake/ml/modeling/_internal/model_specifications.py +3 -1
- snowflake/ml/modeling/_internal/snowpark_implementations/distributed_hpo_trainer.py +545 -0
- snowflake/ml/modeling/_internal/snowpark_implementations/snowpark_handlers.py +8 -5
- snowflake/ml/modeling/calibration/calibrated_classifier_cv.py +195 -123
- snowflake/ml/modeling/cluster/affinity_propagation.py +195 -123
- snowflake/ml/modeling/cluster/agglomerative_clustering.py +195 -123
- snowflake/ml/modeling/cluster/birch.py +195 -123
- snowflake/ml/modeling/cluster/bisecting_k_means.py +195 -123
- snowflake/ml/modeling/cluster/dbscan.py +195 -123
- snowflake/ml/modeling/cluster/feature_agglomeration.py +195 -123
- snowflake/ml/modeling/cluster/k_means.py +195 -123
- snowflake/ml/modeling/cluster/mean_shift.py +195 -123
- snowflake/ml/modeling/cluster/mini_batch_k_means.py +195 -123
- snowflake/ml/modeling/cluster/optics.py +195 -123
- snowflake/ml/modeling/cluster/spectral_biclustering.py +195 -123
- snowflake/ml/modeling/cluster/spectral_clustering.py +195 -123
- snowflake/ml/modeling/cluster/spectral_coclustering.py +195 -123
- snowflake/ml/modeling/compose/column_transformer.py +195 -123
- snowflake/ml/modeling/compose/transformed_target_regressor.py +195 -123
- snowflake/ml/modeling/covariance/elliptic_envelope.py +195 -123
- snowflake/ml/modeling/covariance/empirical_covariance.py +195 -123
- snowflake/ml/modeling/covariance/graphical_lasso.py +195 -123
- snowflake/ml/modeling/covariance/graphical_lasso_cv.py +195 -123
- snowflake/ml/modeling/covariance/ledoit_wolf.py +195 -123
- snowflake/ml/modeling/covariance/min_cov_det.py +195 -123
- snowflake/ml/modeling/covariance/oas.py +195 -123
- snowflake/ml/modeling/covariance/shrunk_covariance.py +195 -123
- snowflake/ml/modeling/decomposition/dictionary_learning.py +195 -123
- snowflake/ml/modeling/decomposition/factor_analysis.py +195 -123
- snowflake/ml/modeling/decomposition/fast_ica.py +195 -123
- snowflake/ml/modeling/decomposition/incremental_pca.py +195 -123
- snowflake/ml/modeling/decomposition/kernel_pca.py +195 -123
- snowflake/ml/modeling/decomposition/mini_batch_dictionary_learning.py +195 -123
- snowflake/ml/modeling/decomposition/mini_batch_sparse_pca.py +195 -123
- snowflake/ml/modeling/decomposition/pca.py +195 -123
- snowflake/ml/modeling/decomposition/sparse_pca.py +195 -123
- snowflake/ml/modeling/decomposition/truncated_svd.py +195 -123
- snowflake/ml/modeling/discriminant_analysis/linear_discriminant_analysis.py +195 -123
- snowflake/ml/modeling/discriminant_analysis/quadratic_discriminant_analysis.py +195 -123
- snowflake/ml/modeling/ensemble/ada_boost_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/ada_boost_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/bagging_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/bagging_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/extra_trees_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/extra_trees_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/gradient_boosting_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/gradient_boosting_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/hist_gradient_boosting_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/hist_gradient_boosting_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/isolation_forest.py +195 -123
- snowflake/ml/modeling/ensemble/random_forest_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/random_forest_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/stacking_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/voting_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/voting_regressor.py +195 -123
- snowflake/ml/modeling/feature_selection/generic_univariate_select.py +195 -123
- snowflake/ml/modeling/feature_selection/select_fdr.py +195 -123
- snowflake/ml/modeling/feature_selection/select_fpr.py +195 -123
- snowflake/ml/modeling/feature_selection/select_fwe.py +195 -123
- snowflake/ml/modeling/feature_selection/select_k_best.py +195 -123
- snowflake/ml/modeling/feature_selection/select_percentile.py +195 -123
- snowflake/ml/modeling/feature_selection/sequential_feature_selector.py +195 -123
- snowflake/ml/modeling/feature_selection/variance_threshold.py +195 -123
- snowflake/ml/modeling/framework/_utils.py +8 -1
- snowflake/ml/modeling/framework/base.py +24 -6
- snowflake/ml/modeling/gaussian_process/gaussian_process_classifier.py +195 -123
- snowflake/ml/modeling/gaussian_process/gaussian_process_regressor.py +195 -123
- snowflake/ml/modeling/impute/iterative_imputer.py +195 -123
- snowflake/ml/modeling/impute/knn_imputer.py +195 -123
- snowflake/ml/modeling/impute/missing_indicator.py +195 -123
- snowflake/ml/modeling/impute/simple_imputer.py +4 -15
- snowflake/ml/modeling/kernel_approximation/additive_chi2_sampler.py +195 -123
- snowflake/ml/modeling/kernel_approximation/nystroem.py +195 -123
- snowflake/ml/modeling/kernel_approximation/polynomial_count_sketch.py +195 -123
- snowflake/ml/modeling/kernel_approximation/rbf_sampler.py +195 -123
- snowflake/ml/modeling/kernel_approximation/skewed_chi2_sampler.py +195 -123
- snowflake/ml/modeling/kernel_ridge/kernel_ridge.py +195 -123
- snowflake/ml/modeling/lightgbm/lgbm_classifier.py +198 -125
- snowflake/ml/modeling/lightgbm/lgbm_regressor.py +198 -125
- snowflake/ml/modeling/linear_model/ard_regression.py +195 -123
- snowflake/ml/modeling/linear_model/bayesian_ridge.py +195 -123
- snowflake/ml/modeling/linear_model/elastic_net.py +195 -123
- snowflake/ml/modeling/linear_model/elastic_net_cv.py +195 -123
- snowflake/ml/modeling/linear_model/gamma_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/huber_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/lars.py +195 -123
- snowflake/ml/modeling/linear_model/lars_cv.py +195 -123
- snowflake/ml/modeling/linear_model/lasso.py +195 -123
- snowflake/ml/modeling/linear_model/lasso_cv.py +195 -123
- snowflake/ml/modeling/linear_model/lasso_lars.py +195 -123
- snowflake/ml/modeling/linear_model/lasso_lars_cv.py +195 -123
- snowflake/ml/modeling/linear_model/lasso_lars_ic.py +195 -123
- snowflake/ml/modeling/linear_model/linear_regression.py +195 -123
- snowflake/ml/modeling/linear_model/logistic_regression.py +195 -123
- snowflake/ml/modeling/linear_model/logistic_regression_cv.py +195 -123
- snowflake/ml/modeling/linear_model/multi_task_elastic_net.py +195 -123
- snowflake/ml/modeling/linear_model/multi_task_elastic_net_cv.py +195 -123
- snowflake/ml/modeling/linear_model/multi_task_lasso.py +195 -123
- snowflake/ml/modeling/linear_model/multi_task_lasso_cv.py +195 -123
- snowflake/ml/modeling/linear_model/orthogonal_matching_pursuit.py +195 -123
- snowflake/ml/modeling/linear_model/passive_aggressive_classifier.py +195 -123
- snowflake/ml/modeling/linear_model/passive_aggressive_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/perceptron.py +195 -123
- snowflake/ml/modeling/linear_model/poisson_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/ransac_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/ridge.py +195 -123
- snowflake/ml/modeling/linear_model/ridge_classifier.py +195 -123
- snowflake/ml/modeling/linear_model/ridge_classifier_cv.py +195 -123
- snowflake/ml/modeling/linear_model/ridge_cv.py +195 -123
- snowflake/ml/modeling/linear_model/sgd_classifier.py +195 -123
- snowflake/ml/modeling/linear_model/sgd_one_class_svm.py +195 -123
- snowflake/ml/modeling/linear_model/sgd_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/theil_sen_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/tweedie_regressor.py +195 -123
- snowflake/ml/modeling/manifold/isomap.py +195 -123
- snowflake/ml/modeling/manifold/mds.py +195 -123
- snowflake/ml/modeling/manifold/spectral_embedding.py +195 -123
- snowflake/ml/modeling/manifold/tsne.py +195 -123
- snowflake/ml/modeling/mixture/bayesian_gaussian_mixture.py +195 -123
- snowflake/ml/modeling/mixture/gaussian_mixture.py +195 -123
- snowflake/ml/modeling/model_selection/grid_search_cv.py +42 -18
- snowflake/ml/modeling/model_selection/randomized_search_cv.py +42 -18
- snowflake/ml/modeling/multiclass/one_vs_one_classifier.py +195 -123
- snowflake/ml/modeling/multiclass/one_vs_rest_classifier.py +195 -123
- snowflake/ml/modeling/multiclass/output_code_classifier.py +195 -123
- snowflake/ml/modeling/naive_bayes/bernoulli_nb.py +195 -123
- snowflake/ml/modeling/naive_bayes/categorical_nb.py +195 -123
- snowflake/ml/modeling/naive_bayes/complement_nb.py +195 -123
- snowflake/ml/modeling/naive_bayes/gaussian_nb.py +195 -123
- snowflake/ml/modeling/naive_bayes/multinomial_nb.py +195 -123
- snowflake/ml/modeling/neighbors/k_neighbors_classifier.py +195 -123
- snowflake/ml/modeling/neighbors/k_neighbors_regressor.py +195 -123
- snowflake/ml/modeling/neighbors/kernel_density.py +195 -123
- snowflake/ml/modeling/neighbors/local_outlier_factor.py +195 -123
- snowflake/ml/modeling/neighbors/nearest_centroid.py +195 -123
- snowflake/ml/modeling/neighbors/nearest_neighbors.py +195 -123
- snowflake/ml/modeling/neighbors/neighborhood_components_analysis.py +195 -123
- snowflake/ml/modeling/neighbors/radius_neighbors_classifier.py +195 -123
- snowflake/ml/modeling/neighbors/radius_neighbors_regressor.py +195 -123
- snowflake/ml/modeling/neural_network/bernoulli_rbm.py +195 -123
- snowflake/ml/modeling/neural_network/mlp_classifier.py +195 -123
- snowflake/ml/modeling/neural_network/mlp_regressor.py +195 -123
- snowflake/ml/modeling/pipeline/pipeline.py +4 -4
- snowflake/ml/modeling/preprocessing/binarizer.py +1 -5
- snowflake/ml/modeling/preprocessing/k_bins_discretizer.py +1 -5
- snowflake/ml/modeling/preprocessing/label_encoder.py +1 -5
- snowflake/ml/modeling/preprocessing/max_abs_scaler.py +1 -5
- snowflake/ml/modeling/preprocessing/min_max_scaler.py +10 -12
- snowflake/ml/modeling/preprocessing/normalizer.py +1 -5
- snowflake/ml/modeling/preprocessing/one_hot_encoder.py +1 -5
- snowflake/ml/modeling/preprocessing/ordinal_encoder.py +1 -5
- snowflake/ml/modeling/preprocessing/polynomial_features.py +195 -123
- snowflake/ml/modeling/preprocessing/robust_scaler.py +1 -5
- snowflake/ml/modeling/preprocessing/standard_scaler.py +11 -11
- snowflake/ml/modeling/semi_supervised/label_propagation.py +195 -123
- snowflake/ml/modeling/semi_supervised/label_spreading.py +195 -123
- snowflake/ml/modeling/svm/linear_svc.py +195 -123
- snowflake/ml/modeling/svm/linear_svr.py +195 -123
- snowflake/ml/modeling/svm/nu_svc.py +195 -123
- snowflake/ml/modeling/svm/nu_svr.py +195 -123
- snowflake/ml/modeling/svm/svc.py +195 -123
- snowflake/ml/modeling/svm/svr.py +195 -123
- snowflake/ml/modeling/tree/decision_tree_classifier.py +195 -123
- snowflake/ml/modeling/tree/decision_tree_regressor.py +195 -123
- snowflake/ml/modeling/tree/extra_tree_classifier.py +195 -123
- snowflake/ml/modeling/tree/extra_tree_regressor.py +195 -123
- snowflake/ml/modeling/xgboost/xgb_classifier.py +195 -123
- snowflake/ml/modeling/xgboost/xgb_regressor.py +195 -123
- snowflake/ml/modeling/xgboost/xgbrf_classifier.py +195 -123
- snowflake/ml/modeling/xgboost/xgbrf_regressor.py +195 -123
- snowflake/ml/registry/_manager/model_manager.py +5 -1
- snowflake/ml/registry/model_registry.py +99 -26
- snowflake/ml/registry/registry.py +3 -2
- snowflake/ml/version.py +1 -1
- {snowflake_ml_python-1.3.1.dist-info → snowflake_ml_python-1.4.1.dist-info}/METADATA +94 -55
- {snowflake_ml_python-1.3.1.dist-info → snowflake_ml_python-1.4.1.dist-info}/RECORD +218 -212
- snowflake/ml/model/_model_composer/model_runtime/model_runtime.py +0 -97
- {snowflake_ml_python-1.3.1.dist-info → snowflake_ml_python-1.4.1.dist-info}/LICENSE.txt +0 -0
- {snowflake_ml_python-1.3.1.dist-info → snowflake_ml_python-1.4.1.dist-info}/WHEEL +0 -0
- {snowflake_ml_python-1.3.1.dist-info → snowflake_ml_python-1.4.1.dist-info}/top_level.txt +0 -0
snowflake/ml/modeling/ensemble/extra_trees_regressor.py

@@ -33,6 +33,15 @@ from snowflake.ml.modeling._internal.transformer_protocols import (
     BatchInferenceKwargsTypedDict,
     ScoreKwargsTypedDict
 )
+from snowflake.ml.model._signatures import utils as model_signature_utils
+from snowflake.ml.model.model_signature import (
+    BaseFeatureSpec,
+    DataType,
+    FeatureSpec,
+    ModelSignature,
+    _infer_signature,
+    _rename_signature_with_snowflake_identifiers,
+)

 from snowflake.ml.modeling._internal.model_transformer_builder import ModelTransformerBuilder

@@ -43,16 +52,6 @@ from snowflake.ml.modeling._internal.estimator_utils import (
     validate_sklearn_args,
 )

-from snowflake.ml.model.model_signature import (
-    DataType,
-    FeatureSpec,
-    ModelSignature,
-    _infer_signature,
-    _rename_signature_with_snowflake_identifiers,
-    BaseFeatureSpec,
-)
-from snowflake.ml.model._signatures import utils as model_signature_utils
-
 _PROJECT = "ModelDevelopment"
 # Derive subproject from module name by removing "sklearn"
 # and converting module name from underscore to CamelCase
@@ -358,12 +357,7 @@ class ExtraTreesRegressor(BaseTransformer):
         )
         return selected_cols

-    @telemetry.send_api_usage_telemetry(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
-    def fit(self, dataset: Union[DataFrame, pd.DataFrame]) -> "ExtraTreesRegressor":
+    def _fit(self, dataset: Union[DataFrame, pd.DataFrame]) -> "ExtraTreesRegressor":
         """Build a forest of trees from the training set (X, y)
         For more details on this function, see [sklearn.ensemble.ExtraTreesRegressor.fit]
         (https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesRegressor.html#sklearn.ensemble.ExtraTreesRegressor.fit)
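Note on the `fit` → `_fit` rename above: the telemetry decorator and the public entry point disappear from the generated class, which suggests they now live on the shared `BaseTransformer`. A minimal, hypothetical sketch of that template-method split (class names other than `fit`/`_fit` are assumptions, not the library's actual code):

```python
from typing import Any


class BaseTransformerSketch:
    """Hypothetical base class: owns the public, telemetry-wrapped entry point."""

    def fit(self, dataset: Any) -> "BaseTransformerSketch":
        # Cross-cutting work (telemetry, dataset validation) would happen once here,
        # then training is delegated to the subclass hook.
        return self._fit(dataset)

    def _fit(self, dataset: Any) -> "BaseTransformerSketch":
        raise NotImplementedError


class ExtraTreesRegressorSketch(BaseTransformerSketch):
    def _fit(self, dataset: Any) -> "ExtraTreesRegressorSketch":
        # Estimator-specific training logic, as in the generated `_fit` above.
        return self
```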
@@ -390,12 +384,14 @@ class ExtraTreesRegressor(BaseTransformer):

         self._snowpark_cols = dataset.select(self.input_cols).columns

-
+        # If we are already in a stored procedure, no need to kick off another one.
         if SNOWML_SPROC_ENV in os.environ:
             statement_params = telemetry.get_function_usage_statement_params(
                 project=_PROJECT,
                 subproject=_SUBPROJECT,
-                function_name=telemetry.get_statement_params_full_func_name(
+                function_name=telemetry.get_statement_params_full_func_name(
+                    inspect.currentframe(), ExtraTreesRegressor.__class__.__name__
+                ),
                 api_calls=[Session.call],
                 custom_tags=dict([("autogen", True)]) if self._autogenerated else None,
             )
@@ -416,7 +412,7 @@ class ExtraTreesRegressor(BaseTransformer):
         )
         self._sklearn_object = model_trainer.train()
         self._is_fitted = True
-        self.
+        self._generate_model_signatures(dataset)
         return self

     def _batch_inference_validate_snowpark(
@@ -492,7 +488,9 @@ class ExtraTreesRegressor(BaseTransformer):
             # when it is classifier, infer the datatype from label columns
             if expected_type_inferred == "" and 'predict' in self.model_signatures:
                 # Batch inference takes a single expected output column type. Use the first columns type for now.
-                label_cols_signatures = [
+                label_cols_signatures = [
+                    row for row in self.model_signatures['predict'].outputs if row.name in self.output_cols
+                ]
                 if len(label_cols_signatures) == 0:
                     error_str = f"Output columns {self.output_cols} do not match model signatures {self.model_signatures['predict'].outputs}."
                     raise exceptions.SnowflakeMLException(
@@ -500,25 +498,22 @@ class ExtraTreesRegressor(BaseTransformer):
                         original_exception=ValueError(error_str),
                     )

-                expected_type_inferred = convert_sp_to_sf_type(
-                    label_cols_signatures[0].as_snowpark_type()
-                )
+                expected_type_inferred = convert_sp_to_sf_type(label_cols_signatures[0].as_snowpark_type())

             self._deps = self._batch_inference_validate_snowpark(dataset=dataset, inference_method=inference_method)
-            assert isinstance(
+            assert isinstance(
+                dataset._session, Session
+            ) # mypy does not recognize the check in _batch_inference_validate_snowpark()

             transform_kwargs = dict(
-                session
-                dependencies
-                drop_input_cols
-                expected_output_cols_type
+                session=dataset._session,
+                dependencies=self._deps,
+                drop_input_cols=self._drop_input_cols,
+                expected_output_cols_type=expected_type_inferred,
             )

         elif isinstance(dataset, pd.DataFrame):
-            transform_kwargs = dict(
-                snowpark_input_cols = self._snowpark_cols,
-                drop_input_cols = self._drop_input_cols
-            )
+            transform_kwargs = dict(snowpark_input_cols=self._snowpark_cols, drop_input_cols=self._drop_input_cols)

         transform_handlers = ModelTransformerBuilder.build(
             dataset=dataset,
|
|
558
553
|
Transformed dataset.
|
559
554
|
"""
|
560
555
|
super()._check_dataset_type(dataset)
|
561
|
-
inference_method="transform"
|
556
|
+
inference_method = "transform"
|
562
557
|
|
563
558
|
# This dictionary contains optional kwargs for batch inference. These kwargs
|
564
559
|
# are specific to the type of dataset used.
|
@@ -595,17 +590,14 @@ class ExtraTreesRegressor(BaseTransformer):
|
|
595
590
|
assert isinstance(dataset._session, Session) # mypy does not recognize the check in _batch_inference_validate_snowpark()
|
596
591
|
|
597
592
|
transform_kwargs = dict(
|
598
|
-
session
|
599
|
-
dependencies
|
600
|
-
drop_input_cols
|
601
|
-
expected_output_cols_type
|
593
|
+
session=dataset._session,
|
594
|
+
dependencies=self._deps,
|
595
|
+
drop_input_cols=self._drop_input_cols,
|
596
|
+
expected_output_cols_type=expected_dtype,
|
602
597
|
)
|
603
598
|
|
604
599
|
elif isinstance(dataset, pd.DataFrame):
|
605
|
-
transform_kwargs = dict(
|
606
|
-
snowpark_input_cols = self._snowpark_cols,
|
607
|
-
drop_input_cols = self._drop_input_cols
|
608
|
-
)
|
600
|
+
transform_kwargs = dict(snowpark_input_cols=self._snowpark_cols, drop_input_cols=self._drop_input_cols)
|
609
601
|
|
610
602
|
transform_handlers = ModelTransformerBuilder.build(
|
611
603
|
dataset=dataset,
|
@@ -624,7 +616,11 @@ class ExtraTreesRegressor(BaseTransformer):
         return output_df

     @available_if(original_estimator_has_callable("fit_predict")) # type: ignore[misc]
-    def fit_predict(
+    def fit_predict(
+        self,
+        dataset: Union[DataFrame, pd.DataFrame],
+        output_cols_prefix: str = "fit_predict_",
+    ) -> Union[DataFrame, pd.DataFrame]:
        """ Method not supported for this class.


@@ -649,7 +645,9 @@ class ExtraTreesRegressor(BaseTransformer):
         )
         output_result, fitted_estimator = model_trainer.train_fit_predict(
             drop_input_cols=self._drop_input_cols,
-            expected_output_cols_list=
+            expected_output_cols_list=(
+                self.output_cols if self.output_cols else self._get_output_column_names(output_cols_prefix)
+            ),
         )
         self._sklearn_object = fitted_estimator
         self._is_fitted = True
@@ -666,6 +664,62 @@ class ExtraTreesRegressor(BaseTransformer):
         assert self._sklearn_object is not None
         return self._sklearn_object.embedding_

+
+    def _get_output_column_names(self, output_cols_prefix: str, output_cols: Optional[List[str]] = None) -> List[str]:
+        """ Returns the list of output columns for predict_proba(), decision_function(), etc.. functions.
+        Returns a list with output_cols_prefix as the only element if the estimator is not a classifier.
+        """
+        output_cols_prefix = identifier.resolve_identifier(output_cols_prefix)
+        # The following condition is introduced for kneighbors methods, and not used in other methods
+        if output_cols:
+            output_cols = [
+                identifier.concat_names([output_cols_prefix, identifier.resolve_identifier(c)])
+                for c in output_cols
+            ]
+        elif getattr(self._sklearn_object, "classes_", None) is None:
+            output_cols = [output_cols_prefix]
+        elif self._sklearn_object is not None:
+            classes = self._sklearn_object.classes_
+            if isinstance(classes, numpy.ndarray):
+                output_cols = [f'{output_cols_prefix}{str(c)}' for c in classes.tolist()]
+            elif isinstance(classes, list) and len(classes) > 0 and isinstance(classes[0], numpy.ndarray):
+                # If the estimator is a multioutput estimator, classes_ will be a list of ndarrays.
+                output_cols = []
+                for i, cl in enumerate(classes):
+                    # For binary classification, there is only one output column for each class
+                    # ndarray as the two classes are complementary.
+                    if len(cl) == 2:
+                        output_cols.append(f'{output_cols_prefix}{i}_{cl[0]}')
+                    else:
+                        output_cols.extend([
+                            f'{output_cols_prefix}{i}_{c}' for c in cl.tolist()
+                        ])
+        else:
+            output_cols = []
+
+        # Make sure column names are valid snowflake identifiers.
+        assert output_cols is not None # Make MyPy happy
+        rv = [identifier.rename_to_valid_snowflake_identifier(c) for c in output_cols]
+
+        return rv
+
+    def _align_expected_output_names(
+        self, method: str, dataset: DataFrame, expected_output_cols_list: List[str], output_cols_prefix: str
+    ) -> List[str]:
+        # in case the inferred output column names dimension is different
+        # we use one line of snowpark dataframe and put it into sklearn estimator using pandas
+        output_df_pd = getattr(self, method)(dataset.limit(1).to_pandas(), output_cols_prefix)
+        output_df_columns = list(output_df_pd.columns)
+        output_df_columns_set: Set[str] = set(output_df_columns) - set(dataset.columns)
+        if self.sample_weight_col:
+            output_df_columns_set -= set(self.sample_weight_col)
+        # if the dimension of inferred output column names is correct; use it
+        if len(expected_output_cols_list) == len(output_df_columns_set):
+            return expected_output_cols_list
+        # otherwise, use the sklearn estimator's output
+        else:
+            return sorted(list(output_df_columns_set), key=lambda x: output_df_columns.index(x))
+
     @available_if(original_estimator_has_callable("predict_proba")) # type: ignore[misc]
     @telemetry.send_api_usage_telemetry(
         project=_PROJECT,
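For readers skimming the new `_get_output_column_names` helper above, here is a self-contained sketch of the naming scheme it implements (simplified: Snowflake identifier quoting and the multioutput/kneighbors branches are omitted; the function name is illustrative, not part of the package):

```python
from typing import List, Optional, Sequence


def sketch_output_column_names(prefix: str, classes: Optional[Sequence[object]] = None) -> List[str]:
    """Simplified mirror of the helper: one column per class for classifiers,
    a single prefix-named column otherwise (e.g. for this regressor)."""
    if classes is None:
        return [prefix]
    return [f"{prefix}{c}" for c in classes]


print(sketch_output_column_names("predict_proba_", [0, 1, 2]))
# ['predict_proba_0', 'predict_proba_1', 'predict_proba_2']
print(sketch_output_column_names("predict_"))
# ['predict_']
```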
@@ -696,24 +750,28 @@ class ExtraTreesRegressor(BaseTransformer):
         # are specific to the type of dataset used.
         transform_kwargs: BatchInferenceKwargsTypedDict = dict()

+        expected_output_cols = self._get_output_column_names(output_cols_prefix)
+
         if isinstance(dataset, DataFrame):
             self._deps = self._batch_inference_validate_snowpark(
                 dataset=dataset,
                 inference_method=inference_method,
             )
-            assert isinstance(
+            assert isinstance(
+                dataset._session, Session
+            ) # mypy does not recognize the check in _batch_inference_validate_snowpark()
             transform_kwargs = dict(
                 session=dataset._session,
                 dependencies=self._deps,
-                drop_input_cols
+                drop_input_cols=self._drop_input_cols,
                 expected_output_cols_type="float",
             )
+            expected_output_cols = self._align_expected_output_names(
+                inference_method, dataset, expected_output_cols, output_cols_prefix
+            )

         elif isinstance(dataset, pd.DataFrame):
-            transform_kwargs = dict(
-                snowpark_input_cols = self._snowpark_cols,
-                drop_input_cols = self._drop_input_cols
-            )
+            transform_kwargs = dict(snowpark_input_cols=self._snowpark_cols, drop_input_cols=self._drop_input_cols)

         transform_handlers = ModelTransformerBuilder.build(
             dataset=dataset,
@@ -725,7 +783,7 @@ class ExtraTreesRegressor(BaseTransformer):
         output_df: DATAFRAME_TYPE = transform_handlers.batch_inference(
             inference_method=inference_method,
             input_cols=self.input_cols,
-            expected_output_cols=
+            expected_output_cols=expected_output_cols,
             **transform_kwargs
         )
         return output_df
@@ -755,7 +813,8 @@ class ExtraTreesRegressor(BaseTransformer):
             Output dataset with log probability of the sample for each class in the model.
         """
         super()._check_dataset_type(dataset)
-        inference_method="predict_log_proba"
+        inference_method = "predict_log_proba"
+        expected_output_cols = self._get_output_column_names(output_cols_prefix)

         # This dictionary contains optional kwargs for batch inference. These kwargs
         # are specific to the type of dataset used.
@@ -766,18 +825,20 @@ class ExtraTreesRegressor(BaseTransformer):
                 dataset=dataset,
                 inference_method=inference_method,
             )
-            assert isinstance(
+            assert isinstance(
+                dataset._session, Session
+            ) # mypy does not recognize the check in _batch_inference_validate_snowpark()
             transform_kwargs = dict(
                 session=dataset._session,
                 dependencies=self._deps,
-                drop_input_cols
+                drop_input_cols=self._drop_input_cols,
                 expected_output_cols_type="float",
             )
+            expected_output_cols = self._align_expected_output_names(
+                inference_method, dataset, expected_output_cols, output_cols_prefix
+            )
         elif isinstance(dataset, pd.DataFrame):
-            transform_kwargs = dict(
-                snowpark_input_cols = self._snowpark_cols,
-                drop_input_cols = self._drop_input_cols
-            )
+            transform_kwargs = dict(snowpark_input_cols=self._snowpark_cols, drop_input_cols=self._drop_input_cols)

         transform_handlers = ModelTransformerBuilder.build(
             dataset=dataset,
@@ -790,7 +851,7 @@ class ExtraTreesRegressor(BaseTransformer):
         output_df: DATAFRAME_TYPE = transform_handlers.batch_inference(
             inference_method=inference_method,
             input_cols=self.input_cols,
-            expected_output_cols=
+            expected_output_cols=expected_output_cols,
             **transform_kwargs
         )
         return output_df
@@ -816,30 +877,34 @@ class ExtraTreesRegressor(BaseTransformer):
             Output dataset with results of the decision function for the samples in input dataset.
         """
         super()._check_dataset_type(dataset)
-        inference_method="decision_function"
+        inference_method = "decision_function"

         # This dictionary contains optional kwargs for batch inference. These kwargs
         # are specific to the type of dataset used.
         transform_kwargs: BatchInferenceKwargsTypedDict = dict()

+        expected_output_cols = self._get_output_column_names(output_cols_prefix)
+
         if isinstance(dataset, DataFrame):
             self._deps = self._batch_inference_validate_snowpark(
                 dataset=dataset,
                 inference_method=inference_method,
             )
-            assert isinstance(
+            assert isinstance(
+                dataset._session, Session
+            ) # mypy does not recognize the check in _batch_inference_validate_snowpark()
             transform_kwargs = dict(
                 session=dataset._session,
                 dependencies=self._deps,
-                drop_input_cols
+                drop_input_cols=self._drop_input_cols,
                 expected_output_cols_type="float",
             )
+            expected_output_cols = self._align_expected_output_names(
+                inference_method, dataset, expected_output_cols, output_cols_prefix
+            )

         elif isinstance(dataset, pd.DataFrame):
-            transform_kwargs = dict(
-                snowpark_input_cols = self._snowpark_cols,
-                drop_input_cols = self._drop_input_cols
-            )
+            transform_kwargs = dict(snowpark_input_cols=self._snowpark_cols, drop_input_cols=self._drop_input_cols)

         transform_handlers = ModelTransformerBuilder.build(
             dataset=dataset,
@@ -852,7 +917,7 @@ class ExtraTreesRegressor(BaseTransformer):
         output_df: DATAFRAME_TYPE = transform_handlers.batch_inference(
             inference_method=inference_method,
             input_cols=self.input_cols,
-            expected_output_cols=
+            expected_output_cols=expected_output_cols,
             **transform_kwargs
         )
         return output_df
@@ -881,12 +946,14 @@ class ExtraTreesRegressor(BaseTransformer):
             Output dataset with probability of the sample for each class in the model.
         """
         super()._check_dataset_type(dataset)
-        inference_method="score_samples"
+        inference_method = "score_samples"

         # This dictionary contains optional kwargs for batch inference. These kwargs
         # are specific to the type of dataset used.
         transform_kwargs: BatchInferenceKwargsTypedDict = dict()

+        expected_output_cols = self._get_output_column_names(output_cols_prefix)
+
         if isinstance(dataset, DataFrame):
             self._deps = self._batch_inference_validate_snowpark(
                 dataset=dataset,
@@ -899,6 +966,9 @@ class ExtraTreesRegressor(BaseTransformer):
                 drop_input_cols = self._drop_input_cols,
                 expected_output_cols_type="float",
             )
+            expected_output_cols = self._align_expected_output_names(
+                inference_method, dataset, expected_output_cols, output_cols_prefix
+            )

         elif isinstance(dataset, pd.DataFrame):
             transform_kwargs = dict(
@@ -917,7 +987,7 @@ class ExtraTreesRegressor(BaseTransformer):
         output_df: DATAFRAME_TYPE = transform_handlers.batch_inference(
             inference_method=inference_method,
             input_cols=self.input_cols,
-            expected_output_cols=
+            expected_output_cols=expected_output_cols,
             **transform_kwargs
         )
         return output_df
@@ -1064,50 +1134,84 @@ class ExtraTreesRegressor(BaseTransformer):
         )
         return output_df

+
+
+    def to_sklearn(self) -> Any:
+        """Get sklearn.ensemble.ExtraTreesRegressor object.
+        """
+        if self._sklearn_object is None:
+            self._sklearn_object = self._create_sklearn_object()
+        return self._sklearn_object
+
+    def to_xgboost(self) -> Any:
+        raise exceptions.SnowflakeMLException(
+            error_code=error_codes.METHOD_NOT_ALLOWED,
+            original_exception=AttributeError(
+                modeling_error_messages.UNSUPPORTED_MODEL_CONVERSION.format(
+                    "to_xgboost()",
+                    "to_sklearn()"
+                )
+            ),
+        )
+
+    def to_lightgbm(self) -> Any:
+        raise exceptions.SnowflakeMLException(
+            error_code=error_codes.METHOD_NOT_ALLOWED,
+            original_exception=AttributeError(
+                modeling_error_messages.UNSUPPORTED_MODEL_CONVERSION.format(
+                    "to_lightgbm()",
+                    "to_sklearn()"
+                )
+            ),
+        )

-    def
+    def _get_dependencies(self) -> List[str]:
+        return self._deps
+
+
+    def _generate_model_signatures(self, dataset: Union[DataFrame, pd.DataFrame]) -> None:
         self._model_signature_dict = dict()

         PROB_FUNCTIONS = ["predict_log_proba", "predict_proba", "decision_function"]

-        inputs = list(_infer_signature(dataset[self.input_cols], "input"))
+        inputs = list(_infer_signature(dataset[self.input_cols], "input", use_snowflake_identifiers=True))
         outputs: List[BaseFeatureSpec] = []
         if hasattr(self, "predict"):
             # keep mypy happy
-            assert self._sklearn_object is not None and hasattr(self._sklearn_object, "_estimator_type")
+            assert self._sklearn_object is not None and hasattr(self._sklearn_object, "_estimator_type")
             # For classifier, the type of predict is the same as the type of label
-            if self._sklearn_object._estimator_type ==
-
+            if self._sklearn_object._estimator_type == "classifier":
+                # label columns is the desired type for output
                 outputs = list(_infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True))
                 # rename the output columns
                 outputs = list(model_signature_utils.rename_features(outputs, self.output_cols))
-                self._model_signature_dict["predict"] = ModelSignature(
-
-
+                self._model_signature_dict["predict"] = ModelSignature(
+                    inputs, ([] if self._drop_input_cols else inputs) + outputs
+                )
             # For mixture models that use the density mixin, `predict` returns the argmax of the log prob.
             # For outlier models, returns -1 for outliers and 1 for inliers.
-            # Clusterer returns int64 cluster labels.
+            # Clusterer returns int64 cluster labels.
             elif self._sklearn_object._estimator_type in ["DensityEstimator", "clusterer", "outlier_detector"]:
                 outputs = [FeatureSpec(dtype=DataType.INT64, name=c) for c in self.output_cols]
-                self._model_signature_dict["predict"] = ModelSignature(
-
-
-
+                self._model_signature_dict["predict"] = ModelSignature(
+                    inputs, ([] if self._drop_input_cols else inputs) + outputs
+                )
+
             # For regressor, the type of predict is float64
-            elif self._sklearn_object._estimator_type ==
+            elif self._sklearn_object._estimator_type == "regressor":
                 outputs = [FeatureSpec(dtype=DataType.DOUBLE, name=c) for c in self.output_cols]
-                self._model_signature_dict["predict"] = ModelSignature(
-
-
-
+                self._model_signature_dict["predict"] = ModelSignature(
+                    inputs, ([] if self._drop_input_cols else inputs) + outputs
+                )
+
         for prob_func in PROB_FUNCTIONS:
             if hasattr(self, prob_func):
                 output_cols_prefix: str = f"{prob_func}_"
                 output_column_names = self._get_output_column_names(output_cols_prefix)
                 outputs = [FeatureSpec(dtype=DataType.DOUBLE, name=c) for c in output_column_names]
-                self._model_signature_dict[prob_func] = ModelSignature(
-
-
+                self._model_signature_dict[prob_func] = ModelSignature(
+                    inputs, ([] if self._drop_input_cols else inputs) + outputs
+                )

         # Output signature names may still need to be renamed, since they were not created with `_infer_signature`.
         items = list(self._model_signature_dict.items())
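A hedged usage sketch of how the regenerated estimator is typically driven and how the relocated conversion helpers behave; the column names and data below are made up, and the constructor arguments shown are assumptions based on the generated API rather than a verified snippet:

```python
import pandas as pd

from snowflake.ml.modeling.ensemble import ExtraTreesRegressor

# Toy in-memory data; a Snowpark DataFrame would be handled by the Snowpark branch instead.
df = pd.DataFrame(
    {
        "F1": [1.0, 2.0, 3.0, 4.0],
        "F2": [0.5, 0.1, 0.9, 0.3],
        "TARGET": [1.0, 2.0, 3.0, 4.0],
    }
)

regressor = ExtraTreesRegressor(
    input_cols=["F1", "F2"],
    label_cols=["TARGET"],
    output_cols=["PREDICTION"],
)
regressor.fit(df)                    # dispatches into the `_fit` hook shown in this diff
predictions = regressor.predict(df)  # pandas in, pandas out

sk_model = regressor.to_sklearn()    # underlying sklearn.ensemble.ExtraTreesRegressor
# regressor.to_xgboost()             # raises SnowflakeMLException (METHOD_NOT_ALLOWED), per the diff
```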
@@ -1120,10 +1224,10 @@ class ExtraTreesRegressor(BaseTransformer):
         """Returns model signature of current class.

         Raises:
-
+            SnowflakeMLException: If estimator is not fitted, then model signature cannot be inferred

         Returns:
-            Dict
+            Dict with each method and its input output signature
         """
         if self._model_signature_dict is None:
             raise exceptions.SnowflakeMLException(
@@ -1131,35 +1235,3 @@ class ExtraTreesRegressor(BaseTransformer):
                 original_exception=RuntimeError("Estimator not fitted before accessing property model_signatures!"),
             )
         return self._model_signature_dict
-
-    def to_sklearn(self) -> Any:
-        """Get sklearn.ensemble.ExtraTreesRegressor object.
-        """
-        if self._sklearn_object is None:
-            self._sklearn_object = self._create_sklearn_object()
-        return self._sklearn_object
-
-    def to_xgboost(self) -> Any:
-        raise exceptions.SnowflakeMLException(
-            error_code=error_codes.METHOD_NOT_ALLOWED,
-            original_exception=AttributeError(
-                modeling_error_messages.UNSUPPORTED_MODEL_CONVERSION.format(
-                    "to_xgboost()",
-                    "to_sklearn()"
-                )
-            ),
-        )
-
-    def to_lightgbm(self) -> Any:
-        raise exceptions.SnowflakeMLException(
-            error_code=error_codes.METHOD_NOT_ALLOWED,
-            original_exception=AttributeError(
-                modeling_error_messages.UNSUPPORTED_MODEL_CONVERSION.format(
-                    "to_lightgbm()",
-                    "to_sklearn()"
-                )
-            ),
-        )
-
-    def _get_dependencies(self) -> List[str]:
-        return self._deps