snowflake-ml-python 1.3.1__py3-none-any.whl → 1.4.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- snowflake/ml/_internal/env_utils.py +11 -1
- snowflake/ml/_internal/human_readable_id/adjectives.txt +128 -0
- snowflake/ml/_internal/human_readable_id/animals.txt +128 -0
- snowflake/ml/_internal/human_readable_id/hrid_generator.py +40 -0
- snowflake/ml/_internal/human_readable_id/hrid_generator_base.py +135 -0
- snowflake/ml/_internal/utils/formatting.py +1 -1
- snowflake/ml/_internal/utils/identifier.py +3 -1
- snowflake/ml/_internal/utils/sql_identifier.py +2 -6
- snowflake/ml/feature_store/feature_store.py +166 -184
- snowflake/ml/feature_store/feature_view.py +12 -24
- snowflake/ml/fileset/sfcfs.py +56 -50
- snowflake/ml/fileset/stage_fs.py +48 -13
- snowflake/ml/model/_client/model/model_version_impl.py +6 -49
- snowflake/ml/model/_client/ops/model_ops.py +78 -29
- snowflake/ml/model/_client/sql/model.py +23 -2
- snowflake/ml/model/_client/sql/model_version.py +22 -1
- snowflake/ml/model/_deploy_client/image_builds/server_image_builder.py +1 -3
- snowflake/ml/model/_deploy_client/snowservice/deploy.py +5 -2
- snowflake/ml/model/_model_composer/model_composer.py +7 -5
- snowflake/ml/model/_model_composer/model_manifest/model_manifest.py +19 -54
- snowflake/ml/model/_model_composer/model_manifest/model_manifest_schema.py +8 -1
- snowflake/ml/model/_model_composer/model_method/infer_table_function.py_template +1 -1
- snowflake/ml/model/_model_composer/model_method/model_method.py +6 -10
- snowflake/ml/model/_packager/model_handlers/catboost.py +206 -0
- snowflake/ml/model/_packager/model_handlers/lightgbm.py +218 -0
- snowflake/ml/model/_packager/model_handlers/sklearn.py +3 -0
- snowflake/ml/model/_packager/model_handlers/snowmlmodel.py +13 -1
- snowflake/ml/model/_packager/model_handlers/xgboost.py +1 -1
- snowflake/ml/model/_packager/model_meta/_core_requirements.py +1 -1
- snowflake/ml/model/_packager/model_meta/model_meta.py +36 -6
- snowflake/ml/model/_packager/model_meta/model_meta_schema.py +20 -1
- snowflake/ml/model/_packager/model_meta_migrator/migrator_plans.py +3 -1
- snowflake/ml/model/_packager/model_packager.py +2 -2
- snowflake/ml/model/{_model_composer/model_runtime/_runtime_requirements.py → _packager/model_runtime/_snowml_inference_alternative_requirements.py} +1 -1
- snowflake/ml/model/_packager/model_runtime/model_runtime.py +137 -0
- snowflake/ml/model/custom_model.py +3 -1
- snowflake/ml/model/type_hints.py +21 -2
- snowflake/ml/modeling/_internal/estimator_utils.py +16 -11
- snowflake/ml/modeling/_internal/local_implementations/pandas_handlers.py +4 -1
- snowflake/ml/modeling/_internal/model_specifications.py +3 -1
- snowflake/ml/modeling/_internal/snowpark_implementations/distributed_hpo_trainer.py +545 -0
- snowflake/ml/modeling/_internal/snowpark_implementations/snowpark_handlers.py +8 -5
- snowflake/ml/modeling/calibration/calibrated_classifier_cv.py +195 -123
- snowflake/ml/modeling/cluster/affinity_propagation.py +195 -123
- snowflake/ml/modeling/cluster/agglomerative_clustering.py +195 -123
- snowflake/ml/modeling/cluster/birch.py +195 -123
- snowflake/ml/modeling/cluster/bisecting_k_means.py +195 -123
- snowflake/ml/modeling/cluster/dbscan.py +195 -123
- snowflake/ml/modeling/cluster/feature_agglomeration.py +195 -123
- snowflake/ml/modeling/cluster/k_means.py +195 -123
- snowflake/ml/modeling/cluster/mean_shift.py +195 -123
- snowflake/ml/modeling/cluster/mini_batch_k_means.py +195 -123
- snowflake/ml/modeling/cluster/optics.py +195 -123
- snowflake/ml/modeling/cluster/spectral_biclustering.py +195 -123
- snowflake/ml/modeling/cluster/spectral_clustering.py +195 -123
- snowflake/ml/modeling/cluster/spectral_coclustering.py +195 -123
- snowflake/ml/modeling/compose/column_transformer.py +195 -123
- snowflake/ml/modeling/compose/transformed_target_regressor.py +195 -123
- snowflake/ml/modeling/covariance/elliptic_envelope.py +195 -123
- snowflake/ml/modeling/covariance/empirical_covariance.py +195 -123
- snowflake/ml/modeling/covariance/graphical_lasso.py +195 -123
- snowflake/ml/modeling/covariance/graphical_lasso_cv.py +195 -123
- snowflake/ml/modeling/covariance/ledoit_wolf.py +195 -123
- snowflake/ml/modeling/covariance/min_cov_det.py +195 -123
- snowflake/ml/modeling/covariance/oas.py +195 -123
- snowflake/ml/modeling/covariance/shrunk_covariance.py +195 -123
- snowflake/ml/modeling/decomposition/dictionary_learning.py +195 -123
- snowflake/ml/modeling/decomposition/factor_analysis.py +195 -123
- snowflake/ml/modeling/decomposition/fast_ica.py +195 -123
- snowflake/ml/modeling/decomposition/incremental_pca.py +195 -123
- snowflake/ml/modeling/decomposition/kernel_pca.py +195 -123
- snowflake/ml/modeling/decomposition/mini_batch_dictionary_learning.py +195 -123
- snowflake/ml/modeling/decomposition/mini_batch_sparse_pca.py +195 -123
- snowflake/ml/modeling/decomposition/pca.py +195 -123
- snowflake/ml/modeling/decomposition/sparse_pca.py +195 -123
- snowflake/ml/modeling/decomposition/truncated_svd.py +195 -123
- snowflake/ml/modeling/discriminant_analysis/linear_discriminant_analysis.py +195 -123
- snowflake/ml/modeling/discriminant_analysis/quadratic_discriminant_analysis.py +195 -123
- snowflake/ml/modeling/ensemble/ada_boost_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/ada_boost_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/bagging_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/bagging_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/extra_trees_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/extra_trees_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/gradient_boosting_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/gradient_boosting_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/hist_gradient_boosting_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/hist_gradient_boosting_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/isolation_forest.py +195 -123
- snowflake/ml/modeling/ensemble/random_forest_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/random_forest_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/stacking_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/voting_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/voting_regressor.py +195 -123
- snowflake/ml/modeling/feature_selection/generic_univariate_select.py +195 -123
- snowflake/ml/modeling/feature_selection/select_fdr.py +195 -123
- snowflake/ml/modeling/feature_selection/select_fpr.py +195 -123
- snowflake/ml/modeling/feature_selection/select_fwe.py +195 -123
- snowflake/ml/modeling/feature_selection/select_k_best.py +195 -123
- snowflake/ml/modeling/feature_selection/select_percentile.py +195 -123
- snowflake/ml/modeling/feature_selection/sequential_feature_selector.py +195 -123
- snowflake/ml/modeling/feature_selection/variance_threshold.py +195 -123
- snowflake/ml/modeling/framework/_utils.py +8 -1
- snowflake/ml/modeling/framework/base.py +24 -6
- snowflake/ml/modeling/gaussian_process/gaussian_process_classifier.py +195 -123
- snowflake/ml/modeling/gaussian_process/gaussian_process_regressor.py +195 -123
- snowflake/ml/modeling/impute/iterative_imputer.py +195 -123
- snowflake/ml/modeling/impute/knn_imputer.py +195 -123
- snowflake/ml/modeling/impute/missing_indicator.py +195 -123
- snowflake/ml/modeling/impute/simple_imputer.py +4 -15
- snowflake/ml/modeling/kernel_approximation/additive_chi2_sampler.py +195 -123
- snowflake/ml/modeling/kernel_approximation/nystroem.py +195 -123
- snowflake/ml/modeling/kernel_approximation/polynomial_count_sketch.py +195 -123
- snowflake/ml/modeling/kernel_approximation/rbf_sampler.py +195 -123
- snowflake/ml/modeling/kernel_approximation/skewed_chi2_sampler.py +195 -123
- snowflake/ml/modeling/kernel_ridge/kernel_ridge.py +195 -123
- snowflake/ml/modeling/lightgbm/lgbm_classifier.py +198 -125
- snowflake/ml/modeling/lightgbm/lgbm_regressor.py +198 -125
- snowflake/ml/modeling/linear_model/ard_regression.py +195 -123
- snowflake/ml/modeling/linear_model/bayesian_ridge.py +195 -123
- snowflake/ml/modeling/linear_model/elastic_net.py +195 -123
- snowflake/ml/modeling/linear_model/elastic_net_cv.py +195 -123
- snowflake/ml/modeling/linear_model/gamma_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/huber_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/lars.py +195 -123
- snowflake/ml/modeling/linear_model/lars_cv.py +195 -123
- snowflake/ml/modeling/linear_model/lasso.py +195 -123
- snowflake/ml/modeling/linear_model/lasso_cv.py +195 -123
- snowflake/ml/modeling/linear_model/lasso_lars.py +195 -123
- snowflake/ml/modeling/linear_model/lasso_lars_cv.py +195 -123
- snowflake/ml/modeling/linear_model/lasso_lars_ic.py +195 -123
- snowflake/ml/modeling/linear_model/linear_regression.py +195 -123
- snowflake/ml/modeling/linear_model/logistic_regression.py +195 -123
- snowflake/ml/modeling/linear_model/logistic_regression_cv.py +195 -123
- snowflake/ml/modeling/linear_model/multi_task_elastic_net.py +195 -123
- snowflake/ml/modeling/linear_model/multi_task_elastic_net_cv.py +195 -123
- snowflake/ml/modeling/linear_model/multi_task_lasso.py +195 -123
- snowflake/ml/modeling/linear_model/multi_task_lasso_cv.py +195 -123
- snowflake/ml/modeling/linear_model/orthogonal_matching_pursuit.py +195 -123
- snowflake/ml/modeling/linear_model/passive_aggressive_classifier.py +195 -123
- snowflake/ml/modeling/linear_model/passive_aggressive_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/perceptron.py +195 -123
- snowflake/ml/modeling/linear_model/poisson_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/ransac_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/ridge.py +195 -123
- snowflake/ml/modeling/linear_model/ridge_classifier.py +195 -123
- snowflake/ml/modeling/linear_model/ridge_classifier_cv.py +195 -123
- snowflake/ml/modeling/linear_model/ridge_cv.py +195 -123
- snowflake/ml/modeling/linear_model/sgd_classifier.py +195 -123
- snowflake/ml/modeling/linear_model/sgd_one_class_svm.py +195 -123
- snowflake/ml/modeling/linear_model/sgd_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/theil_sen_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/tweedie_regressor.py +195 -123
- snowflake/ml/modeling/manifold/isomap.py +195 -123
- snowflake/ml/modeling/manifold/mds.py +195 -123
- snowflake/ml/modeling/manifold/spectral_embedding.py +195 -123
- snowflake/ml/modeling/manifold/tsne.py +195 -123
- snowflake/ml/modeling/mixture/bayesian_gaussian_mixture.py +195 -123
- snowflake/ml/modeling/mixture/gaussian_mixture.py +195 -123
- snowflake/ml/modeling/model_selection/grid_search_cv.py +42 -18
- snowflake/ml/modeling/model_selection/randomized_search_cv.py +42 -18
- snowflake/ml/modeling/multiclass/one_vs_one_classifier.py +195 -123
- snowflake/ml/modeling/multiclass/one_vs_rest_classifier.py +195 -123
- snowflake/ml/modeling/multiclass/output_code_classifier.py +195 -123
- snowflake/ml/modeling/naive_bayes/bernoulli_nb.py +195 -123
- snowflake/ml/modeling/naive_bayes/categorical_nb.py +195 -123
- snowflake/ml/modeling/naive_bayes/complement_nb.py +195 -123
- snowflake/ml/modeling/naive_bayes/gaussian_nb.py +195 -123
- snowflake/ml/modeling/naive_bayes/multinomial_nb.py +195 -123
- snowflake/ml/modeling/neighbors/k_neighbors_classifier.py +195 -123
- snowflake/ml/modeling/neighbors/k_neighbors_regressor.py +195 -123
- snowflake/ml/modeling/neighbors/kernel_density.py +195 -123
- snowflake/ml/modeling/neighbors/local_outlier_factor.py +195 -123
- snowflake/ml/modeling/neighbors/nearest_centroid.py +195 -123
- snowflake/ml/modeling/neighbors/nearest_neighbors.py +195 -123
- snowflake/ml/modeling/neighbors/neighborhood_components_analysis.py +195 -123
- snowflake/ml/modeling/neighbors/radius_neighbors_classifier.py +195 -123
- snowflake/ml/modeling/neighbors/radius_neighbors_regressor.py +195 -123
- snowflake/ml/modeling/neural_network/bernoulli_rbm.py +195 -123
- snowflake/ml/modeling/neural_network/mlp_classifier.py +195 -123
- snowflake/ml/modeling/neural_network/mlp_regressor.py +195 -123
- snowflake/ml/modeling/pipeline/pipeline.py +4 -4
- snowflake/ml/modeling/preprocessing/binarizer.py +1 -5
- snowflake/ml/modeling/preprocessing/k_bins_discretizer.py +1 -5
- snowflake/ml/modeling/preprocessing/label_encoder.py +1 -5
- snowflake/ml/modeling/preprocessing/max_abs_scaler.py +1 -5
- snowflake/ml/modeling/preprocessing/min_max_scaler.py +10 -12
- snowflake/ml/modeling/preprocessing/normalizer.py +1 -5
- snowflake/ml/modeling/preprocessing/one_hot_encoder.py +1 -5
- snowflake/ml/modeling/preprocessing/ordinal_encoder.py +1 -5
- snowflake/ml/modeling/preprocessing/polynomial_features.py +195 -123
- snowflake/ml/modeling/preprocessing/robust_scaler.py +1 -5
- snowflake/ml/modeling/preprocessing/standard_scaler.py +11 -11
- snowflake/ml/modeling/semi_supervised/label_propagation.py +195 -123
- snowflake/ml/modeling/semi_supervised/label_spreading.py +195 -123
- snowflake/ml/modeling/svm/linear_svc.py +195 -123
- snowflake/ml/modeling/svm/linear_svr.py +195 -123
- snowflake/ml/modeling/svm/nu_svc.py +195 -123
- snowflake/ml/modeling/svm/nu_svr.py +195 -123
- snowflake/ml/modeling/svm/svc.py +195 -123
- snowflake/ml/modeling/svm/svr.py +195 -123
- snowflake/ml/modeling/tree/decision_tree_classifier.py +195 -123
- snowflake/ml/modeling/tree/decision_tree_regressor.py +195 -123
- snowflake/ml/modeling/tree/extra_tree_classifier.py +195 -123
- snowflake/ml/modeling/tree/extra_tree_regressor.py +195 -123
- snowflake/ml/modeling/xgboost/xgb_classifier.py +195 -123
- snowflake/ml/modeling/xgboost/xgb_regressor.py +195 -123
- snowflake/ml/modeling/xgboost/xgbrf_classifier.py +195 -123
- snowflake/ml/modeling/xgboost/xgbrf_regressor.py +195 -123
- snowflake/ml/registry/_manager/model_manager.py +5 -1
- snowflake/ml/registry/model_registry.py +99 -26
- snowflake/ml/registry/registry.py +3 -2
- snowflake/ml/version.py +1 -1
- {snowflake_ml_python-1.3.1.dist-info → snowflake_ml_python-1.4.1.dist-info}/METADATA +94 -55
- {snowflake_ml_python-1.3.1.dist-info → snowflake_ml_python-1.4.1.dist-info}/RECORD +218 -212
- snowflake/ml/model/_model_composer/model_runtime/model_runtime.py +0 -97
- {snowflake_ml_python-1.3.1.dist-info → snowflake_ml_python-1.4.1.dist-info}/LICENSE.txt +0 -0
- {snowflake_ml_python-1.3.1.dist-info → snowflake_ml_python-1.4.1.dist-info}/WHEEL +0 -0
- {snowflake_ml_python-1.3.1.dist-info → snowflake_ml_python-1.4.1.dist-info}/top_level.txt +0 -0
@@ -33,6 +33,15 @@ from snowflake.ml.modeling._internal.transformer_protocols import (
     BatchInferenceKwargsTypedDict,
     ScoreKwargsTypedDict
 )
+from snowflake.ml.model._signatures import utils as model_signature_utils
+from snowflake.ml.model.model_signature import (
+    BaseFeatureSpec,
+    DataType,
+    FeatureSpec,
+    ModelSignature,
+    _infer_signature,
+    _rename_signature_with_snowflake_identifiers,
+)

 from snowflake.ml.modeling._internal.model_transformer_builder import ModelTransformerBuilder

@@ -43,16 +52,6 @@ from snowflake.ml.modeling._internal.estimator_utils import (
     validate_sklearn_args,
 )

-from snowflake.ml.model.model_signature import (
-    DataType,
-    FeatureSpec,
-    ModelSignature,
-    _infer_signature,
-    _rename_signature_with_snowflake_identifiers,
-    BaseFeatureSpec,
-)
-from snowflake.ml.model._signatures import utils as model_signature_utils
-
 _PROJECT = "ModelDevelopment"
 # Derive subproject from module name by removing "sklearn"
 # and converting module name from underscore to CamelCase
@@ -363,12 +362,7 @@ class HistGradientBoostingRegressor(BaseTransformer):
         )
         return selected_cols

-
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
-    def fit(self, dataset: Union[DataFrame, pd.DataFrame]) -> "HistGradientBoostingRegressor":
+    def _fit(self, dataset: Union[DataFrame, pd.DataFrame]) -> "HistGradientBoostingRegressor":
         """Fit the gradient boosting model
         For more details on this function, see [sklearn.ensemble.HistGradientBoostingRegressor.fit]
         (https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.HistGradientBoostingRegressor.html#sklearn.ensemble.HistGradientBoostingRegressor.fit)
@@ -395,12 +389,14 @@ class HistGradientBoostingRegressor(BaseTransformer):

         self._snowpark_cols = dataset.select(self.input_cols).columns

-
+        # If we are already in a stored procedure, no need to kick off another one.
         if SNOWML_SPROC_ENV in os.environ:
             statement_params = telemetry.get_function_usage_statement_params(
                 project=_PROJECT,
                 subproject=_SUBPROJECT,
-                function_name=telemetry.get_statement_params_full_func_name(
+                function_name=telemetry.get_statement_params_full_func_name(
+                    inspect.currentframe(), HistGradientBoostingRegressor.__class__.__name__
+                ),
                 api_calls=[Session.call],
                 custom_tags=dict([("autogen", True)]) if self._autogenerated else None,
             )
@@ -421,7 +417,7 @@ class HistGradientBoostingRegressor(BaseTransformer):
         )
         self._sklearn_object = model_trainer.train()
         self._is_fitted = True
-        self.
+        self._generate_model_signatures(dataset)
         return self

     def _batch_inference_validate_snowpark(
@@ -497,7 +493,9 @@ class HistGradientBoostingRegressor(BaseTransformer):
             # when it is classifier, infer the datatype from label columns
             if expected_type_inferred == "" and 'predict' in self.model_signatures:
                 # Batch inference takes a single expected output column type. Use the first columns type for now.
-                label_cols_signatures = [
+                label_cols_signatures = [
+                    row for row in self.model_signatures['predict'].outputs if row.name in self.output_cols
+                ]
                 if len(label_cols_signatures) == 0:
                     error_str = f"Output columns {self.output_cols} do not match model signatures {self.model_signatures['predict'].outputs}."
                     raise exceptions.SnowflakeMLException(
@@ -505,25 +503,22 @@ class HistGradientBoostingRegressor(BaseTransformer):
                         original_exception=ValueError(error_str),
                     )

-                expected_type_inferred = convert_sp_to_sf_type(
-                    label_cols_signatures[0].as_snowpark_type()
-                )
+                expected_type_inferred = convert_sp_to_sf_type(label_cols_signatures[0].as_snowpark_type())

             self._deps = self._batch_inference_validate_snowpark(dataset=dataset, inference_method=inference_method)
-            assert isinstance(
+            assert isinstance(
+                dataset._session, Session
+            ) # mypy does not recognize the check in _batch_inference_validate_snowpark()

             transform_kwargs = dict(
-                session
-                dependencies
-                drop_input_cols
-                expected_output_cols_type
+                session=dataset._session,
+                dependencies=self._deps,
+                drop_input_cols=self._drop_input_cols,
+                expected_output_cols_type=expected_type_inferred,
             )

         elif isinstance(dataset, pd.DataFrame):
-            transform_kwargs = dict(
-                snowpark_input_cols = self._snowpark_cols,
-                drop_input_cols = self._drop_input_cols
-            )
+            transform_kwargs = dict(snowpark_input_cols=self._snowpark_cols, drop_input_cols=self._drop_input_cols)

         transform_handlers = ModelTransformerBuilder.build(
             dataset=dataset,
@@ -563,7 +558,7 @@ class HistGradientBoostingRegressor(BaseTransformer):
             Transformed dataset.
         """
         super()._check_dataset_type(dataset)
-        inference_method="transform"
+        inference_method = "transform"

         # This dictionary contains optional kwargs for batch inference. These kwargs
         # are specific to the type of dataset used.
@@ -600,17 +595,14 @@ class HistGradientBoostingRegressor(BaseTransformer):
            assert isinstance(dataset._session, Session) # mypy does not recognize the check in _batch_inference_validate_snowpark()

            transform_kwargs = dict(
-                session
-                dependencies
-                drop_input_cols
-                expected_output_cols_type
+                session=dataset._session,
+                dependencies=self._deps,
+                drop_input_cols=self._drop_input_cols,
+                expected_output_cols_type=expected_dtype,
            )

        elif isinstance(dataset, pd.DataFrame):
-            transform_kwargs = dict(
-                snowpark_input_cols = self._snowpark_cols,
-                drop_input_cols = self._drop_input_cols
-            )
+            transform_kwargs = dict(snowpark_input_cols=self._snowpark_cols, drop_input_cols=self._drop_input_cols)

        transform_handlers = ModelTransformerBuilder.build(
            dataset=dataset,
@@ -629,7 +621,11 @@ class HistGradientBoostingRegressor(BaseTransformer):
         return output_df

     @available_if(original_estimator_has_callable("fit_predict")) # type: ignore[misc]
-    def fit_predict(
+    def fit_predict(
+        self,
+        dataset: Union[DataFrame, pd.DataFrame],
+        output_cols_prefix: str = "fit_predict_",
+    ) -> Union[DataFrame, pd.DataFrame]:
         """ Method not supported for this class.


@@ -654,7 +650,9 @@ class HistGradientBoostingRegressor(BaseTransformer):
         )
         output_result, fitted_estimator = model_trainer.train_fit_predict(
             drop_input_cols=self._drop_input_cols,
-            expected_output_cols_list=
+            expected_output_cols_list=(
+                self.output_cols if self.output_cols else self._get_output_column_names(output_cols_prefix)
+            ),
         )
         self._sklearn_object = fitted_estimator
         self._is_fitted = True
@@ -671,6 +669,62 @@ class HistGradientBoostingRegressor(BaseTransformer):
         assert self._sklearn_object is not None
         return self._sklearn_object.embedding_

+
+    def _get_output_column_names(self, output_cols_prefix: str, output_cols: Optional[List[str]] = None) -> List[str]:
+        """ Returns the list of output columns for predict_proba(), decision_function(), etc.. functions.
+        Returns a list with output_cols_prefix as the only element if the estimator is not a classifier.
+        """
+        output_cols_prefix = identifier.resolve_identifier(output_cols_prefix)
+        # The following condition is introduced for kneighbors methods, and not used in other methods
+        if output_cols:
+            output_cols = [
+                identifier.concat_names([output_cols_prefix, identifier.resolve_identifier(c)])
+                for c in output_cols
+            ]
+        elif getattr(self._sklearn_object, "classes_", None) is None:
+            output_cols = [output_cols_prefix]
+        elif self._sklearn_object is not None:
+            classes = self._sklearn_object.classes_
+            if isinstance(classes, numpy.ndarray):
+                output_cols = [f'{output_cols_prefix}{str(c)}' for c in classes.tolist()]
+            elif isinstance(classes, list) and len(classes) > 0 and isinstance(classes[0], numpy.ndarray):
+                # If the estimator is a multioutput estimator, classes_ will be a list of ndarrays.
+                output_cols = []
+                for i, cl in enumerate(classes):
+                    # For binary classification, there is only one output column for each class
+                    # ndarray as the two classes are complementary.
+                    if len(cl) == 2:
+                        output_cols.append(f'{output_cols_prefix}{i}_{cl[0]}')
+                    else:
+                        output_cols.extend([
+                            f'{output_cols_prefix}{i}_{c}' for c in cl.tolist()
+                        ])
+        else:
+            output_cols = []
+
+        # Make sure column names are valid snowflake identifiers.
+        assert output_cols is not None # Make MyPy happy
+        rv = [identifier.rename_to_valid_snowflake_identifier(c) for c in output_cols]
+
+        return rv
+
+    def _align_expected_output_names(
+        self, method: str, dataset: DataFrame, expected_output_cols_list: List[str], output_cols_prefix: str
+    ) -> List[str]:
+        # in case the inferred output column names dimension is different
+        # we use one line of snowpark dataframe and put it into sklearn estimator using pandas
+        output_df_pd = getattr(self, method)(dataset.limit(1).to_pandas(), output_cols_prefix)
+        output_df_columns = list(output_df_pd.columns)
+        output_df_columns_set: Set[str] = set(output_df_columns) - set(dataset.columns)
+        if self.sample_weight_col:
+            output_df_columns_set -= set(self.sample_weight_col)
+        # if the dimension of inferred output column names is correct; use it
+        if len(expected_output_cols_list) == len(output_df_columns_set):
+            return expected_output_cols_list
+        # otherwise, use the sklearn estimator's output
+        else:
+            return sorted(list(output_df_columns_set), key=lambda x: output_df_columns.index(x))
+
     @available_if(original_estimator_has_callable("predict_proba")) # type: ignore[misc]
     @telemetry.send_api_usage_telemetry(
         project=_PROJECT,
@@ -701,24 +755,28 @@ class HistGradientBoostingRegressor(BaseTransformer):
         # are specific to the type of dataset used.
         transform_kwargs: BatchInferenceKwargsTypedDict = dict()

+        expected_output_cols = self._get_output_column_names(output_cols_prefix)
+
         if isinstance(dataset, DataFrame):
             self._deps = self._batch_inference_validate_snowpark(
                 dataset=dataset,
                 inference_method=inference_method,
             )
-            assert isinstance(
+            assert isinstance(
+                dataset._session, Session
+            ) # mypy does not recognize the check in _batch_inference_validate_snowpark()
             transform_kwargs = dict(
                 session=dataset._session,
                 dependencies=self._deps,
-                drop_input_cols
+                drop_input_cols=self._drop_input_cols,
                 expected_output_cols_type="float",
             )
+            expected_output_cols = self._align_expected_output_names(
+                inference_method, dataset, expected_output_cols, output_cols_prefix
+            )

         elif isinstance(dataset, pd.DataFrame):
-            transform_kwargs = dict(
-                snowpark_input_cols = self._snowpark_cols,
-                drop_input_cols = self._drop_input_cols
-            )
+            transform_kwargs = dict(snowpark_input_cols=self._snowpark_cols, drop_input_cols=self._drop_input_cols)

         transform_handlers = ModelTransformerBuilder.build(
             dataset=dataset,
@@ -730,7 +788,7 @@ class HistGradientBoostingRegressor(BaseTransformer):
         output_df: DATAFRAME_TYPE = transform_handlers.batch_inference(
             inference_method=inference_method,
             input_cols=self.input_cols,
-            expected_output_cols=
+            expected_output_cols=expected_output_cols,
             **transform_kwargs
         )
         return output_df
@@ -760,7 +818,8 @@ class HistGradientBoostingRegressor(BaseTransformer):
             Output dataset with log probability of the sample for each class in the model.
         """
         super()._check_dataset_type(dataset)
-        inference_method="predict_log_proba"
+        inference_method = "predict_log_proba"
+        expected_output_cols = self._get_output_column_names(output_cols_prefix)

         # This dictionary contains optional kwargs for batch inference. These kwargs
         # are specific to the type of dataset used.
@@ -771,18 +830,20 @@ class HistGradientBoostingRegressor(BaseTransformer):
                 dataset=dataset,
                 inference_method=inference_method,
             )
-            assert isinstance(
+            assert isinstance(
+                dataset._session, Session
+            ) # mypy does not recognize the check in _batch_inference_validate_snowpark()
             transform_kwargs = dict(
                 session=dataset._session,
                 dependencies=self._deps,
-                drop_input_cols
+                drop_input_cols=self._drop_input_cols,
                 expected_output_cols_type="float",
             )
+            expected_output_cols = self._align_expected_output_names(
+                inference_method, dataset, expected_output_cols, output_cols_prefix
+            )
         elif isinstance(dataset, pd.DataFrame):
-            transform_kwargs = dict(
-                snowpark_input_cols = self._snowpark_cols,
-                drop_input_cols = self._drop_input_cols
-            )
+            transform_kwargs = dict(snowpark_input_cols=self._snowpark_cols, drop_input_cols=self._drop_input_cols)

         transform_handlers = ModelTransformerBuilder.build(
             dataset=dataset,
@@ -795,7 +856,7 @@ class HistGradientBoostingRegressor(BaseTransformer):
         output_df: DATAFRAME_TYPE = transform_handlers.batch_inference(
             inference_method=inference_method,
             input_cols=self.input_cols,
-            expected_output_cols=
+            expected_output_cols=expected_output_cols,
             **transform_kwargs
         )
         return output_df
@@ -821,30 +882,34 @@ class HistGradientBoostingRegressor(BaseTransformer):
             Output dataset with results of the decision function for the samples in input dataset.
         """
         super()._check_dataset_type(dataset)
-        inference_method="decision_function"
+        inference_method = "decision_function"

         # This dictionary contains optional kwargs for batch inference. These kwargs
         # are specific to the type of dataset used.
         transform_kwargs: BatchInferenceKwargsTypedDict = dict()

+        expected_output_cols = self._get_output_column_names(output_cols_prefix)
+
         if isinstance(dataset, DataFrame):
             self._deps = self._batch_inference_validate_snowpark(
                 dataset=dataset,
                 inference_method=inference_method,
             )
-            assert isinstance(
+            assert isinstance(
+                dataset._session, Session
+            ) # mypy does not recognize the check in _batch_inference_validate_snowpark()
             transform_kwargs = dict(
                 session=dataset._session,
                 dependencies=self._deps,
-                drop_input_cols
+                drop_input_cols=self._drop_input_cols,
                 expected_output_cols_type="float",
             )
+            expected_output_cols = self._align_expected_output_names(
+                inference_method, dataset, expected_output_cols, output_cols_prefix
+            )

         elif isinstance(dataset, pd.DataFrame):
-            transform_kwargs = dict(
-                snowpark_input_cols = self._snowpark_cols,
-                drop_input_cols = self._drop_input_cols
-            )
+            transform_kwargs = dict(snowpark_input_cols=self._snowpark_cols, drop_input_cols=self._drop_input_cols)

         transform_handlers = ModelTransformerBuilder.build(
             dataset=dataset,
@@ -857,7 +922,7 @@ class HistGradientBoostingRegressor(BaseTransformer):
         output_df: DATAFRAME_TYPE = transform_handlers.batch_inference(
             inference_method=inference_method,
             input_cols=self.input_cols,
-            expected_output_cols=
+            expected_output_cols=expected_output_cols,
             **transform_kwargs
         )
         return output_df
@@ -886,12 +951,14 @@ class HistGradientBoostingRegressor(BaseTransformer):
             Output dataset with probability of the sample for each class in the model.
         """
         super()._check_dataset_type(dataset)
-        inference_method="score_samples"
+        inference_method = "score_samples"

         # This dictionary contains optional kwargs for batch inference. These kwargs
         # are specific to the type of dataset used.
         transform_kwargs: BatchInferenceKwargsTypedDict = dict()

+        expected_output_cols = self._get_output_column_names(output_cols_prefix)
+
         if isinstance(dataset, DataFrame):
             self._deps = self._batch_inference_validate_snowpark(
                 dataset=dataset,
@@ -904,6 +971,9 @@ class HistGradientBoostingRegressor(BaseTransformer):
                 drop_input_cols = self._drop_input_cols,
                 expected_output_cols_type="float",
             )
+            expected_output_cols = self._align_expected_output_names(
+                inference_method, dataset, expected_output_cols, output_cols_prefix
+            )

         elif isinstance(dataset, pd.DataFrame):
             transform_kwargs = dict(
@@ -922,7 +992,7 @@ class HistGradientBoostingRegressor(BaseTransformer):
         output_df: DATAFRAME_TYPE = transform_handlers.batch_inference(
             inference_method=inference_method,
             input_cols=self.input_cols,
-            expected_output_cols=
+            expected_output_cols=expected_output_cols,
             **transform_kwargs
         )
         return output_df
@@ -1069,50 +1139,84 @@ class HistGradientBoostingRegressor(BaseTransformer):
         )
         return output_df

+
+
+    def to_sklearn(self) -> Any:
+        """Get sklearn.ensemble.HistGradientBoostingRegressor object.
+        """
+        if self._sklearn_object is None:
+            self._sklearn_object = self._create_sklearn_object()
+        return self._sklearn_object
+
+    def to_xgboost(self) -> Any:
+        raise exceptions.SnowflakeMLException(
+            error_code=error_codes.METHOD_NOT_ALLOWED,
+            original_exception=AttributeError(
+                modeling_error_messages.UNSUPPORTED_MODEL_CONVERSION.format(
+                    "to_xgboost()",
+                    "to_sklearn()"
+                )
+            ),
+        )
+
+    def to_lightgbm(self) -> Any:
+        raise exceptions.SnowflakeMLException(
+            error_code=error_codes.METHOD_NOT_ALLOWED,
+            original_exception=AttributeError(
+                modeling_error_messages.UNSUPPORTED_MODEL_CONVERSION.format(
+                    "to_lightgbm()",
+                    "to_sklearn()"
+                )
+            ),
+        )

-    def
+    def _get_dependencies(self) -> List[str]:
+        return self._deps
+
+
+    def _generate_model_signatures(self, dataset: Union[DataFrame, pd.DataFrame]) -> None:
         self._model_signature_dict = dict()

         PROB_FUNCTIONS = ["predict_log_proba", "predict_proba", "decision_function"]

-        inputs = list(_infer_signature(dataset[self.input_cols], "input"))
+        inputs = list(_infer_signature(dataset[self.input_cols], "input", use_snowflake_identifiers=True))
         outputs: List[BaseFeatureSpec] = []
         if hasattr(self, "predict"):
             # keep mypy happy
-            assert self._sklearn_object is not None and hasattr(self._sklearn_object, "_estimator_type")
+            assert self._sklearn_object is not None and hasattr(self._sklearn_object, "_estimator_type")
             # For classifier, the type of predict is the same as the type of label
-            if self._sklearn_object._estimator_type ==
-
+            if self._sklearn_object._estimator_type == "classifier":
+                # label columns is the desired type for output
                 outputs = list(_infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True))
                 # rename the output columns
                 outputs = list(model_signature_utils.rename_features(outputs, self.output_cols))
-                self._model_signature_dict["predict"] = ModelSignature(
-
-
+                self._model_signature_dict["predict"] = ModelSignature(
+                    inputs, ([] if self._drop_input_cols else inputs) + outputs
+                )
             # For mixture models that use the density mixin, `predict` returns the argmax of the log prob.
             # For outlier models, returns -1 for outliers and 1 for inliers.
-            # Clusterer returns int64 cluster labels.
+            # Clusterer returns int64 cluster labels.
             elif self._sklearn_object._estimator_type in ["DensityEstimator", "clusterer", "outlier_detector"]:
                 outputs = [FeatureSpec(dtype=DataType.INT64, name=c) for c in self.output_cols]
-                self._model_signature_dict["predict"] = ModelSignature(
-
-
-
+                self._model_signature_dict["predict"] = ModelSignature(
+                    inputs, ([] if self._drop_input_cols else inputs) + outputs
+                )
+
             # For regressor, the type of predict is float64
-            elif self._sklearn_object._estimator_type ==
+            elif self._sklearn_object._estimator_type == "regressor":
                 outputs = [FeatureSpec(dtype=DataType.DOUBLE, name=c) for c in self.output_cols]
-                self._model_signature_dict["predict"] = ModelSignature(
-
-
-
+                self._model_signature_dict["predict"] = ModelSignature(
+                    inputs, ([] if self._drop_input_cols else inputs) + outputs
+                )
+
         for prob_func in PROB_FUNCTIONS:
             if hasattr(self, prob_func):
                 output_cols_prefix: str = f"{prob_func}_"
                 output_column_names = self._get_output_column_names(output_cols_prefix)
                 outputs = [FeatureSpec(dtype=DataType.DOUBLE, name=c) for c in output_column_names]
-                self._model_signature_dict[prob_func] = ModelSignature(
-
-
+                self._model_signature_dict[prob_func] = ModelSignature(
+                    inputs, ([] if self._drop_input_cols else inputs) + outputs
+                )

         # Output signature names may still need to be renamed, since they were not created with `_infer_signature`.
         items = list(self._model_signature_dict.items())
@@ -1125,10 +1229,10 @@ class HistGradientBoostingRegressor(BaseTransformer):
         """Returns model signature of current class.

         Raises:
-
+            SnowflakeMLException: If estimator is not fitted, then model signature cannot be inferred
 
         Returns:
-            Dict
+            Dict with each method and its input output signature
         """
         if self._model_signature_dict is None:
             raise exceptions.SnowflakeMLException(
@@ -1136,35 +1240,3 @@ class HistGradientBoostingRegressor(BaseTransformer):
                 original_exception=RuntimeError("Estimator not fitted before accessing property model_signatures!"),
             )
         return self._model_signature_dict
-
-    def to_sklearn(self) -> Any:
-        """Get sklearn.ensemble.HistGradientBoostingRegressor object.
-        """
-        if self._sklearn_object is None:
-            self._sklearn_object = self._create_sklearn_object()
-        return self._sklearn_object
-
-    def to_xgboost(self) -> Any:
-        raise exceptions.SnowflakeMLException(
-            error_code=error_codes.METHOD_NOT_ALLOWED,
-            original_exception=AttributeError(
-                modeling_error_messages.UNSUPPORTED_MODEL_CONVERSION.format(
-                    "to_xgboost()",
-                    "to_sklearn()"
-                )
-            ),
-        )
-
-    def to_lightgbm(self) -> Any:
-        raise exceptions.SnowflakeMLException(
-            error_code=error_codes.METHOD_NOT_ALLOWED,
-            original_exception=AttributeError(
-                modeling_error_messages.UNSUPPORTED_MODEL_CONVERSION.format(
-                    "to_lightgbm()",
-                    "to_sklearn()"
-                )
-            ),
-        )
-
-    def _get_dependencies(self) -> List[str]:
-        return self._deps