snowflake-ml-python 1.3.1__py3-none-any.whl → 1.4.1__py3-none-any.whl
This diff compares the contents of publicly available package versions as published to their respective public registries. It is provided for informational purposes only.
- snowflake/ml/_internal/env_utils.py +11 -1
- snowflake/ml/_internal/human_readable_id/adjectives.txt +128 -0
- snowflake/ml/_internal/human_readable_id/animals.txt +128 -0
- snowflake/ml/_internal/human_readable_id/hrid_generator.py +40 -0
- snowflake/ml/_internal/human_readable_id/hrid_generator_base.py +135 -0
- snowflake/ml/_internal/utils/formatting.py +1 -1
- snowflake/ml/_internal/utils/identifier.py +3 -1
- snowflake/ml/_internal/utils/sql_identifier.py +2 -6
- snowflake/ml/feature_store/feature_store.py +166 -184
- snowflake/ml/feature_store/feature_view.py +12 -24
- snowflake/ml/fileset/sfcfs.py +56 -50
- snowflake/ml/fileset/stage_fs.py +48 -13
- snowflake/ml/model/_client/model/model_version_impl.py +6 -49
- snowflake/ml/model/_client/ops/model_ops.py +78 -29
- snowflake/ml/model/_client/sql/model.py +23 -2
- snowflake/ml/model/_client/sql/model_version.py +22 -1
- snowflake/ml/model/_deploy_client/image_builds/server_image_builder.py +1 -3
- snowflake/ml/model/_deploy_client/snowservice/deploy.py +5 -2
- snowflake/ml/model/_model_composer/model_composer.py +7 -5
- snowflake/ml/model/_model_composer/model_manifest/model_manifest.py +19 -54
- snowflake/ml/model/_model_composer/model_manifest/model_manifest_schema.py +8 -1
- snowflake/ml/model/_model_composer/model_method/infer_table_function.py_template +1 -1
- snowflake/ml/model/_model_composer/model_method/model_method.py +6 -10
- snowflake/ml/model/_packager/model_handlers/catboost.py +206 -0
- snowflake/ml/model/_packager/model_handlers/lightgbm.py +218 -0
- snowflake/ml/model/_packager/model_handlers/sklearn.py +3 -0
- snowflake/ml/model/_packager/model_handlers/snowmlmodel.py +13 -1
- snowflake/ml/model/_packager/model_handlers/xgboost.py +1 -1
- snowflake/ml/model/_packager/model_meta/_core_requirements.py +1 -1
- snowflake/ml/model/_packager/model_meta/model_meta.py +36 -6
- snowflake/ml/model/_packager/model_meta/model_meta_schema.py +20 -1
- snowflake/ml/model/_packager/model_meta_migrator/migrator_plans.py +3 -1
- snowflake/ml/model/_packager/model_packager.py +2 -2
- snowflake/ml/model/{_model_composer/model_runtime/_runtime_requirements.py → _packager/model_runtime/_snowml_inference_alternative_requirements.py} +1 -1
- snowflake/ml/model/_packager/model_runtime/model_runtime.py +137 -0
- snowflake/ml/model/custom_model.py +3 -1
- snowflake/ml/model/type_hints.py +21 -2
- snowflake/ml/modeling/_internal/estimator_utils.py +16 -11
- snowflake/ml/modeling/_internal/local_implementations/pandas_handlers.py +4 -1
- snowflake/ml/modeling/_internal/model_specifications.py +3 -1
- snowflake/ml/modeling/_internal/snowpark_implementations/distributed_hpo_trainer.py +545 -0
- snowflake/ml/modeling/_internal/snowpark_implementations/snowpark_handlers.py +8 -5
- snowflake/ml/modeling/calibration/calibrated_classifier_cv.py +195 -123
- snowflake/ml/modeling/cluster/affinity_propagation.py +195 -123
- snowflake/ml/modeling/cluster/agglomerative_clustering.py +195 -123
- snowflake/ml/modeling/cluster/birch.py +195 -123
- snowflake/ml/modeling/cluster/bisecting_k_means.py +195 -123
- snowflake/ml/modeling/cluster/dbscan.py +195 -123
- snowflake/ml/modeling/cluster/feature_agglomeration.py +195 -123
- snowflake/ml/modeling/cluster/k_means.py +195 -123
- snowflake/ml/modeling/cluster/mean_shift.py +195 -123
- snowflake/ml/modeling/cluster/mini_batch_k_means.py +195 -123
- snowflake/ml/modeling/cluster/optics.py +195 -123
- snowflake/ml/modeling/cluster/spectral_biclustering.py +195 -123
- snowflake/ml/modeling/cluster/spectral_clustering.py +195 -123
- snowflake/ml/modeling/cluster/spectral_coclustering.py +195 -123
- snowflake/ml/modeling/compose/column_transformer.py +195 -123
- snowflake/ml/modeling/compose/transformed_target_regressor.py +195 -123
- snowflake/ml/modeling/covariance/elliptic_envelope.py +195 -123
- snowflake/ml/modeling/covariance/empirical_covariance.py +195 -123
- snowflake/ml/modeling/covariance/graphical_lasso.py +195 -123
- snowflake/ml/modeling/covariance/graphical_lasso_cv.py +195 -123
- snowflake/ml/modeling/covariance/ledoit_wolf.py +195 -123
- snowflake/ml/modeling/covariance/min_cov_det.py +195 -123
- snowflake/ml/modeling/covariance/oas.py +195 -123
- snowflake/ml/modeling/covariance/shrunk_covariance.py +195 -123
- snowflake/ml/modeling/decomposition/dictionary_learning.py +195 -123
- snowflake/ml/modeling/decomposition/factor_analysis.py +195 -123
- snowflake/ml/modeling/decomposition/fast_ica.py +195 -123
- snowflake/ml/modeling/decomposition/incremental_pca.py +195 -123
- snowflake/ml/modeling/decomposition/kernel_pca.py +195 -123
- snowflake/ml/modeling/decomposition/mini_batch_dictionary_learning.py +195 -123
- snowflake/ml/modeling/decomposition/mini_batch_sparse_pca.py +195 -123
- snowflake/ml/modeling/decomposition/pca.py +195 -123
- snowflake/ml/modeling/decomposition/sparse_pca.py +195 -123
- snowflake/ml/modeling/decomposition/truncated_svd.py +195 -123
- snowflake/ml/modeling/discriminant_analysis/linear_discriminant_analysis.py +195 -123
- snowflake/ml/modeling/discriminant_analysis/quadratic_discriminant_analysis.py +195 -123
- snowflake/ml/modeling/ensemble/ada_boost_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/ada_boost_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/bagging_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/bagging_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/extra_trees_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/extra_trees_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/gradient_boosting_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/gradient_boosting_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/hist_gradient_boosting_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/hist_gradient_boosting_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/isolation_forest.py +195 -123
- snowflake/ml/modeling/ensemble/random_forest_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/random_forest_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/stacking_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/voting_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/voting_regressor.py +195 -123
- snowflake/ml/modeling/feature_selection/generic_univariate_select.py +195 -123
- snowflake/ml/modeling/feature_selection/select_fdr.py +195 -123
- snowflake/ml/modeling/feature_selection/select_fpr.py +195 -123
- snowflake/ml/modeling/feature_selection/select_fwe.py +195 -123
- snowflake/ml/modeling/feature_selection/select_k_best.py +195 -123
- snowflake/ml/modeling/feature_selection/select_percentile.py +195 -123
- snowflake/ml/modeling/feature_selection/sequential_feature_selector.py +195 -123
- snowflake/ml/modeling/feature_selection/variance_threshold.py +195 -123
- snowflake/ml/modeling/framework/_utils.py +8 -1
- snowflake/ml/modeling/framework/base.py +24 -6
- snowflake/ml/modeling/gaussian_process/gaussian_process_classifier.py +195 -123
- snowflake/ml/modeling/gaussian_process/gaussian_process_regressor.py +195 -123
- snowflake/ml/modeling/impute/iterative_imputer.py +195 -123
- snowflake/ml/modeling/impute/knn_imputer.py +195 -123
- snowflake/ml/modeling/impute/missing_indicator.py +195 -123
- snowflake/ml/modeling/impute/simple_imputer.py +4 -15
- snowflake/ml/modeling/kernel_approximation/additive_chi2_sampler.py +195 -123
- snowflake/ml/modeling/kernel_approximation/nystroem.py +195 -123
- snowflake/ml/modeling/kernel_approximation/polynomial_count_sketch.py +195 -123
- snowflake/ml/modeling/kernel_approximation/rbf_sampler.py +195 -123
- snowflake/ml/modeling/kernel_approximation/skewed_chi2_sampler.py +195 -123
- snowflake/ml/modeling/kernel_ridge/kernel_ridge.py +195 -123
- snowflake/ml/modeling/lightgbm/lgbm_classifier.py +198 -125
- snowflake/ml/modeling/lightgbm/lgbm_regressor.py +198 -125
- snowflake/ml/modeling/linear_model/ard_regression.py +195 -123
- snowflake/ml/modeling/linear_model/bayesian_ridge.py +195 -123
- snowflake/ml/modeling/linear_model/elastic_net.py +195 -123
- snowflake/ml/modeling/linear_model/elastic_net_cv.py +195 -123
- snowflake/ml/modeling/linear_model/gamma_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/huber_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/lars.py +195 -123
- snowflake/ml/modeling/linear_model/lars_cv.py +195 -123
- snowflake/ml/modeling/linear_model/lasso.py +195 -123
- snowflake/ml/modeling/linear_model/lasso_cv.py +195 -123
- snowflake/ml/modeling/linear_model/lasso_lars.py +195 -123
- snowflake/ml/modeling/linear_model/lasso_lars_cv.py +195 -123
- snowflake/ml/modeling/linear_model/lasso_lars_ic.py +195 -123
- snowflake/ml/modeling/linear_model/linear_regression.py +195 -123
- snowflake/ml/modeling/linear_model/logistic_regression.py +195 -123
- snowflake/ml/modeling/linear_model/logistic_regression_cv.py +195 -123
- snowflake/ml/modeling/linear_model/multi_task_elastic_net.py +195 -123
- snowflake/ml/modeling/linear_model/multi_task_elastic_net_cv.py +195 -123
- snowflake/ml/modeling/linear_model/multi_task_lasso.py +195 -123
- snowflake/ml/modeling/linear_model/multi_task_lasso_cv.py +195 -123
- snowflake/ml/modeling/linear_model/orthogonal_matching_pursuit.py +195 -123
- snowflake/ml/modeling/linear_model/passive_aggressive_classifier.py +195 -123
- snowflake/ml/modeling/linear_model/passive_aggressive_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/perceptron.py +195 -123
- snowflake/ml/modeling/linear_model/poisson_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/ransac_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/ridge.py +195 -123
- snowflake/ml/modeling/linear_model/ridge_classifier.py +195 -123
- snowflake/ml/modeling/linear_model/ridge_classifier_cv.py +195 -123
- snowflake/ml/modeling/linear_model/ridge_cv.py +195 -123
- snowflake/ml/modeling/linear_model/sgd_classifier.py +195 -123
- snowflake/ml/modeling/linear_model/sgd_one_class_svm.py +195 -123
- snowflake/ml/modeling/linear_model/sgd_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/theil_sen_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/tweedie_regressor.py +195 -123
- snowflake/ml/modeling/manifold/isomap.py +195 -123
- snowflake/ml/modeling/manifold/mds.py +195 -123
- snowflake/ml/modeling/manifold/spectral_embedding.py +195 -123
- snowflake/ml/modeling/manifold/tsne.py +195 -123
- snowflake/ml/modeling/mixture/bayesian_gaussian_mixture.py +195 -123
- snowflake/ml/modeling/mixture/gaussian_mixture.py +195 -123
- snowflake/ml/modeling/model_selection/grid_search_cv.py +42 -18
- snowflake/ml/modeling/model_selection/randomized_search_cv.py +42 -18
- snowflake/ml/modeling/multiclass/one_vs_one_classifier.py +195 -123
- snowflake/ml/modeling/multiclass/one_vs_rest_classifier.py +195 -123
- snowflake/ml/modeling/multiclass/output_code_classifier.py +195 -123
- snowflake/ml/modeling/naive_bayes/bernoulli_nb.py +195 -123
- snowflake/ml/modeling/naive_bayes/categorical_nb.py +195 -123
- snowflake/ml/modeling/naive_bayes/complement_nb.py +195 -123
- snowflake/ml/modeling/naive_bayes/gaussian_nb.py +195 -123
- snowflake/ml/modeling/naive_bayes/multinomial_nb.py +195 -123
- snowflake/ml/modeling/neighbors/k_neighbors_classifier.py +195 -123
- snowflake/ml/modeling/neighbors/k_neighbors_regressor.py +195 -123
- snowflake/ml/modeling/neighbors/kernel_density.py +195 -123
- snowflake/ml/modeling/neighbors/local_outlier_factor.py +195 -123
- snowflake/ml/modeling/neighbors/nearest_centroid.py +195 -123
- snowflake/ml/modeling/neighbors/nearest_neighbors.py +195 -123
- snowflake/ml/modeling/neighbors/neighborhood_components_analysis.py +195 -123
- snowflake/ml/modeling/neighbors/radius_neighbors_classifier.py +195 -123
- snowflake/ml/modeling/neighbors/radius_neighbors_regressor.py +195 -123
- snowflake/ml/modeling/neural_network/bernoulli_rbm.py +195 -123
- snowflake/ml/modeling/neural_network/mlp_classifier.py +195 -123
- snowflake/ml/modeling/neural_network/mlp_regressor.py +195 -123
- snowflake/ml/modeling/pipeline/pipeline.py +4 -4
- snowflake/ml/modeling/preprocessing/binarizer.py +1 -5
- snowflake/ml/modeling/preprocessing/k_bins_discretizer.py +1 -5
- snowflake/ml/modeling/preprocessing/label_encoder.py +1 -5
- snowflake/ml/modeling/preprocessing/max_abs_scaler.py +1 -5
- snowflake/ml/modeling/preprocessing/min_max_scaler.py +10 -12
- snowflake/ml/modeling/preprocessing/normalizer.py +1 -5
- snowflake/ml/modeling/preprocessing/one_hot_encoder.py +1 -5
- snowflake/ml/modeling/preprocessing/ordinal_encoder.py +1 -5
- snowflake/ml/modeling/preprocessing/polynomial_features.py +195 -123
- snowflake/ml/modeling/preprocessing/robust_scaler.py +1 -5
- snowflake/ml/modeling/preprocessing/standard_scaler.py +11 -11
- snowflake/ml/modeling/semi_supervised/label_propagation.py +195 -123
- snowflake/ml/modeling/semi_supervised/label_spreading.py +195 -123
- snowflake/ml/modeling/svm/linear_svc.py +195 -123
- snowflake/ml/modeling/svm/linear_svr.py +195 -123
- snowflake/ml/modeling/svm/nu_svc.py +195 -123
- snowflake/ml/modeling/svm/nu_svr.py +195 -123
- snowflake/ml/modeling/svm/svc.py +195 -123
- snowflake/ml/modeling/svm/svr.py +195 -123
- snowflake/ml/modeling/tree/decision_tree_classifier.py +195 -123
- snowflake/ml/modeling/tree/decision_tree_regressor.py +195 -123
- snowflake/ml/modeling/tree/extra_tree_classifier.py +195 -123
- snowflake/ml/modeling/tree/extra_tree_regressor.py +195 -123
- snowflake/ml/modeling/xgboost/xgb_classifier.py +195 -123
- snowflake/ml/modeling/xgboost/xgb_regressor.py +195 -123
- snowflake/ml/modeling/xgboost/xgbrf_classifier.py +195 -123
- snowflake/ml/modeling/xgboost/xgbrf_regressor.py +195 -123
- snowflake/ml/registry/_manager/model_manager.py +5 -1
- snowflake/ml/registry/model_registry.py +99 -26
- snowflake/ml/registry/registry.py +3 -2
- snowflake/ml/version.py +1 -1
- {snowflake_ml_python-1.3.1.dist-info → snowflake_ml_python-1.4.1.dist-info}/METADATA +94 -55
- {snowflake_ml_python-1.3.1.dist-info → snowflake_ml_python-1.4.1.dist-info}/RECORD +218 -212
- snowflake/ml/model/_model_composer/model_runtime/model_runtime.py +0 -97
- {snowflake_ml_python-1.3.1.dist-info → snowflake_ml_python-1.4.1.dist-info}/LICENSE.txt +0 -0
- {snowflake_ml_python-1.3.1.dist-info → snowflake_ml_python-1.4.1.dist-info}/WHEEL +0 -0
- {snowflake_ml_python-1.3.1.dist-info → snowflake_ml_python-1.4.1.dist-info}/top_level.txt +0 -0
@@ -33,6 +33,15 @@ from snowflake.ml.modeling._internal.transformer_protocols import (
     BatchInferenceKwargsTypedDict,
     ScoreKwargsTypedDict
 )
+from snowflake.ml.model._signatures import utils as model_signature_utils
+from snowflake.ml.model.model_signature import (
+    BaseFeatureSpec,
+    DataType,
+    FeatureSpec,
+    ModelSignature,
+    _infer_signature,
+    _rename_signature_with_snowflake_identifiers,
+)

 from snowflake.ml.modeling._internal.model_transformer_builder import ModelTransformerBuilder

@@ -43,16 +52,6 @@ from snowflake.ml.modeling._internal.estimator_utils import (
     validate_sklearn_args,
 )

-from snowflake.ml.model.model_signature import (
-    DataType,
-    FeatureSpec,
-    ModelSignature,
-    _infer_signature,
-    _rename_signature_with_snowflake_identifiers,
-    BaseFeatureSpec,
-)
-from snowflake.ml.model._signatures import utils as model_signature_utils
-
 _PROJECT = "ModelDevelopment"
 # Derive subproject from module name by removing "sklearn"
 # and converting module name from underscore to CamelCase
@@ -400,12 +399,7 @@ class GradientBoostingRegressor(BaseTransformer):
         )
         return selected_cols

-    @telemetry.send_api_usage_telemetry(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
-    def fit(self, dataset: Union[DataFrame, pd.DataFrame]) -> "GradientBoostingRegressor":
+    def _fit(self, dataset: Union[DataFrame, pd.DataFrame]) -> "GradientBoostingRegressor":
         """Fit the gradient boosting model
         For more details on this function, see [sklearn.ensemble.GradientBoostingRegressor.fit]
         (https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingRegressor.html#sklearn.ensemble.GradientBoostingRegressor.fit)
@@ -432,12 +426,14 @@ class GradientBoostingRegressor(BaseTransformer):

         self._snowpark_cols = dataset.select(self.input_cols).columns

-
+        # If we are already in a stored procedure, no need to kick off another one.
         if SNOWML_SPROC_ENV in os.environ:
             statement_params = telemetry.get_function_usage_statement_params(
                 project=_PROJECT,
                 subproject=_SUBPROJECT,
-                function_name=telemetry.get_statement_params_full_func_name(
+                function_name=telemetry.get_statement_params_full_func_name(
+                    inspect.currentframe(), GradientBoostingRegressor.__class__.__name__
+                ),
                 api_calls=[Session.call],
                 custom_tags=dict([("autogen", True)]) if self._autogenerated else None,
             )
@@ -458,7 +454,7 @@ class GradientBoostingRegressor(BaseTransformer):
         )
         self._sklearn_object = model_trainer.train()
         self._is_fitted = True
-        self.
+        self._generate_model_signatures(dataset)
         return self

     def _batch_inference_validate_snowpark(
@@ -534,7 +530,9 @@ class GradientBoostingRegressor(BaseTransformer):
             # when it is classifier, infer the datatype from label columns
             if expected_type_inferred == "" and 'predict' in self.model_signatures:
                 # Batch inference takes a single expected output column type. Use the first columns type for now.
-                label_cols_signatures = [
+                label_cols_signatures = [
+                    row for row in self.model_signatures['predict'].outputs if row.name in self.output_cols
+                ]
                 if len(label_cols_signatures) == 0:
                     error_str = f"Output columns {self.output_cols} do not match model signatures {self.model_signatures['predict'].outputs}."
                     raise exceptions.SnowflakeMLException(
@@ -542,25 +540,22 @@ class GradientBoostingRegressor(BaseTransformer):
                         original_exception=ValueError(error_str),
                     )

-                expected_type_inferred = convert_sp_to_sf_type(
-                    label_cols_signatures[0].as_snowpark_type()
-                )
+                expected_type_inferred = convert_sp_to_sf_type(label_cols_signatures[0].as_snowpark_type())

             self._deps = self._batch_inference_validate_snowpark(dataset=dataset, inference_method=inference_method)
-            assert isinstance(
+            assert isinstance(
+                dataset._session, Session
+            ) # mypy does not recognize the check in _batch_inference_validate_snowpark()

             transform_kwargs = dict(
-                session
-                dependencies
-                drop_input_cols
-                expected_output_cols_type
+                session=dataset._session,
+                dependencies=self._deps,
+                drop_input_cols=self._drop_input_cols,
+                expected_output_cols_type=expected_type_inferred,
             )

         elif isinstance(dataset, pd.DataFrame):
-            transform_kwargs = dict(
-                snowpark_input_cols = self._snowpark_cols,
-                drop_input_cols = self._drop_input_cols
-            )
+            transform_kwargs = dict(snowpark_input_cols=self._snowpark_cols, drop_input_cols=self._drop_input_cols)

         transform_handlers = ModelTransformerBuilder.build(
             dataset=dataset,
@@ -600,7 +595,7 @@ class GradientBoostingRegressor(BaseTransformer):
             Transformed dataset.
         """
         super()._check_dataset_type(dataset)
-        inference_method="transform"
+        inference_method = "transform"

         # This dictionary contains optional kwargs for batch inference. These kwargs
         # are specific to the type of dataset used.
@@ -637,17 +632,14 @@ class GradientBoostingRegressor(BaseTransformer):
             assert isinstance(dataset._session, Session) # mypy does not recognize the check in _batch_inference_validate_snowpark()

             transform_kwargs = dict(
-                session
-                dependencies
-                drop_input_cols
-                expected_output_cols_type
+                session=dataset._session,
+                dependencies=self._deps,
+                drop_input_cols=self._drop_input_cols,
+                expected_output_cols_type=expected_dtype,
             )

         elif isinstance(dataset, pd.DataFrame):
-            transform_kwargs = dict(
-                snowpark_input_cols = self._snowpark_cols,
-                drop_input_cols = self._drop_input_cols
-            )
+            transform_kwargs = dict(snowpark_input_cols=self._snowpark_cols, drop_input_cols=self._drop_input_cols)

         transform_handlers = ModelTransformerBuilder.build(
             dataset=dataset,
@@ -666,7 +658,11 @@ class GradientBoostingRegressor(BaseTransformer):
         return output_df

     @available_if(original_estimator_has_callable("fit_predict")) # type: ignore[misc]
-    def fit_predict(
+    def fit_predict(
+        self,
+        dataset: Union[DataFrame, pd.DataFrame],
+        output_cols_prefix: str = "fit_predict_",
+    ) -> Union[DataFrame, pd.DataFrame]:
         """ Method not supported for this class.


@@ -691,7 +687,9 @@ class GradientBoostingRegressor(BaseTransformer):
         )
         output_result, fitted_estimator = model_trainer.train_fit_predict(
             drop_input_cols=self._drop_input_cols,
-            expected_output_cols_list=
+            expected_output_cols_list=(
+                self.output_cols if self.output_cols else self._get_output_column_names(output_cols_prefix)
+            ),
         )
         self._sklearn_object = fitted_estimator
         self._is_fitted = True
@@ -708,6 +706,62 @@ class GradientBoostingRegressor(BaseTransformer):
         assert self._sklearn_object is not None
         return self._sklearn_object.embedding_

+
+    def _get_output_column_names(self, output_cols_prefix: str, output_cols: Optional[List[str]] = None) -> List[str]:
+        """ Returns the list of output columns for predict_proba(), decision_function(), etc.. functions.
+        Returns a list with output_cols_prefix as the only element if the estimator is not a classifier.
+        """
+        output_cols_prefix = identifier.resolve_identifier(output_cols_prefix)
+        # The following condition is introduced for kneighbors methods, and not used in other methods
+        if output_cols:
+            output_cols = [
+                identifier.concat_names([output_cols_prefix, identifier.resolve_identifier(c)])
+                for c in output_cols
+            ]
+        elif getattr(self._sklearn_object, "classes_", None) is None:
+            output_cols = [output_cols_prefix]
+        elif self._sklearn_object is not None:
+            classes = self._sklearn_object.classes_
+            if isinstance(classes, numpy.ndarray):
+                output_cols = [f'{output_cols_prefix}{str(c)}' for c in classes.tolist()]
+            elif isinstance(classes, list) and len(classes) > 0 and isinstance(classes[0], numpy.ndarray):
+                # If the estimator is a multioutput estimator, classes_ will be a list of ndarrays.
+                output_cols = []
+                for i, cl in enumerate(classes):
+                    # For binary classification, there is only one output column for each class
+                    # ndarray as the two classes are complementary.
+                    if len(cl) == 2:
+                        output_cols.append(f'{output_cols_prefix}{i}_{cl[0]}')
+                    else:
+                        output_cols.extend([
+                            f'{output_cols_prefix}{i}_{c}' for c in cl.tolist()
+                        ])
+        else:
+            output_cols = []
+
+        # Make sure column names are valid snowflake identifiers.
+        assert output_cols is not None # Make MyPy happy
+        rv = [identifier.rename_to_valid_snowflake_identifier(c) for c in output_cols]
+
+        return rv
+
+    def _align_expected_output_names(
+        self, method: str, dataset: DataFrame, expected_output_cols_list: List[str], output_cols_prefix: str
+    ) -> List[str]:
+        # in case the inferred output column names dimension is different
+        # we use one line of snowpark dataframe and put it into sklearn estimator using pandas
+        output_df_pd = getattr(self, method)(dataset.limit(1).to_pandas(), output_cols_prefix)
+        output_df_columns = list(output_df_pd.columns)
+        output_df_columns_set: Set[str] = set(output_df_columns) - set(dataset.columns)
+        if self.sample_weight_col:
+            output_df_columns_set -= set(self.sample_weight_col)
+        # if the dimension of inferred output column names is correct; use it
+        if len(expected_output_cols_list) == len(output_df_columns_set):
+            return expected_output_cols_list
+        # otherwise, use the sklearn estimator's output
+        else:
+            return sorted(list(output_df_columns_set), key=lambda x: output_df_columns.index(x))
+
     @available_if(original_estimator_has_callable("predict_proba")) # type: ignore[misc]
     @telemetry.send_api_usage_telemetry(
         project=_PROJECT,
@@ -738,24 +792,28 @@ class GradientBoostingRegressor(BaseTransformer):
         # are specific to the type of dataset used.
         transform_kwargs: BatchInferenceKwargsTypedDict = dict()

+        expected_output_cols = self._get_output_column_names(output_cols_prefix)
+
         if isinstance(dataset, DataFrame):
             self._deps = self._batch_inference_validate_snowpark(
                 dataset=dataset,
                 inference_method=inference_method,
             )
-            assert isinstance(
+            assert isinstance(
+                dataset._session, Session
+            ) # mypy does not recognize the check in _batch_inference_validate_snowpark()
             transform_kwargs = dict(
                 session=dataset._session,
                 dependencies=self._deps,
-                drop_input_cols
+                drop_input_cols=self._drop_input_cols,
                 expected_output_cols_type="float",
             )
+            expected_output_cols = self._align_expected_output_names(
+                inference_method, dataset, expected_output_cols, output_cols_prefix
+            )

         elif isinstance(dataset, pd.DataFrame):
-            transform_kwargs = dict(
-                snowpark_input_cols = self._snowpark_cols,
-                drop_input_cols = self._drop_input_cols
-            )
+            transform_kwargs = dict(snowpark_input_cols=self._snowpark_cols, drop_input_cols=self._drop_input_cols)

         transform_handlers = ModelTransformerBuilder.build(
             dataset=dataset,
@@ -767,7 +825,7 @@ class GradientBoostingRegressor(BaseTransformer):
         output_df: DATAFRAME_TYPE = transform_handlers.batch_inference(
             inference_method=inference_method,
             input_cols=self.input_cols,
-            expected_output_cols=
+            expected_output_cols=expected_output_cols,
             **transform_kwargs
         )
         return output_df
@@ -797,7 +855,8 @@ class GradientBoostingRegressor(BaseTransformer):
             Output dataset with log probability of the sample for each class in the model.
         """
         super()._check_dataset_type(dataset)
-        inference_method="predict_log_proba"
+        inference_method = "predict_log_proba"
+        expected_output_cols = self._get_output_column_names(output_cols_prefix)

         # This dictionary contains optional kwargs for batch inference. These kwargs
         # are specific to the type of dataset used.
@@ -808,18 +867,20 @@ class GradientBoostingRegressor(BaseTransformer):
                 dataset=dataset,
                 inference_method=inference_method,
             )
-            assert isinstance(
+            assert isinstance(
+                dataset._session, Session
+            ) # mypy does not recognize the check in _batch_inference_validate_snowpark()
             transform_kwargs = dict(
                 session=dataset._session,
                 dependencies=self._deps,
-                drop_input_cols
+                drop_input_cols=self._drop_input_cols,
                 expected_output_cols_type="float",
             )
+            expected_output_cols = self._align_expected_output_names(
+                inference_method, dataset, expected_output_cols, output_cols_prefix
+            )
         elif isinstance(dataset, pd.DataFrame):
-            transform_kwargs = dict(
-                snowpark_input_cols = self._snowpark_cols,
-                drop_input_cols = self._drop_input_cols
-            )
+            transform_kwargs = dict(snowpark_input_cols=self._snowpark_cols, drop_input_cols=self._drop_input_cols)

         transform_handlers = ModelTransformerBuilder.build(
             dataset=dataset,
@@ -832,7 +893,7 @@ class GradientBoostingRegressor(BaseTransformer):
         output_df: DATAFRAME_TYPE = transform_handlers.batch_inference(
             inference_method=inference_method,
             input_cols=self.input_cols,
-            expected_output_cols=
+            expected_output_cols=expected_output_cols,
             **transform_kwargs
         )
         return output_df
@@ -858,30 +919,34 @@ class GradientBoostingRegressor(BaseTransformer):
             Output dataset with results of the decision function for the samples in input dataset.
         """
         super()._check_dataset_type(dataset)
-        inference_method="decision_function"
+        inference_method = "decision_function"

         # This dictionary contains optional kwargs for batch inference. These kwargs
         # are specific to the type of dataset used.
         transform_kwargs: BatchInferenceKwargsTypedDict = dict()

+        expected_output_cols = self._get_output_column_names(output_cols_prefix)
+
         if isinstance(dataset, DataFrame):
             self._deps = self._batch_inference_validate_snowpark(
                 dataset=dataset,
                 inference_method=inference_method,
             )
-            assert isinstance(
+            assert isinstance(
+                dataset._session, Session
+            ) # mypy does not recognize the check in _batch_inference_validate_snowpark()
             transform_kwargs = dict(
                 session=dataset._session,
                 dependencies=self._deps,
-                drop_input_cols
+                drop_input_cols=self._drop_input_cols,
                 expected_output_cols_type="float",
             )
+            expected_output_cols = self._align_expected_output_names(
+                inference_method, dataset, expected_output_cols, output_cols_prefix
+            )

         elif isinstance(dataset, pd.DataFrame):
-            transform_kwargs = dict(
-                snowpark_input_cols = self._snowpark_cols,
-                drop_input_cols = self._drop_input_cols
-            )
+            transform_kwargs = dict(snowpark_input_cols=self._snowpark_cols, drop_input_cols=self._drop_input_cols)

         transform_handlers = ModelTransformerBuilder.build(
             dataset=dataset,
@@ -894,7 +959,7 @@ class GradientBoostingRegressor(BaseTransformer):
         output_df: DATAFRAME_TYPE = transform_handlers.batch_inference(
             inference_method=inference_method,
             input_cols=self.input_cols,
-            expected_output_cols=
+            expected_output_cols=expected_output_cols,
             **transform_kwargs
         )
         return output_df
@@ -923,12 +988,14 @@ class GradientBoostingRegressor(BaseTransformer):
             Output dataset with probability of the sample for each class in the model.
         """
         super()._check_dataset_type(dataset)
-        inference_method="score_samples"
+        inference_method = "score_samples"

         # This dictionary contains optional kwargs for batch inference. These kwargs
         # are specific to the type of dataset used.
         transform_kwargs: BatchInferenceKwargsTypedDict = dict()

+        expected_output_cols = self._get_output_column_names(output_cols_prefix)
+
         if isinstance(dataset, DataFrame):
             self._deps = self._batch_inference_validate_snowpark(
                 dataset=dataset,
@@ -941,6 +1008,9 @@ class GradientBoostingRegressor(BaseTransformer):
                 drop_input_cols = self._drop_input_cols,
                 expected_output_cols_type="float",
             )
+            expected_output_cols = self._align_expected_output_names(
+                inference_method, dataset, expected_output_cols, output_cols_prefix
+            )

         elif isinstance(dataset, pd.DataFrame):
             transform_kwargs = dict(
@@ -959,7 +1029,7 @@ class GradientBoostingRegressor(BaseTransformer):
         output_df: DATAFRAME_TYPE = transform_handlers.batch_inference(
             inference_method=inference_method,
             input_cols=self.input_cols,
-            expected_output_cols=
+            expected_output_cols=expected_output_cols,
             **transform_kwargs
         )
         return output_df
@@ -1106,50 +1176,84 @@ class GradientBoostingRegressor(BaseTransformer):
         )
         return output_df

+
+
+    def to_sklearn(self) -> Any:
+        """Get sklearn.ensemble.GradientBoostingRegressor object.
+        """
+        if self._sklearn_object is None:
+            self._sklearn_object = self._create_sklearn_object()
+        return self._sklearn_object
+
+    def to_xgboost(self) -> Any:
+        raise exceptions.SnowflakeMLException(
+            error_code=error_codes.METHOD_NOT_ALLOWED,
+            original_exception=AttributeError(
+                modeling_error_messages.UNSUPPORTED_MODEL_CONVERSION.format(
+                    "to_xgboost()",
+                    "to_sklearn()"
+                )
+            ),
+        )
+
+    def to_lightgbm(self) -> Any:
+        raise exceptions.SnowflakeMLException(
+            error_code=error_codes.METHOD_NOT_ALLOWED,
+            original_exception=AttributeError(
+                modeling_error_messages.UNSUPPORTED_MODEL_CONVERSION.format(
+                    "to_lightgbm()",
+                    "to_sklearn()"
+                )
+            ),
+        )

-    def
+    def _get_dependencies(self) -> List[str]:
+        return self._deps
+
+
+    def _generate_model_signatures(self, dataset: Union[DataFrame, pd.DataFrame]) -> None:
         self._model_signature_dict = dict()

         PROB_FUNCTIONS = ["predict_log_proba", "predict_proba", "decision_function"]

-        inputs = list(_infer_signature(dataset[self.input_cols], "input"))
+        inputs = list(_infer_signature(dataset[self.input_cols], "input", use_snowflake_identifiers=True))
         outputs: List[BaseFeatureSpec] = []
         if hasattr(self, "predict"):
             # keep mypy happy
-            assert self._sklearn_object is not None and hasattr(self._sklearn_object, "_estimator_type")
+            assert self._sklearn_object is not None and hasattr(self._sklearn_object, "_estimator_type")
             # For classifier, the type of predict is the same as the type of label
-            if self._sklearn_object._estimator_type ==
-
+            if self._sklearn_object._estimator_type == "classifier":
+                # label columns is the desired type for output
                 outputs = list(_infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True))
                 # rename the output columns
                 outputs = list(model_signature_utils.rename_features(outputs, self.output_cols))
-                self._model_signature_dict["predict"] = ModelSignature(
-
-
+                self._model_signature_dict["predict"] = ModelSignature(
+                    inputs, ([] if self._drop_input_cols else inputs) + outputs
+                )
             # For mixture models that use the density mixin, `predict` returns the argmax of the log prob.
             # For outlier models, returns -1 for outliers and 1 for inliers.
-            # Clusterer returns int64 cluster labels.
+            # Clusterer returns int64 cluster labels.
             elif self._sklearn_object._estimator_type in ["DensityEstimator", "clusterer", "outlier_detector"]:
                 outputs = [FeatureSpec(dtype=DataType.INT64, name=c) for c in self.output_cols]
-                self._model_signature_dict["predict"] = ModelSignature(
-
-
-
+                self._model_signature_dict["predict"] = ModelSignature(
+                    inputs, ([] if self._drop_input_cols else inputs) + outputs
+                )
+
             # For regressor, the type of predict is float64
-            elif self._sklearn_object._estimator_type ==
+            elif self._sklearn_object._estimator_type == "regressor":
                 outputs = [FeatureSpec(dtype=DataType.DOUBLE, name=c) for c in self.output_cols]
-                self._model_signature_dict["predict"] = ModelSignature(
-
-
-
+                self._model_signature_dict["predict"] = ModelSignature(
+                    inputs, ([] if self._drop_input_cols else inputs) + outputs
+                )
+
         for prob_func in PROB_FUNCTIONS:
             if hasattr(self, prob_func):
                 output_cols_prefix: str = f"{prob_func}_"
                 output_column_names = self._get_output_column_names(output_cols_prefix)
                 outputs = [FeatureSpec(dtype=DataType.DOUBLE, name=c) for c in output_column_names]
-                self._model_signature_dict[prob_func] = ModelSignature(
-
-
+                self._model_signature_dict[prob_func] = ModelSignature(
+                    inputs, ([] if self._drop_input_cols else inputs) + outputs
+                )

         # Output signature names may still need to be renamed, since they were not created with `_infer_signature`.
         items = list(self._model_signature_dict.items())
@@ -1162,10 +1266,10 @@ class GradientBoostingRegressor(BaseTransformer):
         """Returns model signature of current class.

         Raises:
-
+            SnowflakeMLException: If estimator is not fitted, then model signature cannot be inferred

         Returns:
-            Dict
+            Dict with each method and its input output signature
         """
         if self._model_signature_dict is None:
             raise exceptions.SnowflakeMLException(
@@ -1173,35 +1277,3 @@ class GradientBoostingRegressor(BaseTransformer):
                 original_exception=RuntimeError("Estimator not fitted before accessing property model_signatures!"),
             )
         return self._model_signature_dict
-
-    def to_sklearn(self) -> Any:
-        """Get sklearn.ensemble.GradientBoostingRegressor object.
-        """
-        if self._sklearn_object is None:
-            self._sklearn_object = self._create_sklearn_object()
-        return self._sklearn_object
-
-    def to_xgboost(self) -> Any:
-        raise exceptions.SnowflakeMLException(
-            error_code=error_codes.METHOD_NOT_ALLOWED,
-            original_exception=AttributeError(
-                modeling_error_messages.UNSUPPORTED_MODEL_CONVERSION.format(
-                    "to_xgboost()",
-                    "to_sklearn()"
-                )
-            ),
-        )
-
-    def to_lightgbm(self) -> Any:
-        raise exceptions.SnowflakeMLException(
-            error_code=error_codes.METHOD_NOT_ALLOWED,
-            original_exception=AttributeError(
-                modeling_error_messages.UNSUPPORTED_MODEL_CONVERSION.format(
-                    "to_lightgbm()",
-                    "to_sklearn()"
-                )
-            ),
-        )
-
-    def _get_dependencies(self) -> List[str]:
-        return self._deps