snowflake-ml-python 1.3.1__py3-none-any.whl → 1.4.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- snowflake/ml/_internal/env_utils.py +11 -1
- snowflake/ml/_internal/human_readable_id/adjectives.txt +128 -0
- snowflake/ml/_internal/human_readable_id/animals.txt +128 -0
- snowflake/ml/_internal/human_readable_id/hrid_generator.py +40 -0
- snowflake/ml/_internal/human_readable_id/hrid_generator_base.py +135 -0
- snowflake/ml/_internal/utils/formatting.py +1 -1
- snowflake/ml/_internal/utils/identifier.py +3 -1
- snowflake/ml/_internal/utils/sql_identifier.py +2 -6
- snowflake/ml/feature_store/feature_store.py +166 -184
- snowflake/ml/feature_store/feature_view.py +12 -24
- snowflake/ml/fileset/sfcfs.py +56 -50
- snowflake/ml/fileset/stage_fs.py +48 -13
- snowflake/ml/model/_client/model/model_version_impl.py +6 -49
- snowflake/ml/model/_client/ops/model_ops.py +78 -29
- snowflake/ml/model/_client/sql/model.py +23 -2
- snowflake/ml/model/_client/sql/model_version.py +22 -1
- snowflake/ml/model/_deploy_client/image_builds/server_image_builder.py +1 -3
- snowflake/ml/model/_deploy_client/snowservice/deploy.py +5 -2
- snowflake/ml/model/_model_composer/model_composer.py +7 -5
- snowflake/ml/model/_model_composer/model_manifest/model_manifest.py +19 -54
- snowflake/ml/model/_model_composer/model_manifest/model_manifest_schema.py +8 -1
- snowflake/ml/model/_model_composer/model_method/infer_table_function.py_template +1 -1
- snowflake/ml/model/_model_composer/model_method/model_method.py +6 -10
- snowflake/ml/model/_packager/model_handlers/catboost.py +206 -0
- snowflake/ml/model/_packager/model_handlers/lightgbm.py +218 -0
- snowflake/ml/model/_packager/model_handlers/sklearn.py +3 -0
- snowflake/ml/model/_packager/model_handlers/snowmlmodel.py +13 -1
- snowflake/ml/model/_packager/model_handlers/xgboost.py +1 -1
- snowflake/ml/model/_packager/model_meta/_core_requirements.py +1 -1
- snowflake/ml/model/_packager/model_meta/model_meta.py +36 -6
- snowflake/ml/model/_packager/model_meta/model_meta_schema.py +20 -1
- snowflake/ml/model/_packager/model_meta_migrator/migrator_plans.py +3 -1
- snowflake/ml/model/_packager/model_packager.py +2 -2
- snowflake/ml/model/{_model_composer/model_runtime/_runtime_requirements.py → _packager/model_runtime/_snowml_inference_alternative_requirements.py} +1 -1
- snowflake/ml/model/_packager/model_runtime/model_runtime.py +137 -0
- snowflake/ml/model/custom_model.py +3 -1
- snowflake/ml/model/type_hints.py +21 -2
- snowflake/ml/modeling/_internal/estimator_utils.py +16 -11
- snowflake/ml/modeling/_internal/local_implementations/pandas_handlers.py +4 -1
- snowflake/ml/modeling/_internal/model_specifications.py +3 -1
- snowflake/ml/modeling/_internal/snowpark_implementations/distributed_hpo_trainer.py +545 -0
- snowflake/ml/modeling/_internal/snowpark_implementations/snowpark_handlers.py +8 -5
- snowflake/ml/modeling/calibration/calibrated_classifier_cv.py +195 -123
- snowflake/ml/modeling/cluster/affinity_propagation.py +195 -123
- snowflake/ml/modeling/cluster/agglomerative_clustering.py +195 -123
- snowflake/ml/modeling/cluster/birch.py +195 -123
- snowflake/ml/modeling/cluster/bisecting_k_means.py +195 -123
- snowflake/ml/modeling/cluster/dbscan.py +195 -123
- snowflake/ml/modeling/cluster/feature_agglomeration.py +195 -123
- snowflake/ml/modeling/cluster/k_means.py +195 -123
- snowflake/ml/modeling/cluster/mean_shift.py +195 -123
- snowflake/ml/modeling/cluster/mini_batch_k_means.py +195 -123
- snowflake/ml/modeling/cluster/optics.py +195 -123
- snowflake/ml/modeling/cluster/spectral_biclustering.py +195 -123
- snowflake/ml/modeling/cluster/spectral_clustering.py +195 -123
- snowflake/ml/modeling/cluster/spectral_coclustering.py +195 -123
- snowflake/ml/modeling/compose/column_transformer.py +195 -123
- snowflake/ml/modeling/compose/transformed_target_regressor.py +195 -123
- snowflake/ml/modeling/covariance/elliptic_envelope.py +195 -123
- snowflake/ml/modeling/covariance/empirical_covariance.py +195 -123
- snowflake/ml/modeling/covariance/graphical_lasso.py +195 -123
- snowflake/ml/modeling/covariance/graphical_lasso_cv.py +195 -123
- snowflake/ml/modeling/covariance/ledoit_wolf.py +195 -123
- snowflake/ml/modeling/covariance/min_cov_det.py +195 -123
- snowflake/ml/modeling/covariance/oas.py +195 -123
- snowflake/ml/modeling/covariance/shrunk_covariance.py +195 -123
- snowflake/ml/modeling/decomposition/dictionary_learning.py +195 -123
- snowflake/ml/modeling/decomposition/factor_analysis.py +195 -123
- snowflake/ml/modeling/decomposition/fast_ica.py +195 -123
- snowflake/ml/modeling/decomposition/incremental_pca.py +195 -123
- snowflake/ml/modeling/decomposition/kernel_pca.py +195 -123
- snowflake/ml/modeling/decomposition/mini_batch_dictionary_learning.py +195 -123
- snowflake/ml/modeling/decomposition/mini_batch_sparse_pca.py +195 -123
- snowflake/ml/modeling/decomposition/pca.py +195 -123
- snowflake/ml/modeling/decomposition/sparse_pca.py +195 -123
- snowflake/ml/modeling/decomposition/truncated_svd.py +195 -123
- snowflake/ml/modeling/discriminant_analysis/linear_discriminant_analysis.py +195 -123
- snowflake/ml/modeling/discriminant_analysis/quadratic_discriminant_analysis.py +195 -123
- snowflake/ml/modeling/ensemble/ada_boost_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/ada_boost_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/bagging_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/bagging_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/extra_trees_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/extra_trees_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/gradient_boosting_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/gradient_boosting_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/hist_gradient_boosting_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/hist_gradient_boosting_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/isolation_forest.py +195 -123
- snowflake/ml/modeling/ensemble/random_forest_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/random_forest_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/stacking_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/voting_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/voting_regressor.py +195 -123
- snowflake/ml/modeling/feature_selection/generic_univariate_select.py +195 -123
- snowflake/ml/modeling/feature_selection/select_fdr.py +195 -123
- snowflake/ml/modeling/feature_selection/select_fpr.py +195 -123
- snowflake/ml/modeling/feature_selection/select_fwe.py +195 -123
- snowflake/ml/modeling/feature_selection/select_k_best.py +195 -123
- snowflake/ml/modeling/feature_selection/select_percentile.py +195 -123
- snowflake/ml/modeling/feature_selection/sequential_feature_selector.py +195 -123
- snowflake/ml/modeling/feature_selection/variance_threshold.py +195 -123
- snowflake/ml/modeling/framework/_utils.py +8 -1
- snowflake/ml/modeling/framework/base.py +24 -6
- snowflake/ml/modeling/gaussian_process/gaussian_process_classifier.py +195 -123
- snowflake/ml/modeling/gaussian_process/gaussian_process_regressor.py +195 -123
- snowflake/ml/modeling/impute/iterative_imputer.py +195 -123
- snowflake/ml/modeling/impute/knn_imputer.py +195 -123
- snowflake/ml/modeling/impute/missing_indicator.py +195 -123
- snowflake/ml/modeling/impute/simple_imputer.py +4 -15
- snowflake/ml/modeling/kernel_approximation/additive_chi2_sampler.py +195 -123
- snowflake/ml/modeling/kernel_approximation/nystroem.py +195 -123
- snowflake/ml/modeling/kernel_approximation/polynomial_count_sketch.py +195 -123
- snowflake/ml/modeling/kernel_approximation/rbf_sampler.py +195 -123
- snowflake/ml/modeling/kernel_approximation/skewed_chi2_sampler.py +195 -123
- snowflake/ml/modeling/kernel_ridge/kernel_ridge.py +195 -123
- snowflake/ml/modeling/lightgbm/lgbm_classifier.py +198 -125
- snowflake/ml/modeling/lightgbm/lgbm_regressor.py +198 -125
- snowflake/ml/modeling/linear_model/ard_regression.py +195 -123
- snowflake/ml/modeling/linear_model/bayesian_ridge.py +195 -123
- snowflake/ml/modeling/linear_model/elastic_net.py +195 -123
- snowflake/ml/modeling/linear_model/elastic_net_cv.py +195 -123
- snowflake/ml/modeling/linear_model/gamma_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/huber_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/lars.py +195 -123
- snowflake/ml/modeling/linear_model/lars_cv.py +195 -123
- snowflake/ml/modeling/linear_model/lasso.py +195 -123
- snowflake/ml/modeling/linear_model/lasso_cv.py +195 -123
- snowflake/ml/modeling/linear_model/lasso_lars.py +195 -123
- snowflake/ml/modeling/linear_model/lasso_lars_cv.py +195 -123
- snowflake/ml/modeling/linear_model/lasso_lars_ic.py +195 -123
- snowflake/ml/modeling/linear_model/linear_regression.py +195 -123
- snowflake/ml/modeling/linear_model/logistic_regression.py +195 -123
- snowflake/ml/modeling/linear_model/logistic_regression_cv.py +195 -123
- snowflake/ml/modeling/linear_model/multi_task_elastic_net.py +195 -123
- snowflake/ml/modeling/linear_model/multi_task_elastic_net_cv.py +195 -123
- snowflake/ml/modeling/linear_model/multi_task_lasso.py +195 -123
- snowflake/ml/modeling/linear_model/multi_task_lasso_cv.py +195 -123
- snowflake/ml/modeling/linear_model/orthogonal_matching_pursuit.py +195 -123
- snowflake/ml/modeling/linear_model/passive_aggressive_classifier.py +195 -123
- snowflake/ml/modeling/linear_model/passive_aggressive_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/perceptron.py +195 -123
- snowflake/ml/modeling/linear_model/poisson_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/ransac_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/ridge.py +195 -123
- snowflake/ml/modeling/linear_model/ridge_classifier.py +195 -123
- snowflake/ml/modeling/linear_model/ridge_classifier_cv.py +195 -123
- snowflake/ml/modeling/linear_model/ridge_cv.py +195 -123
- snowflake/ml/modeling/linear_model/sgd_classifier.py +195 -123
- snowflake/ml/modeling/linear_model/sgd_one_class_svm.py +195 -123
- snowflake/ml/modeling/linear_model/sgd_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/theil_sen_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/tweedie_regressor.py +195 -123
- snowflake/ml/modeling/manifold/isomap.py +195 -123
- snowflake/ml/modeling/manifold/mds.py +195 -123
- snowflake/ml/modeling/manifold/spectral_embedding.py +195 -123
- snowflake/ml/modeling/manifold/tsne.py +195 -123
- snowflake/ml/modeling/mixture/bayesian_gaussian_mixture.py +195 -123
- snowflake/ml/modeling/mixture/gaussian_mixture.py +195 -123
- snowflake/ml/modeling/model_selection/grid_search_cv.py +42 -18
- snowflake/ml/modeling/model_selection/randomized_search_cv.py +42 -18
- snowflake/ml/modeling/multiclass/one_vs_one_classifier.py +195 -123
- snowflake/ml/modeling/multiclass/one_vs_rest_classifier.py +195 -123
- snowflake/ml/modeling/multiclass/output_code_classifier.py +195 -123
- snowflake/ml/modeling/naive_bayes/bernoulli_nb.py +195 -123
- snowflake/ml/modeling/naive_bayes/categorical_nb.py +195 -123
- snowflake/ml/modeling/naive_bayes/complement_nb.py +195 -123
- snowflake/ml/modeling/naive_bayes/gaussian_nb.py +195 -123
- snowflake/ml/modeling/naive_bayes/multinomial_nb.py +195 -123
- snowflake/ml/modeling/neighbors/k_neighbors_classifier.py +195 -123
- snowflake/ml/modeling/neighbors/k_neighbors_regressor.py +195 -123
- snowflake/ml/modeling/neighbors/kernel_density.py +195 -123
- snowflake/ml/modeling/neighbors/local_outlier_factor.py +195 -123
- snowflake/ml/modeling/neighbors/nearest_centroid.py +195 -123
- snowflake/ml/modeling/neighbors/nearest_neighbors.py +195 -123
- snowflake/ml/modeling/neighbors/neighborhood_components_analysis.py +195 -123
- snowflake/ml/modeling/neighbors/radius_neighbors_classifier.py +195 -123
- snowflake/ml/modeling/neighbors/radius_neighbors_regressor.py +195 -123
- snowflake/ml/modeling/neural_network/bernoulli_rbm.py +195 -123
- snowflake/ml/modeling/neural_network/mlp_classifier.py +195 -123
- snowflake/ml/modeling/neural_network/mlp_regressor.py +195 -123
- snowflake/ml/modeling/pipeline/pipeline.py +4 -4
- snowflake/ml/modeling/preprocessing/binarizer.py +1 -5
- snowflake/ml/modeling/preprocessing/k_bins_discretizer.py +1 -5
- snowflake/ml/modeling/preprocessing/label_encoder.py +1 -5
- snowflake/ml/modeling/preprocessing/max_abs_scaler.py +1 -5
- snowflake/ml/modeling/preprocessing/min_max_scaler.py +10 -12
- snowflake/ml/modeling/preprocessing/normalizer.py +1 -5
- snowflake/ml/modeling/preprocessing/one_hot_encoder.py +1 -5
- snowflake/ml/modeling/preprocessing/ordinal_encoder.py +1 -5
- snowflake/ml/modeling/preprocessing/polynomial_features.py +195 -123
- snowflake/ml/modeling/preprocessing/robust_scaler.py +1 -5
- snowflake/ml/modeling/preprocessing/standard_scaler.py +11 -11
- snowflake/ml/modeling/semi_supervised/label_propagation.py +195 -123
- snowflake/ml/modeling/semi_supervised/label_spreading.py +195 -123
- snowflake/ml/modeling/svm/linear_svc.py +195 -123
- snowflake/ml/modeling/svm/linear_svr.py +195 -123
- snowflake/ml/modeling/svm/nu_svc.py +195 -123
- snowflake/ml/modeling/svm/nu_svr.py +195 -123
- snowflake/ml/modeling/svm/svc.py +195 -123
- snowflake/ml/modeling/svm/svr.py +195 -123
- snowflake/ml/modeling/tree/decision_tree_classifier.py +195 -123
- snowflake/ml/modeling/tree/decision_tree_regressor.py +195 -123
- snowflake/ml/modeling/tree/extra_tree_classifier.py +195 -123
- snowflake/ml/modeling/tree/extra_tree_regressor.py +195 -123
- snowflake/ml/modeling/xgboost/xgb_classifier.py +195 -123
- snowflake/ml/modeling/xgboost/xgb_regressor.py +195 -123
- snowflake/ml/modeling/xgboost/xgbrf_classifier.py +195 -123
- snowflake/ml/modeling/xgboost/xgbrf_regressor.py +195 -123
- snowflake/ml/registry/_manager/model_manager.py +5 -1
- snowflake/ml/registry/model_registry.py +99 -26
- snowflake/ml/registry/registry.py +3 -2
- snowflake/ml/version.py +1 -1
- {snowflake_ml_python-1.3.1.dist-info → snowflake_ml_python-1.4.1.dist-info}/METADATA +94 -55
- {snowflake_ml_python-1.3.1.dist-info → snowflake_ml_python-1.4.1.dist-info}/RECORD +218 -212
- snowflake/ml/model/_model_composer/model_runtime/model_runtime.py +0 -97
- {snowflake_ml_python-1.3.1.dist-info → snowflake_ml_python-1.4.1.dist-info}/LICENSE.txt +0 -0
- {snowflake_ml_python-1.3.1.dist-info → snowflake_ml_python-1.4.1.dist-info}/WHEEL +0 -0
- {snowflake_ml_python-1.3.1.dist-info → snowflake_ml_python-1.4.1.dist-info}/top_level.txt +0 -0
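
The remaining hunks are representative of the autogenerated estimator modules; the class name in the hunk headers points to snowflake/ml/modeling/ensemble/gradient_boosting_classifier.py, and the same +195/-123 pattern repeats across most modeling classes. The visible changes: the generated fit() (and its telemetry decorator) is replaced by a private _fit(); the probability-style methods (predict_proba, predict_log_proba, decision_function, score_samples) derive per-class output column names through new _get_output_column_names() and _align_expected_output_names() helpers; and to_sklearn()/to_xgboost()/to_lightgbm()/_get_dependencies() move earlier in the class, ahead of _generate_model_signatures(). As a rough, simplified sketch of the per-class naming rule (the prefix and class labels below are assumed examples, not library code):

    import numpy as np

    def output_column_names(prefix: str, classes: np.ndarray) -> list:
        # One output column per class label, mirroring f"{output_cols_prefix}{c}" in the diff below.
        return [f"{prefix}{c}" for c in classes.tolist()]

    print(output_column_names("PREDICT_PROBA_", np.array([0, 1])))
    # ['PREDICT_PROBA_0', 'PREDICT_PROBA_1']
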
@@ -33,6 +33,15 @@ from snowflake.ml.modeling._internal.transformer_protocols import (
     BatchInferenceKwargsTypedDict,
     ScoreKwargsTypedDict
 )
+from snowflake.ml.model._signatures import utils as model_signature_utils
+from snowflake.ml.model.model_signature import (
+    BaseFeatureSpec,
+    DataType,
+    FeatureSpec,
+    ModelSignature,
+    _infer_signature,
+    _rename_signature_with_snowflake_identifiers,
+)
 
 from snowflake.ml.modeling._internal.model_transformer_builder import ModelTransformerBuilder
 
@@ -43,16 +52,6 @@ from snowflake.ml.modeling._internal.estimator_utils import (
     validate_sklearn_args,
 )
 
-from snowflake.ml.model.model_signature import (
-    DataType,
-    FeatureSpec,
-    ModelSignature,
-    _infer_signature,
-    _rename_signature_with_snowflake_identifiers,
-    BaseFeatureSpec,
-)
-from snowflake.ml.model._signatures import utils as model_signature_utils
-
 _PROJECT = "ModelDevelopment"
 # Derive subproject from module name by removing "sklearn"
 # and converting module name from underscore to CamelCase
@@ -391,12 +390,7 @@ class GradientBoostingClassifier(BaseTransformer):
         )
         return selected_cols
 
-    @telemetry.send_api_usage_telemetry(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
-    def fit(self, dataset: Union[DataFrame, pd.DataFrame]) -> "GradientBoostingClassifier":
+    def _fit(self, dataset: Union[DataFrame, pd.DataFrame]) -> "GradientBoostingClassifier":
         """Fit the gradient boosting model
         For more details on this function, see [sklearn.ensemble.GradientBoostingClassifier.fit]
         (https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingClassifier.html#sklearn.ensemble.GradientBoostingClassifier.fit)
@@ -423,12 +417,14 @@ class GradientBoostingClassifier(BaseTransformer):
 
         self._snowpark_cols = dataset.select(self.input_cols).columns
 
-
+        # If we are already in a stored procedure, no need to kick off another one.
         if SNOWML_SPROC_ENV in os.environ:
             statement_params = telemetry.get_function_usage_statement_params(
                 project=_PROJECT,
                 subproject=_SUBPROJECT,
-                function_name=telemetry.get_statement_params_full_func_name(inspect.currentframe(), GradientBoostingClassifier.__class__.__name__),
+                function_name=telemetry.get_statement_params_full_func_name(
+                    inspect.currentframe(), GradientBoostingClassifier.__class__.__name__
+                ),
                 api_calls=[Session.call],
                 custom_tags=dict([("autogen", True)]) if self._autogenerated else None,
             )
@@ -449,7 +445,7 @@ class GradientBoostingClassifier(BaseTransformer):
         )
         self._sklearn_object = model_trainer.train()
         self._is_fitted = True
-        self.
+        self._generate_model_signatures(dataset)
         return self
 
     def _batch_inference_validate_snowpark(
@@ -525,7 +521,9 @@ class GradientBoostingClassifier(BaseTransformer):
             # when it is classifier, infer the datatype from label columns
             if expected_type_inferred == "" and 'predict' in self.model_signatures:
                 # Batch inference takes a single expected output column type. Use the first columns type for now.
-                label_cols_signatures = [row for row in self.model_signatures['predict'].outputs if row.name in self.output_cols]
+                label_cols_signatures = [
+                    row for row in self.model_signatures['predict'].outputs if row.name in self.output_cols
+                ]
                 if len(label_cols_signatures) == 0:
                     error_str = f"Output columns {self.output_cols} do not match model signatures {self.model_signatures['predict'].outputs}."
                     raise exceptions.SnowflakeMLException(
@@ -533,25 +531,22 @@ class GradientBoostingClassifier(BaseTransformer):
                         original_exception=ValueError(error_str),
                     )
 
-                expected_type_inferred = convert_sp_to_sf_type(
-                    label_cols_signatures[0].as_snowpark_type()
-                )
+                expected_type_inferred = convert_sp_to_sf_type(label_cols_signatures[0].as_snowpark_type())
 
             self._deps = self._batch_inference_validate_snowpark(dataset=dataset, inference_method=inference_method)
-            assert isinstance(dataset._session, Session)  # mypy does not recognize the check in _batch_inference_validate_snowpark()
+            assert isinstance(
+                dataset._session, Session
+            )  # mypy does not recognize the check in _batch_inference_validate_snowpark()
 
             transform_kwargs = dict(
-                session = dataset._session,
-                dependencies = self._deps,
-                drop_input_cols = self._drop_input_cols,
-                expected_output_cols_type = expected_type_inferred,
+                session=dataset._session,
+                dependencies=self._deps,
+                drop_input_cols=self._drop_input_cols,
+                expected_output_cols_type=expected_type_inferred,
             )
 
         elif isinstance(dataset, pd.DataFrame):
-            transform_kwargs = dict(
-                snowpark_input_cols = self._snowpark_cols,
-                drop_input_cols = self._drop_input_cols
-            )
+            transform_kwargs = dict(snowpark_input_cols=self._snowpark_cols, drop_input_cols=self._drop_input_cols)
 
         transform_handlers = ModelTransformerBuilder.build(
             dataset=dataset,
@@ -591,7 +586,7 @@ class GradientBoostingClassifier(BaseTransformer):
             Transformed dataset.
         """
         super()._check_dataset_type(dataset)
-        inference_method="transform"
+        inference_method = "transform"
 
         # This dictionary contains optional kwargs for batch inference. These kwargs
         # are specific to the type of dataset used.
@@ -628,17 +623,14 @@ class GradientBoostingClassifier(BaseTransformer):
             assert isinstance(dataset._session, Session)  # mypy does not recognize the check in _batch_inference_validate_snowpark()
 
             transform_kwargs = dict(
-                session = dataset._session,
-                dependencies = self._deps,
-                drop_input_cols = self._drop_input_cols,
-                expected_output_cols_type = expected_dtype,
+                session=dataset._session,
+                dependencies=self._deps,
+                drop_input_cols=self._drop_input_cols,
+                expected_output_cols_type=expected_dtype,
             )
 
         elif isinstance(dataset, pd.DataFrame):
-            transform_kwargs = dict(
-                snowpark_input_cols = self._snowpark_cols,
-                drop_input_cols = self._drop_input_cols
-            )
+            transform_kwargs = dict(snowpark_input_cols=self._snowpark_cols, drop_input_cols=self._drop_input_cols)
 
         transform_handlers = ModelTransformerBuilder.build(
             dataset=dataset,
@@ -657,7 +649,11 @@ class GradientBoostingClassifier(BaseTransformer):
         return output_df
 
     @available_if(original_estimator_has_callable("fit_predict"))  # type: ignore[misc]
-    def fit_predict(
+    def fit_predict(
+        self,
+        dataset: Union[DataFrame, pd.DataFrame],
+        output_cols_prefix: str = "fit_predict_",
+    ) -> Union[DataFrame, pd.DataFrame]:
         """ Method not supported for this class.
 
@@ -682,7 +678,9 @@ class GradientBoostingClassifier(BaseTransformer):
         )
         output_result, fitted_estimator = model_trainer.train_fit_predict(
             drop_input_cols=self._drop_input_cols,
-            expected_output_cols_list=
+            expected_output_cols_list=(
+                self.output_cols if self.output_cols else self._get_output_column_names(output_cols_prefix)
+            ),
         )
         self._sklearn_object = fitted_estimator
         self._is_fitted = True
@@ -699,6 +697,62 @@ class GradientBoostingClassifier(BaseTransformer):
         assert self._sklearn_object is not None
         return self._sklearn_object.embedding_
 
+
+    def _get_output_column_names(self, output_cols_prefix: str, output_cols: Optional[List[str]] = None) -> List[str]:
+        """ Returns the list of output columns for predict_proba(), decision_function(), etc.. functions.
+        Returns a list with output_cols_prefix as the only element if the estimator is not a classifier.
+        """
+        output_cols_prefix = identifier.resolve_identifier(output_cols_prefix)
+        # The following condition is introduced for kneighbors methods, and not used in other methods
+        if output_cols:
+            output_cols = [
+                identifier.concat_names([output_cols_prefix, identifier.resolve_identifier(c)])
+                for c in output_cols
+            ]
+        elif getattr(self._sklearn_object, "classes_", None) is None:
+            output_cols = [output_cols_prefix]
+        elif self._sklearn_object is not None:
+            classes = self._sklearn_object.classes_
+            if isinstance(classes, numpy.ndarray):
+                output_cols = [f'{output_cols_prefix}{str(c)}' for c in classes.tolist()]
+            elif isinstance(classes, list) and len(classes) > 0 and isinstance(classes[0], numpy.ndarray):
+                # If the estimator is a multioutput estimator, classes_ will be a list of ndarrays.
+                output_cols = []
+                for i, cl in enumerate(classes):
+                    # For binary classification, there is only one output column for each class
+                    # ndarray as the two classes are complementary.
+                    if len(cl) == 2:
+                        output_cols.append(f'{output_cols_prefix}{i}_{cl[0]}')
+                    else:
+                        output_cols.extend([
+                            f'{output_cols_prefix}{i}_{c}' for c in cl.tolist()
+                        ])
+        else:
+            output_cols = []
+
+        # Make sure column names are valid snowflake identifiers.
+        assert output_cols is not None  # Make MyPy happy
+        rv = [identifier.rename_to_valid_snowflake_identifier(c) for c in output_cols]
+
+        return rv
+
+    def _align_expected_output_names(
+        self, method: str, dataset: DataFrame, expected_output_cols_list: List[str], output_cols_prefix: str
+    ) -> List[str]:
+        # in case the inferred output column names dimension is different
+        # we use one line of snowpark dataframe and put it into sklearn estimator using pandas
+        output_df_pd = getattr(self, method)(dataset.limit(1).to_pandas(), output_cols_prefix)
+        output_df_columns = list(output_df_pd.columns)
+        output_df_columns_set: Set[str] = set(output_df_columns) - set(dataset.columns)
+        if self.sample_weight_col:
+            output_df_columns_set -= set(self.sample_weight_col)
+        # if the dimension of inferred output column names is correct; use it
+        if len(expected_output_cols_list) == len(output_df_columns_set):
+            return expected_output_cols_list
+        # otherwise, use the sklearn estimator's output
+        else:
+            return sorted(list(output_df_columns_set), key=lambda x: output_df_columns.index(x))
+
     @available_if(original_estimator_has_callable("predict_proba"))  # type: ignore[misc]
     @telemetry.send_api_usage_telemetry(
         project=_PROJECT,
@@ -731,24 +785,28 @@ class GradientBoostingClassifier(BaseTransformer):
         # are specific to the type of dataset used.
         transform_kwargs: BatchInferenceKwargsTypedDict = dict()
 
+        expected_output_cols = self._get_output_column_names(output_cols_prefix)
+
         if isinstance(dataset, DataFrame):
             self._deps = self._batch_inference_validate_snowpark(
                 dataset=dataset,
                 inference_method=inference_method,
             )
-            assert isinstance(dataset._session, Session)  # mypy does not recognize the check in _batch_inference_validate_snowpark()
+            assert isinstance(
+                dataset._session, Session
+            )  # mypy does not recognize the check in _batch_inference_validate_snowpark()
             transform_kwargs = dict(
                 session=dataset._session,
                 dependencies=self._deps,
-                drop_input_cols = self._drop_input_cols,
+                drop_input_cols=self._drop_input_cols,
                 expected_output_cols_type="float",
             )
+            expected_output_cols = self._align_expected_output_names(
+                inference_method, dataset, expected_output_cols, output_cols_prefix
+            )
 
         elif isinstance(dataset, pd.DataFrame):
-            transform_kwargs = dict(
-                snowpark_input_cols = self._snowpark_cols,
-                drop_input_cols = self._drop_input_cols
-            )
+            transform_kwargs = dict(snowpark_input_cols=self._snowpark_cols, drop_input_cols=self._drop_input_cols)
 
         transform_handlers = ModelTransformerBuilder.build(
             dataset=dataset,
@@ -760,7 +818,7 @@ class GradientBoostingClassifier(BaseTransformer):
         output_df: DATAFRAME_TYPE = transform_handlers.batch_inference(
             inference_method=inference_method,
             input_cols=self.input_cols,
-            expected_output_cols=
+            expected_output_cols=expected_output_cols,
             **transform_kwargs
         )
         return output_df
@@ -792,7 +850,8 @@ class GradientBoostingClassifier(BaseTransformer):
             Output dataset with log probability of the sample for each class in the model.
         """
         super()._check_dataset_type(dataset)
-        inference_method="predict_log_proba"
+        inference_method = "predict_log_proba"
+        expected_output_cols = self._get_output_column_names(output_cols_prefix)
 
         # This dictionary contains optional kwargs for batch inference. These kwargs
         # are specific to the type of dataset used.
@@ -803,18 +862,20 @@ class GradientBoostingClassifier(BaseTransformer):
                 dataset=dataset,
                 inference_method=inference_method,
             )
-            assert isinstance(dataset._session, Session)  # mypy does not recognize the check in _batch_inference_validate_snowpark()
+            assert isinstance(
+                dataset._session, Session
+            )  # mypy does not recognize the check in _batch_inference_validate_snowpark()
             transform_kwargs = dict(
                 session=dataset._session,
                 dependencies=self._deps,
-                drop_input_cols = self._drop_input_cols,
+                drop_input_cols=self._drop_input_cols,
                 expected_output_cols_type="float",
             )
+            expected_output_cols = self._align_expected_output_names(
+                inference_method, dataset, expected_output_cols, output_cols_prefix
+            )
         elif isinstance(dataset, pd.DataFrame):
-            transform_kwargs = dict(
-                snowpark_input_cols = self._snowpark_cols,
-                drop_input_cols = self._drop_input_cols
-            )
+            transform_kwargs = dict(snowpark_input_cols=self._snowpark_cols, drop_input_cols=self._drop_input_cols)
 
         transform_handlers = ModelTransformerBuilder.build(
             dataset=dataset,
@@ -827,7 +888,7 @@ class GradientBoostingClassifier(BaseTransformer):
         output_df: DATAFRAME_TYPE = transform_handlers.batch_inference(
             inference_method=inference_method,
             input_cols=self.input_cols,
-            expected_output_cols=
+            expected_output_cols=expected_output_cols,
             **transform_kwargs
         )
         return output_df
@@ -855,30 +916,34 @@ class GradientBoostingClassifier(BaseTransformer):
             Output dataset with results of the decision function for the samples in input dataset.
         """
         super()._check_dataset_type(dataset)
-        inference_method="decision_function"
+        inference_method = "decision_function"
 
         # This dictionary contains optional kwargs for batch inference. These kwargs
         # are specific to the type of dataset used.
         transform_kwargs: BatchInferenceKwargsTypedDict = dict()
 
+        expected_output_cols = self._get_output_column_names(output_cols_prefix)
+
         if isinstance(dataset, DataFrame):
             self._deps = self._batch_inference_validate_snowpark(
                 dataset=dataset,
                 inference_method=inference_method,
             )
-            assert isinstance(dataset._session, Session)  # mypy does not recognize the check in _batch_inference_validate_snowpark()
+            assert isinstance(
+                dataset._session, Session
+            )  # mypy does not recognize the check in _batch_inference_validate_snowpark()
             transform_kwargs = dict(
                 session=dataset._session,
                 dependencies=self._deps,
-                drop_input_cols = self._drop_input_cols,
+                drop_input_cols=self._drop_input_cols,
                 expected_output_cols_type="float",
             )
+            expected_output_cols = self._align_expected_output_names(
+                inference_method, dataset, expected_output_cols, output_cols_prefix
+            )
 
         elif isinstance(dataset, pd.DataFrame):
-            transform_kwargs = dict(
-                snowpark_input_cols = self._snowpark_cols,
-                drop_input_cols = self._drop_input_cols
-            )
+            transform_kwargs = dict(snowpark_input_cols=self._snowpark_cols, drop_input_cols=self._drop_input_cols)
 
         transform_handlers = ModelTransformerBuilder.build(
             dataset=dataset,
@@ -891,7 +956,7 @@ class GradientBoostingClassifier(BaseTransformer):
         output_df: DATAFRAME_TYPE = transform_handlers.batch_inference(
             inference_method=inference_method,
             input_cols=self.input_cols,
-            expected_output_cols=
+            expected_output_cols=expected_output_cols,
             **transform_kwargs
         )
         return output_df
@@ -920,12 +985,14 @@ class GradientBoostingClassifier(BaseTransformer):
             Output dataset with probability of the sample for each class in the model.
         """
         super()._check_dataset_type(dataset)
-        inference_method="score_samples"
+        inference_method = "score_samples"
 
         # This dictionary contains optional kwargs for batch inference. These kwargs
         # are specific to the type of dataset used.
         transform_kwargs: BatchInferenceKwargsTypedDict = dict()
 
+        expected_output_cols = self._get_output_column_names(output_cols_prefix)
+
         if isinstance(dataset, DataFrame):
             self._deps = self._batch_inference_validate_snowpark(
                 dataset=dataset,
@@ -938,6 +1005,9 @@ class GradientBoostingClassifier(BaseTransformer):
                 drop_input_cols = self._drop_input_cols,
                 expected_output_cols_type="float",
             )
+            expected_output_cols = self._align_expected_output_names(
+                inference_method, dataset, expected_output_cols, output_cols_prefix
+            )
 
         elif isinstance(dataset, pd.DataFrame):
             transform_kwargs = dict(
@@ -956,7 +1026,7 @@ class GradientBoostingClassifier(BaseTransformer):
         output_df: DATAFRAME_TYPE = transform_handlers.batch_inference(
             inference_method=inference_method,
             input_cols=self.input_cols,
-            expected_output_cols=
+            expected_output_cols=expected_output_cols,
             **transform_kwargs
         )
         return output_df
@@ -1103,50 +1173,84 @@ class GradientBoostingClassifier(BaseTransformer):
         )
         return output_df
 
+
+
+    def to_sklearn(self) -> Any:
+        """Get sklearn.ensemble.GradientBoostingClassifier object.
+        """
+        if self._sklearn_object is None:
+            self._sklearn_object = self._create_sklearn_object()
+        return self._sklearn_object
+
+    def to_xgboost(self) -> Any:
+        raise exceptions.SnowflakeMLException(
+            error_code=error_codes.METHOD_NOT_ALLOWED,
+            original_exception=AttributeError(
+                modeling_error_messages.UNSUPPORTED_MODEL_CONVERSION.format(
+                    "to_xgboost()",
+                    "to_sklearn()"
+                )
+            ),
+        )
+
+    def to_lightgbm(self) -> Any:
+        raise exceptions.SnowflakeMLException(
+            error_code=error_codes.METHOD_NOT_ALLOWED,
+            original_exception=AttributeError(
+                modeling_error_messages.UNSUPPORTED_MODEL_CONVERSION.format(
+                    "to_lightgbm()",
+                    "to_sklearn()"
+                )
+            ),
+        )
 
-    def
+    def _get_dependencies(self) -> List[str]:
+        return self._deps
+
+
+    def _generate_model_signatures(self, dataset: Union[DataFrame, pd.DataFrame]) -> None:
         self._model_signature_dict = dict()
 
         PROB_FUNCTIONS = ["predict_log_proba", "predict_proba", "decision_function"]
 
-        inputs = list(_infer_signature(dataset[self.input_cols], "input"))
+        inputs = list(_infer_signature(dataset[self.input_cols], "input", use_snowflake_identifiers=True))
         outputs: List[BaseFeatureSpec] = []
         if hasattr(self, "predict"):
             # keep mypy happy
-            assert self._sklearn_object is not None and hasattr(self._sklearn_object, "_estimator_type")
+            assert self._sklearn_object is not None and hasattr(self._sklearn_object, "_estimator_type")
             # For classifier, the type of predict is the same as the type of label
-            if self._sklearn_object._estimator_type ==
-
+            if self._sklearn_object._estimator_type == "classifier":
+                # label columns is the desired type for output
                 outputs = list(_infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True))
                 # rename the output columns
                 outputs = list(model_signature_utils.rename_features(outputs, self.output_cols))
-                self._model_signature_dict["predict"] = ModelSignature(
-
-
+                self._model_signature_dict["predict"] = ModelSignature(
+                    inputs, ([] if self._drop_input_cols else inputs) + outputs
+                )
             # For mixture models that use the density mixin, `predict` returns the argmax of the log prob.
             # For outlier models, returns -1 for outliers and 1 for inliers.
-            # Clusterer returns int64 cluster labels.
+            # Clusterer returns int64 cluster labels.
             elif self._sklearn_object._estimator_type in ["DensityEstimator", "clusterer", "outlier_detector"]:
                 outputs = [FeatureSpec(dtype=DataType.INT64, name=c) for c in self.output_cols]
-                self._model_signature_dict["predict"] = ModelSignature(
-
-
-
+                self._model_signature_dict["predict"] = ModelSignature(
+                    inputs, ([] if self._drop_input_cols else inputs) + outputs
+                )
+
             # For regressor, the type of predict is float64
-            elif self._sklearn_object._estimator_type ==
+            elif self._sklearn_object._estimator_type == "regressor":
                 outputs = [FeatureSpec(dtype=DataType.DOUBLE, name=c) for c in self.output_cols]
-                self._model_signature_dict["predict"] = ModelSignature(
-
-
-
+                self._model_signature_dict["predict"] = ModelSignature(
+                    inputs, ([] if self._drop_input_cols else inputs) + outputs
+                )
+
         for prob_func in PROB_FUNCTIONS:
             if hasattr(self, prob_func):
                 output_cols_prefix: str = f"{prob_func}_"
                 output_column_names = self._get_output_column_names(output_cols_prefix)
                 outputs = [FeatureSpec(dtype=DataType.DOUBLE, name=c) for c in output_column_names]
-                self._model_signature_dict[prob_func] = ModelSignature(
-
-
+                self._model_signature_dict[prob_func] = ModelSignature(
+                    inputs, ([] if self._drop_input_cols else inputs) + outputs
+                )
 
         # Output signature names may still need to be renamed, since they were not created with `_infer_signature`.
         items = list(self._model_signature_dict.items())
@@ -1159,10 +1263,10 @@ class GradientBoostingClassifier(BaseTransformer):
         """Returns model signature of current class.
 
         Raises:
-
+            SnowflakeMLException: If estimator is not fitted, then model signature cannot be inferred
 
         Returns:
-            Dict
+            Dict with each method and its input output signature
         """
         if self._model_signature_dict is None:
             raise exceptions.SnowflakeMLException(
@@ -1170,35 +1274,3 @@ class GradientBoostingClassifier(BaseTransformer):
                 original_exception=RuntimeError("Estimator not fitted before accessing property model_signatures!"),
             )
         return self._model_signature_dict
-
-    def to_sklearn(self) -> Any:
-        """Get sklearn.ensemble.GradientBoostingClassifier object.
-        """
-        if self._sklearn_object is None:
-            self._sklearn_object = self._create_sklearn_object()
-        return self._sklearn_object
-
-    def to_xgboost(self) -> Any:
-        raise exceptions.SnowflakeMLException(
-            error_code=error_codes.METHOD_NOT_ALLOWED,
-            original_exception=AttributeError(
-                modeling_error_messages.UNSUPPORTED_MODEL_CONVERSION.format(
-                    "to_xgboost()",
-                    "to_sklearn()"
-                )
-            ),
-        )
-
-    def to_lightgbm(self) -> Any:
-        raise exceptions.SnowflakeMLException(
-            error_code=error_codes.METHOD_NOT_ALLOWED,
-            original_exception=AttributeError(
-                modeling_error_messages.UNSUPPORTED_MODEL_CONVERSION.format(
-                    "to_lightgbm()",
-                    "to_sklearn()"
-                )
-            ),
-        )
-
-    def _get_dependencies(self) -> List[str]:
-        return self._deps
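
For orientation, a minimal usage sketch of the conversion helpers shown above (import path, column names, and training_df are assumptions for illustration; only to_sklearn() returns an object for a scikit-learn-backed estimator, while to_xgboost() and to_lightgbm() raise SnowflakeMLException):

    from snowflake.ml.modeling.ensemble import GradientBoostingClassifier

    clf = GradientBoostingClassifier(
        input_cols=["F1", "F2"],     # assumed feature columns
        label_cols=["TARGET"],       # assumed label column
        output_cols=["PREDICTION"],  # assumed prediction column
    )
    clf.fit(training_df)             # training_df: a Snowpark or pandas DataFrame (assumed)
    sk_model = clf.to_sklearn()      # the fitted sklearn.ensemble.GradientBoostingClassifier
    # clf.to_xgboost()               # raises: conversion not supported for sklearn-backed estimators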