snowflake-ml-python 1.1.0__py3-none-any.whl → 1.1.2__py3-none-any.whl
This diff compares the contents of two publicly released versions of this package as published to one of the supported registries. It is provided for informational purposes only and reflects the packages exactly as they appear in their respective public registries.
- snowflake/cortex/_complete.py +1 -1
- snowflake/cortex/_extract_answer.py +1 -1
- snowflake/cortex/_sentiment.py +1 -1
- snowflake/cortex/_summarize.py +1 -1
- snowflake/cortex/_translate.py +1 -1
- snowflake/ml/_internal/env_utils.py +68 -6
- snowflake/ml/_internal/file_utils.py +34 -4
- snowflake/ml/_internal/telemetry.py +79 -91
- snowflake/ml/_internal/utils/identifier.py +78 -72
- snowflake/ml/_internal/utils/retryable_http.py +16 -4
- snowflake/ml/_internal/utils/spcs_attribution_utils.py +122 -0
- snowflake/ml/dataset/dataset.py +1 -1
- snowflake/ml/model/_api.py +21 -14
- snowflake/ml/model/_client/model/model_impl.py +176 -0
- snowflake/ml/model/_client/model/model_method_info.py +19 -0
- snowflake/ml/model/_client/model/model_version_impl.py +291 -0
- snowflake/ml/model/_client/ops/metadata_ops.py +107 -0
- snowflake/ml/model/_client/ops/model_ops.py +308 -0
- snowflake/ml/model/_client/sql/model.py +75 -0
- snowflake/ml/model/_client/sql/model_version.py +213 -0
- snowflake/ml/model/_client/sql/stage.py +40 -0
- snowflake/ml/model/_deploy_client/image_builds/server_image_builder.py +3 -4
- snowflake/ml/model/_deploy_client/image_builds/templates/image_build_job_spec_template +24 -8
- snowflake/ml/model/_deploy_client/image_builds/templates/kaniko_shell_script_template +23 -0
- snowflake/ml/model/_deploy_client/snowservice/deploy.py +14 -2
- snowflake/ml/model/_deploy_client/utils/constants.py +1 -0
- snowflake/ml/model/_deploy_client/warehouse/deploy.py +2 -2
- snowflake/ml/model/_model_composer/model_composer.py +31 -9
- snowflake/ml/model/_model_composer/model_manifest/model_manifest.py +25 -10
- snowflake/ml/model/_model_composer/model_manifest/model_manifest_schema.py +2 -2
- snowflake/ml/model/_model_composer/model_method/infer_function.py_template +2 -1
- snowflake/ml/model/_model_composer/model_method/model_method.py +34 -3
- snowflake/ml/model/_model_composer/model_runtime/model_runtime.py +1 -1
- snowflake/ml/model/_packager/model_handlers/huggingface_pipeline.py +3 -1
- snowflake/ml/model/_packager/model_handlers/snowmlmodel.py +10 -28
- snowflake/ml/model/_packager/model_meta/model_meta.py +18 -16
- snowflake/ml/model/_signatures/snowpark_handler.py +1 -1
- snowflake/ml/model/model_signature.py +108 -53
- snowflake/ml/model/type_hints.py +1 -0
- snowflake/ml/modeling/_internal/distributed_hpo_trainer.py +554 -0
- snowflake/ml/modeling/_internal/estimator_protocols.py +1 -60
- snowflake/ml/modeling/_internal/model_specifications.py +146 -0
- snowflake/ml/modeling/_internal/model_trainer.py +13 -0
- snowflake/ml/modeling/_internal/model_trainer_builder.py +78 -0
- snowflake/ml/modeling/_internal/pandas_trainer.py +54 -0
- snowflake/ml/modeling/_internal/snowpark_handlers.py +6 -760
- snowflake/ml/modeling/_internal/snowpark_trainer.py +331 -0
- snowflake/ml/modeling/calibration/calibrated_classifier_cv.py +108 -135
- snowflake/ml/modeling/cluster/affinity_propagation.py +106 -135
- snowflake/ml/modeling/cluster/agglomerative_clustering.py +106 -135
- snowflake/ml/modeling/cluster/birch.py +106 -135
- snowflake/ml/modeling/cluster/bisecting_k_means.py +106 -135
- snowflake/ml/modeling/cluster/dbscan.py +106 -135
- snowflake/ml/modeling/cluster/feature_agglomeration.py +106 -135
- snowflake/ml/modeling/cluster/k_means.py +105 -135
- snowflake/ml/modeling/cluster/mean_shift.py +106 -135
- snowflake/ml/modeling/cluster/mini_batch_k_means.py +105 -135
- snowflake/ml/modeling/cluster/optics.py +106 -135
- snowflake/ml/modeling/cluster/spectral_biclustering.py +106 -135
- snowflake/ml/modeling/cluster/spectral_clustering.py +106 -135
- snowflake/ml/modeling/cluster/spectral_coclustering.py +106 -135
- snowflake/ml/modeling/compose/column_transformer.py +106 -135
- snowflake/ml/modeling/compose/transformed_target_regressor.py +108 -135
- snowflake/ml/modeling/covariance/elliptic_envelope.py +106 -135
- snowflake/ml/modeling/covariance/empirical_covariance.py +99 -128
- snowflake/ml/modeling/covariance/graphical_lasso.py +106 -135
- snowflake/ml/modeling/covariance/graphical_lasso_cv.py +106 -135
- snowflake/ml/modeling/covariance/ledoit_wolf.py +104 -133
- snowflake/ml/modeling/covariance/min_cov_det.py +106 -135
- snowflake/ml/modeling/covariance/oas.py +99 -128
- snowflake/ml/modeling/covariance/shrunk_covariance.py +103 -132
- snowflake/ml/modeling/decomposition/dictionary_learning.py +106 -135
- snowflake/ml/modeling/decomposition/factor_analysis.py +106 -135
- snowflake/ml/modeling/decomposition/fast_ica.py +106 -135
- snowflake/ml/modeling/decomposition/incremental_pca.py +106 -135
- snowflake/ml/modeling/decomposition/kernel_pca.py +106 -135
- snowflake/ml/modeling/decomposition/mini_batch_dictionary_learning.py +106 -135
- snowflake/ml/modeling/decomposition/mini_batch_sparse_pca.py +106 -135
- snowflake/ml/modeling/decomposition/pca.py +106 -135
- snowflake/ml/modeling/decomposition/sparse_pca.py +106 -135
- snowflake/ml/modeling/decomposition/truncated_svd.py +106 -135
- snowflake/ml/modeling/discriminant_analysis/linear_discriminant_analysis.py +108 -135
- snowflake/ml/modeling/discriminant_analysis/quadratic_discriminant_analysis.py +108 -135
- snowflake/ml/modeling/ensemble/ada_boost_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/ada_boost_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/bagging_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/bagging_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/extra_trees_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/extra_trees_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/gradient_boosting_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/gradient_boosting_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/hist_gradient_boosting_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/hist_gradient_boosting_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/isolation_forest.py +106 -135
- snowflake/ml/modeling/ensemble/random_forest_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/random_forest_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/stacking_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/voting_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/voting_regressor.py +108 -135
- snowflake/ml/modeling/feature_selection/generic_univariate_select.py +101 -128
- snowflake/ml/modeling/feature_selection/select_fdr.py +99 -126
- snowflake/ml/modeling/feature_selection/select_fpr.py +99 -126
- snowflake/ml/modeling/feature_selection/select_fwe.py +99 -126
- snowflake/ml/modeling/feature_selection/select_k_best.py +100 -127
- snowflake/ml/modeling/feature_selection/select_percentile.py +99 -126
- snowflake/ml/modeling/feature_selection/sequential_feature_selector.py +106 -135
- snowflake/ml/modeling/feature_selection/variance_threshold.py +95 -124
- snowflake/ml/modeling/framework/base.py +83 -1
- snowflake/ml/modeling/gaussian_process/gaussian_process_classifier.py +108 -135
- snowflake/ml/modeling/gaussian_process/gaussian_process_regressor.py +108 -135
- snowflake/ml/modeling/impute/iterative_imputer.py +106 -135
- snowflake/ml/modeling/impute/knn_imputer.py +106 -135
- snowflake/ml/modeling/impute/missing_indicator.py +106 -135
- snowflake/ml/modeling/impute/simple_imputer.py +9 -1
- snowflake/ml/modeling/kernel_approximation/additive_chi2_sampler.py +96 -125
- snowflake/ml/modeling/kernel_approximation/nystroem.py +106 -135
- snowflake/ml/modeling/kernel_approximation/polynomial_count_sketch.py +106 -135
- snowflake/ml/modeling/kernel_approximation/rbf_sampler.py +105 -134
- snowflake/ml/modeling/kernel_approximation/skewed_chi2_sampler.py +103 -132
- snowflake/ml/modeling/kernel_ridge/kernel_ridge.py +108 -135
- snowflake/ml/modeling/lightgbm/lgbm_classifier.py +90 -118
- snowflake/ml/modeling/lightgbm/lgbm_regressor.py +90 -118
- snowflake/ml/modeling/linear_model/ard_regression.py +108 -135
- snowflake/ml/modeling/linear_model/bayesian_ridge.py +108 -135
- snowflake/ml/modeling/linear_model/elastic_net.py +108 -135
- snowflake/ml/modeling/linear_model/elastic_net_cv.py +108 -135
- snowflake/ml/modeling/linear_model/gamma_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/huber_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/lars.py +108 -135
- snowflake/ml/modeling/linear_model/lars_cv.py +108 -135
- snowflake/ml/modeling/linear_model/lasso.py +108 -135
- snowflake/ml/modeling/linear_model/lasso_cv.py +108 -135
- snowflake/ml/modeling/linear_model/lasso_lars.py +108 -135
- snowflake/ml/modeling/linear_model/lasso_lars_cv.py +108 -135
- snowflake/ml/modeling/linear_model/lasso_lars_ic.py +108 -135
- snowflake/ml/modeling/linear_model/linear_regression.py +108 -135
- snowflake/ml/modeling/linear_model/logistic_regression.py +108 -135
- snowflake/ml/modeling/linear_model/logistic_regression_cv.py +108 -135
- snowflake/ml/modeling/linear_model/multi_task_elastic_net.py +108 -135
- snowflake/ml/modeling/linear_model/multi_task_elastic_net_cv.py +108 -135
- snowflake/ml/modeling/linear_model/multi_task_lasso.py +108 -135
- snowflake/ml/modeling/linear_model/multi_task_lasso_cv.py +108 -135
- snowflake/ml/modeling/linear_model/orthogonal_matching_pursuit.py +108 -135
- snowflake/ml/modeling/linear_model/passive_aggressive_classifier.py +108 -135
- snowflake/ml/modeling/linear_model/passive_aggressive_regressor.py +107 -135
- snowflake/ml/modeling/linear_model/perceptron.py +107 -135
- snowflake/ml/modeling/linear_model/poisson_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/ransac_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/ridge.py +108 -135
- snowflake/ml/modeling/linear_model/ridge_classifier.py +108 -135
- snowflake/ml/modeling/linear_model/ridge_classifier_cv.py +108 -135
- snowflake/ml/modeling/linear_model/ridge_cv.py +108 -135
- snowflake/ml/modeling/linear_model/sgd_classifier.py +108 -135
- snowflake/ml/modeling/linear_model/sgd_one_class_svm.py +106 -135
- snowflake/ml/modeling/linear_model/sgd_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/theil_sen_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/tweedie_regressor.py +108 -135
- snowflake/ml/modeling/manifold/isomap.py +106 -135
- snowflake/ml/modeling/manifold/mds.py +106 -135
- snowflake/ml/modeling/manifold/spectral_embedding.py +106 -135
- snowflake/ml/modeling/manifold/tsne.py +106 -135
- snowflake/ml/modeling/metrics/classification.py +196 -55
- snowflake/ml/modeling/metrics/correlation.py +4 -2
- snowflake/ml/modeling/metrics/covariance.py +7 -4
- snowflake/ml/modeling/metrics/ranking.py +32 -16
- snowflake/ml/modeling/metrics/regression.py +60 -32
- snowflake/ml/modeling/mixture/bayesian_gaussian_mixture.py +106 -135
- snowflake/ml/modeling/mixture/gaussian_mixture.py +106 -135
- snowflake/ml/modeling/model_selection/grid_search_cv.py +91 -148
- snowflake/ml/modeling/model_selection/randomized_search_cv.py +93 -154
- snowflake/ml/modeling/multiclass/one_vs_one_classifier.py +105 -132
- snowflake/ml/modeling/multiclass/one_vs_rest_classifier.py +108 -135
- snowflake/ml/modeling/multiclass/output_code_classifier.py +108 -135
- snowflake/ml/modeling/naive_bayes/bernoulli_nb.py +108 -135
- snowflake/ml/modeling/naive_bayes/categorical_nb.py +108 -135
- snowflake/ml/modeling/naive_bayes/complement_nb.py +108 -135
- snowflake/ml/modeling/naive_bayes/gaussian_nb.py +98 -125
- snowflake/ml/modeling/naive_bayes/multinomial_nb.py +107 -134
- snowflake/ml/modeling/neighbors/k_neighbors_classifier.py +108 -135
- snowflake/ml/modeling/neighbors/k_neighbors_regressor.py +108 -135
- snowflake/ml/modeling/neighbors/kernel_density.py +106 -135
- snowflake/ml/modeling/neighbors/local_outlier_factor.py +106 -135
- snowflake/ml/modeling/neighbors/nearest_centroid.py +108 -135
- snowflake/ml/modeling/neighbors/nearest_neighbors.py +106 -135
- snowflake/ml/modeling/neighbors/neighborhood_components_analysis.py +108 -135
- snowflake/ml/modeling/neighbors/radius_neighbors_classifier.py +108 -135
- snowflake/ml/modeling/neighbors/radius_neighbors_regressor.py +108 -135
- snowflake/ml/modeling/neural_network/bernoulli_rbm.py +106 -135
- snowflake/ml/modeling/neural_network/mlp_classifier.py +108 -135
- snowflake/ml/modeling/neural_network/mlp_regressor.py +108 -135
- snowflake/ml/modeling/parameters/disable_distributed_hpo.py +2 -6
- snowflake/ml/modeling/preprocessing/binarizer.py +25 -8
- snowflake/ml/modeling/preprocessing/k_bins_discretizer.py +9 -4
- snowflake/ml/modeling/preprocessing/label_encoder.py +31 -11
- snowflake/ml/modeling/preprocessing/max_abs_scaler.py +27 -9
- snowflake/ml/modeling/preprocessing/min_max_scaler.py +42 -14
- snowflake/ml/modeling/preprocessing/normalizer.py +9 -4
- snowflake/ml/modeling/preprocessing/one_hot_encoder.py +26 -10
- snowflake/ml/modeling/preprocessing/ordinal_encoder.py +37 -13
- snowflake/ml/modeling/preprocessing/polynomial_features.py +106 -135
- snowflake/ml/modeling/preprocessing/robust_scaler.py +39 -13
- snowflake/ml/modeling/preprocessing/standard_scaler.py +36 -12
- snowflake/ml/modeling/semi_supervised/label_propagation.py +108 -135
- snowflake/ml/modeling/semi_supervised/label_spreading.py +108 -135
- snowflake/ml/modeling/svm/linear_svc.py +108 -135
- snowflake/ml/modeling/svm/linear_svr.py +108 -135
- snowflake/ml/modeling/svm/nu_svc.py +108 -135
- snowflake/ml/modeling/svm/nu_svr.py +108 -135
- snowflake/ml/modeling/svm/svc.py +108 -135
- snowflake/ml/modeling/svm/svr.py +108 -135
- snowflake/ml/modeling/tree/decision_tree_classifier.py +108 -135
- snowflake/ml/modeling/tree/decision_tree_regressor.py +108 -135
- snowflake/ml/modeling/tree/extra_tree_classifier.py +108 -135
- snowflake/ml/modeling/tree/extra_tree_regressor.py +108 -135
- snowflake/ml/modeling/xgboost/xgb_classifier.py +108 -136
- snowflake/ml/modeling/xgboost/xgb_regressor.py +108 -136
- snowflake/ml/modeling/xgboost/xgbrf_classifier.py +108 -136
- snowflake/ml/modeling/xgboost/xgbrf_regressor.py +108 -136
- snowflake/ml/registry/model_registry.py +2 -0
- snowflake/ml/registry/registry.py +215 -0
- snowflake/ml/version.py +1 -1
- {snowflake_ml_python-1.1.0.dist-info → snowflake_ml_python-1.1.2.dist-info}/METADATA +34 -1
- snowflake_ml_python-1.1.2.dist-info/RECORD +347 -0
- snowflake_ml_python-1.1.0.dist-info/RECORD +0 -331
- {snowflake_ml_python-1.1.0.dist-info → snowflake_ml_python-1.1.2.dist-info}/WHEEL +0 -0
snowflake/ml/modeling/naive_bayes/categorical_nb.py

@@ -22,17 +22,19 @@ from sklearn.utils.metaestimators import available_if
 from snowflake.ml.modeling.framework.base import BaseTransformer, _process_cols
 from snowflake.ml._internal import telemetry
 from snowflake.ml._internal.exceptions import error_codes, exceptions, modeling_error_messages
+from snowflake.ml._internal.env_utils import SNOWML_SPROC_ENV
 from snowflake.ml._internal.utils import pkg_version_utils, identifier
-from snowflake.snowpark import DataFrame
+from snowflake.snowpark import DataFrame, Session
 from snowflake.snowpark._internal.type_utils import convert_sp_to_sf_type
 from snowflake.ml.modeling._internal.snowpark_handlers import SnowparkHandlers as HandlersImpl
+from snowflake.ml.modeling._internal.model_trainer_builder import ModelTrainerBuilder
+from snowflake.ml.modeling._internal.model_trainer import ModelTrainer
 from snowflake.ml.modeling._internal.estimator_utils import (
     gather_dependencies,
     original_estimator_has_callable,
     transform_snowml_obj_to_sklearn_obj,
     validate_sklearn_args,
 )
-from snowflake.ml.modeling._internal.snowpark_handlers import SklearnWrapperProvider
 from snowflake.ml.modeling._internal.estimator_protocols import FitPredictHandlers
 
 from snowflake.ml.model.model_signature import (
@@ -52,7 +54,6 @@ _PROJECT = "ModelDevelopment"
 _SUBPROJECT = "".join([s.capitalize() for s in "sklearn.naive_bayes".replace("sklearn.", "").split("_")])
 
 
-
 class CategoricalNB(BaseTransformer):
     r"""Naive Bayes classifier for categorical features
     For more details on this class, see [sklearn.naive_bayes.CategoricalNB]
@@ -60,6 +61,51 @@ class CategoricalNB(BaseTransformer):
 
     Parameters
     ----------
+
+    input_cols: Optional[Union[str, List[str]]]
+        A string or list of strings representing column names that contain features.
+        If this parameter is not specified, all columns in the input DataFrame except
+        the columns specified by label_cols, sample_weight_col, and passthrough_cols
+        parameters are considered input columns. Input columns can also be set after
+        initialization with the `set_input_cols` method.
+
+    label_cols: Optional[Union[str, List[str]]]
+        A string or list of strings representing column names that contain labels.
+        Label columns must be specified with this parameter during initialization
+        or with the `set_label_cols` method before fitting.
+
+    output_cols: Optional[Union[str, List[str]]]
+        A string or list of strings representing column names that will store the
+        output of predict and transform operations. The length of output_cols must
+        match the expected number of output columns from the specific predictor or
+        transformer class used.
+        If you omit this parameter, output column names are derived by adding an
+        OUTPUT_ prefix to the label column names for supervised estimators, or
+        OUTPUT_<IDX>for unsupervised estimators. These inferred output column names
+        work for predictors, but output_cols must be set explicitly for transformers.
+        In general, explicitly specifying output column names is clearer, especially
+        if you don’t specify the input column names.
+        To transform in place, pass the same names for input_cols and output_cols.
+        be set explicitly for transformers. Output columns can also be set after
+        initialization with the `set_output_cols` method.
+
+    sample_weight_col: Optional[str]
+        A string representing the column name containing the sample weights.
+        This argument is only required when working with weighted datasets. Sample
+        weight column can also be set after initialization with the
+        `set_sample_weight_col` method.
+
+    passthrough_cols: Optional[Union[str, List[str]]]
+        A string or a list of strings indicating column names to be excluded from any
+        operations (such as train, transform, or inference). These specified column(s)
+        will remain untouched throughout the process. This option is helpful in scenarios
+        requiring automatic input_cols inference, but need to avoid using specific
+        columns, like index columns, during training or inference. Passthrough columns
+        can also be set after initialization with the `set_passthrough_cols` method.
+
+    drop_input_cols: Optional[bool], default=False
+        If set, the response of predict(), transform() methods will not contain input columns.
+
     alpha: float, default=1.0
         Additive (Laplace/Lidstone) smoothing parameter
         (set alpha=0 and force_alpha=True, for no smoothing).
@@ -86,35 +132,6 @@ class CategoricalNB(BaseTransformer):
         minimum number of categories for the ith column of the input.
         - None (default): Determines the number of categories automatically
           from the training data.
-
-    input_cols: Optional[Union[str, List[str]]]
-        A string or list of strings representing column names that contain features.
-        If this parameter is not specified, all columns in the input DataFrame except
-        the columns specified by label_cols and sample_weight_col parameters are
-        considered input columns.
-
-    label_cols: Optional[Union[str, List[str]]]
-        A string or list of strings representing column names that contain labels.
-        This is a required param for estimators, as there is no way to infer these
-        columns. If this parameter is not specified, then object is fitted without
-        labels (like a transformer).
-
-    output_cols: Optional[Union[str, List[str]]]
-        A string or list of strings representing column names that will store the
-        output of predict and transform operations. The length of output_cols must
-        match the expected number of output columns from the specific estimator or
-        transformer class used.
-        If this parameter is not specified, output column names are derived by
-        adding an OUTPUT_ prefix to the label column names. These inferred output
-        column names work for estimator's predict() method, but output_cols must
-        be set explicitly for transformers.
-
-    sample_weight_col: Optional[str]
-        A string representing the column name containing the sample weights.
-        This argument is only required when working with weighted datasets.
-
-    drop_input_cols: Optional[bool], default=False
-        If set, the response of predict(), transform() methods will not contain input columns.
     """
 
     def __init__(  # type: ignore[no-untyped-def]
@@ -128,6 +145,7 @@ class CategoricalNB(BaseTransformer):
         input_cols: Optional[Union[str, Iterable[str]]] = None,
         output_cols: Optional[Union[str, Iterable[str]]] = None,
         label_cols: Optional[Union[str, Iterable[str]]] = None,
+        passthrough_cols: Optional[Union[str, Iterable[str]]] = None,
         drop_input_cols: Optional[bool] = False,
         sample_weight_col: Optional[str] = None,
     ) -> None:
@@ -136,9 +154,10 @@ class CategoricalNB(BaseTransformer):
         self.set_input_cols(input_cols)
         self.set_output_cols(output_cols)
         self.set_label_cols(label_cols)
+        self.set_passthrough_cols(passthrough_cols)
         self.set_drop_input_cols(drop_input_cols)
         self.set_sample_weight_col(sample_weight_col)
-        deps = set(
+        deps: Set[str] = set([f'numpy=={np.__version__}', f'scikit-learn=={sklearn.__version__}', f'cloudpickle=={cp.__version__}'])
 
         self._deps = list(deps)
 
@@ -151,13 +170,14 @@ class CategoricalNB(BaseTransformer):
             args=init_args,
             klass=sklearn.naive_bayes.CategoricalNB
         )
-        self._sklearn_object = sklearn.naive_bayes.CategoricalNB(
+        self._sklearn_object: Any = sklearn.naive_bayes.CategoricalNB(
             **cleaned_up_init_args,
         )
         self._model_signature_dict: Optional[Dict[str, ModelSignature]] = None
         # If user used snowpark dataframe during fit, here it stores the snowpark input_cols, otherwise the processed input_cols
         self._snowpark_cols: Optional[List[str]] = self.input_cols
-        self._handlers: FitPredictHandlers = HandlersImpl(class_name=CategoricalNB.__class__.__name__, subproject=_SUBPROJECT, autogenerated=True
+        self._handlers: FitPredictHandlers = HandlersImpl(class_name=CategoricalNB.__class__.__name__, subproject=_SUBPROJECT, autogenerated=True)
+        self._autogenerated = True
 
     def _get_rand_id(self) -> str:
         """
@@ -168,24 +188,6 @@ class CategoricalNB(BaseTransformer):
         """
         return str(uuid4()).replace("-", "_").upper()
 
-    def _infer_input_output_cols(self, dataset: Union[DataFrame, pd.DataFrame]) -> None:
-        """
-        Infer `self.input_cols` and `self.output_cols` if they are not explicitly set.
-
-        Args:
-            dataset: Input dataset.
-        """
-        if not self.input_cols:
-            cols = [
-                c for c in dataset.columns
-                if c not in self.get_label_cols() and c != self.sample_weight_col
-            ]
-            self.set_input_cols(input_cols=cols)
-
-        if not self.output_cols:
-            cols = [identifier.concat_names(ids=['OUTPUT_', c]) for c in self.label_cols]
-            self.set_output_cols(output_cols=cols)
-
     def set_input_cols(self, input_cols: Optional[Union[str, Iterable[str]]]) -> "CategoricalNB":
         """
         Input columns setter.
@@ -231,54 +233,48 @@ class CategoricalNB(BaseTransformer):
             self
         """
         self._infer_input_output_cols(dataset)
-        if isinstance(dataset,
-
-
-
-
-
-        self.
-
-
-
-
-
-
-
-
-
+        if isinstance(dataset, DataFrame):
+            session = dataset._session
+            assert session is not None  # keep mypy happy
+            # Validate that key package version in user workspace are supported in snowflake conda channel
+            # If customer doesn't have package in conda channel, replace the ones have the closest versions
+            self._deps = pkg_version_utils.get_valid_pkg_versions_supported_in_snowflake_conda_channel(
+                pkg_versions=self._get_dependencies(), session=session, subproject=_SUBPROJECT)
+
+            # Specify input columns so column pruning will be enforced
+            selected_cols = self._get_active_columns()
+            if len(selected_cols) > 0:
+                dataset = dataset.select(selected_cols)
+
+            self._snowpark_cols = dataset.select(self.input_cols).columns
+
+            # If we are already in a stored procedure, no need to kick off another one.
+            if SNOWML_SPROC_ENV in os.environ:
+                statement_params = telemetry.get_function_usage_statement_params(
+                    project=_PROJECT,
+                    subproject=_SUBPROJECT,
+                    function_name=telemetry.get_statement_params_full_func_name(inspect.currentframe(), CategoricalNB.__class__.__name__),
+                    api_calls=[Session.call],
+                    custom_tags=dict([("autogen", True)]) if self._autogenerated else None,
+                )
+                pd_df: pd.DataFrame = dataset.to_pandas(statement_params=statement_params)
+                pd_df.columns = dataset.columns
+                dataset = pd_df
+
+        model_trainer = ModelTrainerBuilder.build(
+            estimator=self._sklearn_object,
+            dataset=dataset,
+            input_cols=self.input_cols,
+            label_cols=self.label_cols,
+            sample_weight_col=self.sample_weight_col,
+            autogenerated=self._autogenerated,
+            subproject=_SUBPROJECT
+        )
+        self._sklearn_object = model_trainer.train()
         self._is_fitted = True
         self._get_model_signatures(dataset)
         return self
 
-    def _fit_snowpark(self, dataset: DataFrame) -> None:
-        session = dataset._session
-        assert session is not None  # keep mypy happy
-        # Validate that key package version in user workspace are supported in snowflake conda channel
-        # If customer doesn't have package in conda channel, replace the ones have the closest versions
-        self._deps = pkg_version_utils.get_valid_pkg_versions_supported_in_snowflake_conda_channel(
-            pkg_versions=self._get_dependencies(), session=session, subproject=_SUBPROJECT)
-
-        # Specify input columns so column pruning will be enforced
-        selected_cols = self._get_active_columns()
-        if len(selected_cols) > 0:
-            dataset = dataset.select(selected_cols)
-
-        estimator = self._sklearn_object
-        assert estimator is not None  # Keep mypy happy
-
-        self._snowpark_cols = dataset.select(self.input_cols).columns
-
-        self._sklearn_object = self._handlers.fit_snowpark(
-            dataset,
-            session,
-            estimator,
-            ["snowflake-snowpark-python"] + self._get_dependencies(),
-            self.input_cols,
-            self.label_cols,
-            self.sample_weight_col,
-        )
-
     def _get_pass_through_columns(self, dataset: DataFrame) -> List[str]:
         if self._drop_input_cols:
             return []
@@ -466,11 +462,6 @@ class CategoricalNB(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def predict(self, dataset: Union[DataFrame, pd.DataFrame]) -> Union[DataFrame, pd.DataFrame]:
         """Perform classification on an array of test vectors X
         For more details on this function, see [sklearn.naive_bayes.CategoricalNB.predict]
@@ -524,11 +515,6 @@ class CategoricalNB(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def transform(self, dataset: Union[DataFrame, pd.DataFrame]) -> Union[DataFrame, pd.DataFrame]:
         """Method not supported for this class.
 
@@ -585,7 +571,8 @@ class CategoricalNB(BaseTransformer):
         if False:
             self.fit(dataset)
             assert self._sklearn_object is not None
-
+            labels : npt.NDArray[Any] = self._sklearn_object.labels_
+            return labels
         else:
             raise NotImplementedError
 
@@ -621,6 +608,7 @@ class CategoricalNB(BaseTransformer):
             output_cols = []
 
         # Make sure column names are valid snowflake identifiers.
+        assert output_cols is not None  # Make MyPy happy
         rv = [identifier.rename_to_valid_snowflake_identifier(c) for c in output_cols]
 
         return rv
@@ -631,11 +619,6 @@ class CategoricalNB(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def predict_proba(
         self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "predict_proba_"
     ) -> Union[DataFrame, pd.DataFrame]:
@@ -678,11 +661,6 @@ class CategoricalNB(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def predict_log_proba(
         self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "predict_log_proba_"
     ) -> Union[DataFrame, pd.DataFrame]:
@@ -721,16 +699,6 @@ class CategoricalNB(BaseTransformer):
         return output_df
 
     @available_if(original_estimator_has_callable("decision_function"))  # type: ignore[misc]
-    @telemetry.send_api_usage_telemetry(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def decision_function(
         self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "decision_function_"
     ) -> Union[DataFrame, pd.DataFrame]:
@@ -831,11 +799,6 @@ class CategoricalNB(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def kneighbors(
         self,
         dataset: Union[DataFrame, pd.DataFrame],
@@ -895,18 +858,28 @@ class CategoricalNB(BaseTransformer):
         # For classifier, the type of predict is the same as the type of label
         if self._sklearn_object._estimator_type == 'classifier':
             # label columns is the desired type for output
-            outputs = _infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True)
+            outputs = list(_infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True))
             # rename the output columns
-            outputs = model_signature_utils.rename_features(outputs, self.output_cols)
+            outputs = list(model_signature_utils.rename_features(outputs, self.output_cols))
             self._model_signature_dict["predict"] = ModelSignature(inputs,
                                                     ([] if self._drop_input_cols else inputs)
                                                     + outputs)
+        # For mixture models that use the density mixin, `predict` returns the argmax of the log prob.
+        # For outlier models, returns -1 for outliers and 1 for inliers.
+        # Clusterer returns int64 cluster labels.
+        elif self._sklearn_object._estimator_type in ["DensityEstimator", "clusterer", "outlier_detector"]:
+            outputs = [FeatureSpec(dtype=DataType.INT64, name=c) for c in self.output_cols]
+            self._model_signature_dict["predict"] = ModelSignature(inputs,
+                                                    ([] if self._drop_input_cols else inputs)
+                                                    + outputs)
+
         # For regressor, the type of predict is float64
         elif self._sklearn_object._estimator_type == 'regressor':
             outputs = [FeatureSpec(dtype=DataType.DOUBLE, name=c) for c in self.output_cols]
             self._model_signature_dict["predict"] = ModelSignature(inputs,
                                                     ([] if self._drop_input_cols else inputs)
                                                     + outputs)
+
         for prob_func in PROB_FUNCTIONS:
             if hasattr(self, prob_func):
                 output_cols_prefix: str = f"{prob_func}_"