snowflake-ml-python 1.1.0__py3-none-any.whl → 1.1.2__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the package versions exactly as they appear in their public registries.
- snowflake/cortex/_complete.py +1 -1
- snowflake/cortex/_extract_answer.py +1 -1
- snowflake/cortex/_sentiment.py +1 -1
- snowflake/cortex/_summarize.py +1 -1
- snowflake/cortex/_translate.py +1 -1
- snowflake/ml/_internal/env_utils.py +68 -6
- snowflake/ml/_internal/file_utils.py +34 -4
- snowflake/ml/_internal/telemetry.py +79 -91
- snowflake/ml/_internal/utils/identifier.py +78 -72
- snowflake/ml/_internal/utils/retryable_http.py +16 -4
- snowflake/ml/_internal/utils/spcs_attribution_utils.py +122 -0
- snowflake/ml/dataset/dataset.py +1 -1
- snowflake/ml/model/_api.py +21 -14
- snowflake/ml/model/_client/model/model_impl.py +176 -0
- snowflake/ml/model/_client/model/model_method_info.py +19 -0
- snowflake/ml/model/_client/model/model_version_impl.py +291 -0
- snowflake/ml/model/_client/ops/metadata_ops.py +107 -0
- snowflake/ml/model/_client/ops/model_ops.py +308 -0
- snowflake/ml/model/_client/sql/model.py +75 -0
- snowflake/ml/model/_client/sql/model_version.py +213 -0
- snowflake/ml/model/_client/sql/stage.py +40 -0
- snowflake/ml/model/_deploy_client/image_builds/server_image_builder.py +3 -4
- snowflake/ml/model/_deploy_client/image_builds/templates/image_build_job_spec_template +24 -8
- snowflake/ml/model/_deploy_client/image_builds/templates/kaniko_shell_script_template +23 -0
- snowflake/ml/model/_deploy_client/snowservice/deploy.py +14 -2
- snowflake/ml/model/_deploy_client/utils/constants.py +1 -0
- snowflake/ml/model/_deploy_client/warehouse/deploy.py +2 -2
- snowflake/ml/model/_model_composer/model_composer.py +31 -9
- snowflake/ml/model/_model_composer/model_manifest/model_manifest.py +25 -10
- snowflake/ml/model/_model_composer/model_manifest/model_manifest_schema.py +2 -2
- snowflake/ml/model/_model_composer/model_method/infer_function.py_template +2 -1
- snowflake/ml/model/_model_composer/model_method/model_method.py +34 -3
- snowflake/ml/model/_model_composer/model_runtime/model_runtime.py +1 -1
- snowflake/ml/model/_packager/model_handlers/huggingface_pipeline.py +3 -1
- snowflake/ml/model/_packager/model_handlers/snowmlmodel.py +10 -28
- snowflake/ml/model/_packager/model_meta/model_meta.py +18 -16
- snowflake/ml/model/_signatures/snowpark_handler.py +1 -1
- snowflake/ml/model/model_signature.py +108 -53
- snowflake/ml/model/type_hints.py +1 -0
- snowflake/ml/modeling/_internal/distributed_hpo_trainer.py +554 -0
- snowflake/ml/modeling/_internal/estimator_protocols.py +1 -60
- snowflake/ml/modeling/_internal/model_specifications.py +146 -0
- snowflake/ml/modeling/_internal/model_trainer.py +13 -0
- snowflake/ml/modeling/_internal/model_trainer_builder.py +78 -0
- snowflake/ml/modeling/_internal/pandas_trainer.py +54 -0
- snowflake/ml/modeling/_internal/snowpark_handlers.py +6 -760
- snowflake/ml/modeling/_internal/snowpark_trainer.py +331 -0
- snowflake/ml/modeling/calibration/calibrated_classifier_cv.py +108 -135
- snowflake/ml/modeling/cluster/affinity_propagation.py +106 -135
- snowflake/ml/modeling/cluster/agglomerative_clustering.py +106 -135
- snowflake/ml/modeling/cluster/birch.py +106 -135
- snowflake/ml/modeling/cluster/bisecting_k_means.py +106 -135
- snowflake/ml/modeling/cluster/dbscan.py +106 -135
- snowflake/ml/modeling/cluster/feature_agglomeration.py +106 -135
- snowflake/ml/modeling/cluster/k_means.py +105 -135
- snowflake/ml/modeling/cluster/mean_shift.py +106 -135
- snowflake/ml/modeling/cluster/mini_batch_k_means.py +105 -135
- snowflake/ml/modeling/cluster/optics.py +106 -135
- snowflake/ml/modeling/cluster/spectral_biclustering.py +106 -135
- snowflake/ml/modeling/cluster/spectral_clustering.py +106 -135
- snowflake/ml/modeling/cluster/spectral_coclustering.py +106 -135
- snowflake/ml/modeling/compose/column_transformer.py +106 -135
- snowflake/ml/modeling/compose/transformed_target_regressor.py +108 -135
- snowflake/ml/modeling/covariance/elliptic_envelope.py +106 -135
- snowflake/ml/modeling/covariance/empirical_covariance.py +99 -128
- snowflake/ml/modeling/covariance/graphical_lasso.py +106 -135
- snowflake/ml/modeling/covariance/graphical_lasso_cv.py +106 -135
- snowflake/ml/modeling/covariance/ledoit_wolf.py +104 -133
- snowflake/ml/modeling/covariance/min_cov_det.py +106 -135
- snowflake/ml/modeling/covariance/oas.py +99 -128
- snowflake/ml/modeling/covariance/shrunk_covariance.py +103 -132
- snowflake/ml/modeling/decomposition/dictionary_learning.py +106 -135
- snowflake/ml/modeling/decomposition/factor_analysis.py +106 -135
- snowflake/ml/modeling/decomposition/fast_ica.py +106 -135
- snowflake/ml/modeling/decomposition/incremental_pca.py +106 -135
- snowflake/ml/modeling/decomposition/kernel_pca.py +106 -135
- snowflake/ml/modeling/decomposition/mini_batch_dictionary_learning.py +106 -135
- snowflake/ml/modeling/decomposition/mini_batch_sparse_pca.py +106 -135
- snowflake/ml/modeling/decomposition/pca.py +106 -135
- snowflake/ml/modeling/decomposition/sparse_pca.py +106 -135
- snowflake/ml/modeling/decomposition/truncated_svd.py +106 -135
- snowflake/ml/modeling/discriminant_analysis/linear_discriminant_analysis.py +108 -135
- snowflake/ml/modeling/discriminant_analysis/quadratic_discriminant_analysis.py +108 -135
- snowflake/ml/modeling/ensemble/ada_boost_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/ada_boost_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/bagging_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/bagging_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/extra_trees_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/extra_trees_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/gradient_boosting_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/gradient_boosting_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/hist_gradient_boosting_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/hist_gradient_boosting_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/isolation_forest.py +106 -135
- snowflake/ml/modeling/ensemble/random_forest_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/random_forest_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/stacking_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/voting_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/voting_regressor.py +108 -135
- snowflake/ml/modeling/feature_selection/generic_univariate_select.py +101 -128
- snowflake/ml/modeling/feature_selection/select_fdr.py +99 -126
- snowflake/ml/modeling/feature_selection/select_fpr.py +99 -126
- snowflake/ml/modeling/feature_selection/select_fwe.py +99 -126
- snowflake/ml/modeling/feature_selection/select_k_best.py +100 -127
- snowflake/ml/modeling/feature_selection/select_percentile.py +99 -126
- snowflake/ml/modeling/feature_selection/sequential_feature_selector.py +106 -135
- snowflake/ml/modeling/feature_selection/variance_threshold.py +95 -124
- snowflake/ml/modeling/framework/base.py +83 -1
- snowflake/ml/modeling/gaussian_process/gaussian_process_classifier.py +108 -135
- snowflake/ml/modeling/gaussian_process/gaussian_process_regressor.py +108 -135
- snowflake/ml/modeling/impute/iterative_imputer.py +106 -135
- snowflake/ml/modeling/impute/knn_imputer.py +106 -135
- snowflake/ml/modeling/impute/missing_indicator.py +106 -135
- snowflake/ml/modeling/impute/simple_imputer.py +9 -1
- snowflake/ml/modeling/kernel_approximation/additive_chi2_sampler.py +96 -125
- snowflake/ml/modeling/kernel_approximation/nystroem.py +106 -135
- snowflake/ml/modeling/kernel_approximation/polynomial_count_sketch.py +106 -135
- snowflake/ml/modeling/kernel_approximation/rbf_sampler.py +105 -134
- snowflake/ml/modeling/kernel_approximation/skewed_chi2_sampler.py +103 -132
- snowflake/ml/modeling/kernel_ridge/kernel_ridge.py +108 -135
- snowflake/ml/modeling/lightgbm/lgbm_classifier.py +90 -118
- snowflake/ml/modeling/lightgbm/lgbm_regressor.py +90 -118
- snowflake/ml/modeling/linear_model/ard_regression.py +108 -135
- snowflake/ml/modeling/linear_model/bayesian_ridge.py +108 -135
- snowflake/ml/modeling/linear_model/elastic_net.py +108 -135
- snowflake/ml/modeling/linear_model/elastic_net_cv.py +108 -135
- snowflake/ml/modeling/linear_model/gamma_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/huber_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/lars.py +108 -135
- snowflake/ml/modeling/linear_model/lars_cv.py +108 -135
- snowflake/ml/modeling/linear_model/lasso.py +108 -135
- snowflake/ml/modeling/linear_model/lasso_cv.py +108 -135
- snowflake/ml/modeling/linear_model/lasso_lars.py +108 -135
- snowflake/ml/modeling/linear_model/lasso_lars_cv.py +108 -135
- snowflake/ml/modeling/linear_model/lasso_lars_ic.py +108 -135
- snowflake/ml/modeling/linear_model/linear_regression.py +108 -135
- snowflake/ml/modeling/linear_model/logistic_regression.py +108 -135
- snowflake/ml/modeling/linear_model/logistic_regression_cv.py +108 -135
- snowflake/ml/modeling/linear_model/multi_task_elastic_net.py +108 -135
- snowflake/ml/modeling/linear_model/multi_task_elastic_net_cv.py +108 -135
- snowflake/ml/modeling/linear_model/multi_task_lasso.py +108 -135
- snowflake/ml/modeling/linear_model/multi_task_lasso_cv.py +108 -135
- snowflake/ml/modeling/linear_model/orthogonal_matching_pursuit.py +108 -135
- snowflake/ml/modeling/linear_model/passive_aggressive_classifier.py +108 -135
- snowflake/ml/modeling/linear_model/passive_aggressive_regressor.py +107 -135
- snowflake/ml/modeling/linear_model/perceptron.py +107 -135
- snowflake/ml/modeling/linear_model/poisson_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/ransac_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/ridge.py +108 -135
- snowflake/ml/modeling/linear_model/ridge_classifier.py +108 -135
- snowflake/ml/modeling/linear_model/ridge_classifier_cv.py +108 -135
- snowflake/ml/modeling/linear_model/ridge_cv.py +108 -135
- snowflake/ml/modeling/linear_model/sgd_classifier.py +108 -135
- snowflake/ml/modeling/linear_model/sgd_one_class_svm.py +106 -135
- snowflake/ml/modeling/linear_model/sgd_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/theil_sen_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/tweedie_regressor.py +108 -135
- snowflake/ml/modeling/manifold/isomap.py +106 -135
- snowflake/ml/modeling/manifold/mds.py +106 -135
- snowflake/ml/modeling/manifold/spectral_embedding.py +106 -135
- snowflake/ml/modeling/manifold/tsne.py +106 -135
- snowflake/ml/modeling/metrics/classification.py +196 -55
- snowflake/ml/modeling/metrics/correlation.py +4 -2
- snowflake/ml/modeling/metrics/covariance.py +7 -4
- snowflake/ml/modeling/metrics/ranking.py +32 -16
- snowflake/ml/modeling/metrics/regression.py +60 -32
- snowflake/ml/modeling/mixture/bayesian_gaussian_mixture.py +106 -135
- snowflake/ml/modeling/mixture/gaussian_mixture.py +106 -135
- snowflake/ml/modeling/model_selection/grid_search_cv.py +91 -148
- snowflake/ml/modeling/model_selection/randomized_search_cv.py +93 -154
- snowflake/ml/modeling/multiclass/one_vs_one_classifier.py +105 -132
- snowflake/ml/modeling/multiclass/one_vs_rest_classifier.py +108 -135
- snowflake/ml/modeling/multiclass/output_code_classifier.py +108 -135
- snowflake/ml/modeling/naive_bayes/bernoulli_nb.py +108 -135
- snowflake/ml/modeling/naive_bayes/categorical_nb.py +108 -135
- snowflake/ml/modeling/naive_bayes/complement_nb.py +108 -135
- snowflake/ml/modeling/naive_bayes/gaussian_nb.py +98 -125
- snowflake/ml/modeling/naive_bayes/multinomial_nb.py +107 -134
- snowflake/ml/modeling/neighbors/k_neighbors_classifier.py +108 -135
- snowflake/ml/modeling/neighbors/k_neighbors_regressor.py +108 -135
- snowflake/ml/modeling/neighbors/kernel_density.py +106 -135
- snowflake/ml/modeling/neighbors/local_outlier_factor.py +106 -135
- snowflake/ml/modeling/neighbors/nearest_centroid.py +108 -135
- snowflake/ml/modeling/neighbors/nearest_neighbors.py +106 -135
- snowflake/ml/modeling/neighbors/neighborhood_components_analysis.py +108 -135
- snowflake/ml/modeling/neighbors/radius_neighbors_classifier.py +108 -135
- snowflake/ml/modeling/neighbors/radius_neighbors_regressor.py +108 -135
- snowflake/ml/modeling/neural_network/bernoulli_rbm.py +106 -135
- snowflake/ml/modeling/neural_network/mlp_classifier.py +108 -135
- snowflake/ml/modeling/neural_network/mlp_regressor.py +108 -135
- snowflake/ml/modeling/parameters/disable_distributed_hpo.py +2 -6
- snowflake/ml/modeling/preprocessing/binarizer.py +25 -8
- snowflake/ml/modeling/preprocessing/k_bins_discretizer.py +9 -4
- snowflake/ml/modeling/preprocessing/label_encoder.py +31 -11
- snowflake/ml/modeling/preprocessing/max_abs_scaler.py +27 -9
- snowflake/ml/modeling/preprocessing/min_max_scaler.py +42 -14
- snowflake/ml/modeling/preprocessing/normalizer.py +9 -4
- snowflake/ml/modeling/preprocessing/one_hot_encoder.py +26 -10
- snowflake/ml/modeling/preprocessing/ordinal_encoder.py +37 -13
- snowflake/ml/modeling/preprocessing/polynomial_features.py +106 -135
- snowflake/ml/modeling/preprocessing/robust_scaler.py +39 -13
- snowflake/ml/modeling/preprocessing/standard_scaler.py +36 -12
- snowflake/ml/modeling/semi_supervised/label_propagation.py +108 -135
- snowflake/ml/modeling/semi_supervised/label_spreading.py +108 -135
- snowflake/ml/modeling/svm/linear_svc.py +108 -135
- snowflake/ml/modeling/svm/linear_svr.py +108 -135
- snowflake/ml/modeling/svm/nu_svc.py +108 -135
- snowflake/ml/modeling/svm/nu_svr.py +108 -135
- snowflake/ml/modeling/svm/svc.py +108 -135
- snowflake/ml/modeling/svm/svr.py +108 -135
- snowflake/ml/modeling/tree/decision_tree_classifier.py +108 -135
- snowflake/ml/modeling/tree/decision_tree_regressor.py +108 -135
- snowflake/ml/modeling/tree/extra_tree_classifier.py +108 -135
- snowflake/ml/modeling/tree/extra_tree_regressor.py +108 -135
- snowflake/ml/modeling/xgboost/xgb_classifier.py +108 -136
- snowflake/ml/modeling/xgboost/xgb_regressor.py +108 -136
- snowflake/ml/modeling/xgboost/xgbrf_classifier.py +108 -136
- snowflake/ml/modeling/xgboost/xgbrf_regressor.py +108 -136
- snowflake/ml/registry/model_registry.py +2 -0
- snowflake/ml/registry/registry.py +215 -0
- snowflake/ml/version.py +1 -1
- {snowflake_ml_python-1.1.0.dist-info → snowflake_ml_python-1.1.2.dist-info}/METADATA +34 -1
- snowflake_ml_python-1.1.2.dist-info/RECORD +347 -0
- snowflake_ml_python-1.1.0.dist-info/RECORD +0 -331
- {snowflake_ml_python-1.1.0.dist-info → snowflake_ml_python-1.1.2.dist-info}/WHEEL +0 -0
The hunks below are from snowflake/ml/modeling/ensemble/random_forest_classifier.py; judging by their matching line counts, the other modeling estimator files listed above receive essentially the same autogenerated changes.

@@ -22,17 +22,19 @@ from sklearn.utils.metaestimators import available_if
 from snowflake.ml.modeling.framework.base import BaseTransformer, _process_cols
 from snowflake.ml._internal import telemetry
 from snowflake.ml._internal.exceptions import error_codes, exceptions, modeling_error_messages
+from snowflake.ml._internal.env_utils import SNOWML_SPROC_ENV
 from snowflake.ml._internal.utils import pkg_version_utils, identifier
-from snowflake.snowpark import DataFrame
+from snowflake.snowpark import DataFrame, Session
 from snowflake.snowpark._internal.type_utils import convert_sp_to_sf_type
 from snowflake.ml.modeling._internal.snowpark_handlers import SnowparkHandlers as HandlersImpl
+from snowflake.ml.modeling._internal.model_trainer_builder import ModelTrainerBuilder
+from snowflake.ml.modeling._internal.model_trainer import ModelTrainer
 from snowflake.ml.modeling._internal.estimator_utils import (
     gather_dependencies,
     original_estimator_has_callable,
     transform_snowml_obj_to_sklearn_obj,
     validate_sklearn_args,
 )
-from snowflake.ml.modeling._internal.snowpark_handlers import SklearnWrapperProvider
 from snowflake.ml.modeling._internal.estimator_protocols import FitPredictHandlers

 from snowflake.ml.model.model_signature import (
@@ -52,7 +54,6 @@ _PROJECT = "ModelDevelopment"
 _SUBPROJECT = "".join([s.capitalize() for s in "sklearn.ensemble".replace("sklearn.", "").split("_")])


-
 class RandomForestClassifier(BaseTransformer):
     r"""A random forest classifier
     For more details on this class, see [sklearn.ensemble.RandomForestClassifier]
@@ -60,6 +61,51 @@ class RandomForestClassifier(BaseTransformer):

     Parameters
     ----------
+
+    input_cols: Optional[Union[str, List[str]]]
+        A string or list of strings representing column names that contain features.
+        If this parameter is not specified, all columns in the input DataFrame except
+        the columns specified by label_cols, sample_weight_col, and passthrough_cols
+        parameters are considered input columns. Input columns can also be set after
+        initialization with the `set_input_cols` method.
+
+    label_cols: Optional[Union[str, List[str]]]
+        A string or list of strings representing column names that contain labels.
+        Label columns must be specified with this parameter during initialization
+        or with the `set_label_cols` method before fitting.
+
+    output_cols: Optional[Union[str, List[str]]]
+        A string or list of strings representing column names that will store the
+        output of predict and transform operations. The length of output_cols must
+        match the expected number of output columns from the specific predictor or
+        transformer class used.
+        If you omit this parameter, output column names are derived by adding an
+        OUTPUT_ prefix to the label column names for supervised estimators, or
+        OUTPUT_<IDX>for unsupervised estimators. These inferred output column names
+        work for predictors, but output_cols must be set explicitly for transformers.
+        In general, explicitly specifying output column names is clearer, especially
+        if you don’t specify the input column names.
+        To transform in place, pass the same names for input_cols and output_cols.
+        be set explicitly for transformers. Output columns can also be set after
+        initialization with the `set_output_cols` method.
+
+    sample_weight_col: Optional[str]
+        A string representing the column name containing the sample weights.
+        This argument is only required when working with weighted datasets. Sample
+        weight column can also be set after initialization with the
+        `set_sample_weight_col` method.
+
+    passthrough_cols: Optional[Union[str, List[str]]]
+        A string or a list of strings indicating column names to be excluded from any
+        operations (such as train, transform, or inference). These specified column(s)
+        will remain untouched throughout the process. This option is helpful in scenarios
+        requiring automatic input_cols inference, but need to avoid using specific
+        columns, like index columns, during training or inference. Passthrough columns
+        can also be set after initialization with the `set_passthrough_cols` method.
+
+    drop_input_cols: Optional[bool], default=False
+        If set, the response of predict(), transform() methods will not contain input columns.
+
     n_estimators: int, default=100
         The number of trees in the forest.

@@ -207,35 +253,6 @@ class RandomForestClassifier(BaseTransformer):
         - If int, then draw `max_samples` samples.
         - If float, then draw `max(round(n_samples * max_samples), 1)` samples. Thus,
           `max_samples` should be in the interval `(0.0, 1.0]`.
-
-    input_cols: Optional[Union[str, List[str]]]
-        A string or list of strings representing column names that contain features.
-        If this parameter is not specified, all columns in the input DataFrame except
-        the columns specified by label_cols and sample_weight_col parameters are
-        considered input columns.
-
-    label_cols: Optional[Union[str, List[str]]]
-        A string or list of strings representing column names that contain labels.
-        This is a required param for estimators, as there is no way to infer these
-        columns. If this parameter is not specified, then object is fitted without
-        labels (like a transformer).
-
-    output_cols: Optional[Union[str, List[str]]]
-        A string or list of strings representing column names that will store the
-        output of predict and transform operations. The length of output_cols must
-        match the expected number of output columns from the specific estimator or
-        transformer class used.
-        If this parameter is not specified, output column names are derived by
-        adding an OUTPUT_ prefix to the label column names. These inferred output
-        column names work for estimator's predict() method, but output_cols must
-        be set explicitly for transformers.
-
-    sample_weight_col: Optional[str]
-        A string representing the column name containing the sample weights.
-        This argument is only required when working with weighted datasets.
-
-    drop_input_cols: Optional[bool], default=False
-        If set, the response of predict(), transform() methods will not contain input columns.
     """

     def __init__(  # type: ignore[no-untyped-def]
@@ -262,6 +279,7 @@ class RandomForestClassifier(BaseTransformer):
         input_cols: Optional[Union[str, Iterable[str]]] = None,
         output_cols: Optional[Union[str, Iterable[str]]] = None,
         label_cols: Optional[Union[str, Iterable[str]]] = None,
+        passthrough_cols: Optional[Union[str, Iterable[str]]] = None,
         drop_input_cols: Optional[bool] = False,
         sample_weight_col: Optional[str] = None,
     ) -> None:
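For context, a minimal usage sketch of the constructor change above: the column names and data are invented for illustration, and the import path simply follows the module path in the file list, so treat everything except the parameter names as an assumption.

# Hypothetical example: exercises the new passthrough_cols argument added in 1.1.2.
import pandas as pd
from snowflake.ml.modeling.ensemble import RandomForestClassifier

df = pd.DataFrame({
    "ROW_ID": [1, 2, 3, 4],            # kept untouched via passthrough_cols
    "FEAT_A": [0.1, 0.4, 0.2, 0.9],
    "FEAT_B": [1.0, 0.5, 0.3, 0.8],
    "TARGET": [0, 1, 0, 1],
})

clf = RandomForestClassifier(
    input_cols=["FEAT_A", "FEAT_B"],   # optional; inferred when omitted
    label_cols=["TARGET"],
    output_cols=["OUTPUT_TARGET"],
    passthrough_cols=["ROW_ID"],       # new in this release
    n_estimators=100,
)
clf.fit(df)
predictions = clf.predict(df)          # pandas in, pandas out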
@@ -270,9 +288,10 @@ class RandomForestClassifier(BaseTransformer):
         self.set_input_cols(input_cols)
         self.set_output_cols(output_cols)
         self.set_label_cols(label_cols)
+        self.set_passthrough_cols(passthrough_cols)
         self.set_drop_input_cols(drop_input_cols)
         self.set_sample_weight_col(sample_weight_col)
-        deps = set(
+        deps: Set[str] = set([f'numpy=={np.__version__}', f'scikit-learn=={sklearn.__version__}', f'cloudpickle=={cp.__version__}'])

         self._deps = list(deps)

@@ -298,13 +317,14 @@ class RandomForestClassifier(BaseTransformer):
             args=init_args,
             klass=sklearn.ensemble.RandomForestClassifier
         )
-        self._sklearn_object = sklearn.ensemble.RandomForestClassifier(
+        self._sklearn_object: Any = sklearn.ensemble.RandomForestClassifier(
             **cleaned_up_init_args,
         )
         self._model_signature_dict: Optional[Dict[str, ModelSignature]] = None
         # If user used snowpark dataframe during fit, here it stores the snowpark input_cols, otherwise the processed input_cols
         self._snowpark_cols: Optional[List[str]] = self.input_cols
-        self._handlers: FitPredictHandlers = HandlersImpl(class_name=RandomForestClassifier.__class__.__name__, subproject=_SUBPROJECT, autogenerated=True
+        self._handlers: FitPredictHandlers = HandlersImpl(class_name=RandomForestClassifier.__class__.__name__, subproject=_SUBPROJECT, autogenerated=True)
+        self._autogenerated = True

     def _get_rand_id(self) -> str:
         """
@@ -315,24 +335,6 @@ class RandomForestClassifier(BaseTransformer):
         """
         return str(uuid4()).replace("-", "_").upper()

-    def _infer_input_output_cols(self, dataset: Union[DataFrame, pd.DataFrame]) -> None:
-        """
-        Infer `self.input_cols` and `self.output_cols` if they are not explicitly set.
-
-        Args:
-            dataset: Input dataset.
-        """
-        if not self.input_cols:
-            cols = [
-                c for c in dataset.columns
-                if c not in self.get_label_cols() and c != self.sample_weight_col
-            ]
-            self.set_input_cols(input_cols=cols)
-
-        if not self.output_cols:
-            cols = [identifier.concat_names(ids=['OUTPUT_', c]) for c in self.label_cols]
-            self.set_output_cols(output_cols=cols)
-
     def set_input_cols(self, input_cols: Optional[Union[str, Iterable[str]]]) -> "RandomForestClassifier":
         """
         Input columns setter.
@@ -378,54 +380,48 @@ class RandomForestClassifier(BaseTransformer):
             self
         """
         self._infer_input_output_cols(dataset)
-        if isinstance(dataset,
-        self.
[the remaining removed lines of the old fit() body were not captured in this diff view]
+        if isinstance(dataset, DataFrame):
+            session = dataset._session
+            assert session is not None # keep mypy happy
+            # Validate that key package version in user workspace are supported in snowflake conda channel
+            # If customer doesn't have package in conda channel, replace the ones have the closest versions
+            self._deps = pkg_version_utils.get_valid_pkg_versions_supported_in_snowflake_conda_channel(
+                pkg_versions=self._get_dependencies(), session=session, subproject=_SUBPROJECT)
+
+            # Specify input columns so column pruning will be enforced
+            selected_cols = self._get_active_columns()
+            if len(selected_cols) > 0:
+                dataset = dataset.select(selected_cols)
+
+            self._snowpark_cols = dataset.select(self.input_cols).columns
+
+        # If we are already in a stored procedure, no need to kick off another one.
+        if SNOWML_SPROC_ENV in os.environ:
+            statement_params = telemetry.get_function_usage_statement_params(
+                project=_PROJECT,
+                subproject=_SUBPROJECT,
+                function_name=telemetry.get_statement_params_full_func_name(inspect.currentframe(), RandomForestClassifier.__class__.__name__),
+                api_calls=[Session.call],
+                custom_tags=dict([("autogen", True)]) if self._autogenerated else None,
+            )
+            pd_df: pd.DataFrame = dataset.to_pandas(statement_params=statement_params)
+            pd_df.columns = dataset.columns
+            dataset = pd_df
+
+        model_trainer = ModelTrainerBuilder.build(
+            estimator=self._sklearn_object,
+            dataset=dataset,
+            input_cols=self.input_cols,
+            label_cols=self.label_cols,
+            sample_weight_col=self.sample_weight_col,
+            autogenerated=self._autogenerated,
+            subproject=_SUBPROJECT
+        )
+        self._sklearn_object = model_trainer.train()
         self._is_fitted = True
         self._get_model_signatures(dataset)
         return self

-    def _fit_snowpark(self, dataset: DataFrame) -> None:
-        session = dataset._session
-        assert session is not None # keep mypy happy
-        # Validate that key package version in user workspace are supported in snowflake conda channel
-        # If customer doesn't have package in conda channel, replace the ones have the closest versions
-        self._deps = pkg_version_utils.get_valid_pkg_versions_supported_in_snowflake_conda_channel(
-            pkg_versions=self._get_dependencies(), session=session, subproject=_SUBPROJECT)
-
-        # Specify input columns so column pruning will be enforced
-        selected_cols = self._get_active_columns()
-        if len(selected_cols) > 0:
-            dataset = dataset.select(selected_cols)
-
-        estimator = self._sklearn_object
-        assert estimator is not None # Keep mypy happy
-
-        self._snowpark_cols = dataset.select(self.input_cols).columns
-
-        self._sklearn_object = self._handlers.fit_snowpark(
-            dataset,
-            session,
-            estimator,
-            ["snowflake-snowpark-python"] + self._get_dependencies(),
-            self.input_cols,
-            self.label_cols,
-            self.sample_weight_col,
-        )
-
     def _get_pass_through_columns(self, dataset: DataFrame) -> List[str]:
         if self._drop_input_cols:
             return []
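The rewritten fit() above replaces the removed _fit_snowpark() helper with ModelTrainerBuilder.build(...).train(), backed by the new _internal/model_trainer_builder.py, pandas_trainer.py, and snowpark_trainer.py files in the list at the top. Below is a rough sketch of that dispatch idea; the "Sketch" class names and signatures are invented, and only the build(...).train() shape comes from the diff itself.

# Illustrative sketch only: the real builder lives in
# snowflake/ml/modeling/_internal/model_trainer_builder.py and may differ.
from typing import Any, List, Optional

import pandas as pd


class PandasTrainerSketch:
    """Fits a scikit-learn estimator in-process on a local pandas DataFrame."""

    def __init__(self, estimator: Any, dataset: pd.DataFrame, input_cols: List[str],
                 label_cols: List[str], sample_weight_col: Optional[str] = None) -> None:
        self.estimator = estimator
        self.dataset = dataset
        self.input_cols = input_cols
        self.label_cols = label_cols
        self.sample_weight_col = sample_weight_col

    def train(self) -> Any:
        fit_kwargs = {}
        if self.sample_weight_col is not None:
            fit_kwargs["sample_weight"] = self.dataset[self.sample_weight_col]
        return self.estimator.fit(
            self.dataset[self.input_cols],
            self.dataset[self.label_cols].squeeze(),
            **fit_kwargs,
        )


class ModelTrainerBuilderSketch:
    """Chooses a trainer based on the dataset type, mirroring the call in fit() above."""

    @classmethod
    def build(cls, estimator: Any, dataset: Any, **kwargs: Any) -> PandasTrainerSketch:
        if isinstance(dataset, pd.DataFrame):
            return PandasTrainerSketch(estimator, dataset, **kwargs)
        # A Snowpark DataFrame would instead be handed to a stored-procedure-based
        # trainer (see snowpark_trainer.py), which runs the fit inside Snowflake.
        raise NotImplementedError("Snowpark path omitted in this sketch")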
@@ -613,11 +609,6 @@ class RandomForestClassifier(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def predict(self, dataset: Union[DataFrame, pd.DataFrame]) -> Union[DataFrame, pd.DataFrame]:
         """Predict class for X
         For more details on this function, see [sklearn.ensemble.RandomForestClassifier.predict]
@@ -671,11 +662,6 @@ class RandomForestClassifier(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def transform(self, dataset: Union[DataFrame, pd.DataFrame]) -> Union[DataFrame, pd.DataFrame]:
         """Method not supported for this class.

@@ -732,7 +718,8 @@ class RandomForestClassifier(BaseTransformer):
         if False:
             self.fit(dataset)
             assert self._sklearn_object is not None
-
+            labels : npt.NDArray[Any] = self._sklearn_object.labels_
+            return labels
         else:
             raise NotImplementedError

@@ -768,6 +755,7 @@ class RandomForestClassifier(BaseTransformer):
             output_cols = []

         # Make sure column names are valid snowflake identifiers.
+        assert output_cols is not None # Make MyPy happy
         rv = [identifier.rename_to_valid_snowflake_identifier(c) for c in output_cols]

         return rv
@@ -778,11 +766,6 @@ class RandomForestClassifier(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def predict_proba(
         self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "predict_proba_"
     ) -> Union[DataFrame, pd.DataFrame]:
@@ -825,11 +808,6 @@ class RandomForestClassifier(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def predict_log_proba(
         self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "predict_log_proba_"
     ) -> Union[DataFrame, pd.DataFrame]:
@@ -868,16 +846,6 @@ class RandomForestClassifier(BaseTransformer):
         return output_df

     @available_if(original_estimator_has_callable("decision_function")) # type: ignore[misc]
-    @telemetry.send_api_usage_telemetry(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def decision_function(
         self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "decision_function_"
     ) -> Union[DataFrame, pd.DataFrame]:
@@ -978,11 +946,6 @@ class RandomForestClassifier(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def kneighbors(
         self,
         dataset: Union[DataFrame, pd.DataFrame],
@@ -1042,18 +1005,28 @@ class RandomForestClassifier(BaseTransformer):
         # For classifier, the type of predict is the same as the type of label
         if self._sklearn_object._estimator_type == 'classifier':
             # label columns is the desired type for output
-            outputs = _infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True)
+            outputs = list(_infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True))
             # rename the output columns
-            outputs = model_signature_utils.rename_features(outputs, self.output_cols)
+            outputs = list(model_signature_utils.rename_features(outputs, self.output_cols))
             self._model_signature_dict["predict"] = ModelSignature(inputs,
                                                                     ([] if self._drop_input_cols else inputs)
                                                                     + outputs)
+        # For mixture models that use the density mixin, `predict` returns the argmax of the log prob.
+        # For outlier models, returns -1 for outliers and 1 for inliers.
+        # Clusterer returns int64 cluster labels.
+        elif self._sklearn_object._estimator_type in ["DensityEstimator", "clusterer", "outlier_detector"]:
+            outputs = [FeatureSpec(dtype=DataType.INT64, name=c) for c in self.output_cols]
+            self._model_signature_dict["predict"] = ModelSignature(inputs,
+                                                                    ([] if self._drop_input_cols else inputs)
+                                                                    + outputs)
+
         # For regressor, the type of predict is float64
         elif self._sklearn_object._estimator_type == 'regressor':
             outputs = [FeatureSpec(dtype=DataType.DOUBLE, name=c) for c in self.output_cols]
             self._model_signature_dict["predict"] = ModelSignature(inputs,
                                                                     ([] if self._drop_input_cols else inputs)
                                                                     + outputs)
+
         for prob_func in PROB_FUNCTIONS:
             if hasattr(self, prob_func):
                 output_cols_prefix: str = f"{prob_func}_"