snowflake-ml-python 1.1.0__py3-none-any.whl → 1.1.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- snowflake/cortex/_complete.py +1 -1
- snowflake/cortex/_extract_answer.py +1 -1
- snowflake/cortex/_sentiment.py +1 -1
- snowflake/cortex/_summarize.py +1 -1
- snowflake/cortex/_translate.py +1 -1
- snowflake/ml/_internal/env_utils.py +68 -6
- snowflake/ml/_internal/file_utils.py +34 -4
- snowflake/ml/_internal/telemetry.py +79 -91
- snowflake/ml/_internal/utils/identifier.py +78 -72
- snowflake/ml/_internal/utils/retryable_http.py +16 -4
- snowflake/ml/_internal/utils/spcs_attribution_utils.py +122 -0
- snowflake/ml/dataset/dataset.py +1 -1
- snowflake/ml/model/_api.py +21 -14
- snowflake/ml/model/_client/model/model_impl.py +176 -0
- snowflake/ml/model/_client/model/model_method_info.py +19 -0
- snowflake/ml/model/_client/model/model_version_impl.py +291 -0
- snowflake/ml/model/_client/ops/metadata_ops.py +107 -0
- snowflake/ml/model/_client/ops/model_ops.py +308 -0
- snowflake/ml/model/_client/sql/model.py +75 -0
- snowflake/ml/model/_client/sql/model_version.py +213 -0
- snowflake/ml/model/_client/sql/stage.py +40 -0
- snowflake/ml/model/_deploy_client/image_builds/server_image_builder.py +3 -4
- snowflake/ml/model/_deploy_client/image_builds/templates/image_build_job_spec_template +24 -8
- snowflake/ml/model/_deploy_client/image_builds/templates/kaniko_shell_script_template +23 -0
- snowflake/ml/model/_deploy_client/snowservice/deploy.py +14 -2
- snowflake/ml/model/_deploy_client/utils/constants.py +1 -0
- snowflake/ml/model/_deploy_client/warehouse/deploy.py +2 -2
- snowflake/ml/model/_model_composer/model_composer.py +31 -9
- snowflake/ml/model/_model_composer/model_manifest/model_manifest.py +25 -10
- snowflake/ml/model/_model_composer/model_manifest/model_manifest_schema.py +2 -2
- snowflake/ml/model/_model_composer/model_method/infer_function.py_template +2 -1
- snowflake/ml/model/_model_composer/model_method/model_method.py +34 -3
- snowflake/ml/model/_model_composer/model_runtime/model_runtime.py +1 -1
- snowflake/ml/model/_packager/model_handlers/huggingface_pipeline.py +3 -1
- snowflake/ml/model/_packager/model_handlers/snowmlmodel.py +10 -28
- snowflake/ml/model/_packager/model_meta/model_meta.py +18 -16
- snowflake/ml/model/_signatures/snowpark_handler.py +1 -1
- snowflake/ml/model/model_signature.py +108 -53
- snowflake/ml/model/type_hints.py +1 -0
- snowflake/ml/modeling/_internal/distributed_hpo_trainer.py +554 -0
- snowflake/ml/modeling/_internal/estimator_protocols.py +1 -60
- snowflake/ml/modeling/_internal/model_specifications.py +146 -0
- snowflake/ml/modeling/_internal/model_trainer.py +13 -0
- snowflake/ml/modeling/_internal/model_trainer_builder.py +78 -0
- snowflake/ml/modeling/_internal/pandas_trainer.py +54 -0
- snowflake/ml/modeling/_internal/snowpark_handlers.py +6 -760
- snowflake/ml/modeling/_internal/snowpark_trainer.py +331 -0
- snowflake/ml/modeling/calibration/calibrated_classifier_cv.py +108 -135
- snowflake/ml/modeling/cluster/affinity_propagation.py +106 -135
- snowflake/ml/modeling/cluster/agglomerative_clustering.py +106 -135
- snowflake/ml/modeling/cluster/birch.py +106 -135
- snowflake/ml/modeling/cluster/bisecting_k_means.py +106 -135
- snowflake/ml/modeling/cluster/dbscan.py +106 -135
- snowflake/ml/modeling/cluster/feature_agglomeration.py +106 -135
- snowflake/ml/modeling/cluster/k_means.py +105 -135
- snowflake/ml/modeling/cluster/mean_shift.py +106 -135
- snowflake/ml/modeling/cluster/mini_batch_k_means.py +105 -135
- snowflake/ml/modeling/cluster/optics.py +106 -135
- snowflake/ml/modeling/cluster/spectral_biclustering.py +106 -135
- snowflake/ml/modeling/cluster/spectral_clustering.py +106 -135
- snowflake/ml/modeling/cluster/spectral_coclustering.py +106 -135
- snowflake/ml/modeling/compose/column_transformer.py +106 -135
- snowflake/ml/modeling/compose/transformed_target_regressor.py +108 -135
- snowflake/ml/modeling/covariance/elliptic_envelope.py +106 -135
- snowflake/ml/modeling/covariance/empirical_covariance.py +99 -128
- snowflake/ml/modeling/covariance/graphical_lasso.py +106 -135
- snowflake/ml/modeling/covariance/graphical_lasso_cv.py +106 -135
- snowflake/ml/modeling/covariance/ledoit_wolf.py +104 -133
- snowflake/ml/modeling/covariance/min_cov_det.py +106 -135
- snowflake/ml/modeling/covariance/oas.py +99 -128
- snowflake/ml/modeling/covariance/shrunk_covariance.py +103 -132
- snowflake/ml/modeling/decomposition/dictionary_learning.py +106 -135
- snowflake/ml/modeling/decomposition/factor_analysis.py +106 -135
- snowflake/ml/modeling/decomposition/fast_ica.py +106 -135
- snowflake/ml/modeling/decomposition/incremental_pca.py +106 -135
- snowflake/ml/modeling/decomposition/kernel_pca.py +106 -135
- snowflake/ml/modeling/decomposition/mini_batch_dictionary_learning.py +106 -135
- snowflake/ml/modeling/decomposition/mini_batch_sparse_pca.py +106 -135
- snowflake/ml/modeling/decomposition/pca.py +106 -135
- snowflake/ml/modeling/decomposition/sparse_pca.py +106 -135
- snowflake/ml/modeling/decomposition/truncated_svd.py +106 -135
- snowflake/ml/modeling/discriminant_analysis/linear_discriminant_analysis.py +108 -135
- snowflake/ml/modeling/discriminant_analysis/quadratic_discriminant_analysis.py +108 -135
- snowflake/ml/modeling/ensemble/ada_boost_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/ada_boost_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/bagging_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/bagging_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/extra_trees_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/extra_trees_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/gradient_boosting_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/gradient_boosting_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/hist_gradient_boosting_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/hist_gradient_boosting_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/isolation_forest.py +106 -135
- snowflake/ml/modeling/ensemble/random_forest_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/random_forest_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/stacking_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/voting_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/voting_regressor.py +108 -135
- snowflake/ml/modeling/feature_selection/generic_univariate_select.py +101 -128
- snowflake/ml/modeling/feature_selection/select_fdr.py +99 -126
- snowflake/ml/modeling/feature_selection/select_fpr.py +99 -126
- snowflake/ml/modeling/feature_selection/select_fwe.py +99 -126
- snowflake/ml/modeling/feature_selection/select_k_best.py +100 -127
- snowflake/ml/modeling/feature_selection/select_percentile.py +99 -126
- snowflake/ml/modeling/feature_selection/sequential_feature_selector.py +106 -135
- snowflake/ml/modeling/feature_selection/variance_threshold.py +95 -124
- snowflake/ml/modeling/framework/base.py +83 -1
- snowflake/ml/modeling/gaussian_process/gaussian_process_classifier.py +108 -135
- snowflake/ml/modeling/gaussian_process/gaussian_process_regressor.py +108 -135
- snowflake/ml/modeling/impute/iterative_imputer.py +106 -135
- snowflake/ml/modeling/impute/knn_imputer.py +106 -135
- snowflake/ml/modeling/impute/missing_indicator.py +106 -135
- snowflake/ml/modeling/impute/simple_imputer.py +9 -1
- snowflake/ml/modeling/kernel_approximation/additive_chi2_sampler.py +96 -125
- snowflake/ml/modeling/kernel_approximation/nystroem.py +106 -135
- snowflake/ml/modeling/kernel_approximation/polynomial_count_sketch.py +106 -135
- snowflake/ml/modeling/kernel_approximation/rbf_sampler.py +105 -134
- snowflake/ml/modeling/kernel_approximation/skewed_chi2_sampler.py +103 -132
- snowflake/ml/modeling/kernel_ridge/kernel_ridge.py +108 -135
- snowflake/ml/modeling/lightgbm/lgbm_classifier.py +90 -118
- snowflake/ml/modeling/lightgbm/lgbm_regressor.py +90 -118
- snowflake/ml/modeling/linear_model/ard_regression.py +108 -135
- snowflake/ml/modeling/linear_model/bayesian_ridge.py +108 -135
- snowflake/ml/modeling/linear_model/elastic_net.py +108 -135
- snowflake/ml/modeling/linear_model/elastic_net_cv.py +108 -135
- snowflake/ml/modeling/linear_model/gamma_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/huber_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/lars.py +108 -135
- snowflake/ml/modeling/linear_model/lars_cv.py +108 -135
- snowflake/ml/modeling/linear_model/lasso.py +108 -135
- snowflake/ml/modeling/linear_model/lasso_cv.py +108 -135
- snowflake/ml/modeling/linear_model/lasso_lars.py +108 -135
- snowflake/ml/modeling/linear_model/lasso_lars_cv.py +108 -135
- snowflake/ml/modeling/linear_model/lasso_lars_ic.py +108 -135
- snowflake/ml/modeling/linear_model/linear_regression.py +108 -135
- snowflake/ml/modeling/linear_model/logistic_regression.py +108 -135
- snowflake/ml/modeling/linear_model/logistic_regression_cv.py +108 -135
- snowflake/ml/modeling/linear_model/multi_task_elastic_net.py +108 -135
- snowflake/ml/modeling/linear_model/multi_task_elastic_net_cv.py +108 -135
- snowflake/ml/modeling/linear_model/multi_task_lasso.py +108 -135
- snowflake/ml/modeling/linear_model/multi_task_lasso_cv.py +108 -135
- snowflake/ml/modeling/linear_model/orthogonal_matching_pursuit.py +108 -135
- snowflake/ml/modeling/linear_model/passive_aggressive_classifier.py +108 -135
- snowflake/ml/modeling/linear_model/passive_aggressive_regressor.py +107 -135
- snowflake/ml/modeling/linear_model/perceptron.py +107 -135
- snowflake/ml/modeling/linear_model/poisson_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/ransac_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/ridge.py +108 -135
- snowflake/ml/modeling/linear_model/ridge_classifier.py +108 -135
- snowflake/ml/modeling/linear_model/ridge_classifier_cv.py +108 -135
- snowflake/ml/modeling/linear_model/ridge_cv.py +108 -135
- snowflake/ml/modeling/linear_model/sgd_classifier.py +108 -135
- snowflake/ml/modeling/linear_model/sgd_one_class_svm.py +106 -135
- snowflake/ml/modeling/linear_model/sgd_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/theil_sen_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/tweedie_regressor.py +108 -135
- snowflake/ml/modeling/manifold/isomap.py +106 -135
- snowflake/ml/modeling/manifold/mds.py +106 -135
- snowflake/ml/modeling/manifold/spectral_embedding.py +106 -135
- snowflake/ml/modeling/manifold/tsne.py +106 -135
- snowflake/ml/modeling/metrics/classification.py +196 -55
- snowflake/ml/modeling/metrics/correlation.py +4 -2
- snowflake/ml/modeling/metrics/covariance.py +7 -4
- snowflake/ml/modeling/metrics/ranking.py +32 -16
- snowflake/ml/modeling/metrics/regression.py +60 -32
- snowflake/ml/modeling/mixture/bayesian_gaussian_mixture.py +106 -135
- snowflake/ml/modeling/mixture/gaussian_mixture.py +106 -135
- snowflake/ml/modeling/model_selection/grid_search_cv.py +91 -148
- snowflake/ml/modeling/model_selection/randomized_search_cv.py +93 -154
- snowflake/ml/modeling/multiclass/one_vs_one_classifier.py +105 -132
- snowflake/ml/modeling/multiclass/one_vs_rest_classifier.py +108 -135
- snowflake/ml/modeling/multiclass/output_code_classifier.py +108 -135
- snowflake/ml/modeling/naive_bayes/bernoulli_nb.py +108 -135
- snowflake/ml/modeling/naive_bayes/categorical_nb.py +108 -135
- snowflake/ml/modeling/naive_bayes/complement_nb.py +108 -135
- snowflake/ml/modeling/naive_bayes/gaussian_nb.py +98 -125
- snowflake/ml/modeling/naive_bayes/multinomial_nb.py +107 -134
- snowflake/ml/modeling/neighbors/k_neighbors_classifier.py +108 -135
- snowflake/ml/modeling/neighbors/k_neighbors_regressor.py +108 -135
- snowflake/ml/modeling/neighbors/kernel_density.py +106 -135
- snowflake/ml/modeling/neighbors/local_outlier_factor.py +106 -135
- snowflake/ml/modeling/neighbors/nearest_centroid.py +108 -135
- snowflake/ml/modeling/neighbors/nearest_neighbors.py +106 -135
- snowflake/ml/modeling/neighbors/neighborhood_components_analysis.py +108 -135
- snowflake/ml/modeling/neighbors/radius_neighbors_classifier.py +108 -135
- snowflake/ml/modeling/neighbors/radius_neighbors_regressor.py +108 -135
- snowflake/ml/modeling/neural_network/bernoulli_rbm.py +106 -135
- snowflake/ml/modeling/neural_network/mlp_classifier.py +108 -135
- snowflake/ml/modeling/neural_network/mlp_regressor.py +108 -135
- snowflake/ml/modeling/parameters/disable_distributed_hpo.py +2 -6
- snowflake/ml/modeling/preprocessing/binarizer.py +25 -8
- snowflake/ml/modeling/preprocessing/k_bins_discretizer.py +9 -4
- snowflake/ml/modeling/preprocessing/label_encoder.py +31 -11
- snowflake/ml/modeling/preprocessing/max_abs_scaler.py +27 -9
- snowflake/ml/modeling/preprocessing/min_max_scaler.py +42 -14
- snowflake/ml/modeling/preprocessing/normalizer.py +9 -4
- snowflake/ml/modeling/preprocessing/one_hot_encoder.py +26 -10
- snowflake/ml/modeling/preprocessing/ordinal_encoder.py +37 -13
- snowflake/ml/modeling/preprocessing/polynomial_features.py +106 -135
- snowflake/ml/modeling/preprocessing/robust_scaler.py +39 -13
- snowflake/ml/modeling/preprocessing/standard_scaler.py +36 -12
- snowflake/ml/modeling/semi_supervised/label_propagation.py +108 -135
- snowflake/ml/modeling/semi_supervised/label_spreading.py +108 -135
- snowflake/ml/modeling/svm/linear_svc.py +108 -135
- snowflake/ml/modeling/svm/linear_svr.py +108 -135
- snowflake/ml/modeling/svm/nu_svc.py +108 -135
- snowflake/ml/modeling/svm/nu_svr.py +108 -135
- snowflake/ml/modeling/svm/svc.py +108 -135
- snowflake/ml/modeling/svm/svr.py +108 -135
- snowflake/ml/modeling/tree/decision_tree_classifier.py +108 -135
- snowflake/ml/modeling/tree/decision_tree_regressor.py +108 -135
- snowflake/ml/modeling/tree/extra_tree_classifier.py +108 -135
- snowflake/ml/modeling/tree/extra_tree_regressor.py +108 -135
- snowflake/ml/modeling/xgboost/xgb_classifier.py +108 -136
- snowflake/ml/modeling/xgboost/xgb_regressor.py +108 -136
- snowflake/ml/modeling/xgboost/xgbrf_classifier.py +108 -136
- snowflake/ml/modeling/xgboost/xgbrf_regressor.py +108 -136
- snowflake/ml/registry/model_registry.py +2 -0
- snowflake/ml/registry/registry.py +215 -0
- snowflake/ml/version.py +1 -1
- {snowflake_ml_python-1.1.0.dist-info → snowflake_ml_python-1.1.2.dist-info}/METADATA +34 -1
- snowflake_ml_python-1.1.2.dist-info/RECORD +347 -0
- snowflake_ml_python-1.1.0.dist-info/RECORD +0 -331
- {snowflake_ml_python-1.1.0.dist-info → snowflake_ml_python-1.1.2.dist-info}/WHEEL +0 -0
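Among the new files listed above, `snowflake/ml/registry/registry.py` and the `snowflake/ml/model/_client/*` modules introduce a client-side model registry API. The following is a minimal, hedged sketch of how that API is typically used; the constructor and method signatures (`Registry(session=...)`, `log_model`, `show_models`, `run`) are assumptions inferred from the new module names and may differ in the released 1.1.2 API, and the connection parameters and model are placeholders.

```python
# Hedged sketch only: signatures below are assumptions inferred from the new
# registry/_client modules listed in this diff; consult the 1.1.2 docs for the exact API.
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression

from snowflake.snowpark import Session
from snowflake.ml.registry import Registry

# Placeholder connection parameters -- substitute your own account details.
connection_parameters = {
    "account": "...", "user": "...", "password": "...",
    "role": "...", "warehouse": "...", "database": "...", "schema": "...",
}
session = Session.builder.configs(connection_parameters).create()

# Any supported model object; a small scikit-learn classifier is used here.
X, y = load_iris(return_X_y=True, as_frame=True)
clf = LogisticRegression(max_iter=200).fit(X, y)

reg = Registry(session=session)          # assumed constructor
mv = reg.log_model(                       # assumed: registers a version, returns a ModelVersion
    model=clf,
    model_name="MY_MODEL",
    version_name="V1",
    sample_input_data=X.head(5),          # assumed parameter used to infer the signature
)
print(reg.show_models())                  # assumed listing helper
preds = mv.run(X.head(5), function_name="predict")  # assumed inference entry point
```

The hunks below show the per-file diff for snowflake/ml/modeling/neural_network/bernoulli_rbm.py; the other autogenerated modeling wrappers listed above receive largely the same treatment, as their similar line counts suggest.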
```diff
--- snowflake/ml/modeling/neural_network/bernoulli_rbm.py (1.1.0)
+++ snowflake/ml/modeling/neural_network/bernoulli_rbm.py (1.1.2)
@@ -22,17 +22,19 @@ from sklearn.utils.metaestimators import available_if
 from snowflake.ml.modeling.framework.base import BaseTransformer, _process_cols
 from snowflake.ml._internal import telemetry
 from snowflake.ml._internal.exceptions import error_codes, exceptions, modeling_error_messages
+from snowflake.ml._internal.env_utils import SNOWML_SPROC_ENV
 from snowflake.ml._internal.utils import pkg_version_utils, identifier
-from snowflake.snowpark import DataFrame
+from snowflake.snowpark import DataFrame, Session
 from snowflake.snowpark._internal.type_utils import convert_sp_to_sf_type
 from snowflake.ml.modeling._internal.snowpark_handlers import SnowparkHandlers as HandlersImpl
+from snowflake.ml.modeling._internal.model_trainer_builder import ModelTrainerBuilder
+from snowflake.ml.modeling._internal.model_trainer import ModelTrainer
 from snowflake.ml.modeling._internal.estimator_utils import (
     gather_dependencies,
     original_estimator_has_callable,
     transform_snowml_obj_to_sklearn_obj,
     validate_sklearn_args,
 )
-from snowflake.ml.modeling._internal.snowpark_handlers import SklearnWrapperProvider
 from snowflake.ml.modeling._internal.estimator_protocols import FitPredictHandlers
 
 from snowflake.ml.model.model_signature import (
@@ -52,7 +54,6 @@ _PROJECT = "ModelDevelopment"
 _SUBPROJECT = "".join([s.capitalize() for s in "sklearn.neural_network".replace("sklearn.", "").split("_")])
 
 
-
 class BernoulliRBM(BaseTransformer):
     r"""Bernoulli Restricted Boltzmann Machine (RBM)
     For more details on this class, see [sklearn.neural_network.BernoulliRBM]
@@ -60,6 +61,49 @@ class BernoulliRBM(BaseTransformer):
 
     Parameters
     ----------
+
+    input_cols: Optional[Union[str, List[str]]]
+        A string or list of strings representing column names that contain features.
+        If this parameter is not specified, all columns in the input DataFrame except
+        the columns specified by label_cols, sample_weight_col, and passthrough_cols
+        parameters are considered input columns. Input columns can also be set after
+        initialization with the `set_input_cols` method.
+
+    label_cols: Optional[Union[str, List[str]]]
+        This parameter is optional and will be ignored during fit. It is present here for API consistency by convention.
+
+    output_cols: Optional[Union[str, List[str]]]
+        A string or list of strings representing column names that will store the
+        output of predict and transform operations. The length of output_cols must
+        match the expected number of output columns from the specific predictor or
+        transformer class used.
+        If you omit this parameter, output column names are derived by adding an
+        OUTPUT_ prefix to the label column names for supervised estimators, or
+        OUTPUT_<IDX>for unsupervised estimators. These inferred output column names
+        work for predictors, but output_cols must be set explicitly for transformers.
+        In general, explicitly specifying output column names is clearer, especially
+        if you don’t specify the input column names.
+        To transform in place, pass the same names for input_cols and output_cols.
+        be set explicitly for transformers. Output columns can also be set after
+        initialization with the `set_output_cols` method.
+
+    sample_weight_col: Optional[str]
+        A string representing the column name containing the sample weights.
+        This argument is only required when working with weighted datasets. Sample
+        weight column can also be set after initialization with the
+        `set_sample_weight_col` method.
+
+    passthrough_cols: Optional[Union[str, List[str]]]
+        A string or a list of strings indicating column names to be excluded from any
+        operations (such as train, transform, or inference). These specified column(s)
+        will remain untouched throughout the process. This option is helpful in scenarios
+        requiring automatic input_cols inference, but need to avoid using specific
+        columns, like index columns, during training or inference. Passthrough columns
+        can also be set after initialization with the `set_passthrough_cols` method.
+
+    drop_input_cols: Optional[bool], default=False
+        If set, the response of predict(), transform() methods will not contain input columns.
+
     n_components: int, default=256
         Number of binary hidden units.
 
@@ -90,35 +134,6 @@ class BernoulliRBM(BaseTransformer):
 
     Pass an int for reproducible results across multiple function calls.
     See :term:`Glossary <random_state>`.
-
-    input_cols: Optional[Union[str, List[str]]]
-        A string or list of strings representing column names that contain features.
-        If this parameter is not specified, all columns in the input DataFrame except
-        the columns specified by label_cols and sample_weight_col parameters are
-        considered input columns.
-
-    label_cols: Optional[Union[str, List[str]]]
-        A string or list of strings representing column names that contain labels.
-        This is a required param for estimators, as there is no way to infer these
-        columns. If this parameter is not specified, then object is fitted without
-        labels (like a transformer).
-
-    output_cols: Optional[Union[str, List[str]]]
-        A string or list of strings representing column names that will store the
-        output of predict and transform operations. The length of output_cols must
-        match the expected number of output columns from the specific estimator or
-        transformer class used.
-        If this parameter is not specified, output column names are derived by
-        adding an OUTPUT_ prefix to the label column names. These inferred output
-        column names work for estimator's predict() method, but output_cols must
-        be set explicitly for transformers.
-
-    sample_weight_col: Optional[str]
-        A string representing the column name containing the sample weights.
-        This argument is only required when working with weighted datasets.
-
-    drop_input_cols: Optional[bool], default=False
-        If set, the response of predict(), transform() methods will not contain input columns.
     """
 
     def __init__( # type: ignore[no-untyped-def]
@@ -133,6 +148,7 @@ class BernoulliRBM(BaseTransformer):
         input_cols: Optional[Union[str, Iterable[str]]] = None,
         output_cols: Optional[Union[str, Iterable[str]]] = None,
         label_cols: Optional[Union[str, Iterable[str]]] = None,
+        passthrough_cols: Optional[Union[str, Iterable[str]]] = None,
         drop_input_cols: Optional[bool] = False,
         sample_weight_col: Optional[str] = None,
     ) -> None:
@@ -141,9 +157,10 @@ class BernoulliRBM(BaseTransformer):
         self.set_input_cols(input_cols)
         self.set_output_cols(output_cols)
         self.set_label_cols(label_cols)
+        self.set_passthrough_cols(passthrough_cols)
         self.set_drop_input_cols(drop_input_cols)
         self.set_sample_weight_col(sample_weight_col)
-        deps = set(
+        deps: Set[str] = set([f'numpy=={np.__version__}', f'scikit-learn=={sklearn.__version__}', f'cloudpickle=={cp.__version__}'])
 
         self._deps = list(deps)
 
@@ -157,13 +174,14 @@ class BernoulliRBM(BaseTransformer):
             args=init_args,
             klass=sklearn.neural_network.BernoulliRBM
         )
-        self._sklearn_object = sklearn.neural_network.BernoulliRBM(
+        self._sklearn_object: Any = sklearn.neural_network.BernoulliRBM(
             **cleaned_up_init_args,
         )
         self._model_signature_dict: Optional[Dict[str, ModelSignature]] = None
         # If user used snowpark dataframe during fit, here it stores the snowpark input_cols, otherwise the processed input_cols
         self._snowpark_cols: Optional[List[str]] = self.input_cols
-        self._handlers: FitPredictHandlers = HandlersImpl(class_name=BernoulliRBM.__class__.__name__, subproject=_SUBPROJECT, autogenerated=True
+        self._handlers: FitPredictHandlers = HandlersImpl(class_name=BernoulliRBM.__class__.__name__, subproject=_SUBPROJECT, autogenerated=True)
+        self._autogenerated = True
 
     def _get_rand_id(self) -> str:
         """
@@ -174,24 +192,6 @@ class BernoulliRBM(BaseTransformer):
         """
         return str(uuid4()).replace("-", "_").upper()
 
-    def _infer_input_output_cols(self, dataset: Union[DataFrame, pd.DataFrame]) -> None:
-        """
-        Infer `self.input_cols` and `self.output_cols` if they are not explicitly set.
-
-        Args:
-            dataset: Input dataset.
-        """
-        if not self.input_cols:
-            cols = [
-                c for c in dataset.columns
-                if c not in self.get_label_cols() and c != self.sample_weight_col
-            ]
-            self.set_input_cols(input_cols=cols)
-
-        if not self.output_cols:
-            cols = [identifier.concat_names(ids=['OUTPUT_', c]) for c in self.label_cols]
-            self.set_output_cols(output_cols=cols)
-
     def set_input_cols(self, input_cols: Optional[Union[str, Iterable[str]]]) -> "BernoulliRBM":
         """
         Input columns setter.
@@ -237,54 +237,48 @@ class BernoulliRBM(BaseTransformer):
             self
         """
         self._infer_input_output_cols(dataset)
-        if isinstance(dataset,
-        self.
+        if isinstance(dataset, DataFrame):
+            session = dataset._session
+            assert session is not None  # keep mypy happy
+            # Validate that key package version in user workspace are supported in snowflake conda channel
+            # If customer doesn't have package in conda channel, replace the ones have the closest versions
+            self._deps = pkg_version_utils.get_valid_pkg_versions_supported_in_snowflake_conda_channel(
+                pkg_versions=self._get_dependencies(), session=session, subproject=_SUBPROJECT)
+
+            # Specify input columns so column pruning will be enforced
+            selected_cols = self._get_active_columns()
+            if len(selected_cols) > 0:
+                dataset = dataset.select(selected_cols)
+
+            self._snowpark_cols = dataset.select(self.input_cols).columns
+
+            # If we are already in a stored procedure, no need to kick off another one.
+            if SNOWML_SPROC_ENV in os.environ:
+                statement_params = telemetry.get_function_usage_statement_params(
+                    project=_PROJECT,
+                    subproject=_SUBPROJECT,
+                    function_name=telemetry.get_statement_params_full_func_name(inspect.currentframe(), BernoulliRBM.__class__.__name__),
+                    api_calls=[Session.call],
+                    custom_tags=dict([("autogen", True)]) if self._autogenerated else None,
+                )
+                pd_df: pd.DataFrame = dataset.to_pandas(statement_params=statement_params)
+                pd_df.columns = dataset.columns
+                dataset = pd_df
+
+        model_trainer = ModelTrainerBuilder.build(
+            estimator=self._sklearn_object,
+            dataset=dataset,
+            input_cols=self.input_cols,
+            label_cols=self.label_cols,
+            sample_weight_col=self.sample_weight_col,
+            autogenerated=self._autogenerated,
+            subproject=_SUBPROJECT
+        )
+        self._sklearn_object = model_trainer.train()
         self._is_fitted = True
         self._get_model_signatures(dataset)
         return self
 
-    def _fit_snowpark(self, dataset: DataFrame) -> None:
-        session = dataset._session
-        assert session is not None  # keep mypy happy
-        # Validate that key package version in user workspace are supported in snowflake conda channel
-        # If customer doesn't have package in conda channel, replace the ones have the closest versions
-        self._deps = pkg_version_utils.get_valid_pkg_versions_supported_in_snowflake_conda_channel(
-            pkg_versions=self._get_dependencies(), session=session, subproject=_SUBPROJECT)
-
-        # Specify input columns so column pruning will be enforced
-        selected_cols = self._get_active_columns()
-        if len(selected_cols) > 0:
-            dataset = dataset.select(selected_cols)
-
-        estimator = self._sklearn_object
-        assert estimator is not None  # Keep mypy happy
-
-        self._snowpark_cols = dataset.select(self.input_cols).columns
-
-        self._sklearn_object = self._handlers.fit_snowpark(
-            dataset,
-            session,
-            estimator,
-            ["snowflake-snowpark-python"] + self._get_dependencies(),
-            self.input_cols,
-            self.label_cols,
-            self.sample_weight_col,
-        )
-
     def _get_pass_through_columns(self, dataset: DataFrame) -> List[str]:
         if self._drop_input_cols:
             return []
@@ -472,11 +466,6 @@ class BernoulliRBM(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def predict(self, dataset: Union[DataFrame, pd.DataFrame]) -> Union[DataFrame, pd.DataFrame]:
         """Method not supported for this class.
 
@@ -528,11 +517,6 @@ class BernoulliRBM(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def transform(self, dataset: Union[DataFrame, pd.DataFrame]) -> Union[DataFrame, pd.DataFrame]:
         """Compute the hidden layer activation probabilities, P(h=1|v=X)
         For more details on this function, see [sklearn.neural_network.BernoulliRBM.transform]
@@ -591,7 +575,8 @@ class BernoulliRBM(BaseTransformer):
         if False:
             self.fit(dataset)
             assert self._sklearn_object is not None
-
+            labels : npt.NDArray[Any] = self._sklearn_object.labels_
+            return labels
         else:
             raise NotImplementedError
 
@@ -627,6 +612,7 @@ class BernoulliRBM(BaseTransformer):
             output_cols = []
 
         # Make sure column names are valid snowflake identifiers.
+        assert output_cols is not None  # Make MyPy happy
         rv = [identifier.rename_to_valid_snowflake_identifier(c) for c in output_cols]
 
         return rv
@@ -637,11 +623,6 @@ class BernoulliRBM(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def predict_proba(
         self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "predict_proba_"
     ) -> Union[DataFrame, pd.DataFrame]:
@@ -682,11 +663,6 @@ class BernoulliRBM(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
    def predict_log_proba(
         self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "predict_log_proba_"
     ) -> Union[DataFrame, pd.DataFrame]:
@@ -723,16 +699,6 @@ class BernoulliRBM(BaseTransformer):
         return output_df
 
     @available_if(original_estimator_has_callable("decision_function"))  # type: ignore[misc]
-    @telemetry.send_api_usage_telemetry(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def decision_function(
         self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "decision_function_"
     ) -> Union[DataFrame, pd.DataFrame]:
@@ -831,11 +797,6 @@ class BernoulliRBM(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def kneighbors(
         self,
         dataset: Union[DataFrame, pd.DataFrame],
@@ -895,18 +856,28 @@ class BernoulliRBM(BaseTransformer):
         # For classifier, the type of predict is the same as the type of label
         if self._sklearn_object._estimator_type == 'classifier':
             # label columns is the desired type for output
-            outputs = _infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True)
+            outputs = list(_infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True))
             # rename the output columns
-            outputs = model_signature_utils.rename_features(outputs, self.output_cols)
+            outputs = list(model_signature_utils.rename_features(outputs, self.output_cols))
+            self._model_signature_dict["predict"] = ModelSignature(inputs,
+                                                    ([] if self._drop_input_cols else inputs)
+                                                    + outputs)
+        # For mixture models that use the density mixin, `predict` returns the argmax of the log prob.
+        # For outlier models, returns -1 for outliers and 1 for inliers.
+        # Clusterer returns int64 cluster labels.
+        elif self._sklearn_object._estimator_type in ["DensityEstimator", "clusterer", "outlier_detector"]:
+            outputs = [FeatureSpec(dtype=DataType.INT64, name=c) for c in self.output_cols]
             self._model_signature_dict["predict"] = ModelSignature(inputs,
                                                     ([] if self._drop_input_cols else inputs)
                                                     + outputs)
+
         # For regressor, the type of predict is float64
         elif self._sklearn_object._estimator_type == 'regressor':
             outputs = [FeatureSpec(dtype=DataType.DOUBLE, name=c) for c in self.output_cols]
             self._model_signature_dict["predict"] = ModelSignature(inputs,
                                                     ([] if self._drop_input_cols else inputs)
                                                     + outputs)
+
         for prob_func in PROB_FUNCTIONS:
             if hasattr(self, prob_func):
                 output_cols_prefix: str = f"{prob_func}_"
```