snowflake-ml-python 1.0.2__py3-none-any.whl → 1.0.3__py3-none-any.whl
This diff shows the changes between publicly available package versions as they appear in their respective public registries. The information is provided for informational purposes only.
- snowflake/ml/_internal/env_utils.py +2 -1
- snowflake/ml/_internal/file_utils.py +29 -7
- snowflake/ml/_internal/telemetry.py +5 -8
- snowflake/ml/_internal/utils/uri.py +7 -2
- snowflake/ml/model/_deploy_client/image_builds/base_image_builder.py +15 -0
- snowflake/ml/model/_deploy_client/image_builds/client_image_builder.py +259 -0
- snowflake/ml/model/_deploy_client/image_builds/docker_context.py +89 -0
- snowflake/ml/model/_deploy_client/image_builds/gunicorn_run.sh +24 -0
- snowflake/ml/model/_deploy_client/image_builds/inference_server/main.py +118 -0
- snowflake/ml/model/_deploy_client/image_builds/templates/dockerfile_template +40 -0
- snowflake/ml/model/_deploy_client/snowservice/deploy.py +199 -0
- snowflake/ml/model/_deploy_client/snowservice/deploy_options.py +88 -0
- snowflake/ml/model/_deploy_client/snowservice/templates/service_spec_template +24 -0
- snowflake/ml/model/_deploy_client/utils/constants.py +47 -0
- snowflake/ml/model/_deploy_client/utils/snowservice_client.py +178 -0
- snowflake/ml/model/_deploy_client/warehouse/deploy.py +24 -6
- snowflake/ml/model/_deploy_client/warehouse/infer_template.py +5 -2
- snowflake/ml/model/_deployer.py +14 -27
- snowflake/ml/model/_env.py +4 -4
- snowflake/ml/model/_handlers/custom.py +14 -2
- snowflake/ml/model/_handlers/pytorch.py +186 -0
- snowflake/ml/model/_handlers/sklearn.py +14 -9
- snowflake/ml/model/_handlers/snowmlmodel.py +14 -9
- snowflake/ml/model/_handlers/torchscript.py +180 -0
- snowflake/ml/model/_handlers/xgboost.py +19 -9
- snowflake/ml/model/_model.py +3 -2
- snowflake/ml/model/_model_meta.py +12 -7
- snowflake/ml/model/model_signature.py +446 -66
- snowflake/ml/model/type_hints.py +23 -4
- snowflake/ml/modeling/calibration/calibrated_classifier_cv.py +51 -26
- snowflake/ml/modeling/cluster/affinity_propagation.py +51 -26
- snowflake/ml/modeling/cluster/agglomerative_clustering.py +51 -26
- snowflake/ml/modeling/cluster/birch.py +51 -26
- snowflake/ml/modeling/cluster/bisecting_k_means.py +51 -26
- snowflake/ml/modeling/cluster/dbscan.py +51 -26
- snowflake/ml/modeling/cluster/feature_agglomeration.py +51 -26
- snowflake/ml/modeling/cluster/k_means.py +51 -26
- snowflake/ml/modeling/cluster/mean_shift.py +51 -26
- snowflake/ml/modeling/cluster/mini_batch_k_means.py +51 -26
- snowflake/ml/modeling/cluster/optics.py +51 -26
- snowflake/ml/modeling/cluster/spectral_biclustering.py +51 -26
- snowflake/ml/modeling/cluster/spectral_clustering.py +51 -26
- snowflake/ml/modeling/cluster/spectral_coclustering.py +51 -26
- snowflake/ml/modeling/compose/column_transformer.py +51 -26
- snowflake/ml/modeling/compose/transformed_target_regressor.py +51 -26
- snowflake/ml/modeling/covariance/elliptic_envelope.py +51 -26
- snowflake/ml/modeling/covariance/empirical_covariance.py +51 -26
- snowflake/ml/modeling/covariance/graphical_lasso.py +51 -26
- snowflake/ml/modeling/covariance/graphical_lasso_cv.py +51 -26
- snowflake/ml/modeling/covariance/ledoit_wolf.py +51 -26
- snowflake/ml/modeling/covariance/min_cov_det.py +51 -26
- snowflake/ml/modeling/covariance/oas.py +51 -26
- snowflake/ml/modeling/covariance/shrunk_covariance.py +51 -26
- snowflake/ml/modeling/decomposition/dictionary_learning.py +51 -26
- snowflake/ml/modeling/decomposition/factor_analysis.py +51 -26
- snowflake/ml/modeling/decomposition/fast_ica.py +51 -26
- snowflake/ml/modeling/decomposition/incremental_pca.py +51 -26
- snowflake/ml/modeling/decomposition/kernel_pca.py +51 -26
- snowflake/ml/modeling/decomposition/mini_batch_dictionary_learning.py +51 -26
- snowflake/ml/modeling/decomposition/mini_batch_sparse_pca.py +51 -26
- snowflake/ml/modeling/decomposition/pca.py +51 -26
- snowflake/ml/modeling/decomposition/sparse_pca.py +51 -26
- snowflake/ml/modeling/decomposition/truncated_svd.py +51 -26
- snowflake/ml/modeling/discriminant_analysis/linear_discriminant_analysis.py +51 -26
- snowflake/ml/modeling/discriminant_analysis/quadratic_discriminant_analysis.py +51 -26
- snowflake/ml/modeling/ensemble/ada_boost_classifier.py +51 -26
- snowflake/ml/modeling/ensemble/ada_boost_regressor.py +51 -26
- snowflake/ml/modeling/ensemble/bagging_classifier.py +51 -26
- snowflake/ml/modeling/ensemble/bagging_regressor.py +51 -26
- snowflake/ml/modeling/ensemble/extra_trees_classifier.py +51 -26
- snowflake/ml/modeling/ensemble/extra_trees_regressor.py +51 -26
- snowflake/ml/modeling/ensemble/gradient_boosting_classifier.py +51 -26
- snowflake/ml/modeling/ensemble/gradient_boosting_regressor.py +51 -26
- snowflake/ml/modeling/ensemble/hist_gradient_boosting_classifier.py +51 -26
- snowflake/ml/modeling/ensemble/hist_gradient_boosting_regressor.py +51 -26
- snowflake/ml/modeling/ensemble/isolation_forest.py +51 -26
- snowflake/ml/modeling/ensemble/random_forest_classifier.py +51 -26
- snowflake/ml/modeling/ensemble/random_forest_regressor.py +51 -26
- snowflake/ml/modeling/ensemble/stacking_regressor.py +51 -26
- snowflake/ml/modeling/ensemble/voting_classifier.py +51 -26
- snowflake/ml/modeling/ensemble/voting_regressor.py +51 -26
- snowflake/ml/modeling/feature_selection/generic_univariate_select.py +51 -26
- snowflake/ml/modeling/feature_selection/select_fdr.py +51 -26
- snowflake/ml/modeling/feature_selection/select_fpr.py +51 -26
- snowflake/ml/modeling/feature_selection/select_fwe.py +51 -26
- snowflake/ml/modeling/feature_selection/select_k_best.py +51 -26
- snowflake/ml/modeling/feature_selection/select_percentile.py +51 -26
- snowflake/ml/modeling/feature_selection/sequential_feature_selector.py +51 -26
- snowflake/ml/modeling/feature_selection/variance_threshold.py +51 -26
- snowflake/ml/modeling/gaussian_process/gaussian_process_classifier.py +51 -26
- snowflake/ml/modeling/gaussian_process/gaussian_process_regressor.py +51 -26
- snowflake/ml/modeling/impute/iterative_imputer.py +51 -26
- snowflake/ml/modeling/impute/knn_imputer.py +51 -26
- snowflake/ml/modeling/impute/missing_indicator.py +51 -26
- snowflake/ml/modeling/kernel_approximation/additive_chi2_sampler.py +51 -26
- snowflake/ml/modeling/kernel_approximation/nystroem.py +51 -26
- snowflake/ml/modeling/kernel_approximation/polynomial_count_sketch.py +51 -26
- snowflake/ml/modeling/kernel_approximation/rbf_sampler.py +51 -26
- snowflake/ml/modeling/kernel_approximation/skewed_chi2_sampler.py +51 -26
- snowflake/ml/modeling/kernel_ridge/kernel_ridge.py +51 -26
- snowflake/ml/modeling/lightgbm/lgbm_classifier.py +51 -26
- snowflake/ml/modeling/lightgbm/lgbm_regressor.py +51 -26
- snowflake/ml/modeling/linear_model/ard_regression.py +51 -26
- snowflake/ml/modeling/linear_model/bayesian_ridge.py +51 -26
- snowflake/ml/modeling/linear_model/elastic_net.py +51 -26
- snowflake/ml/modeling/linear_model/elastic_net_cv.py +51 -26
- snowflake/ml/modeling/linear_model/gamma_regressor.py +51 -26
- snowflake/ml/modeling/linear_model/huber_regressor.py +51 -26
- snowflake/ml/modeling/linear_model/lars.py +51 -26
- snowflake/ml/modeling/linear_model/lars_cv.py +51 -26
- snowflake/ml/modeling/linear_model/lasso.py +51 -26
- snowflake/ml/modeling/linear_model/lasso_cv.py +51 -26
- snowflake/ml/modeling/linear_model/lasso_lars.py +51 -26
- snowflake/ml/modeling/linear_model/lasso_lars_cv.py +51 -26
- snowflake/ml/modeling/linear_model/lasso_lars_ic.py +51 -26
- snowflake/ml/modeling/linear_model/linear_regression.py +51 -26
- snowflake/ml/modeling/linear_model/logistic_regression.py +51 -26
- snowflake/ml/modeling/linear_model/logistic_regression_cv.py +51 -26
- snowflake/ml/modeling/linear_model/multi_task_elastic_net.py +51 -26
- snowflake/ml/modeling/linear_model/multi_task_elastic_net_cv.py +51 -26
- snowflake/ml/modeling/linear_model/multi_task_lasso.py +51 -26
- snowflake/ml/modeling/linear_model/multi_task_lasso_cv.py +51 -26
- snowflake/ml/modeling/linear_model/orthogonal_matching_pursuit.py +51 -26
- snowflake/ml/modeling/linear_model/passive_aggressive_classifier.py +51 -26
- snowflake/ml/modeling/linear_model/passive_aggressive_regressor.py +51 -26
- snowflake/ml/modeling/linear_model/perceptron.py +51 -26
- snowflake/ml/modeling/linear_model/poisson_regressor.py +51 -26
- snowflake/ml/modeling/linear_model/ransac_regressor.py +51 -26
- snowflake/ml/modeling/linear_model/ridge.py +51 -26
- snowflake/ml/modeling/linear_model/ridge_classifier.py +51 -26
- snowflake/ml/modeling/linear_model/ridge_classifier_cv.py +51 -26
- snowflake/ml/modeling/linear_model/ridge_cv.py +51 -26
- snowflake/ml/modeling/linear_model/sgd_classifier.py +51 -26
- snowflake/ml/modeling/linear_model/sgd_one_class_svm.py +51 -26
- snowflake/ml/modeling/linear_model/sgd_regressor.py +51 -26
- snowflake/ml/modeling/linear_model/theil_sen_regressor.py +51 -26
- snowflake/ml/modeling/linear_model/tweedie_regressor.py +51 -26
- snowflake/ml/modeling/manifold/isomap.py +51 -26
- snowflake/ml/modeling/manifold/mds.py +51 -26
- snowflake/ml/modeling/manifold/spectral_embedding.py +51 -26
- snowflake/ml/modeling/manifold/tsne.py +51 -26
- snowflake/ml/modeling/mixture/bayesian_gaussian_mixture.py +51 -26
- snowflake/ml/modeling/mixture/gaussian_mixture.py +51 -26
- snowflake/ml/modeling/model_selection/grid_search_cv.py +51 -26
- snowflake/ml/modeling/model_selection/randomized_search_cv.py +51 -26
- snowflake/ml/modeling/multiclass/one_vs_one_classifier.py +51 -26
- snowflake/ml/modeling/multiclass/one_vs_rest_classifier.py +51 -26
- snowflake/ml/modeling/multiclass/output_code_classifier.py +51 -26
- snowflake/ml/modeling/naive_bayes/bernoulli_nb.py +51 -26
- snowflake/ml/modeling/naive_bayes/categorical_nb.py +51 -26
- snowflake/ml/modeling/naive_bayes/complement_nb.py +51 -26
- snowflake/ml/modeling/naive_bayes/gaussian_nb.py +51 -26
- snowflake/ml/modeling/naive_bayes/multinomial_nb.py +51 -26
- snowflake/ml/modeling/neighbors/k_neighbors_classifier.py +51 -26
- snowflake/ml/modeling/neighbors/k_neighbors_regressor.py +51 -26
- snowflake/ml/modeling/neighbors/kernel_density.py +51 -26
- snowflake/ml/modeling/neighbors/local_outlier_factor.py +51 -26
- snowflake/ml/modeling/neighbors/nearest_centroid.py +51 -26
- snowflake/ml/modeling/neighbors/nearest_neighbors.py +51 -26
- snowflake/ml/modeling/neighbors/neighborhood_components_analysis.py +51 -26
- snowflake/ml/modeling/neighbors/radius_neighbors_classifier.py +51 -26
- snowflake/ml/modeling/neighbors/radius_neighbors_regressor.py +51 -26
- snowflake/ml/modeling/neural_network/bernoulli_rbm.py +51 -26
- snowflake/ml/modeling/neural_network/mlp_classifier.py +51 -26
- snowflake/ml/modeling/neural_network/mlp_regressor.py +51 -26
- snowflake/ml/modeling/preprocessing/ordinal_encoder.py +2 -0
- snowflake/ml/modeling/preprocessing/polynomial_features.py +51 -26
- snowflake/ml/modeling/semi_supervised/label_propagation.py +51 -26
- snowflake/ml/modeling/semi_supervised/label_spreading.py +51 -26
- snowflake/ml/modeling/svm/linear_svc.py +51 -26
- snowflake/ml/modeling/svm/linear_svr.py +51 -26
- snowflake/ml/modeling/svm/nu_svc.py +51 -26
- snowflake/ml/modeling/svm/nu_svr.py +51 -26
- snowflake/ml/modeling/svm/svc.py +51 -26
- snowflake/ml/modeling/svm/svr.py +51 -26
- snowflake/ml/modeling/tree/decision_tree_classifier.py +51 -26
- snowflake/ml/modeling/tree/decision_tree_regressor.py +51 -26
- snowflake/ml/modeling/tree/extra_tree_classifier.py +51 -26
- snowflake/ml/modeling/tree/extra_tree_regressor.py +51 -26
- snowflake/ml/modeling/xgboost/xgb_classifier.py +51 -26
- snowflake/ml/modeling/xgboost/xgb_regressor.py +51 -26
- snowflake/ml/modeling/xgboost/xgbrf_classifier.py +51 -26
- snowflake/ml/modeling/xgboost/xgbrf_regressor.py +51 -26
- snowflake/ml/registry/model_registry.py +74 -56
- snowflake/ml/version.py +1 -1
- {snowflake_ml_python-1.0.2.dist-info → snowflake_ml_python-1.0.3.dist-info}/METADATA +27 -8
- snowflake_ml_python-1.0.3.dist-info/RECORD +259 -0
- snowflake_ml_python-1.0.2.dist-info/RECORD +0 -246
- {snowflake_ml_python-1.0.2.dist-info → snowflake_ml_python-1.0.3.dist-info}/WHEEL +0 -0
--- a/snowflake/ml/modeling/neighbors/neighborhood_components_analysis.py
+++ b/snowflake/ml/modeling/neighbors/neighborhood_components_analysis.py
@@ -7,6 +7,7 @@
 #
 import inspect
 import os
+import posixpath
 from typing import Iterable, Optional, Union, List, Any, Dict, Callable, Set
 from uuid import uuid4
 
@@ -27,6 +28,7 @@ from snowflake.ml._internal.utils.temp_file_utils import cleanup_temp_files, get
 from snowflake.snowpark import DataFrame, Session
 from snowflake.snowpark.functions import pandas_udf, sproc
 from snowflake.snowpark.types import PandasSeries
+from snowflake.snowpark._internal.type_utils import convert_sp_to_sf_type
 
 from snowflake.ml.model.model_signature import (
     DataType,
@@ -264,7 +266,6 @@ class NeighborhoodComponentsAnalysis(BaseTransformer):
         sample_weight_col: Optional[str] = None,
     ) -> None:
         super().__init__()
-        self.id = str(uuid4()).replace("-", "_").upper()
         deps: Set[str] = set([f'numpy=={np.__version__}', f'scikit-learn=={sklearn.__version__}', f'cloudpickle=={cp.__version__}'])
 
         self._deps = list(deps)
@@ -291,6 +292,15 @@ class NeighborhoodComponentsAnalysis(BaseTransformer):
         self.set_drop_input_cols(drop_input_cols)
         self.set_sample_weight_col(sample_weight_col)
 
+    def _get_rand_id(self) -> str:
+        """
+        Generate random id to be used in sproc and stage names.
+
+        Returns:
+            Random id string usable in sproc, table, and stage names.
+        """
+        return str(uuid4()).replace("-", "_").upper()
+
     def _infer_input_output_cols(self, dataset: Union[DataFrame, pd.DataFrame]) -> None:
         """
         Infer `self.input_cols` and `self.output_cols` if they are not explicitly set.
@@ -369,7 +379,7 @@ class NeighborhoodComponentsAnalysis(BaseTransformer):
             cp.dump(self._sklearn_object, local_transform_file)
 
         # Create temp stage to run fit.
-        transform_stage_name = "SNOWML_TRANSFORM_{safe_id}".format(safe_id=self.
+        transform_stage_name = "SNOWML_TRANSFORM_{safe_id}".format(safe_id=self._get_rand_id())
         stage_creation_query = f"CREATE OR REPLACE TEMPORARY STAGE {transform_stage_name};"
         SqlResultValidator(
             session=session,
@@ -382,11 +392,12 @@ class NeighborhoodComponentsAnalysis(BaseTransformer):
             expected_value=f"Stage area {transform_stage_name} successfully created."
         ).validate()
 
-
+        # Use posixpath to construct stage paths
+        stage_transform_file_name = posixpath.join(transform_stage_name, os.path.basename(local_transform_file_name))
+        stage_result_file_name = posixpath.join(transform_stage_name, os.path.basename(local_transform_file_name))
         local_result_file_name = get_temp_file_path()
-        stage_result_file_name = os.path.join(transform_stage_name, os.path.basename(local_transform_file_name))
 
-        fit_sproc_name = "SNOWML_FIT_{safe_id}".format(safe_id=self.
+        fit_sproc_name = "SNOWML_FIT_{safe_id}".format(safe_id=self._get_rand_id())
         statement_params = telemetry.get_function_usage_statement_params(
             project=_PROJECT,
             subproject=_SUBPROJECT,
@@ -412,6 +423,7 @@ class NeighborhoodComponentsAnalysis(BaseTransformer):
             replace=True,
             session=session,
             statement_params=statement_params,
+            anonymous=True
         )
         def fit_wrapper_sproc(
             session: Session,
@@ -420,7 +432,8 @@ class NeighborhoodComponentsAnalysis(BaseTransformer):
             stage_result_file_name: str,
             input_cols: List[str],
             label_cols: List[str],
-            sample_weight_col: Optional[str]
+            sample_weight_col: Optional[str],
+            statement_params: Dict[str, str]
         ) -> str:
             import cloudpickle as cp
             import numpy as np
@@ -487,15 +500,15 @@ class NeighborhoodComponentsAnalysis(BaseTransformer):
             api_calls=[Session.call],
             custom_tags=dict([("autogen", True)]),
         )
-        sproc_export_file_name =
-
+        sproc_export_file_name = fit_wrapper_sproc(
+            session,
             query,
             stage_transform_file_name,
             stage_result_file_name,
             identifier.get_unescaped_names(self.input_cols),
             identifier.get_unescaped_names(self.label_cols),
             identifier.get_unescaped_names(self.sample_weight_col),
-            statement_params
+            statement_params,
         )
 
         if "|" in sproc_export_file_name:
@@ -505,7 +518,7 @@ class NeighborhoodComponentsAnalysis(BaseTransformer):
             print("\n".join(fields[1:]))
 
         session.file.get(
-
+            posixpath.join(stage_result_file_name, sproc_export_file_name),
             local_result_file_name,
             statement_params=statement_params
         )
@@ -551,7 +564,7 @@ class NeighborhoodComponentsAnalysis(BaseTransformer):
 
         # Register vectorized UDF for batch inference
         batch_inference_udf_name = "SNOWML_BATCH_INFERENCE_{safe_id}_{method}".format(
-            safe_id=self.
+            safe_id=self._get_rand_id(), method=inference_method)
 
         # Need to do this since if we use self._sklearn_object directly in the UDF, Snowpark
         # will try to pickle all of self which fails.
@@ -643,7 +656,7 @@ class NeighborhoodComponentsAnalysis(BaseTransformer):
             return transformed_pandas_df.to_dict("records")
 
         batch_inference_table_name = "SNOWML_BATCH_INFERENCE_INPUT_TABLE_{safe_id}".format(
-            safe_id=self.
+            safe_id=self._get_rand_id()
         )
 
         pass_through_columns = self._get_pass_through_columns(dataset)
@@ -808,11 +821,18 @@ class NeighborhoodComponentsAnalysis(BaseTransformer):
             Transformed dataset.
         """
         if isinstance(dataset, DataFrame):
+            expected_type_inferred = ""
+            # when it is classifier, infer the datatype from label columns
+            if expected_type_inferred == "" and 'predict' in self.model_signatures:
+                expected_type_inferred = convert_sp_to_sf_type(
+                    self.model_signatures['predict'].outputs[0].as_snowpark_type()
+                )
+
             output_df = self._batch_inference(
                 dataset=dataset,
                 inference_method="predict",
                 expected_output_cols_list=self.output_cols,
-                expected_output_cols_type=
+                expected_output_cols_type=expected_type_inferred,
             )
         elif isinstance(dataset, pd.DataFrame):
             output_df = self._sklearn_inference(
@@ -885,10 +905,10 @@ class NeighborhoodComponentsAnalysis(BaseTransformer):
 
     def _get_output_column_names(self, output_cols_prefix: str) -> List[str]:
         """ Returns the list of output columns for predict_proba(), decision_function(), etc.. functions.
-        Returns
+        Returns a list with output_cols_prefix as the only element if the estimator is not a classifier.
         """
         if getattr(self._sklearn_object, "classes_", None) is None:
-            return []
+            return [output_cols_prefix]
 
         classes = self._sklearn_object.classes_
         if isinstance(classes, numpy.ndarray):
@@ -1113,7 +1133,7 @@ class NeighborhoodComponentsAnalysis(BaseTransformer):
             cp.dump(self._sklearn_object, local_score_file)
 
         # Create temp stage to run score.
-        score_stage_name = "SNOWML_SCORE_{safe_id}".format(safe_id=self.
+        score_stage_name = "SNOWML_SCORE_{safe_id}".format(safe_id=self._get_rand_id())
         session = dataset._session
         stage_creation_query = f"CREATE OR REPLACE TEMPORARY STAGE {score_stage_name};"
         SqlResultValidator(
@@ -1127,8 +1147,9 @@ class NeighborhoodComponentsAnalysis(BaseTransformer):
             expected_value=f"Stage area {score_stage_name} successfully created."
         ).validate()
 
-
-
+        # Use posixpath to construct stage paths
+        stage_score_file_name = posixpath.join(score_stage_name, os.path.basename(local_score_file_name))
+        score_sproc_name = "SNOWML_SCORE_{safe_id}".format(safe_id=self._get_rand_id())
         statement_params = telemetry.get_function_usage_statement_params(
             project=_PROJECT,
             subproject=_SUBPROJECT,
@@ -1154,6 +1175,7 @@ class NeighborhoodComponentsAnalysis(BaseTransformer):
             replace=True,
             session=session,
             statement_params=statement_params,
+            anonymous=True
         )
         def score_wrapper_sproc(
             session: Session,
@@ -1161,7 +1183,8 @@ class NeighborhoodComponentsAnalysis(BaseTransformer):
             stage_score_file_name: str,
             input_cols: List[str],
             label_cols: List[str],
-            sample_weight_col: Optional[str]
+            sample_weight_col: Optional[str],
+            statement_params: Dict[str, str]
         ) -> float:
             import cloudpickle as cp
             import numpy as np
@@ -1211,14 +1234,14 @@ class NeighborhoodComponentsAnalysis(BaseTransformer):
             api_calls=[Session.call],
             custom_tags=dict([("autogen", True)]),
         )
-        score =
-
+        score = score_wrapper_sproc(
+            session,
             query,
             stage_score_file_name,
             identifier.get_unescaped_names(self.input_cols),
             identifier.get_unescaped_names(self.label_cols),
             identifier.get_unescaped_names(self.sample_weight_col),
-            statement_params
+            statement_params,
         )
 
         cleanup_temp_files([local_score_file_name])
@@ -1236,18 +1259,20 @@ class NeighborhoodComponentsAnalysis(BaseTransformer):
         if self._sklearn_object._estimator_type == 'classifier':
             outputs = _infer_signature(dataset[self.label_cols], "output") # label columns is the desired type for output
             outputs = _rename_features(outputs, self.output_cols) # rename the output columns
-            self._model_signature_dict["predict"] = ModelSignature(inputs,
+            self._model_signature_dict["predict"] = ModelSignature(inputs,
+                                                                   ([] if self._drop_input_cols else inputs) + outputs)
         # For regressor, the type of predict is float64
         elif self._sklearn_object._estimator_type == 'regressor':
             outputs = [FeatureSpec(dtype=DataType.DOUBLE, name=c) for c in self.output_cols]
-            self._model_signature_dict["predict"] = ModelSignature(inputs,
-
+            self._model_signature_dict["predict"] = ModelSignature(inputs,
+                                                                   ([] if self._drop_input_cols else inputs) + outputs)
         for prob_func in PROB_FUNCTIONS:
             if hasattr(self, prob_func):
                 output_cols_prefix: str = f"{prob_func}_"
                 output_column_names = self._get_output_column_names(output_cols_prefix)
                 outputs = [FeatureSpec(dtype=DataType.DOUBLE, name=c) for c in output_column_names]
-                self._model_signature_dict[prob_func] = ModelSignature(inputs,
+                self._model_signature_dict[prob_func] = ModelSignature(inputs,
+                                                                       ([] if self._drop_input_cols else inputs) + outputs)
 
     @property
     def model_signatures(self) -> Dict[str, ModelSignature]:
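A recurring change in the hunks above is the switch from `os.path.join` to `posixpath.join` when building stage paths. Snowflake stage paths always use forward slashes, while `os.path.join` follows the client operating system's separator, so joining with `os.path` on a Windows client would produce backslash-separated paths. A minimal standalone sketch of the difference (the stage and file names below are made up for illustration and are not produced by the package):

```python
# Illustrative only: contrast OS-dependent joining with the forward-slash
# joining that stage paths need, without touching any Snowflake APIs.
import ntpath      # behaves like os.path does on a Windows client
import posixpath   # always joins with "/"

stage_name = "SNOWML_TRANSFORM_ABC123"  # hypothetical temporary stage name
file_name = "model.pkl.zip"             # hypothetical uploaded artifact name

print(ntpath.join(stage_name, file_name))     # SNOWML_TRANSFORM_ABC123\model.pkl.zip
print(posixpath.join(stage_name, file_name))  # SNOWML_TRANSFORM_ABC123/model.pkl.zip
```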
--- a/snowflake/ml/modeling/neighbors/radius_neighbors_classifier.py
+++ b/snowflake/ml/modeling/neighbors/radius_neighbors_classifier.py
@@ -7,6 +7,7 @@
 #
 import inspect
 import os
+import posixpath
 from typing import Iterable, Optional, Union, List, Any, Dict, Callable, Set
 from uuid import uuid4
 
@@ -27,6 +28,7 @@ from snowflake.ml._internal.utils.temp_file_utils import cleanup_temp_files, get
 from snowflake.snowpark import DataFrame, Session
 from snowflake.snowpark.functions import pandas_udf, sproc
 from snowflake.snowpark.types import PandasSeries
+from snowflake.snowpark._internal.type_utils import convert_sp_to_sf_type
 
 from snowflake.ml.model.model_signature import (
     DataType,
@@ -264,7 +266,6 @@ class RadiusNeighborsClassifier(BaseTransformer):
         sample_weight_col: Optional[str] = None,
     ) -> None:
         super().__init__()
-        self.id = str(uuid4()).replace("-", "_").upper()
         deps: Set[str] = set([f'numpy=={np.__version__}', f'scikit-learn=={sklearn.__version__}', f'cloudpickle=={cp.__version__}'])
 
         self._deps = list(deps)
@@ -292,6 +293,15 @@ class RadiusNeighborsClassifier(BaseTransformer):
         self.set_drop_input_cols(drop_input_cols)
         self.set_sample_weight_col(sample_weight_col)
 
+    def _get_rand_id(self) -> str:
+        """
+        Generate random id to be used in sproc and stage names.
+
+        Returns:
+            Random id string usable in sproc, table, and stage names.
+        """
+        return str(uuid4()).replace("-", "_").upper()
+
     def _infer_input_output_cols(self, dataset: Union[DataFrame, pd.DataFrame]) -> None:
         """
         Infer `self.input_cols` and `self.output_cols` if they are not explicitly set.
@@ -370,7 +380,7 @@ class RadiusNeighborsClassifier(BaseTransformer):
             cp.dump(self._sklearn_object, local_transform_file)
 
         # Create temp stage to run fit.
-        transform_stage_name = "SNOWML_TRANSFORM_{safe_id}".format(safe_id=self.
+        transform_stage_name = "SNOWML_TRANSFORM_{safe_id}".format(safe_id=self._get_rand_id())
         stage_creation_query = f"CREATE OR REPLACE TEMPORARY STAGE {transform_stage_name};"
         SqlResultValidator(
             session=session,
@@ -383,11 +393,12 @@ class RadiusNeighborsClassifier(BaseTransformer):
             expected_value=f"Stage area {transform_stage_name} successfully created."
         ).validate()
 
-
+        # Use posixpath to construct stage paths
+        stage_transform_file_name = posixpath.join(transform_stage_name, os.path.basename(local_transform_file_name))
+        stage_result_file_name = posixpath.join(transform_stage_name, os.path.basename(local_transform_file_name))
         local_result_file_name = get_temp_file_path()
-        stage_result_file_name = os.path.join(transform_stage_name, os.path.basename(local_transform_file_name))
 
-        fit_sproc_name = "SNOWML_FIT_{safe_id}".format(safe_id=self.
+        fit_sproc_name = "SNOWML_FIT_{safe_id}".format(safe_id=self._get_rand_id())
         statement_params = telemetry.get_function_usage_statement_params(
             project=_PROJECT,
             subproject=_SUBPROJECT,
@@ -413,6 +424,7 @@ class RadiusNeighborsClassifier(BaseTransformer):
             replace=True,
             session=session,
             statement_params=statement_params,
+            anonymous=True
         )
         def fit_wrapper_sproc(
             session: Session,
@@ -421,7 +433,8 @@ class RadiusNeighborsClassifier(BaseTransformer):
             stage_result_file_name: str,
             input_cols: List[str],
             label_cols: List[str],
-            sample_weight_col: Optional[str]
+            sample_weight_col: Optional[str],
+            statement_params: Dict[str, str]
         ) -> str:
             import cloudpickle as cp
             import numpy as np
@@ -488,15 +501,15 @@ class RadiusNeighborsClassifier(BaseTransformer):
             api_calls=[Session.call],
             custom_tags=dict([("autogen", True)]),
         )
-        sproc_export_file_name =
-
+        sproc_export_file_name = fit_wrapper_sproc(
+            session,
             query,
             stage_transform_file_name,
             stage_result_file_name,
             identifier.get_unescaped_names(self.input_cols),
             identifier.get_unescaped_names(self.label_cols),
             identifier.get_unescaped_names(self.sample_weight_col),
-            statement_params
+            statement_params,
         )
 
         if "|" in sproc_export_file_name:
@@ -506,7 +519,7 @@ class RadiusNeighborsClassifier(BaseTransformer):
             print("\n".join(fields[1:]))
 
         session.file.get(
-
+            posixpath.join(stage_result_file_name, sproc_export_file_name),
             local_result_file_name,
             statement_params=statement_params
         )
@@ -552,7 +565,7 @@ class RadiusNeighborsClassifier(BaseTransformer):
 
         # Register vectorized UDF for batch inference
        batch_inference_udf_name = "SNOWML_BATCH_INFERENCE_{safe_id}_{method}".format(
-            safe_id=self.
+            safe_id=self._get_rand_id(), method=inference_method)
 
         # Need to do this since if we use self._sklearn_object directly in the UDF, Snowpark
         # will try to pickle all of self which fails.
@@ -644,7 +657,7 @@ class RadiusNeighborsClassifier(BaseTransformer):
             return transformed_pandas_df.to_dict("records")
 
         batch_inference_table_name = "SNOWML_BATCH_INFERENCE_INPUT_TABLE_{safe_id}".format(
-            safe_id=self.
+            safe_id=self._get_rand_id()
         )
 
         pass_through_columns = self._get_pass_through_columns(dataset)
@@ -811,11 +824,18 @@ class RadiusNeighborsClassifier(BaseTransformer):
             Transformed dataset.
         """
         if isinstance(dataset, DataFrame):
+            expected_type_inferred = ""
+            # when it is classifier, infer the datatype from label columns
+            if expected_type_inferred == "" and 'predict' in self.model_signatures:
+                expected_type_inferred = convert_sp_to_sf_type(
+                    self.model_signatures['predict'].outputs[0].as_snowpark_type()
+                )
+
             output_df = self._batch_inference(
                 dataset=dataset,
                 inference_method="predict",
                 expected_output_cols_list=self.output_cols,
-                expected_output_cols_type=
+                expected_output_cols_type=expected_type_inferred,
             )
         elif isinstance(dataset, pd.DataFrame):
             output_df = self._sklearn_inference(
@@ -886,10 +906,10 @@ class RadiusNeighborsClassifier(BaseTransformer):
 
     def _get_output_column_names(self, output_cols_prefix: str) -> List[str]:
         """ Returns the list of output columns for predict_proba(), decision_function(), etc.. functions.
-        Returns
+        Returns a list with output_cols_prefix as the only element if the estimator is not a classifier.
         """
         if getattr(self._sklearn_object, "classes_", None) is None:
-            return []
+            return [output_cols_prefix]
 
         classes = self._sklearn_object.classes_
         if isinstance(classes, numpy.ndarray):
@@ -1118,7 +1138,7 @@ class RadiusNeighborsClassifier(BaseTransformer):
             cp.dump(self._sklearn_object, local_score_file)
 
         # Create temp stage to run score.
-        score_stage_name = "SNOWML_SCORE_{safe_id}".format(safe_id=self.
+        score_stage_name = "SNOWML_SCORE_{safe_id}".format(safe_id=self._get_rand_id())
         session = dataset._session
         stage_creation_query = f"CREATE OR REPLACE TEMPORARY STAGE {score_stage_name};"
         SqlResultValidator(
@@ -1132,8 +1152,9 @@ class RadiusNeighborsClassifier(BaseTransformer):
             expected_value=f"Stage area {score_stage_name} successfully created."
         ).validate()
 
-
-
+        # Use posixpath to construct stage paths
+        stage_score_file_name = posixpath.join(score_stage_name, os.path.basename(local_score_file_name))
+        score_sproc_name = "SNOWML_SCORE_{safe_id}".format(safe_id=self._get_rand_id())
         statement_params = telemetry.get_function_usage_statement_params(
             project=_PROJECT,
             subproject=_SUBPROJECT,
@@ -1159,6 +1180,7 @@ class RadiusNeighborsClassifier(BaseTransformer):
             replace=True,
             session=session,
             statement_params=statement_params,
+            anonymous=True
         )
         def score_wrapper_sproc(
             session: Session,
@@ -1166,7 +1188,8 @@ class RadiusNeighborsClassifier(BaseTransformer):
             stage_score_file_name: str,
             input_cols: List[str],
             label_cols: List[str],
-            sample_weight_col: Optional[str]
+            sample_weight_col: Optional[str],
+            statement_params: Dict[str, str]
         ) -> float:
             import cloudpickle as cp
             import numpy as np
@@ -1216,14 +1239,14 @@ class RadiusNeighborsClassifier(BaseTransformer):
             api_calls=[Session.call],
             custom_tags=dict([("autogen", True)]),
         )
-        score =
-
+        score = score_wrapper_sproc(
+            session,
             query,
             stage_score_file_name,
             identifier.get_unescaped_names(self.input_cols),
             identifier.get_unescaped_names(self.label_cols),
             identifier.get_unescaped_names(self.sample_weight_col),
-            statement_params
+            statement_params,
        )
 
         cleanup_temp_files([local_score_file_name])
@@ -1241,18 +1264,20 @@ class RadiusNeighborsClassifier(BaseTransformer):
         if self._sklearn_object._estimator_type == 'classifier':
             outputs = _infer_signature(dataset[self.label_cols], "output") # label columns is the desired type for output
             outputs = _rename_features(outputs, self.output_cols) # rename the output columns
-            self._model_signature_dict["predict"] = ModelSignature(inputs,
+            self._model_signature_dict["predict"] = ModelSignature(inputs,
+                                                                   ([] if self._drop_input_cols else inputs) + outputs)
         # For regressor, the type of predict is float64
         elif self._sklearn_object._estimator_type == 'regressor':
             outputs = [FeatureSpec(dtype=DataType.DOUBLE, name=c) for c in self.output_cols]
-            self._model_signature_dict["predict"] = ModelSignature(inputs,
-
+            self._model_signature_dict["predict"] = ModelSignature(inputs,
+                                                                   ([] if self._drop_input_cols else inputs) + outputs)
         for prob_func in PROB_FUNCTIONS:
             if hasattr(self, prob_func):
                 output_cols_prefix: str = f"{prob_func}_"
                 output_column_names = self._get_output_column_names(output_cols_prefix)
                 outputs = [FeatureSpec(dtype=DataType.DOUBLE, name=c) for c in output_column_names]
-                self._model_signature_dict[prob_func] = ModelSignature(inputs,
+                self._model_signature_dict[prob_func] = ModelSignature(inputs,
+                                                                       ([] if self._drop_input_cols else inputs) + outputs)
 
     @property
     def model_signatures(self) -> Dict[str, ModelSignature]:
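The same pattern repeats across both files above and the other `+51 -26` modeling wrappers in the file list: the fixed `self.id` assigned in `__init__` is removed, a `_get_rand_id()` helper is called separately for each temporary stage, sproc, and table name, and `statement_params` is now passed through to the wrapper sprocs. A small self-contained sketch of the per-call naming pattern (the helper and name templates mirror the diff, but are shown outside the class purely for illustration):

```python
# Sketch of per-call random naming, mirroring the _get_rand_id() helper added
# in 1.0.3: every call yields a fresh identifier-safe suffix, so repeated
# fit()/score() calls no longer reuse one id stored on the estimator instance.
from uuid import uuid4


def get_rand_id() -> str:
    """Random id string usable in sproc, table, and stage names."""
    return str(uuid4()).replace("-", "_").upper()


transform_stage_name = "SNOWML_TRANSFORM_{safe_id}".format(safe_id=get_rand_id())
fit_sproc_name = "SNOWML_FIT_{safe_id}".format(safe_id=get_rand_id())
print(transform_stage_name)
print(fit_sproc_name)
```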