snowflake-ml-python 1.4.0__py3-none-any.whl → 1.4.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- snowflake/ml/_internal/env_utils.py +11 -1
- snowflake/ml/_internal/utils/identifier.py +3 -1
- snowflake/ml/_internal/utils/sql_identifier.py +2 -6
- snowflake/ml/feature_store/feature_store.py +151 -78
- snowflake/ml/feature_store/feature_view.py +12 -24
- snowflake/ml/fileset/sfcfs.py +56 -50
- snowflake/ml/fileset/stage_fs.py +48 -13
- snowflake/ml/model/_client/model/model_version_impl.py +2 -50
- snowflake/ml/model/_client/ops/model_ops.py +78 -29
- snowflake/ml/model/_client/sql/model.py +23 -2
- snowflake/ml/model/_client/sql/model_version.py +22 -1
- snowflake/ml/model/_model_composer/model_manifest/model_manifest.py +19 -54
- snowflake/ml/model/_model_composer/model_manifest/model_manifest_schema.py +8 -1
- snowflake/ml/model/_model_composer/model_method/model_method.py +6 -10
- snowflake/ml/model/_packager/model_handlers/catboost.py +206 -0
- snowflake/ml/model/_packager/model_handlers/lightgbm.py +218 -0
- snowflake/ml/model/_packager/model_handlers/sklearn.py +3 -0
- snowflake/ml/model/_packager/model_meta/_core_requirements.py +1 -1
- snowflake/ml/model/_packager/model_meta/model_meta.py +36 -6
- snowflake/ml/model/_packager/model_meta/model_meta_schema.py +20 -1
- snowflake/ml/model/_packager/model_meta_migrator/migrator_plans.py +3 -1
- snowflake/ml/model/_packager/model_packager.py +2 -2
- snowflake/ml/model/{_model_composer/model_runtime/_runtime_requirements.py → _packager/model_runtime/_snowml_inference_alternative_requirements.py} +1 -1
- snowflake/ml/model/_packager/model_runtime/model_runtime.py +137 -0
- snowflake/ml/model/type_hints.py +21 -2
- snowflake/ml/modeling/_internal/estimator_utils.py +16 -11
- snowflake/ml/modeling/_internal/local_implementations/pandas_handlers.py +4 -1
- snowflake/ml/modeling/_internal/snowpark_implementations/distributed_hpo_trainer.py +13 -14
- snowflake/ml/modeling/_internal/snowpark_implementations/snowpark_handlers.py +5 -5
- snowflake/ml/modeling/calibration/calibrated_classifier_cv.py +195 -123
- snowflake/ml/modeling/cluster/affinity_propagation.py +195 -123
- snowflake/ml/modeling/cluster/agglomerative_clustering.py +195 -123
- snowflake/ml/modeling/cluster/birch.py +195 -123
- snowflake/ml/modeling/cluster/bisecting_k_means.py +195 -123
- snowflake/ml/modeling/cluster/dbscan.py +195 -123
- snowflake/ml/modeling/cluster/feature_agglomeration.py +195 -123
- snowflake/ml/modeling/cluster/k_means.py +195 -123
- snowflake/ml/modeling/cluster/mean_shift.py +195 -123
- snowflake/ml/modeling/cluster/mini_batch_k_means.py +195 -123
- snowflake/ml/modeling/cluster/optics.py +195 -123
- snowflake/ml/modeling/cluster/spectral_biclustering.py +195 -123
- snowflake/ml/modeling/cluster/spectral_clustering.py +195 -123
- snowflake/ml/modeling/cluster/spectral_coclustering.py +195 -123
- snowflake/ml/modeling/compose/column_transformer.py +195 -123
- snowflake/ml/modeling/compose/transformed_target_regressor.py +195 -123
- snowflake/ml/modeling/covariance/elliptic_envelope.py +195 -123
- snowflake/ml/modeling/covariance/empirical_covariance.py +195 -123
- snowflake/ml/modeling/covariance/graphical_lasso.py +195 -123
- snowflake/ml/modeling/covariance/graphical_lasso_cv.py +195 -123
- snowflake/ml/modeling/covariance/ledoit_wolf.py +195 -123
- snowflake/ml/modeling/covariance/min_cov_det.py +195 -123
- snowflake/ml/modeling/covariance/oas.py +195 -123
- snowflake/ml/modeling/covariance/shrunk_covariance.py +195 -123
- snowflake/ml/modeling/decomposition/dictionary_learning.py +195 -123
- snowflake/ml/modeling/decomposition/factor_analysis.py +195 -123
- snowflake/ml/modeling/decomposition/fast_ica.py +195 -123
- snowflake/ml/modeling/decomposition/incremental_pca.py +195 -123
- snowflake/ml/modeling/decomposition/kernel_pca.py +195 -123
- snowflake/ml/modeling/decomposition/mini_batch_dictionary_learning.py +195 -123
- snowflake/ml/modeling/decomposition/mini_batch_sparse_pca.py +195 -123
- snowflake/ml/modeling/decomposition/pca.py +195 -123
- snowflake/ml/modeling/decomposition/sparse_pca.py +195 -123
- snowflake/ml/modeling/decomposition/truncated_svd.py +195 -123
- snowflake/ml/modeling/discriminant_analysis/linear_discriminant_analysis.py +195 -123
- snowflake/ml/modeling/discriminant_analysis/quadratic_discriminant_analysis.py +195 -123
- snowflake/ml/modeling/ensemble/ada_boost_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/ada_boost_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/bagging_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/bagging_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/extra_trees_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/extra_trees_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/gradient_boosting_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/gradient_boosting_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/hist_gradient_boosting_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/hist_gradient_boosting_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/isolation_forest.py +195 -123
- snowflake/ml/modeling/ensemble/random_forest_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/random_forest_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/stacking_regressor.py +195 -123
- snowflake/ml/modeling/ensemble/voting_classifier.py +195 -123
- snowflake/ml/modeling/ensemble/voting_regressor.py +195 -123
- snowflake/ml/modeling/feature_selection/generic_univariate_select.py +195 -123
- snowflake/ml/modeling/feature_selection/select_fdr.py +195 -123
- snowflake/ml/modeling/feature_selection/select_fpr.py +195 -123
- snowflake/ml/modeling/feature_selection/select_fwe.py +195 -123
- snowflake/ml/modeling/feature_selection/select_k_best.py +195 -123
- snowflake/ml/modeling/feature_selection/select_percentile.py +195 -123
- snowflake/ml/modeling/feature_selection/sequential_feature_selector.py +195 -123
- snowflake/ml/modeling/feature_selection/variance_threshold.py +195 -123
- snowflake/ml/modeling/framework/_utils.py +8 -1
- snowflake/ml/modeling/framework/base.py +9 -1
- snowflake/ml/modeling/gaussian_process/gaussian_process_classifier.py +195 -123
- snowflake/ml/modeling/gaussian_process/gaussian_process_regressor.py +195 -123
- snowflake/ml/modeling/impute/iterative_imputer.py +195 -123
- snowflake/ml/modeling/impute/knn_imputer.py +195 -123
- snowflake/ml/modeling/impute/missing_indicator.py +195 -123
- snowflake/ml/modeling/kernel_approximation/additive_chi2_sampler.py +195 -123
- snowflake/ml/modeling/kernel_approximation/nystroem.py +195 -123
- snowflake/ml/modeling/kernel_approximation/polynomial_count_sketch.py +195 -123
- snowflake/ml/modeling/kernel_approximation/rbf_sampler.py +195 -123
- snowflake/ml/modeling/kernel_approximation/skewed_chi2_sampler.py +195 -123
- snowflake/ml/modeling/kernel_ridge/kernel_ridge.py +195 -123
- snowflake/ml/modeling/lightgbm/lgbm_classifier.py +195 -123
- snowflake/ml/modeling/lightgbm/lgbm_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/ard_regression.py +195 -123
- snowflake/ml/modeling/linear_model/bayesian_ridge.py +195 -123
- snowflake/ml/modeling/linear_model/elastic_net.py +195 -123
- snowflake/ml/modeling/linear_model/elastic_net_cv.py +195 -123
- snowflake/ml/modeling/linear_model/gamma_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/huber_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/lars.py +195 -123
- snowflake/ml/modeling/linear_model/lars_cv.py +195 -123
- snowflake/ml/modeling/linear_model/lasso.py +195 -123
- snowflake/ml/modeling/linear_model/lasso_cv.py +195 -123
- snowflake/ml/modeling/linear_model/lasso_lars.py +195 -123
- snowflake/ml/modeling/linear_model/lasso_lars_cv.py +195 -123
- snowflake/ml/modeling/linear_model/lasso_lars_ic.py +195 -123
- snowflake/ml/modeling/linear_model/linear_regression.py +195 -123
- snowflake/ml/modeling/linear_model/logistic_regression.py +195 -123
- snowflake/ml/modeling/linear_model/logistic_regression_cv.py +195 -123
- snowflake/ml/modeling/linear_model/multi_task_elastic_net.py +195 -123
- snowflake/ml/modeling/linear_model/multi_task_elastic_net_cv.py +195 -123
- snowflake/ml/modeling/linear_model/multi_task_lasso.py +195 -123
- snowflake/ml/modeling/linear_model/multi_task_lasso_cv.py +195 -123
- snowflake/ml/modeling/linear_model/orthogonal_matching_pursuit.py +195 -123
- snowflake/ml/modeling/linear_model/passive_aggressive_classifier.py +195 -123
- snowflake/ml/modeling/linear_model/passive_aggressive_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/perceptron.py +195 -123
- snowflake/ml/modeling/linear_model/poisson_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/ransac_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/ridge.py +195 -123
- snowflake/ml/modeling/linear_model/ridge_classifier.py +195 -123
- snowflake/ml/modeling/linear_model/ridge_classifier_cv.py +195 -123
- snowflake/ml/modeling/linear_model/ridge_cv.py +195 -123
- snowflake/ml/modeling/linear_model/sgd_classifier.py +195 -123
- snowflake/ml/modeling/linear_model/sgd_one_class_svm.py +195 -123
- snowflake/ml/modeling/linear_model/sgd_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/theil_sen_regressor.py +195 -123
- snowflake/ml/modeling/linear_model/tweedie_regressor.py +195 -123
- snowflake/ml/modeling/manifold/isomap.py +195 -123
- snowflake/ml/modeling/manifold/mds.py +195 -123
- snowflake/ml/modeling/manifold/spectral_embedding.py +195 -123
- snowflake/ml/modeling/manifold/tsne.py +195 -123
- snowflake/ml/modeling/mixture/bayesian_gaussian_mixture.py +195 -123
- snowflake/ml/modeling/mixture/gaussian_mixture.py +195 -123
- snowflake/ml/modeling/model_selection/grid_search_cv.py +42 -18
- snowflake/ml/modeling/model_selection/randomized_search_cv.py +42 -18
- snowflake/ml/modeling/multiclass/one_vs_one_classifier.py +195 -123
- snowflake/ml/modeling/multiclass/one_vs_rest_classifier.py +195 -123
- snowflake/ml/modeling/multiclass/output_code_classifier.py +195 -123
- snowflake/ml/modeling/naive_bayes/bernoulli_nb.py +195 -123
- snowflake/ml/modeling/naive_bayes/categorical_nb.py +195 -123
- snowflake/ml/modeling/naive_bayes/complement_nb.py +195 -123
- snowflake/ml/modeling/naive_bayes/gaussian_nb.py +195 -123
- snowflake/ml/modeling/naive_bayes/multinomial_nb.py +195 -123
- snowflake/ml/modeling/neighbors/k_neighbors_classifier.py +195 -123
- snowflake/ml/modeling/neighbors/k_neighbors_regressor.py +195 -123
- snowflake/ml/modeling/neighbors/kernel_density.py +195 -123
- snowflake/ml/modeling/neighbors/local_outlier_factor.py +195 -123
- snowflake/ml/modeling/neighbors/nearest_centroid.py +195 -123
- snowflake/ml/modeling/neighbors/nearest_neighbors.py +195 -123
- snowflake/ml/modeling/neighbors/neighborhood_components_analysis.py +195 -123
- snowflake/ml/modeling/neighbors/radius_neighbors_classifier.py +195 -123
- snowflake/ml/modeling/neighbors/radius_neighbors_regressor.py +195 -123
- snowflake/ml/modeling/neural_network/bernoulli_rbm.py +195 -123
- snowflake/ml/modeling/neural_network/mlp_classifier.py +195 -123
- snowflake/ml/modeling/neural_network/mlp_regressor.py +195 -123
- snowflake/ml/modeling/pipeline/pipeline.py +4 -4
- snowflake/ml/modeling/preprocessing/binarizer.py +1 -5
- snowflake/ml/modeling/preprocessing/k_bins_discretizer.py +1 -5
- snowflake/ml/modeling/preprocessing/label_encoder.py +1 -5
- snowflake/ml/modeling/preprocessing/max_abs_scaler.py +1 -5
- snowflake/ml/modeling/preprocessing/min_max_scaler.py +10 -12
- snowflake/ml/modeling/preprocessing/normalizer.py +1 -5
- snowflake/ml/modeling/preprocessing/one_hot_encoder.py +1 -5
- snowflake/ml/modeling/preprocessing/ordinal_encoder.py +1 -5
- snowflake/ml/modeling/preprocessing/polynomial_features.py +195 -123
- snowflake/ml/modeling/preprocessing/robust_scaler.py +1 -5
- snowflake/ml/modeling/preprocessing/standard_scaler.py +11 -11
- snowflake/ml/modeling/semi_supervised/label_propagation.py +195 -123
- snowflake/ml/modeling/semi_supervised/label_spreading.py +195 -123
- snowflake/ml/modeling/svm/linear_svc.py +195 -123
- snowflake/ml/modeling/svm/linear_svr.py +195 -123
- snowflake/ml/modeling/svm/nu_svc.py +195 -123
- snowflake/ml/modeling/svm/nu_svr.py +195 -123
- snowflake/ml/modeling/svm/svc.py +195 -123
- snowflake/ml/modeling/svm/svr.py +195 -123
- snowflake/ml/modeling/tree/decision_tree_classifier.py +195 -123
- snowflake/ml/modeling/tree/decision_tree_regressor.py +195 -123
- snowflake/ml/modeling/tree/extra_tree_classifier.py +195 -123
- snowflake/ml/modeling/tree/extra_tree_regressor.py +195 -123
- snowflake/ml/modeling/xgboost/xgb_classifier.py +195 -123
- snowflake/ml/modeling/xgboost/xgb_regressor.py +195 -123
- snowflake/ml/modeling/xgboost/xgbrf_classifier.py +195 -123
- snowflake/ml/modeling/xgboost/xgbrf_regressor.py +195 -123
- snowflake/ml/registry/registry.py +1 -1
- snowflake/ml/version.py +1 -1
- {snowflake_ml_python-1.4.0.dist-info → snowflake_ml_python-1.4.1.dist-info}/METADATA +68 -57
- {snowflake_ml_python-1.4.0.dist-info → snowflake_ml_python-1.4.1.dist-info}/RECORD +202 -200
- snowflake/ml/model/_model_composer/model_runtime/model_runtime.py +0 -97
- {snowflake_ml_python-1.4.0.dist-info → snowflake_ml_python-1.4.1.dist-info}/LICENSE.txt +0 -0
- {snowflake_ml_python-1.4.0.dist-info → snowflake_ml_python-1.4.1.dist-info}/WHEEL +0 -0
- {snowflake_ml_python-1.4.0.dist-info → snowflake_ml_python-1.4.1.dist-info}/top_level.txt +0 -0
snowflake/ml/modeling/xgboost/xgbrf_regressor.py
CHANGED
@@ -32,6 +32,15 @@ from snowflake.ml.modeling._internal.transformer_protocols import (
     BatchInferenceKwargsTypedDict,
     ScoreKwargsTypedDict
 )
+from snowflake.ml.model._signatures import utils as model_signature_utils
+from snowflake.ml.model.model_signature import (
+    BaseFeatureSpec,
+    DataType,
+    FeatureSpec,
+    ModelSignature,
+    _infer_signature,
+    _rename_signature_with_snowflake_identifiers,
+)
 
 from snowflake.ml.modeling._internal.model_transformer_builder import ModelTransformerBuilder
 
@@ -42,16 +51,6 @@ from snowflake.ml.modeling._internal.estimator_utils import (
     validate_sklearn_args,
 )
 
-from snowflake.ml.model.model_signature import (
-    DataType,
-    FeatureSpec,
-    ModelSignature,
-    _infer_signature,
-    _rename_signature_with_snowflake_identifiers,
-    BaseFeatureSpec,
-)
-from snowflake.ml.model._signatures import utils as model_signature_utils
-
 _PROJECT = "ModelDevelopment"
 # Derive subproject from module name by removing "sklearn"
 # and converting module name from underscore to CamelCase
@@ -426,12 +425,7 @@ class XGBRFRegressor(BaseTransformer):
         )
         return selected_cols
 
-    @telemetry.send_api_usage_telemetry(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
-    def fit(self, dataset: Union[DataFrame, pd.DataFrame]) -> "XGBRFRegressor":
+    def _fit(self, dataset: Union[DataFrame, pd.DataFrame]) -> "XGBRFRegressor":
         """Fit gradient boosting model
         For more details on this function, see [xgboost.XGBRFRegressor.fit]
         (https://xgboost.readthedocs.io/en/stable/python/python_api.html#xgboost.XGBRFRegressor.fit)
@@ -458,12 +452,14 @@ class XGBRFRegressor(BaseTransformer):
 
         self._snowpark_cols = dataset.select(self.input_cols).columns
 
-
+        # If we are already in a stored procedure, no need to kick off another one.
         if SNOWML_SPROC_ENV in os.environ:
             statement_params = telemetry.get_function_usage_statement_params(
                 project=_PROJECT,
                 subproject=_SUBPROJECT,
-                function_name=telemetry.get_statement_params_full_func_name(inspect.currentframe(), XGBRFRegressor.__class__.__name__),
+                function_name=telemetry.get_statement_params_full_func_name(
+                    inspect.currentframe(), XGBRFRegressor.__class__.__name__
+                ),
                 api_calls=[Session.call],
                 custom_tags=dict([("autogen", True)]) if self._autogenerated else None,
             )
@@ -484,7 +480,7 @@ class XGBRFRegressor(BaseTransformer):
         )
         self._sklearn_object = model_trainer.train()
         self._is_fitted = True
-        self.
+        self._generate_model_signatures(dataset)
         return self
 
     def _batch_inference_validate_snowpark(
@@ -560,7 +556,9 @@ class XGBRFRegressor(BaseTransformer):
             # when it is classifier, infer the datatype from label columns
             if expected_type_inferred == "" and 'predict' in self.model_signatures:
                 # Batch inference takes a single expected output column type. Use the first columns type for now.
-                label_cols_signatures = [row for row in self.model_signatures['predict'].outputs if row.name in self.output_cols]
+                label_cols_signatures = [
+                    row for row in self.model_signatures['predict'].outputs if row.name in self.output_cols
+                ]
                 if len(label_cols_signatures) == 0:
                     error_str = f"Output columns {self.output_cols} do not match model signatures {self.model_signatures['predict'].outputs}."
                     raise exceptions.SnowflakeMLException(
@@ -568,25 +566,22 @@ class XGBRFRegressor(BaseTransformer):
                         original_exception=ValueError(error_str),
                     )
 
-                expected_type_inferred = convert_sp_to_sf_type(
-                    label_cols_signatures[0].as_snowpark_type()
-                )
+                expected_type_inferred = convert_sp_to_sf_type(label_cols_signatures[0].as_snowpark_type())
 
             self._deps = self._batch_inference_validate_snowpark(dataset=dataset, inference_method=inference_method)
-            assert isinstance(dataset._session, Session) # mypy does not recognize the check in _batch_inference_validate_snowpark()
+            assert isinstance(
+                dataset._session, Session
+            ) # mypy does not recognize the check in _batch_inference_validate_snowpark()
 
             transform_kwargs = dict(
-                session = dataset._session,
-                dependencies = self._deps,
-                drop_input_cols = self._drop_input_cols,
-                expected_output_cols_type = expected_type_inferred,
+                session=dataset._session,
+                dependencies=self._deps,
+                drop_input_cols=self._drop_input_cols,
+                expected_output_cols_type=expected_type_inferred,
             )
 
         elif isinstance(dataset, pd.DataFrame):
-            transform_kwargs = dict(
-                snowpark_input_cols = self._snowpark_cols,
-                drop_input_cols = self._drop_input_cols
-            )
+            transform_kwargs = dict(snowpark_input_cols=self._snowpark_cols, drop_input_cols=self._drop_input_cols)
 
         transform_handlers = ModelTransformerBuilder.build(
             dataset=dataset,
@@ -626,7 +621,7 @@ class XGBRFRegressor(BaseTransformer):
             Transformed dataset.
         """
         super()._check_dataset_type(dataset)
-        inference_method="transform"
+        inference_method = "transform"
 
         # This dictionary contains optional kwargs for batch inference. These kwargs
         # are specific to the type of dataset used.
@@ -663,17 +658,14 @@ class XGBRFRegressor(BaseTransformer):
             assert isinstance(dataset._session, Session) # mypy does not recognize the check in _batch_inference_validate_snowpark()
 
             transform_kwargs = dict(
-                session = dataset._session,
-                dependencies = self._deps,
-                drop_input_cols = self._drop_input_cols,
-                expected_output_cols_type = expected_dtype,
+                session=dataset._session,
+                dependencies=self._deps,
+                drop_input_cols=self._drop_input_cols,
+                expected_output_cols_type=expected_dtype,
             )
 
         elif isinstance(dataset, pd.DataFrame):
-            transform_kwargs = dict(
-                snowpark_input_cols = self._snowpark_cols,
-                drop_input_cols = self._drop_input_cols
-            )
+            transform_kwargs = dict(snowpark_input_cols=self._snowpark_cols, drop_input_cols=self._drop_input_cols)
 
         transform_handlers = ModelTransformerBuilder.build(
             dataset=dataset,
@@ -692,7 +684,11 @@ class XGBRFRegressor(BaseTransformer):
         return output_df
 
     @available_if(original_estimator_has_callable("fit_predict")) # type: ignore[misc]
-    def fit_predict(self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "fit_predict_",) -> Union[DataFrame, pd.DataFrame]:
+    def fit_predict(
+        self,
+        dataset: Union[DataFrame, pd.DataFrame],
+        output_cols_prefix: str = "fit_predict_",
+    ) -> Union[DataFrame, pd.DataFrame]:
         """ Method not supported for this class.
 
 
@@ -717,7 +713,9 @@ class XGBRFRegressor(BaseTransformer):
         )
         output_result, fitted_estimator = model_trainer.train_fit_predict(
             drop_input_cols=self._drop_input_cols,
-            expected_output_cols_list=self.output_cols if self.output_cols else self._get_output_column_names(output_cols_prefix),
+            expected_output_cols_list=(
+                self.output_cols if self.output_cols else self._get_output_column_names(output_cols_prefix)
+            ),
         )
         self._sklearn_object = fitted_estimator
         self._is_fitted = True
@@ -734,6 +732,62 @@ class XGBRFRegressor(BaseTransformer):
         assert self._sklearn_object is not None
         return self._sklearn_object.embedding_
 
+
+    def _get_output_column_names(self, output_cols_prefix: str, output_cols: Optional[List[str]] = None) -> List[str]:
+        """ Returns the list of output columns for predict_proba(), decision_function(), etc.. functions.
+        Returns a list with output_cols_prefix as the only element if the estimator is not a classifier.
+        """
+        output_cols_prefix = identifier.resolve_identifier(output_cols_prefix)
+        # The following condition is introduced for kneighbors methods, and not used in other methods
+        if output_cols:
+            output_cols = [
+                identifier.concat_names([output_cols_prefix, identifier.resolve_identifier(c)])
+                for c in output_cols
+            ]
+        elif getattr(self._sklearn_object, "classes_", None) is None:
+            output_cols = [output_cols_prefix]
+        elif self._sklearn_object is not None:
+            classes = self._sklearn_object.classes_
+            if isinstance(classes, numpy.ndarray):
+                output_cols = [f'{output_cols_prefix}{str(c)}' for c in classes.tolist()]
+            elif isinstance(classes, list) and len(classes) > 0 and isinstance(classes[0], numpy.ndarray):
+                # If the estimator is a multioutput estimator, classes_ will be a list of ndarrays.
+                output_cols = []
+                for i, cl in enumerate(classes):
+                    # For binary classification, there is only one output column for each class
+                    # ndarray as the two classes are complementary.
+                    if len(cl) == 2:
+                        output_cols.append(f'{output_cols_prefix}{i}_{cl[0]}')
+                    else:
+                        output_cols.extend([
+                            f'{output_cols_prefix}{i}_{c}' for c in cl.tolist()
+                        ])
+            else:
+                output_cols = []
+
+        # Make sure column names are valid snowflake identifiers.
+        assert output_cols is not None # Make MyPy happy
+        rv = [identifier.rename_to_valid_snowflake_identifier(c) for c in output_cols]
+
+        return rv
+
+    def _align_expected_output_names(
+        self, method: str, dataset: DataFrame, expected_output_cols_list: List[str], output_cols_prefix: str
+    ) -> List[str]:
+        # in case the inferred output column names dimension is different
+        # we use one line of snowpark dataframe and put it into sklearn estimator using pandas
+        output_df_pd = getattr(self, method)(dataset.limit(1).to_pandas(), output_cols_prefix)
+        output_df_columns = list(output_df_pd.columns)
+        output_df_columns_set: Set[str] = set(output_df_columns) - set(dataset.columns)
+        if self.sample_weight_col:
+            output_df_columns_set -= set(self.sample_weight_col)
+        # if the dimension of inferred output column names is correct; use it
+        if len(expected_output_cols_list) == len(output_df_columns_set):
+            return expected_output_cols_list
+        # otherwise, use the sklearn estimator's output
+        else:
+            return sorted(list(output_df_columns_set), key=lambda x: output_df_columns.index(x))
+
     @available_if(original_estimator_has_callable("predict_proba")) # type: ignore[misc]
     @telemetry.send_api_usage_telemetry(
         project=_PROJECT,
@@ -764,24 +818,28 @@ class XGBRFRegressor(BaseTransformer):
         # are specific to the type of dataset used.
         transform_kwargs: BatchInferenceKwargsTypedDict = dict()
 
+        expected_output_cols = self._get_output_column_names(output_cols_prefix)
+
         if isinstance(dataset, DataFrame):
             self._deps = self._batch_inference_validate_snowpark(
                 dataset=dataset,
                 inference_method=inference_method,
             )
-            assert isinstance(dataset._session, Session) # mypy does not recognize the check in _batch_inference_validate_snowpark()
+            assert isinstance(
+                dataset._session, Session
+            ) # mypy does not recognize the check in _batch_inference_validate_snowpark()
             transform_kwargs = dict(
                 session=dataset._session,
                 dependencies=self._deps,
-                drop_input_cols = self._drop_input_cols,
+                drop_input_cols=self._drop_input_cols,
                 expected_output_cols_type="float",
             )
+            expected_output_cols = self._align_expected_output_names(
+                inference_method, dataset, expected_output_cols, output_cols_prefix
+            )
 
         elif isinstance(dataset, pd.DataFrame):
-            transform_kwargs = dict(
-                snowpark_input_cols = self._snowpark_cols,
-                drop_input_cols = self._drop_input_cols
-            )
+            transform_kwargs = dict(snowpark_input_cols=self._snowpark_cols, drop_input_cols=self._drop_input_cols)
 
         transform_handlers = ModelTransformerBuilder.build(
             dataset=dataset,
@@ -793,7 +851,7 @@ class XGBRFRegressor(BaseTransformer):
         output_df: DATAFRAME_TYPE = transform_handlers.batch_inference(
             inference_method=inference_method,
             input_cols=self.input_cols,
-            expected_output_cols=
+            expected_output_cols=expected_output_cols,
             **transform_kwargs
         )
         return output_df
@@ -823,7 +881,8 @@ class XGBRFRegressor(BaseTransformer):
             Output dataset with log probability of the sample for each class in the model.
         """
         super()._check_dataset_type(dataset)
-        inference_method="predict_log_proba"
+        inference_method = "predict_log_proba"
+        expected_output_cols = self._get_output_column_names(output_cols_prefix)
 
         # This dictionary contains optional kwargs for batch inference. These kwargs
         # are specific to the type of dataset used.
@@ -834,18 +893,20 @@ class XGBRFRegressor(BaseTransformer):
                 dataset=dataset,
                 inference_method=inference_method,
             )
-            assert isinstance(dataset._session, Session) # mypy does not recognize the check in _batch_inference_validate_snowpark()
+            assert isinstance(
+                dataset._session, Session
+            ) # mypy does not recognize the check in _batch_inference_validate_snowpark()
             transform_kwargs = dict(
                 session=dataset._session,
                 dependencies=self._deps,
-                drop_input_cols = self._drop_input_cols,
+                drop_input_cols=self._drop_input_cols,
                 expected_output_cols_type="float",
             )
+            expected_output_cols = self._align_expected_output_names(
+                inference_method, dataset, expected_output_cols, output_cols_prefix
+            )
         elif isinstance(dataset, pd.DataFrame):
-            transform_kwargs = dict(
-                snowpark_input_cols = self._snowpark_cols,
-                drop_input_cols = self._drop_input_cols
-            )
+            transform_kwargs = dict(snowpark_input_cols=self._snowpark_cols, drop_input_cols=self._drop_input_cols)
 
         transform_handlers = ModelTransformerBuilder.build(
             dataset=dataset,
@@ -858,7 +919,7 @@ class XGBRFRegressor(BaseTransformer):
         output_df: DATAFRAME_TYPE = transform_handlers.batch_inference(
             inference_method=inference_method,
             input_cols=self.input_cols,
-            expected_output_cols=
+            expected_output_cols=expected_output_cols,
             **transform_kwargs
         )
         return output_df
@@ -884,30 +945,34 @@ class XGBRFRegressor(BaseTransformer):
             Output dataset with results of the decision function for the samples in input dataset.
         """
         super()._check_dataset_type(dataset)
-        inference_method="decision_function"
+        inference_method = "decision_function"
 
         # This dictionary contains optional kwargs for batch inference. These kwargs
         # are specific to the type of dataset used.
         transform_kwargs: BatchInferenceKwargsTypedDict = dict()
 
+        expected_output_cols = self._get_output_column_names(output_cols_prefix)
+
         if isinstance(dataset, DataFrame):
             self._deps = self._batch_inference_validate_snowpark(
                 dataset=dataset,
                 inference_method=inference_method,
             )
-            assert isinstance(dataset._session, Session) # mypy does not recognize the check in _batch_inference_validate_snowpark()
+            assert isinstance(
+                dataset._session, Session
+            ) # mypy does not recognize the check in _batch_inference_validate_snowpark()
             transform_kwargs = dict(
                 session=dataset._session,
                 dependencies=self._deps,
-                drop_input_cols = self._drop_input_cols,
+                drop_input_cols=self._drop_input_cols,
                 expected_output_cols_type="float",
            )
+            expected_output_cols = self._align_expected_output_names(
+                inference_method, dataset, expected_output_cols, output_cols_prefix
+            )
 
         elif isinstance(dataset, pd.DataFrame):
-            transform_kwargs = dict(
-                snowpark_input_cols = self._snowpark_cols,
-                drop_input_cols = self._drop_input_cols
-            )
+            transform_kwargs = dict(snowpark_input_cols=self._snowpark_cols, drop_input_cols=self._drop_input_cols)
 
         transform_handlers = ModelTransformerBuilder.build(
             dataset=dataset,
@@ -920,7 +985,7 @@ class XGBRFRegressor(BaseTransformer):
         output_df: DATAFRAME_TYPE = transform_handlers.batch_inference(
            inference_method=inference_method,
            input_cols=self.input_cols,
-            expected_output_cols=
+            expected_output_cols=expected_output_cols,
            **transform_kwargs
         )
         return output_df
@@ -949,12 +1014,14 @@ class XGBRFRegressor(BaseTransformer):
            Output dataset with probability of the sample for each class in the model.
         """
         super()._check_dataset_type(dataset)
-        inference_method="score_samples"
+        inference_method = "score_samples"
 
         # This dictionary contains optional kwargs for batch inference. These kwargs
         # are specific to the type of dataset used.
         transform_kwargs: BatchInferenceKwargsTypedDict = dict()
 
+        expected_output_cols = self._get_output_column_names(output_cols_prefix)
+
         if isinstance(dataset, DataFrame):
            self._deps = self._batch_inference_validate_snowpark(
                dataset=dataset,
@@ -967,6 +1034,9 @@ class XGBRFRegressor(BaseTransformer):
                drop_input_cols = self._drop_input_cols,
                expected_output_cols_type="float",
            )
+            expected_output_cols = self._align_expected_output_names(
+                inference_method, dataset, expected_output_cols, output_cols_prefix
+            )
 
         elif isinstance(dataset, pd.DataFrame):
            transform_kwargs = dict(
@@ -985,7 +1055,7 @@ class XGBRFRegressor(BaseTransformer):
         output_df: DATAFRAME_TYPE = transform_handlers.batch_inference(
            inference_method=inference_method,
            input_cols=self.input_cols,
-            expected_output_cols=
+            expected_output_cols=expected_output_cols,
            **transform_kwargs
         )
         return output_df
@@ -1132,50 +1202,84 @@ class XGBRFRegressor(BaseTransformer):
         )
         return output_df
 
+
+
+    def to_xgboost(self) -> Any:
+        """Get xgboost.XGBRFRegressor object.
+        """
+        if self._sklearn_object is None:
+            self._sklearn_object = self._create_sklearn_object()
+        return self._sklearn_object
+
+    def to_sklearn(self) -> Any:
+        raise exceptions.SnowflakeMLException(
+            error_code=error_codes.METHOD_NOT_ALLOWED,
+            original_exception=AttributeError(
+                modeling_error_messages.UNSUPPORTED_MODEL_CONVERSION.format(
+                    "to_sklearn()",
+                    "to_xgboost()"
+                )
+            ),
+        )
+
+    def to_lightgbm(self) -> Any:
+        raise exceptions.SnowflakeMLException(
+            error_code=error_codes.METHOD_NOT_ALLOWED,
+            original_exception=AttributeError(
+                modeling_error_messages.UNSUPPORTED_MODEL_CONVERSION.format(
+                    "to_lightgbm()",
+                    "to_xgboost()"
+                )
+            ),
+        )
 
-    def
+    def _get_dependencies(self) -> List[str]:
+        return self._deps
+
+
+    def _generate_model_signatures(self, dataset: Union[DataFrame, pd.DataFrame]) -> None:
         self._model_signature_dict = dict()
 
         PROB_FUNCTIONS = ["predict_log_proba", "predict_proba", "decision_function"]
 
-        inputs = list(_infer_signature(dataset[self.input_cols], "input"))
+        inputs = list(_infer_signature(dataset[self.input_cols], "input", use_snowflake_identifiers=True))
         outputs: List[BaseFeatureSpec] = []
         if hasattr(self, "predict"):
             # keep mypy happy
-            assert self._sklearn_object is not None and hasattr(self._sklearn_object, "_estimator_type")
+            assert self._sklearn_object is not None and hasattr(self._sklearn_object, "_estimator_type")
             # For classifier, the type of predict is the same as the type of label
-            if self._sklearn_object._estimator_type ==
-
+            if self._sklearn_object._estimator_type == "classifier":
+                # label columns is the desired type for output
                 outputs = list(_infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True))
                 # rename the output columns
                 outputs = list(model_signature_utils.rename_features(outputs, self.output_cols))
-                self._model_signature_dict["predict"] = ModelSignature(
-
-
+                self._model_signature_dict["predict"] = ModelSignature(
+                    inputs, ([] if self._drop_input_cols else inputs) + outputs
+                )
             # For mixture models that use the density mixin, `predict` returns the argmax of the log prob.
             # For outlier models, returns -1 for outliers and 1 for inliers.
-            # Clusterer returns int64 cluster labels.
+            # Clusterer returns int64 cluster labels.
             elif self._sklearn_object._estimator_type in ["DensityEstimator", "clusterer", "outlier_detector"]:
                 outputs = [FeatureSpec(dtype=DataType.INT64, name=c) for c in self.output_cols]
-                self._model_signature_dict["predict"] = ModelSignature(
-
-
-
+                self._model_signature_dict["predict"] = ModelSignature(
+                    inputs, ([] if self._drop_input_cols else inputs) + outputs
+                )
+
             # For regressor, the type of predict is float64
-            elif self._sklearn_object._estimator_type ==
+            elif self._sklearn_object._estimator_type == "regressor":
                 outputs = [FeatureSpec(dtype=DataType.DOUBLE, name=c) for c in self.output_cols]
-                self._model_signature_dict["predict"] = ModelSignature(
-
-
-
+                self._model_signature_dict["predict"] = ModelSignature(
+                    inputs, ([] if self._drop_input_cols else inputs) + outputs
+                )
+
         for prob_func in PROB_FUNCTIONS:
             if hasattr(self, prob_func):
                 output_cols_prefix: str = f"{prob_func}_"
                 output_column_names = self._get_output_column_names(output_cols_prefix)
                 outputs = [FeatureSpec(dtype=DataType.DOUBLE, name=c) for c in output_column_names]
-                self._model_signature_dict[prob_func] = ModelSignature(
-
-
+                self._model_signature_dict[prob_func] = ModelSignature(
+                    inputs, ([] if self._drop_input_cols else inputs) + outputs
+                )
 
         # Output signature names may still need to be renamed, since they were not created with `_infer_signature`.
         items = list(self._model_signature_dict.items())
@@ -1188,10 +1292,10 @@ class XGBRFRegressor(BaseTransformer):
         """Returns model signature of current class.
 
         Raises:
-
+            SnowflakeMLException: If estimator is not fitted, then model signature cannot be inferred
 
         Returns:
-            Dict
+            Dict with each method and its input output signature
         """
         if self._model_signature_dict is None:
             raise exceptions.SnowflakeMLException(
@@ -1199,35 +1303,3 @@ class XGBRFRegressor(BaseTransformer):
                 original_exception=RuntimeError("Estimator not fitted before accessing property model_signatures!"),
             )
         return self._model_signature_dict
-
-    def to_xgboost(self) -> Any:
-        """Get xgboost.XGBRFRegressor object.
-        """
-        if self._sklearn_object is None:
-            self._sklearn_object = self._create_sklearn_object()
-        return self._sklearn_object
-
-    def to_sklearn(self) -> Any:
-        raise exceptions.SnowflakeMLException(
-            error_code=error_codes.METHOD_NOT_ALLOWED,
-            original_exception=AttributeError(
-                modeling_error_messages.UNSUPPORTED_MODEL_CONVERSION.format(
-                    "to_sklearn()",
-                    "to_xgboost()"
-                )
-            ),
-        )
-
-    def to_lightgbm(self) -> Any:
-        raise exceptions.SnowflakeMLException(
-            error_code=error_codes.METHOD_NOT_ALLOWED,
-            original_exception=AttributeError(
-                modeling_error_messages.UNSUPPORTED_MODEL_CONVERSION.format(
-                    "to_lightgbm()",
-                    "to_xgboost()"
-                )
-            ),
-        )
-
-    def _get_dependencies(self) -> List[str]:
-        return self._deps
snowflake/ml/registry/registry.py
CHANGED
@@ -93,7 +93,7 @@ class Registry:
         Log a model with various parameters and metadata.
 
         Args:
-            model: Model object of supported types such as Scikit-learn, XGBoost, Snowpark ML,
+            model: Model object of supported types such as Scikit-learn, XGBoost, LightGBM, Snowpark ML,
                 PyTorch, TorchScript, Tensorflow, Tensorflow Keras, MLFlow, HuggingFace Pipeline,
                 Sentence Transformers, Peft-finetuned LLM, or Custom Model.
             model_name: Name to identify the model.
snowflake/ml/version.py
CHANGED
@@ -1 +1 @@
-VERSION="1.4.0"
+VERSION="1.4.1"