snowflake-ml-python 1.19.0__py3-none-any.whl → 1.20.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -16,6 +16,7 @@ from snowflake.ml import version as snowml_version
16
16
  from snowflake.ml._internal import env as snowml_env, relax_version_strategy
17
17
  from snowflake.ml._internal.utils import query_result_checker
18
18
  from snowflake.snowpark import context, exceptions, session
19
+ from snowflake.snowpark._internal import utils as snowpark_utils
19
20
 
20
21
 
21
22
  class CONDA_OS(Enum):
@@ -38,6 +39,21 @@ SNOWPARK_ML_PKG_NAME = "snowflake-ml-python"
38
39
  SNOWFLAKE_CONDA_CHANNEL_URL = "https://repo.anaconda.com/pkgs/snowflake"
39
40
 
40
41
 
42
def get_execution_context() -> str:
    """Return the current execution context.

    Returns:
        str: "SPROC" when running inside a stored procedure, "SPCS" when
        running in the SPCS ML runtime, and "EXTERNAL" otherwise.
    """
    in_sproc = snowpark_utils.is_in_stored_procedure()  # type: ignore[no-untyped-call]
    if in_sproc:
        return "SPROC"
    if snowml_env.IN_ML_RUNTIME:
        return "SPCS"
    return "EXTERNAL"
55
+
56
+
41
57
  def _validate_pip_requirement_string(req_str: str) -> requirements.Requirement:
42
58
  """Validate the input pip requirement string according to PEP 508.
43
59
 
@@ -16,7 +16,7 @@ from typing_extensions import ParamSpec
16
16
  from snowflake import connector
17
17
  from snowflake.connector import connect, telemetry as connector_telemetry, time_util
18
18
  from snowflake.ml import version as snowml_version
19
- from snowflake.ml._internal import env
19
+ from snowflake.ml._internal import env, env_utils
20
20
  from snowflake.ml._internal.exceptions import (
21
21
  error_codes,
22
22
  exceptions as snowml_exceptions,
@@ -37,6 +37,22 @@ _CONNECTION_TYPES = {
37
37
  _Args = ParamSpec("_Args")
38
38
  _ReturnValue = TypeVar("_ReturnValue")
39
39
 
40
# Module-level cache for the Snowflake connection used by telemetry.
_conn: Optional[connector.SnowflakeConnection] = None


def clear_cached_conn() -> None:
    """Close and clear the cached Snowflake connection. Primarily for testing purposes.

    The cache is always reset to None, even if closing the underlying
    connection raises — otherwise a dead connection object would remain
    cached and be returned by subsequent lookups.
    """
    global _conn
    try:
        if _conn is not None and _conn.is_valid():
            _conn.close()
    finally:
        # Unconditionally drop the cache entry, even when close() fails.
        _conn = None
49
+
50
+
51
def get_cached_conn() -> Optional[connector.SnowflakeConnection]:
    """Return the cached Snowflake connection, or None if none is cached.

    Primarily for testing purposes.
    """
    # Reading a module-level name needs no `global` declaration.
    return _conn
55
+
40
56
 
41
57
  def _get_login_token() -> Union[str, bytes]:
42
58
  with open("/snowflake/session/token") as f:
@@ -44,7 +60,11 @@ def _get_login_token() -> Union[str, bytes]:
44
60
 
45
61
 
46
62
  def _get_snowflake_connection() -> Optional[connector.SnowflakeConnection]:
47
- conn = None
63
+ global _conn
64
+ if _conn is not None and _conn.is_valid():
65
+ return _conn
66
+
67
+ conn: Optional[connector.SnowflakeConnection] = None
48
68
  if os.getenv("SNOWFLAKE_HOST") is not None and os.getenv("SNOWFLAKE_ACCOUNT") is not None:
49
69
  try:
50
70
  conn = connect(
@@ -66,6 +86,13 @@ def _get_snowflake_connection() -> Optional[connector.SnowflakeConnection]:
66
86
  # Failed to get an active session. No connection available.
67
87
  pass
68
88
 
89
+ # Cache the connection if it's a SnowflakeConnection. At runtime it can be a
90
+ # StoredProcConnection instead — perhaps incorrect type hinting somewhere.
91
+ if isinstance(conn, connector.SnowflakeConnection):
92
+ # if _conn was expired, we need to copy telemetry data to new connection
93
+ if _conn is not None and conn is not None:
94
+ conn._telemetry._log_batch.extend(_conn._telemetry._log_batch)
95
+ _conn = conn
69
96
  return conn
70
97
 
71
98
 
@@ -113,6 +140,13 @@ class TelemetryField(enum.Enum):
113
140
  FUNC_CAT_USAGE = "usage"
114
141
 
115
142
 
143
@enum.unique
class CustomTagKey(enum.Enum):
    """Enumerates the keys used for custom tags attached to telemetry records."""

    # Records where the API call executed: EXTERNAL, SPCS, or SPROC.
    EXECUTION_CONTEXT = "execution_context"
148
+
149
+
116
150
  class _TelemetrySourceType(enum.Enum):
117
151
  # Automatically inferred telemetry/statement parameters
118
152
  AUTO_TELEMETRY = "SNOWML_AUTO_TELEMETRY"
@@ -441,6 +475,7 @@ def send_api_usage_telemetry(
441
475
  sfqids_extractor: Optional[Callable[..., list[str]]] = None,
442
476
  subproject_extractor: Optional[Callable[[Any], str]] = None,
443
477
  custom_tags: Optional[dict[str, Union[bool, int, str, float]]] = None,
478
+ log_execution_context: bool = True,
444
479
  ) -> Callable[[Callable[_Args, _ReturnValue]], Callable[_Args, _ReturnValue]]:
445
480
  """
446
481
  Decorator that sends API usage telemetry and adds function usage statement parameters to the dataframe returned by
@@ -455,6 +490,8 @@ def send_api_usage_telemetry(
455
490
  sfqids_extractor: Extract sfqids from `self`.
456
491
  subproject_extractor: Extract subproject at runtime from `self`.
457
492
  custom_tags: Custom tags.
493
+ log_execution_context: If True, automatically detect and log execution context
494
+ (EXTERNAL, SPCS, or SPROC) in custom_tags.
458
495
 
459
496
  Returns:
460
497
  Decorator that sends function usage telemetry for any call to the decorated function.
@@ -495,6 +532,11 @@ def send_api_usage_telemetry(
495
532
  if subproject_extractor is not None:
496
533
  subproject_name = subproject_extractor(args[0])
497
534
 
535
+ # Add execution context if enabled
536
+ final_custom_tags = {**custom_tags} if custom_tags is not None else {}
537
+ if log_execution_context:
538
+ final_custom_tags[CustomTagKey.EXECUTION_CONTEXT.value] = env_utils.get_execution_context()
539
+
498
540
  statement_params = get_function_usage_statement_params(
499
541
  project=project,
500
542
  subproject=subproject_name,
@@ -502,7 +544,7 @@ def send_api_usage_telemetry(
502
544
  function_name=_get_full_func_name(func),
503
545
  function_parameters=params,
504
546
  api_calls=api_calls,
505
- custom_tags=custom_tags,
547
+ custom_tags=final_custom_tags,
506
548
  )
507
549
 
508
550
  def update_stmt_params_if_snowpark_df(obj: _ReturnValue, statement_params: dict[str, Any]) -> _ReturnValue:
@@ -538,7 +580,10 @@ def send_api_usage_telemetry(
538
580
  if conn_attr_name:
539
581
  # raise AttributeError if conn attribute does not exist in `self`
540
582
  conn = operator.attrgetter(conn_attr_name)(args[0])
541
- if not isinstance(conn, _CONNECTION_TYPES.get(type(conn).__name__, connector.SnowflakeConnection)):
583
+ if not isinstance(
584
+ conn,
585
+ _CONNECTION_TYPES.get(type(conn).__name__, connector.SnowflakeConnection),
586
+ ):
542
587
  raise TypeError(
543
588
  f"Expected a conn object of type {' or '.join(_CONNECTION_TYPES.keys())} but got {type(conn)}"
544
589
  )
@@ -560,7 +605,7 @@ def send_api_usage_telemetry(
560
605
  func_params=params,
561
606
  api_calls=api_calls,
562
607
  sfqids=sfqids,
563
- custom_tags=custom_tags,
608
+ custom_tags=final_custom_tags,
564
609
  )
565
610
  try:
566
611
  return ctx.run(execute_func_with_statement_params)
@@ -571,7 +616,8 @@ def send_api_usage_telemetry(
571
616
  raise
572
617
  if isinstance(e, snowpark_exceptions.SnowparkClientException):
573
618
  me = snowml_exceptions.SnowflakeMLException(
574
- error_code=error_codes.INTERNAL_SNOWPARK_ERROR, original_exception=e
619
+ error_code=error_codes.INTERNAL_SNOWPARK_ERROR,
620
+ original_exception=e,
575
621
  )
576
622
  else:
577
623
  me = snowml_exceptions.SnowflakeMLException(
@@ -627,7 +673,10 @@ def _get_full_func_name(func: Callable[..., Any]) -> str:
627
673
 
628
674
 
629
675
  def _get_func_params(
630
- func: Callable[..., Any], func_params_to_log: Optional[Iterable[str]], args: Any, kwargs: Any
676
+ func: Callable[..., Any],
677
+ func_params_to_log: Optional[Iterable[str]],
678
+ args: Any,
679
+ kwargs: Any,
631
680
  ) -> dict[str, Any]:
632
681
  """
633
682
  Get function parameters.
@@ -1,4 +1,5 @@
1
1
  import types
2
+ import warnings
2
3
  from typing import TYPE_CHECKING, Optional
3
4
 
4
5
  from snowflake.ml._internal.utils import sql_identifier
@@ -7,6 +8,8 @@ from snowflake.ml.experiment import _experiment_info as experiment_info
7
8
  if TYPE_CHECKING:
8
9
  from snowflake.ml.experiment import experiment_tracking
9
10
 
11
+ METADATA_SIZE_WARNING_MESSAGE = "It is likely that no further metrics or parameters will be logged for this run."
12
+
10
13
 
11
14
  class Run:
12
15
  def __init__(
@@ -20,6 +23,9 @@ class Run:
20
23
  self.experiment_name = experiment_name
21
24
  self.name = run_name
22
25
 
26
+ # Whether we've already shown the user a warning about exceeding the run metadata size limit.
27
+ self._warned_about_metadata_size = False
28
+
23
29
  self._patcher = experiment_info.ExperimentInfoPatcher(
24
30
  experiment_info=self._get_experiment_info(),
25
31
  )
@@ -45,3 +51,12 @@ class Run:
45
51
  ),
46
52
  run_name=self.name.identifier(),
47
53
  )
54
+
55
+ def _warn_about_run_metadata_size(self, sql_error_msg: str) -> None:
56
+ if not self._warned_about_metadata_size:
57
+ warnings.warn(
58
+ f"{sql_error_msg}. {METADATA_SIZE_WARNING_MESSAGE}",
59
+ RuntimeWarning,
60
+ stacklevel=2,
61
+ )
62
+ self._warned_about_metadata_size = True
@@ -283,16 +283,26 @@ class ExperimentTracking:
283
283
  Args:
284
284
  metrics: Dictionary containing metric keys and float values.
285
285
  step: The step of the metrics. Defaults to 0.
286
+
287
+ Raises:
288
+ snowpark.exceptions.SnowparkSQLException: If logging metrics fails due to Snowflake SQL errors,
289
+ except for run metadata size limit errors which will issue a warning instead of raising.
286
290
  """
287
291
  run = self._get_or_start_run()
288
292
  metrics_list = []
289
293
  for key, value in metrics.items():
290
294
  metrics_list.append(entities.Metric(key, value, step))
291
- self._sql_client.modify_run_add_metrics(
292
- experiment_name=run.experiment_name,
293
- run_name=run.name,
294
- metrics=json.dumps([metric.to_dict() for metric in metrics_list]),
295
- )
295
+ try:
296
+ self._sql_client.modify_run_add_metrics(
297
+ experiment_name=run.experiment_name,
298
+ run_name=run.name,
299
+ metrics=json.dumps([metric.to_dict() for metric in metrics_list]),
300
+ )
301
+ except snowpark.exceptions.SnowparkSQLException as e:
302
+ if e.sql_error_code == 400003: # EXPERIMENT_RUN_PROPERTY_SIZE_LIMIT_EXCEEDED
303
+ run._warn_about_run_metadata_size(e.message)
304
+ else:
305
+ raise
296
306
 
297
307
  def log_param(
298
308
  self,
@@ -318,16 +328,26 @@ class ExperimentTracking:
318
328
  Args:
319
329
  params: Dictionary containing parameter keys and values. Values can be of any type, but will be converted
320
330
  to string.
331
+
332
+ Raises:
333
+ snowpark.exceptions.SnowparkSQLException: If logging parameters fails due to Snowflake SQL errors,
334
+ except for run metadata size limit errors which will issue a warning instead of raising.
321
335
  """
322
336
  run = self._get_or_start_run()
323
337
  params_list = []
324
338
  for key, value in params.items():
325
339
  params_list.append(entities.Param(key, str(value)))
326
- self._sql_client.modify_run_add_params(
327
- experiment_name=run.experiment_name,
328
- run_name=run.name,
329
- params=json.dumps([param.to_dict() for param in params_list]),
330
- )
340
+ try:
341
+ self._sql_client.modify_run_add_params(
342
+ experiment_name=run.experiment_name,
343
+ run_name=run.name,
344
+ params=json.dumps([param.to_dict() for param in params_list]),
345
+ )
346
+ except snowpark.exceptions.SnowparkSQLException as e:
347
+ if e.sql_error_code == 400003: # EXPERIMENT_RUN_PROPERTY_SIZE_LIMIT_EXCEEDED
348
+ run._warn_about_run_metadata_size(e.message)
349
+ else:
350
+ raise
331
351
 
332
352
  def log_artifact(
333
353
  self,
@@ -202,6 +202,7 @@ def _configure_role_hierarchy(
202
202
  session.sql(f"GRANT ROLE {producer_role} TO ROLE {session.get_current_role()}").collect()
203
203
 
204
204
  if consumer_role is not None:
205
+ # Create CONSUMER and grant it to PRODUCER to build hierarchy
205
206
  consumer_role = SqlIdentifier(consumer_role)
206
207
  session.sql(f"CREATE ROLE IF NOT EXISTS {consumer_role}").collect()
207
208
  session.sql(f"GRANT ROLE {consumer_role} TO ROLE {producer_role}").collect()
@@ -1200,7 +1200,7 @@ class FeatureStore:
1200
1200
  {self._config.database}.INFORMATION_SCHEMA.DYNAMIC_TABLE_REFRESH_HISTORY (RESULT_LIMIT => 10000)
1201
1201
  )
1202
1202
  WHERE NAME = '{fv_resolved_name}'
1203
- AND SCHEMA_NAME = '{self._config.schema}'
1203
+ AND SCHEMA_NAME = '{self._config.schema.resolved()}'
1204
1204
  """
1205
1205
  )
1206
1206
 
@@ -31,6 +31,7 @@ def parse_bool_env_value(value: Optional[str], default: bool = False) -> bool:
31
31
  class FeatureFlags(Enum):
32
32
  USE_SUBMIT_JOB_V2 = "MLRS_USE_SUBMIT_JOB_V2"
33
33
  ENABLE_RUNTIME_VERSIONS = "MLRS_ENABLE_RUNTIME_VERSIONS"
34
+ ENABLE_STAGE_MOUNT_V2 = "MLRS_ENABLE_STAGE_MOUNT_V2"
34
35
 
35
36
  def is_enabled(self, default: bool = False) -> bool:
36
37
  """Check if the feature flag is enabled.
@@ -620,7 +620,12 @@ def _serialize_callable(func: Callable[..., Any]) -> bytes:
620
620
  try:
621
621
  func_bytes: bytes = cp.dumps(func)
622
622
  return func_bytes
623
- except pickle.PicklingError as e:
623
+ except (pickle.PicklingError, TypeError) as e:
624
+ if isinstance(e, TypeError) and "_thread.lock" in str(e):
625
+ raise RuntimeError(
626
+ "Unable to pickle an object that internally holds a reference to a Session object, "
627
+ "such as a Snowpark DataFrame."
628
+ ) from e
624
629
  if isinstance(func, functools.partial):
625
630
  # Try to find which part of the partial isn't serializable for better debuggability
626
631
  objects = [
@@ -197,7 +197,7 @@ def generate_service_spec(
197
197
  resource_limits["nvidia.com/gpu"] = image_spec.resource_limits.gpu
198
198
 
199
199
  # Add local volumes for ephemeral logs and artifacts
200
- volumes: list[dict[str, str]] = []
200
+ volumes: list[dict[str, Any]] = []
201
201
  volume_mounts: list[dict[str, str]] = []
202
202
  for volume_name, mount_path in [
203
203
  ("system-logs", "/var/log/managedservices/system/mlrs"),
@@ -246,7 +246,16 @@ def generate_service_spec(
246
246
  volumes.append(
247
247
  {
248
248
  "name": constants.STAGE_VOLUME_NAME,
249
- "source": payload.stage_path.as_posix(),
249
+ "source": "stage",
250
+ "stageConfig": {
251
+ "name": payload.stage_path.as_posix(),
252
+ "resources": {
253
+ "requests": {
254
+ "memory": "0Gi",
255
+ "cpu": "0",
256
+ },
257
+ },
258
+ },
250
259
  }
251
260
  )
252
261
 
@@ -286,7 +295,7 @@ def generate_service_spec(
286
295
  "storage",
287
296
  ]
288
297
 
289
- spec_dict = {
298
+ spec_dict: dict[str, Any] = {
290
299
  "containers": [
291
300
  {
292
301
  "name": constants.DEFAULT_CONTAINER_NAME,
snowflake/ml/jobs/job.py CHANGED
@@ -113,7 +113,12 @@ class MLJob(Generic[T], SerializableSessionMixin):
113
113
  """Get the job's artifact storage stage location."""
114
114
  volumes = self._service_spec["spec"]["volumes"]
115
115
  stage_volume = next((v for v in volumes if v["name"] == constants.STAGE_VOLUME_NAME), None)
116
- return cast(str, stage_volume["source"]) if stage_volume else None
116
+ if stage_volume is None:
117
+ return None
118
+ elif "stageConfig" in stage_volume:
119
+ return cast(str, stage_volume["stageConfig"]["name"])
120
+ else:
121
+ return cast(str, stage_volume["source"])
117
122
 
118
123
  @property
119
124
  def _result_path(self) -> str:
@@ -520,6 +520,12 @@ def _submit_job(
520
520
  raise RuntimeError(
521
521
  "Please specify a schema, either in the session context or as a parameter in the job submission"
522
522
  )
523
+ elif e.sql_error_code == 3001 and "schema" in str(e).lower():
524
+ raise RuntimeError(
525
+ "please grant privileges on schema before submitting a job, see",
526
+ "https://docs.snowflake.com/en/developer-guide/snowflake-ml/ml-jobs/access-control-requirements",
527
+ " for more details",
528
+ ) from e
523
529
  raise
524
530
 
525
531
  if feature_flags.FeatureFlags.USE_SUBMIT_JOB_V2.is_enabled(default=True):
@@ -546,6 +552,12 @@ def _submit_job(
546
552
  except SnowparkSQLException as e:
547
553
  if not (e.sql_error_code == 90237 and sp_utils.is_in_stored_procedure()): # type: ignore[no-untyped-call]
548
554
  raise
555
+ elif e.sql_error_code == 3001 and "schema" in str(e).lower():
556
+ raise RuntimeError(
557
+ "please grant privileges on schema before submitting a job, see",
558
+ "https://docs.snowflake.com/en/developer-guide/snowflake-ml/ml-jobs/access-control-requirements"
559
+ " for more details",
560
+ ) from e
549
561
  # SNOW-2390287: SYSTEM$EXECUTE_ML_JOB() is erroneously blocked in owner's rights
550
562
  # stored procedures. This will be fixed in an upcoming release.
551
563
  logger.warning(
@@ -690,6 +702,7 @@ def _do_submit_job_v2(
690
702
  # when feature flag is enabled, we get the local python version and wrap it in a dict
691
703
  # in system function, we can know whether it is python version or image tag or full image URL through the format
692
704
  spec_options["RUNTIME"] = json.dumps({"pythonVersion": f"{sys.version_info.major}.{sys.version_info.minor}"})
705
+
693
706
  job_options = {
694
707
  "EXTERNAL_ACCESS_INTEGRATIONS": external_access_integrations,
695
708
  "QUERY_WAREHOUSE": query_warehouse,
@@ -4,14 +4,18 @@ from snowflake.ml.model._client.ops import service_ops
4
4
 
5
5
 
6
6
def _get_inference_engine_args(
    inference_engine_options: Optional[dict[str, Any]],
) -> Optional[service_ops.InferenceEngineArgs]:
    """Build InferenceEngineArgs from user-supplied inference engine options.

    Args:
        inference_engine_options: Mapping that must contain "engine" when
            non-empty, and may contain "engine_args_override".

    Returns:
        InferenceEngineArgs constructed from the options, or None when no
        options were provided.

    Raises:
        ValueError: If options are provided without the required "engine" field.
    """
    if not inference_engine_options:
        return None

    if "engine" not in inference_engine_options:
        raise ValueError("'engine' field is required in inference_engine_options")

    engine = inference_engine_options["engine"]
    args_override = inference_engine_options.get("engine_args_override")
    return service_ops.InferenceEngineArgs(
        inference_engine=engine,
        inference_engine_args_override=args_override,
    )
16
20
 
17
21
 
@@ -12,7 +12,7 @@ from snowflake.ml import jobs
12
12
  from snowflake.ml._internal import telemetry
13
13
  from snowflake.ml._internal.utils import sql_identifier
14
14
  from snowflake.ml.lineage import lineage_node
15
- from snowflake.ml.model import task, type_hints
15
+ from snowflake.ml.model import openai_signatures, task, type_hints
16
16
  from snowflake.ml.model._client.model import (
17
17
  batch_inference_specs,
18
18
  inference_engine_utils,
@@ -23,6 +23,7 @@ from snowflake.ml.model._model_composer.model_manifest import model_manifest_sch
23
23
  from snowflake.ml.model._model_composer.model_method import utils as model_method_utils
24
24
  from snowflake.ml.model._packager.model_handlers import snowmlmodel
25
25
  from snowflake.ml.model._packager.model_meta import model_meta_schema
26
+ from snowflake.ml.model._signatures import core
26
27
  from snowflake.snowpark import Session, async_job, dataframe
27
28
 
28
29
  _TELEMETRY_PROJECT = "MLOps"
@@ -940,14 +941,16 @@ class ModelVersion(lineage_node.LineageNode):
940
941
  self,
941
942
  statement_params: Optional[dict[str, Any]] = None,
942
943
  ) -> None:
943
- """Check if the model is a HuggingFace pipeline with text-generation task.
944
+ """Check if the model is a HuggingFace pipeline with text-generation task
945
+ and is logged with OPENAI_CHAT_SIGNATURE.
944
946
 
945
947
  Args:
946
948
  statement_params: Optional dictionary of statement parameters to include
947
949
  in the SQL command to fetch model spec.
948
950
 
949
951
  Raises:
950
- ValueError: If the model is not a HuggingFace text-generation model.
952
+ ValueError: If the model is not a HuggingFace text-generation model or
953
+ if the model is not logged with OPENAI_CHAT_SIGNATURE.
951
954
  """
952
955
  # Fetch model spec
953
956
  model_spec = self._get_model_spec(statement_params)
@@ -983,6 +986,21 @@ class ModelVersion(lineage_node.LineageNode):
983
986
  )
984
987
  raise ValueError(f"Inference engine is only supported for task 'text-generation'. {found_tasks_str}")
985
988
 
989
+ # Check if the model is logged with OPENAI_CHAT_SIGNATURE
990
+ signatures_dict = model_spec.get("signatures", {})
991
+
992
+ # Deserialize signatures from model spec to ModelSignature objects for proper semantic comparison.
993
+ deserialized_signatures = {
994
+ func_name: core.ModelSignature.from_dict(sig_dict) for func_name, sig_dict in signatures_dict.items()
995
+ }
996
+
997
+ if deserialized_signatures != openai_signatures.OPENAI_CHAT_SIGNATURE:
998
+ raise ValueError(
999
+ "Inference engine requires the model to be logged with OPENAI_CHAT_SIGNATURE. "
1000
+ f"Found signatures: {signatures_dict}. "
1001
+ "Please log the model with: signatures=openai_signatures.OPENAI_CHAT_SIGNATURE"
1002
+ )
1003
+
986
1004
  @overload
987
1005
  def create_service(
988
1006
  self,
@@ -1001,6 +1019,7 @@ class ModelVersion(lineage_node.LineageNode):
1001
1019
  force_rebuild: bool = False,
1002
1020
  build_external_access_integration: Optional[str] = None,
1003
1021
  block: bool = True,
1022
+ inference_engine_options: Optional[dict[str, Any]] = None,
1004
1023
  experimental_options: Optional[dict[str, Any]] = None,
1005
1024
  ) -> Union[str, async_job.AsyncJob]:
1006
1025
  """Create an inference service with the given spec.
@@ -1034,10 +1053,12 @@ class ModelVersion(lineage_node.LineageNode):
1034
1053
  block: A bool value indicating whether this function will wait until the service is available.
1035
1054
  When it is ``False``, this function executes the underlying service creation asynchronously
1036
1055
  and returns an :class:`AsyncJob`.
1037
- experimental_options: Experimental options for the service creation with custom inference engine.
1038
- Currently, `inference_engine`, `inference_engine_args_override`, and `autocapture` are supported.
1039
- `inference_engine` is the name of the inference engine to use.
1040
- `inference_engine_args_override` is a list of string arguments to pass to the inference engine.
1056
+ inference_engine_options: Options for the service creation with custom inference engine.
1057
+ Supports `engine` and `engine_args_override`.
1058
+ `engine` is the type of the inference engine to use.
1059
+ `engine_args_override` is a list of string arguments to pass to the inference engine.
1060
+ experimental_options: Experimental options for the service creation.
1061
+ Currently only `autocapture` is supported.
1041
1062
  `autocapture` is a boolean to enable/disable inference table.
1042
1063
  """
1043
1064
  ...
@@ -1060,6 +1081,7 @@ class ModelVersion(lineage_node.LineageNode):
1060
1081
  force_rebuild: bool = False,
1061
1082
  build_external_access_integrations: Optional[list[str]] = None,
1062
1083
  block: bool = True,
1084
+ inference_engine_options: Optional[dict[str, Any]] = None,
1063
1085
  experimental_options: Optional[dict[str, Any]] = None,
1064
1086
  ) -> Union[str, async_job.AsyncJob]:
1065
1087
  """Create an inference service with the given spec.
@@ -1093,10 +1115,12 @@ class ModelVersion(lineage_node.LineageNode):
1093
1115
  block: A bool value indicating whether this function will wait until the service is available.
1094
1116
  When it is ``False``, this function executes the underlying service creation asynchronously
1095
1117
  and returns an :class:`AsyncJob`.
1096
- experimental_options: Experimental options for the service creation with custom inference engine.
1097
- Currently, `inference_engine`, `inference_engine_args_override`, and `autocapture` are supported.
1098
- `inference_engine` is the name of the inference engine to use.
1099
- `inference_engine_args_override` is a list of string arguments to pass to the inference engine.
1118
+ inference_engine_options: Options for the service creation with custom inference engine.
1119
+ Supports `engine` and `engine_args_override`.
1120
+ `engine` is the type of the inference engine to use.
1121
+ `engine_args_override` is a list of string arguments to pass to the inference engine.
1122
+ experimental_options: Experimental options for the service creation.
1123
+ Currently only `autocapture` is supported.
1100
1124
  `autocapture` is a boolean to enable/disable inference table.
1101
1125
  """
1102
1126
  ...
@@ -1134,6 +1158,7 @@ class ModelVersion(lineage_node.LineageNode):
1134
1158
  build_external_access_integration: Optional[str] = None,
1135
1159
  build_external_access_integrations: Optional[list[str]] = None,
1136
1160
  block: bool = True,
1161
+ inference_engine_options: Optional[dict[str, Any]] = None,
1137
1162
  experimental_options: Optional[dict[str, Any]] = None,
1138
1163
  ) -> Union[str, async_job.AsyncJob]:
1139
1164
  """Create an inference service with the given spec.
@@ -1169,10 +1194,12 @@ class ModelVersion(lineage_node.LineageNode):
1169
1194
  block: A bool value indicating whether this function will wait until the service is available.
1170
1195
  When it is False, this function executes the underlying service creation asynchronously
1171
1196
  and returns an AsyncJob.
1172
- experimental_options: Experimental options for the service creation with custom inference engine.
1173
- Currently, `inference_engine`, `inference_engine_args_override`, and `autocapture` are supported.
1174
- `inference_engine` is the name of the inference engine to use.
1175
- `inference_engine_args_override` is a list of string arguments to pass to the inference engine.
1197
+ inference_engine_options: Options for the service creation with custom inference engine.
1198
+ Supports `engine` and `engine_args_override`.
1199
+ `engine` is the type of the inference engine to use.
1200
+ `engine_args_override` is a list of string arguments to pass to the inference engine.
1201
+ experimental_options: Experimental options for the service creation.
1202
+ Currently only `autocapture` is supported.
1176
1203
  `autocapture` is a boolean to enable/disable inference table.
1177
1204
 
1178
1205
 
@@ -1209,9 +1236,10 @@ class ModelVersion(lineage_node.LineageNode):
1209
1236
  # Validate GPU support if GPU resources are requested
1210
1237
  self._throw_error_if_gpu_is_not_supported(gpu_requests, statement_params)
1211
1238
 
1212
- inference_engine_args = inference_engine_utils._get_inference_engine_args(experimental_options)
1239
+ inference_engine_args = inference_engine_utils._get_inference_engine_args(inference_engine_options)
1213
1240
 
1214
- # Check if model is HuggingFace text-generation before doing inference engine checks
1241
+ # Check if model is HuggingFace text-generation and is logged with
1242
+ # OPENAI_CHAT_SIGNATURE before doing inference engine checks
1215
1243
  # Only validate if inference engine is actually specified
1216
1244
  if inference_engine_args is not None:
1217
1245
  self._check_huggingface_text_generation_model(statement_params)
@@ -10,7 +10,7 @@ REQUIREMENTS = [
10
10
  "fsspec>=2024.6.1,<2026",
11
11
  "importlib_resources>=6.1.1, <7",
12
12
  "numpy>=1.23,<3",
13
- "packaging>=20.9,<25",
13
+ "packaging>=20.9,<26",
14
14
  "pandas>=2.1.4,<3",
15
15
  "platformdirs<5",
16
16
  "pyarrow<19.0.0",
@@ -303,6 +303,7 @@ class HuggingFacePipelineModel:
303
303
  force_rebuild: bool = False,
304
304
  build_external_access_integrations: Optional[list[str]] = None,
305
305
  block: bool = True,
306
+ inference_engine_options: Optional[dict[str, Any]] = None,
306
307
  experimental_options: Optional[dict[str, Any]] = None,
307
308
  ) -> Union[str, async_job.AsyncJob]:
308
309
  """Logs a Hugging Face model and creates a service in Snowflake.
@@ -330,10 +331,8 @@ class HuggingFacePipelineModel:
330
331
  force_rebuild: Whether to force rebuild the image. Defaults to False.
331
332
  build_external_access_integrations: External access integrations for building the image. Defaults to None.
332
333
  block: Whether to block the operation. Defaults to True.
333
- experimental_options: Experimental options for the service creation with custom inference engine.
334
- Currently, only `inference_engine` and `inference_engine_args_override` are supported.
335
- `inference_engine` is the name of the inference engine to use.
336
- `inference_engine_args_override` is a list of string arguments to pass to the inference engine.
334
+ inference_engine_options: Options for the service creation with custom inference engine. Defaults to None.
335
+ experimental_options: Experimental options for the service creation. Defaults to None.
337
336
 
338
337
  Raises:
339
338
  ValueError: if database and schema name is not provided and session doesn't have a
@@ -377,14 +376,14 @@ class HuggingFacePipelineModel:
377
376
 
378
377
  # Check if model is HuggingFace text-generation before doing inference engine checks
379
378
  inference_engine_args = None
380
- if experimental_options:
379
+ if inference_engine_options:
381
380
  if self.task != "text-generation":
382
381
  raise ValueError(
383
- "Currently, InferenceEngine using experimental_options is only supported for "
382
+ "Currently, InferenceEngine using inference_engine_options is only supported for "
384
383
  "HuggingFace text-generation models."
385
384
  )
386
385
 
387
- inference_engine_args = inference_engine_utils._get_inference_engine_args(experimental_options)
386
+ inference_engine_args = inference_engine_utils._get_inference_engine_args(inference_engine_options)
388
387
 
389
388
  # Enrich inference engine args if inference engine is specified
390
389
  if inference_engine_args is not None:
snowflake/ml/version.py CHANGED
@@ -1,2 +1,2 @@
1
1
  # This is parsed by regex in conda recipe meta file. Make sure not to break it.
2
- VERSION = "1.19.0"
2
+ VERSION = "1.20.0"
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: snowflake-ml-python
3
- Version: 1.19.0
3
+ Version: 1.20.0
4
4
  Summary: The machine learning client library that is used for interacting with Snowflake to build machine learning solutions.
5
5
  Author-email: "Snowflake, Inc" <support@snowflake.com>
6
6
  License:
@@ -240,7 +240,7 @@ Requires-Dist: cryptography
240
240
  Requires-Dist: fsspec[http]<2026,>=2024.6.1
241
241
  Requires-Dist: importlib_resources<7,>=6.1.1
242
242
  Requires-Dist: numpy<3,>=1.23
243
- Requires-Dist: packaging<25,>=20.9
243
+ Requires-Dist: packaging<26,>=20.9
244
244
  Requires-Dist: pandas<3,>=2.1.4
245
245
  Requires-Dist: platformdirs<5
246
246
  Requires-Dist: pyarrow<19.0.0
@@ -415,11 +415,52 @@ NOTE: Version 1.7.0 is used as example here. Please choose the the latest versio
415
415
 
416
416
  # Release History
417
417
 
418
- ## 1.19.0
418
+ ## 1.20.0
419
419
 
420
420
  ### Bug Fixes
421
421
 
422
- * Experiment Tracking (PrPr): No longer throw an exception in `list_artifacts` when run does not have artifacts.
422
+ * Experiment Tracking (PuPr): Reaching the run metadata size limit in `log_metrics` or `log_params` will warn the user
423
+ instead of raising an exception.
424
+
425
+ ### Behavior Changes
426
+
427
+ ### New Features
428
+
429
+ * Registry (PrPr): Introducing vLLM as a backend inference engine. The `create_service` API will now
430
+ accept `inference_engine_options` as an argument.
431
+
432
+ ```python
433
+ from snowflake.ml.model.inference_engine import InferenceEngine
434
+
435
+ mv = snowflake_registry.log_model(
436
+ model=generator,
437
+ model_name=...,
438
+ ...,
439
+ # Specifying OPENAI_CHAT_SIGNATURE is necessary to use vLLM inference engine
440
+ signatures=openai_signatures.OPENAI_CHAT_SIGNATURE,
441
+ )
442
+
443
+ mv.create_service(
444
+ service_name=my_serv,
445
+ service_compute_pool=...,
446
+ ...,
447
+ inference_engine_options={
448
+ "engine": InferenceEngine.VLLM,
449
+ "engine_args_override": [
450
+ "--max-model-len=2048",
451
+ "--gpu-memory-utilization=0.9"
452
+ ]
453
+ }
454
+ )
455
+ ```
456
+
457
+ ### Deprecations
458
+
459
+ ## 1.19.0 (11-13-2025)
460
+
461
+ ### Bug Fixes
462
+
463
+ * Experiment Tracking (PuPr): No longer throw an exception in `list_artifacts` when run does not have artifacts.
423
464
  * Registry: Fix `get_version_by_alias`: now requires an exact match of snowflake identifier.
424
465
 
425
466
  ### Behavior Changes
@@ -10,15 +10,15 @@ snowflake/cortex/_sse_client.py,sha256=sLYgqAfTOPADCnaWH2RWAJi8KbU_7gSRsTUDcDD5T
10
10
  snowflake/cortex/_summarize.py,sha256=7GH8zqfIdOiHA5w4b6EvJEKEWhaTrL4YA6iDGbn7BNM,1307
11
11
  snowflake/cortex/_translate.py,sha256=9ZGjvAnJFisbzJ_bXnt4pyug5UzhHJRXW8AhGQEersM,1652
12
12
  snowflake/cortex/_util.py,sha256=krNTpbkFLXwdFqy1bd0xi7ZmOzOHRnIfHdQCPiLZJxk,3288
13
- snowflake/ml/version.py,sha256=E_REf6eP-Jb1wbXaox0ybf5CMbKewy8U_8Nru3GYxPU,99
13
+ snowflake/ml/version.py,sha256=eXm1ZjZ4bkRp_ba2n0F5kutVHnNUvpW7L4_SMMkOxeM,99
14
14
  snowflake/ml/_internal/env.py,sha256=EY_2KVe8oR3LgKWdaeRb5rRU-NDNXJppPDsFJmMZUUY,265
15
- snowflake/ml/_internal/env_utils.py,sha256=x6ID94g6FYoMX3afp0zoUHzBvuvPyiE2F6RDpxx5Cq0,30967
15
+ snowflake/ml/_internal/env_utils.py,sha256=Xx03pV_qEIVJJY--J3ZmnqK9Ugf0Os3O2vrF8xOyq_c,31500
16
16
  snowflake/ml/_internal/file_utils.py,sha256=7sA6loOeSfmGP4yx16P4usT9ZtRqG3ycnXu7_Tk7dOs,14206
17
17
  snowflake/ml/_internal/init_utils.py,sha256=WhrlvS-xcmKErSpwg6cUk6XDQ5lQcwDqPJnU7cooMIg,2672
18
18
  snowflake/ml/_internal/migrator_utils.py,sha256=k3erO8x3YJcX6nkKeyJAUNGg1qjE3RFmD-W6dtLzIH0,161
19
19
  snowflake/ml/_internal/platform_capabilities.py,sha256=5cpeKpsxCObjOsPIz38noIusWw4n5KXOvPqRPiF3Kj4,7627
20
20
  snowflake/ml/_internal/relax_version_strategy.py,sha256=MYEIZrx1HfKNhl9Na3GN50ipX8c0MKIj9nwxjB0IC0Y,484
21
- snowflake/ml/_internal/telemetry.py,sha256=SL5_yXvRoqvmIckTpTHVwGsgPyg06uIIPZHMZWfdkzo,31922
21
+ snowflake/ml/_internal/telemetry.py,sha256=oN0NDuKYabmc85IGPDz48ktFpPlo9Xlhw0qGJdOtFNY,33684
22
22
  snowflake/ml/_internal/type_utils.py,sha256=bNNW0I9rOvwhx-Y274vGd0qWA0fMIPA3SGnaDE09wvc,2198
23
23
  snowflake/ml/_internal/exceptions/dataset_error_messages.py,sha256=h7uGJbxBM6se-TW_64LKGGGdBCbwflzbBnmijWKX3Gc,285
24
24
  snowflake/ml/_internal/exceptions/dataset_errors.py,sha256=TqESe8cDfWurJdv5X0DOwgzBfHCEqga_F3WQipYbdqg,741
@@ -65,21 +65,21 @@ snowflake/ml/dataset/dataset_metadata.py,sha256=lcNvugBkP8YEkGMQqaV8SlHs5mwUKsUS
65
65
  snowflake/ml/dataset/dataset_reader.py,sha256=mZsG9HyWUGgfotrGkLrunyEsOm_659mH-Sn2OyG6A-Q,5036
66
66
  snowflake/ml/experiment/__init__.py,sha256=r7qdyPd3jwxzqvksim2ju5j_LrnYQrta0ZI6XpWUqmc,109
67
67
  snowflake/ml/experiment/_experiment_info.py,sha256=iaJ65x6nzBYJ5djleSOzBtMpZUJCUDlRpaDw0pu-dcU,2533
68
- snowflake/ml/experiment/experiment_tracking.py,sha256=B_7_u0tOZ2_ftNQZJriY_-IfNVsAOEJonzAJahFRYis,16793
68
+ snowflake/ml/experiment/experiment_tracking.py,sha256=ASalDFuUjuNHmiwJJ1EKW1u1ENLGGFEHYeQVZuPuqkU,17828
69
69
  snowflake/ml/experiment/utils.py,sha256=3bpbkilc5vvFjnti-kcyhhjAd9Ga3LqiKqJDwORiATY,628
70
70
  snowflake/ml/experiment/_client/artifact.py,sha256=R2WB4Y_kqv43BWLfXv8SEDINn1Bnevzgb-mH5LyvgGk,3035
71
71
  snowflake/ml/experiment/_client/experiment_tracking_sql_client.py,sha256=7AuC9VvDmH04PnyuCxSJt-YcwEm8cmkfmxixVN7dSbQ,8167
72
72
  snowflake/ml/experiment/_entities/__init__.py,sha256=11XxkvAzosydf5owNmMzLwXZdQ2NtNKRM-MMra4ND2k,247
73
73
  snowflake/ml/experiment/_entities/experiment.py,sha256=lKmQj59K8fGDWVwRqeIesxorrChb-m78vX_WUmI7PV0,225
74
- snowflake/ml/experiment/_entities/run.py,sha256=JkhiS4UZWuRm3ZSLgc2uktedeag5Voih2r02YFr6DQk,1621
74
+ snowflake/ml/experiment/_entities/run.py,sha256=6_R35nI24PzIWMrwPKDif5ZINAAE6J0R7p4UmlT-m4o,2251
75
75
  snowflake/ml/experiment/_entities/run_metadata.py,sha256=25cIg8FnAYHk5SoTg_StzL10_BkomL7xrhMmWxUTU8E,366
76
76
  snowflake/ml/experiment/callback/keras.py,sha256=I_O2SBYttFNChO2Sc_C6xQh03r3ymSFB4eN2TS41Dgs,2680
77
77
  snowflake/ml/experiment/callback/lightgbm.py,sha256=qu4m8WV6Rqxa39X7g7ZBd1zJ8icYEkBBF3Kh3C1VpHU,2754
78
78
  snowflake/ml/experiment/callback/xgboost.py,sha256=F547AXZ7Gv39cyIrgRdxVE8MQ3VlNi5JqKKW0Z5RlQo,2754
79
79
  snowflake/ml/feature_store/__init__.py,sha256=MJr2Gp_EimDgDxD6DtenOEdLTzg6NYPfdNiPM-5rEtw,406
80
- snowflake/ml/feature_store/access_manager.py,sha256=Q5ImMXRY8WA5X5dpBMzHnIJmeyKVShjNAlbn3cQb4N8,10654
80
+ snowflake/ml/feature_store/access_manager.py,sha256=ZuLk2IQE2H-XSV96Z6mf_KzF2J-kjaDf-_t-0nCxnTg,10724
81
81
  snowflake/ml/feature_store/entity.py,sha256=ViOSlqCV17ouiO4iH-_KvkvJZqSzpf-nfsjijG6G1Uk,4047
82
- snowflake/ml/feature_store/feature_store.py,sha256=wJliNeSifIK-zlx1a4aIhji9th0sExDxJs_MytzppZ4,172323
82
+ snowflake/ml/feature_store/feature_store.py,sha256=9rb-CT1F-gI1hb8JzqD9CBRz2Q9Vd3c2HtRbl7pSdu4,172334
83
83
  snowflake/ml/feature_store/feature_view.py,sha256=OHhhk33DJa1-P0YG0g9XQxlMrt761yRpZ3CO1y4mtwc,44329
84
84
  snowflake/ml/feature_store/examples/example_helper.py,sha256=eaD2vLe7y4C5hMZQTeMXylbTtLacbq9gJcAluGHrkug,12470
85
85
  snowflake/ml/feature_store/examples/airline_features/entities.py,sha256=mzHRS-InHpXON0eHds-QLmi7nK9ciOnCruhPZI4niLs,438
@@ -110,8 +110,8 @@ snowflake/ml/fileset/snowfs.py,sha256=uF5QluYtiJ-HezGIhF55dONi3t0E6N7ByaVAIAlM3n
110
110
  snowflake/ml/fileset/stage_fs.py,sha256=SnkgCta6_5G6Ljl-Nzctr4yavhHUSlNKN3je0ojp54E,20685
111
111
  snowflake/ml/jobs/__init__.py,sha256=h176wKqEylZs5cdWdzWHuUrSAcwctDdw4tUhIpy-mO4,657
112
112
  snowflake/ml/jobs/decorators.py,sha256=mQgdWvvCwD7q79cSFKZHKegXGh2j1u8WM64UD3lCKr4,3428
113
- snowflake/ml/jobs/job.py,sha256=GeV8uCaoupuahHe8so4DyVPEvHoenEekdn-WLr-2Nj0,27580
114
- snowflake/ml/jobs/manager.py,sha256=yYxY8E-0V8PIIwBTtDDaWCwqZHe8HpUM2C7nTu7gPLs,29110
113
+ snowflake/ml/jobs/job.py,sha256=eK2D0DurFJGNp7GDrsqn4TTkHdvLR1hFVJrgfET5GOU,27739
114
+ snowflake/ml/jobs/manager.py,sha256=_JEK7ozkEoEL26f0QJPBOhFrFr2U_-BnS_Cs5_R6PIw,29846
115
115
  snowflake/ml/jobs/_interop/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
116
116
  snowflake/ml/jobs/_interop/data_utils.py,sha256=xUO5YlhUKFVCDtbjam5gP2lka3lfoknTLr7syNAVxK0,4074
117
117
  snowflake/ml/jobs/_interop/dto_schema.py,sha256=NhoQ6WJa7uLO9VJojEENVVZhZMfL_G1VPPSSUYmmhO8,2750
@@ -122,12 +122,12 @@ snowflake/ml/jobs/_interop/results.py,sha256=nQ07XJ1BZEkPB4xa12pbGyaKqR8sWCoSzx0
122
122
  snowflake/ml/jobs/_interop/utils.py,sha256=TWFkUcAYmb-fpTwEL8idkk3XxlZ8vLz4X_gyS78PSi4,5552
123
123
  snowflake/ml/jobs/_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
124
124
  snowflake/ml/jobs/_utils/constants.py,sha256=Wp2s_wBX5WZnxo3cdrsitnb9Ze0OUGmH26sofDFrdMI,4613
125
- snowflake/ml/jobs/_utils/feature_flags.py,sha256=c69OYFOZyXVmj87VKRh-rp_MP-3I1gJXhxBSiXAprbQ,1612
125
+ snowflake/ml/jobs/_utils/feature_flags.py,sha256=dLWBVIjyB2vsa4Vtm7Yhty6DOi8Nn73_YSjuYf73Y7A,1669
126
126
  snowflake/ml/jobs/_utils/function_payload_utils.py,sha256=4LBaStMdhRxcqwRkwFje-WwiEKRWnBfkaOYouF3N3Kg,1308
127
- snowflake/ml/jobs/_utils/payload_utils.py,sha256=INq_G1flV-Sa2riuqKwx5DOWTUegcDF01jfmJKpBcao,31101
127
+ snowflake/ml/jobs/_utils/payload_utils.py,sha256=IZr8aIadP0vQrKsSdCTFYX2ZYJNzGsijFqH219fjOX4,31382
128
128
  snowflake/ml/jobs/_utils/query_helper.py,sha256=1-XK-y4iukbR1693qAELprRbHmJDM4YoEBHov8IYbHU,1115
129
129
  snowflake/ml/jobs/_utils/runtime_env_utils.py,sha256=fqa3ctf_CAOSv1zT__01Qp9T058mKgMjXuEkBZqKUqA,2247
130
- snowflake/ml/jobs/_utils/spec_utils.py,sha256=Ch-3iKezKWXgSJm-xpHOW7ZpMBjIZvSNiEZGL9CyA2w,16346
130
+ snowflake/ml/jobs/_utils/spec_utils.py,sha256=j7fqkVb7PzW5jV3IkBlialzOEs_V1rKT4DkM1IeGmfU,16623
131
131
  snowflake/ml/jobs/_utils/stage_utils.py,sha256=YjN7cQFRcAUT1JvNZDSiNw8KiCF4HJ6ymkOYMhYJwE0,5297
132
132
  snowflake/ml/jobs/_utils/types.py,sha256=uOf7TPPWfIRALZhD6Li3AgizPOktPXv8_6iVK2grkgc,2587
133
133
  snowflake/ml/jobs/_utils/scripts/constants.py,sha256=YyIWZqQPYOTtgCY6SfyJjk2A98I5RQVmrOuLtET5Pqg,173
@@ -148,9 +148,9 @@ snowflake/ml/model/task.py,sha256=Zp5JaLB-YfX5p_HSaw81P3J7UnycQq5EMa87A35VOaQ,28
148
148
  snowflake/ml/model/type_hints.py,sha256=hoIq3KOscvp9rqJnmgWHW3IGwCSwiCVbklFAqSQekr4,11225
149
149
  snowflake/ml/model/volatility.py,sha256=qu-wqe9oKkRwXwE2qkKygxTWzUypQYEk3UjsqOGRl_I,1144
150
150
  snowflake/ml/model/_client/model/batch_inference_specs.py,sha256=0SlLTpZW_gzNP5IH_8cFnqjArxM0zVjA5nBLKnKAnz8,4396
151
- snowflake/ml/model/_client/model/inference_engine_utils.py,sha256=L8HnoAEbnN5YAcMlsgNbeqfyZbiOyrNMxj7rD4DcjyU,1878
151
+ snowflake/ml/model/_client/model/inference_engine_utils.py,sha256=yPkdImi2qP1uG1WzLKCBZgXV-DiIBVpImEosIjYJk8Y,1958
152
152
  snowflake/ml/model/_client/model/model_impl.py,sha256=Yabrbir5vPMOnsVmQJ23YN7vqhi756Jcm6pfO8Aq92o,17469
153
- snowflake/ml/model/_client/model/model_version_impl.py,sha256=v03RFKRnu7Gr8lovgRjE-MUK1MY_-RD5zjrrrbliJq4,60470
153
+ snowflake/ml/model/_client/model/model_version_impl.py,sha256=EjZONsvuhZaWGqUm-y3wu5m968E99yoqQZQnZcXiw9Q,61859
154
154
  snowflake/ml/model/_client/ops/metadata_ops.py,sha256=qpK6PL3OyfuhyOmpvLCpHLy6vCxbZbp1HlEvakFGwv4,4884
155
155
  snowflake/ml/model/_client/ops/model_ops.py,sha256=e6z-Pd-yslMFokzJV-ZKNK3m5dyIyl9Zk1TQX5lmgRY,50903
156
156
  snowflake/ml/model/_client/ops/service_ops.py,sha256=Ey_yvKQvFnD4dafjFtPA3aaU1GTGqrdlgIpjrfYC8Ew,47143
@@ -203,7 +203,7 @@ snowflake/ml/model/_packager/model_meta/model_meta_schema.py,sha256=mtKRbHQb6Hq2
203
203
  snowflake/ml/model/_packager/model_meta_migrator/base_migrator.py,sha256=8zTgq3n6TBXv7Vcwmf7b9wjK3m-9HHMsY0Qy1Rs-sZ4,1305
204
204
  snowflake/ml/model/_packager/model_meta_migrator/migrator_plans.py,sha256=5butM-lyaDRhCAO2BaCOIQufpAxAfSAinsNuGqbbjMU,1029
205
205
  snowflake/ml/model/_packager/model_meta_migrator/migrator_v1.py,sha256=cyZVvBGM3nF1IVqDKfYstLCchNO-ZhSkPvLM4aU7J5c,2066
206
- snowflake/ml/model/_packager/model_runtime/_snowml_inference_alternative_requirements.py,sha256=pdqy_tKGOlQyhuSh5ZhmOXxmC2dK_VPycdghrWrq5PI,904
206
+ snowflake/ml/model/_packager/model_runtime/_snowml_inference_alternative_requirements.py,sha256=uzIkTNkxIyfSzBB8mKMHxU754Lk6BWVgoyjCsgPBANQ,904
207
207
  snowflake/ml/model/_packager/model_runtime/model_runtime.py,sha256=xEf-S9QurEOeQzrNxlc-4-S_VkHsVO1eNS4UR0hWwHU,5495
208
208
  snowflake/ml/model/_packager/model_task/model_task_utils.py,sha256=_nm3Irl5W6Oa8_OnJyp3bLeA9QAbV9ygGCsgHI70GX4,6641
209
209
  snowflake/ml/model/_signatures/base_handler.py,sha256=4CTZKKbg4WIz_CmXjyVy8tKZW-5OFcz0J8XVPHm2dfQ,1269
@@ -216,7 +216,7 @@ snowflake/ml/model/_signatures/pytorch_handler.py,sha256=Xy-ITCCX_EgHcyIIqeYSDUI
216
216
  snowflake/ml/model/_signatures/snowpark_handler.py,sha256=aNGPa2v0kTMuSZ80NBdHeAWYva0Nc1vo17ZjQwIjf2E,7621
217
217
  snowflake/ml/model/_signatures/tensorflow_handler.py,sha256=_yrvMg-w_jJoYuyrGXKPX4Dv7Vt8z1e6xIKiWGuZcc4,5660
218
218
  snowflake/ml/model/_signatures/utils.py,sha256=NYZwDtuMV91ryJflBhfrRnu1sq45ej30uEo9_scNbhg,16387
219
- snowflake/ml/model/models/huggingface_pipeline.py,sha256=zx5OXigB6La6GDJxjsy4PkZtE2eIkZ2cbSBBxlmqyfU,22601
219
+ snowflake/ml/model/models/huggingface_pipeline.py,sha256=k6oFoVNE3tqWmaM8BCaC7Xe2eD1-00UIEOadGWtiT9g,22494
220
220
  snowflake/ml/modeling/_internal/estimator_utils.py,sha256=dfPPWO-RHf5C3Tya3VQ4KEqoa32pm-WKwRrjzjDInLk,13956
221
221
  snowflake/ml/modeling/_internal/model_specifications.py,sha256=3wFMcKPCSoiEzU7Mx6RVem89BRlBBENpX__-Rd7GwdU,4851
222
222
  snowflake/ml/modeling/_internal/model_trainer.py,sha256=5Ck1lbdyzcd-TpzAxEyovIN9fjaaVIqugyMHXt0wzH0,971
@@ -451,8 +451,8 @@ snowflake/ml/utils/connection_params.py,sha256=NSBUgcs-DXPRHs1BKpxdSubbJx1yrFRlM
451
451
  snowflake/ml/utils/html_utils.py,sha256=L4pzpvFd20SIk4rie2kTAtcQjbxBHfjKmxonMAT2OoA,7665
452
452
  snowflake/ml/utils/sparse.py,sha256=zLBNh-ynhGpKH5TFtopk0YLkHGvv0yq1q-sV59YQKgg,3819
453
453
  snowflake/ml/utils/sql_client.py,sha256=pSe2od6Pkh-8NwG3D-xqN76_uNf-ohOtVbT55HeQg1Y,668
454
- snowflake_ml_python-1.19.0.dist-info/licenses/LICENSE.txt,sha256=PdEp56Av5m3_kl21iFkVTX_EbHJKFGEdmYeIO1pL_Yk,11365
455
- snowflake_ml_python-1.19.0.dist-info/METADATA,sha256=AgdELvFlnXyalB8a6NU6Fer0MffT245XRWf94InGRY4,97868
456
- snowflake_ml_python-1.19.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
457
- snowflake_ml_python-1.19.0.dist-info/top_level.txt,sha256=TY0gFSHKDdZy3THb0FGomyikWQasEGldIR1O0HGOHVw,10
458
- snowflake_ml_python-1.19.0.dist-info/RECORD,,
454
+ snowflake_ml_python-1.20.0.dist-info/licenses/LICENSE.txt,sha256=PdEp56Av5m3_kl21iFkVTX_EbHJKFGEdmYeIO1pL_Yk,11365
455
+ snowflake_ml_python-1.20.0.dist-info/METADATA,sha256=Ud6fGclw-XBNGupTN1LqbS9xDo99t4HupqYi-yNRXVE,98864
456
+ snowflake_ml_python-1.20.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
457
+ snowflake_ml_python-1.20.0.dist-info/top_level.txt,sha256=TY0gFSHKDdZy3THb0FGomyikWQasEGldIR1O0HGOHVw,10
458
+ snowflake_ml_python-1.20.0.dist-info/RECORD,,