snowflake-ml-python 1.24.0__py3-none-any.whl → 1.25.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
snowflake/ml/data/_internal/arrow_ingestor.py CHANGED
@@ -73,15 +73,19 @@ class ArrowIngestor(data_ingestor.DataIngestor, mixins.SerializableSessionMixin)
         self._schema: Optional[pa.Schema] = None

     @classmethod
-    def from_sources(cls, session: snowpark.Session, sources: Sequence[data_source.DataSource]) -> "ArrowIngestor":
+    def from_sources(
+        cls, session: snowpark.Session, sources: Sequence[data_source.DataSource], **kwargs: Any
+    ) -> "ArrowIngestor":
         if session is None:
             raise ValueError("Session is required")
+        # Skipping kwargs until needed to avoid impacting other workflows.
         return cls(session, sources)

     @classmethod
     def from_ray_dataset(
         cls,
         ray_ds: "ray.data.Dataset",
+        **kwargs: Any,
     ) -> "ArrowIngestor":
         raise NotImplementedError

snowflake/ml/data/data_connector.py CHANGED
@@ -94,7 +94,7 @@ class DataConnector:
         **kwargs: Any,
     ) -> DataConnectorType:
         ingestor_class = ingestor_class or cls.DEFAULT_INGESTOR_CLASS
-        ray_ingestor = ingestor_class.from_ray_dataset(ray_ds=ray_ds)
+        ray_ingestor = ingestor_class.from_ray_dataset(ray_ds=ray_ds, **kwargs)
         return cls(ray_ingestor, **kwargs)

     @classmethod
@@ -111,7 +111,7 @@ class DataConnector:
         **kwargs: Any,
     ) -> DataConnectorType:
         ingestor_class = ingestor_class or cls.DEFAULT_INGESTOR_CLASS
-        ingestor = ingestor_class.from_sources(session, sources)
+        ingestor = ingestor_class.from_sources(session, sources, **kwargs)
         return cls(ingestor, **kwargs)

     @property
snowflake/ml/data/data_ingestor.py CHANGED
@@ -16,7 +16,7 @@ DataIngestorType = TypeVar("DataIngestorType", bound="DataIngestor")
 class DataIngestor(Protocol):
     @classmethod
     def from_sources(
-        cls: type[DataIngestorType], session: snowpark.Session, sources: Sequence[data_source.DataSource]
+        cls: type[DataIngestorType], session: snowpark.Session, sources: Sequence[data_source.DataSource], **kwargs: Any
     ) -> DataIngestorType:
         raise NotImplementedError

@@ -24,6 +24,7 @@ class DataIngestor(Protocol):
     def from_ray_dataset(
         cls: type[DataIngestorType],
         ray_ds: "ray.data.Dataset",
+        **kwargs: Any,
     ) -> DataIngestorType:
         raise NotImplementedError

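Taken together, the four data-module hunks above simply thread `**kwargs` from the `DataConnector` factory methods through to the `DataIngestor` implementations. A minimal, self-contained sketch of the pattern, using simplified stand-in classes rather than the real Snowflake types:

```python
# Sketch of the kwargs plumbing added above; Connector/Ingestor mirror the
# real classes in shape only, and everything here is simplified.
from typing import Any, Sequence


class Ingestor:
    @classmethod
    def from_sources(cls, session: Any, sources: Sequence[Any], **kwargs: Any) -> "Ingestor":
        # Extra keyword arguments are accepted (and currently ignored) so the
        # connector can forward its own kwargs without breaking existing ingestors.
        if session is None:
            raise ValueError("Session is required")
        return cls()


class Connector:
    DEFAULT_INGESTOR_CLASS = Ingestor

    def __init__(self, ingestor: Ingestor, **kwargs: Any) -> None:
        self.ingestor = ingestor

    @classmethod
    def from_sources(cls, session: Any, sources: Sequence[Any], **kwargs: Any) -> "Connector":
        # Before 1.25.x the ingestor call dropped kwargs; now they flow through.
        ingestor = cls.DEFAULT_INGESTOR_CLASS.from_sources(session, sources, **kwargs)
        return cls(ingestor, **kwargs)


connector = Connector.from_sources(session=object(), sources=[], some_option=True)
```

The ingestor accepts but ignores the extra arguments for now, which keeps existing ingestor subclasses working while letting future options flow through the same path.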
snowflake/ml/experiment/_experiment_info.py CHANGED
@@ -3,7 +3,7 @@ import functools
 import types
 from typing import Callable, Optional

-from snowflake.ml import model
+from snowflake.ml.model._client.model import model_version_impl
 from snowflake.ml.registry._manager import model_manager

@@ -23,7 +23,7 @@ class ExperimentInfoPatcher:
     """

     # Store original method at class definition time to avoid recursive patching
-    _original_log_model: Callable[..., model.ModelVersion] = model_manager.ModelManager.log_model
+    _original_log_model: Callable[..., model_version_impl.ModelVersion] = model_manager.ModelManager.log_model

     # Stack of active experiment_info contexts for nested experiment support
     _experiment_info_stack: list[ExperimentInfo] = []
@@ -36,7 +36,7 @@ class ExperimentInfoPatcher:
         if not ExperimentInfoPatcher._experiment_info_stack:

            @functools.wraps(ExperimentInfoPatcher._original_log_model)
-           def patched(*args, **kwargs) -> model.ModelVersion:  # type: ignore[no-untyped-def]
+           def patched(*args, **kwargs) -> model_version_impl.ModelVersion:  # type: ignore[no-untyped-def]
               # Use the most recent (top of stack) experiment_info for nested contexts
               current_experiment_info = ExperimentInfoPatcher._experiment_info_stack[-1]
               return ExperimentInfoPatcher._original_log_model(
snowflake/ml/jobs/_utils/feature_flags.py CHANGED
@@ -1,6 +1,11 @@
 import os
-from enum import Enum
-from typing import Optional
+from typing import Callable, Optional, Union
+
+from snowflake.ml._internal.utils.snowflake_env import SnowflakeCloudType
+from snowflake.snowpark import context as sp_context
+
+# Default value type: can be a bool or a callable that returns a bool
+DefaultValue = Union[bool, Callable[[], bool]]


 def parse_bool_env_value(value: Optional[str], default: bool = False) -> bool:
@@ -28,22 +33,101 @@ def parse_bool_env_value(value: Optional[str], default: bool = False) -> bool:
     return default


-class FeatureFlags(Enum):
-    USE_SUBMIT_JOB_V2 = "MLRS_USE_SUBMIT_JOB_V2"
-    ENABLE_RUNTIME_VERSIONS = "MLRS_ENABLE_RUNTIME_VERSIONS"
-    ENABLE_STAGE_MOUNT_V2 = "MLRS_ENABLE_STAGE_MOUNT_V2"
+def _enabled_in_clouds(*clouds: SnowflakeCloudType) -> Callable[[], bool]:
+    """Create a callable that checks if the current environment is in any of the specified clouds.
+
+    This factory function returns a callable that can be used as a dynamic default
+    for feature flags. The returned callable will check if the current Snowflake
+    session is connected to a region in any of the specified cloud providers.
+
+    Args:
+        *clouds: One or more SnowflakeCloudType values to check against.
+
+    Returns:
+        A callable that returns True if running in any of the specified clouds,
+        False otherwise (including when no session is available).
+
+    Example:
+        >>> # Enable feature only in GCP
+        >>> default=_enabled_in_clouds(SnowflakeCloudType.GCP)
+        >>>
+        >>> # Enable feature in both GCP and Azure
+        >>> default=_enabled_in_clouds(SnowflakeCloudType.GCP, SnowflakeCloudType.AZURE)
+    """
+    cloud_set = frozenset(clouds)
+
+    def check() -> bool:
+        try:
+            from snowflake.ml._internal.utils.snowflake_env import get_current_cloud
+
+            session = sp_context.get_active_session()
+            current_cloud = get_current_cloud(session, default=SnowflakeCloudType.AWS)
+            return current_cloud in cloud_set
+        except Exception:
+            # If we can't determine the cloud (no session, SQL error, etc.),
+            # default to False for safety
+            return False
+
+    return check

-    def is_enabled(self, default: bool = False) -> bool:
-        """Check if the feature flag is enabled.
+
+class _FeatureFlag:
+    """A feature flag backed by an environment variable with a configurable default.
+
+    The default value can be a constant boolean or a callable that dynamically
+    determines the default based on runtime context (e.g., cloud provider).
+    """
+
+    def __init__(self, env_var: str, default: DefaultValue = False) -> None:
+        """Initialize a feature flag.

         Args:
-            default: The default value to return if the environment variable is not set.
+            env_var: The environment variable name that controls this flag.
+            default: The default value when the env var is not set. Can be:
+                - A boolean constant (True/False)
+                - A callable that returns a boolean (evaluated at check time)
+        """
+        self._env_var = env_var
+        self._default = default
+
+    @property
+    def value(self) -> str:
+        """Return the environment variable name (for compatibility with Enum-style access)."""
+        return self._env_var
+
+    def _get_default(self) -> bool:
+        """Get the default value, calling it if it's a callable."""
+        if callable(self._default):
+            return self._default()
+        return self._default
+
+    def is_enabled(self) -> bool:
+        """Check if the feature flag is enabled.
+
+        First checks the environment variable. If not set or unrecognized,
+        falls back to the configured default value.

         Returns:
-            True if the environment variable is set to a truthy value,
-            False if set to a falsy value, or the default value if not set.
+            True if the feature is enabled, False otherwise.
         """
-        return parse_bool_env_value(os.getenv(self.value), default)
+        env_value = os.getenv(self._env_var)
+        if env_value is not None:
+            # Environment variable is set, parse it
+            result = parse_bool_env_value(env_value, default=self._get_default())
+            return result
+        else:
+            # Environment variable not set, use the default
+            return self._get_default()

     def __str__(self) -> str:
-        return self.value
+        return self._env_var
+
+
+class FeatureFlags:
+    """Collection of feature flags for ML Jobs."""
+
+    ENABLE_RUNTIME_VERSIONS = _FeatureFlag("MLRS_ENABLE_RUNTIME_VERSIONS", default=True)
+    ENABLE_STAGE_MOUNT_V2 = _FeatureFlag(
+        "MLRS_ENABLE_STAGE_MOUNT_V2",
+        default=_enabled_in_clouds(SnowflakeCloudType.GCP),
+    )
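The rewrite changes flag resolution to two steps: an explicitly set environment variable always wins, and otherwise the configured default is used, where a callable default (such as `_enabled_in_clouds(...)`) is evaluated lazily at check time. A self-contained sketch of that resolution order, with a trimmed copy of the class above and a deliberately simplified boolean parser:

```python
import os
from typing import Callable, Union

DefaultValue = Union[bool, Callable[[], bool]]


class _FeatureFlag:
    """Trimmed copy of the class above, for illustration only."""

    def __init__(self, env_var: str, default: DefaultValue = False) -> None:
        self._env_var = env_var
        self._default = default

    def _get_default(self) -> bool:
        # A callable default is only evaluated here, at check time.
        return self._default() if callable(self._default) else self._default

    def is_enabled(self) -> bool:
        env_value = os.getenv(self._env_var)
        if env_value is not None:
            # Simplified stand-in for parse_bool_env_value().
            return env_value.strip().lower() in ("true", "1", "yes", "on")
        return self._get_default()


flag = _FeatureFlag("MLRS_ENABLE_STAGE_MOUNT_V2", default=lambda: False)
assert flag.is_enabled() is False            # no env var -> lazy callable default
os.environ["MLRS_ENABLE_STAGE_MOUNT_V2"] = "true"
assert flag.is_enabled() is True             # explicit env var overrides the default
```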
snowflake/ml/jobs/job_definition.py CHANGED
@@ -49,8 +49,14 @@ class MLJobDefinition(Generic[_Args, _ReturnValue], SerializableSessionMixin):
         self.spec_options = spec_options
         self.compute_pool = compute_pool
         self.session = session or sp_context.get_active_session()
-        self.database = database or self.session.get_current_database()
-        self.schema = schema or self.session.get_current_schema()
+        resolved_database = database or self.session.get_current_database()
+        resolved_schema = schema or self.session.get_current_schema()
+        if resolved_database is None:
+            raise ValueError("Database must be specified either in the session context or as a parameter.")
+        if resolved_schema is None:
+            raise ValueError("Schema must be specified either in the session context or as a parameter.")
+        self.database = identifier.resolve_identifier(resolved_database)
+        self.schema = identifier.resolve_identifier(resolved_schema)
         self.job_definition_id = identifier.get_schema_level_object_identifier(self.database, self.schema, name)
         self.entrypoint_args = entrypoint_args

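The constructor now fails fast: if neither the caller nor the session context yields a database or schema, a `ValueError` is raised before any identifier is built, and resolved names are normalized via `identifier.resolve_identifier`. A small sketch of the resolve-or-raise step in isolation (the helper name here is illustrative, not part of the package):

```python
from typing import Optional


def resolve_or_raise(explicit: Optional[str], from_session: Optional[str], kind: str) -> str:
    """Prefer the explicit value, fall back to the session, and fail fast on None."""
    resolved = explicit or from_session
    if resolved is None:
        raise ValueError(f"{kind} must be specified either in the session context or as a parameter.")
    return resolved


database = resolve_or_raise(None, "MY_DB", "Database")   # -> "MY_DB"
try:
    resolve_or_raise(None, None, "Schema")
except ValueError as e:
    print(e)  # Schema must be specified either in the session context or as a parameter.
```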
@@ -190,7 +196,7 @@ class MLJobDefinition(Generic[_Args, _ReturnValue], SerializableSessionMixin):
             )
             raise

-        if runtime_environment is None and feature_flags.FeatureFlags.ENABLE_RUNTIME_VERSIONS.is_enabled(default=True):
+        if runtime_environment is None and feature_flags.FeatureFlags.ENABLE_RUNTIME_VERSIONS.is_enabled():
             # Pass a JSON object for runtime versions so it serializes as nested JSON in options
             runtime_environment = json.dumps({"pythonVersion": f"{sys.version_info.major}.{sys.version_info.minor}"})

@@ -204,7 +210,7 @@ class MLJobDefinition(Generic[_Args, _ReturnValue], SerializableSessionMixin):
             enable_metrics=enable_metrics,
             spec_overrides=spec_overrides,
             runtime=runtime_environment if runtime_environment else None,
-            enable_stage_mount_v2=feature_flags.FeatureFlags.ENABLE_STAGE_MOUNT_V2.is_enabled(default=True),
+            enable_stage_mount_v2=feature_flags.FeatureFlags.ENABLE_STAGE_MOUNT_V2.is_enabled(),
         )

         job_options = types.JobOptions(
snowflake/ml/model/_client/model/model_version_impl.py CHANGED
@@ -33,6 +33,12 @@ _BATCH_INFERENCE_TEMPORARY_FOLDER = "_temporary"
 VLLM_SUPPORTED_TASKS = [
     "text-generation",
     "image-text-to-text",
+    "video-text-to-text",
+    "audio-text-to-text",
+]
+VALID_OPENAI_SIGNATURES = [
+    openai_signatures.OPENAI_CHAT_SIGNATURE,
+    openai_signatures.OPENAI_CHAT_SIGNATURE_WITH_CONTENT_FORMAT_STRING,
 ]


@@ -1140,16 +1146,11 @@ class ModelVersion(lineage_node.LineageNode):
             func_name: core.ModelSignature.from_dict(sig_dict) for func_name, sig_dict in signatures_dict.items()
         }

-        if deserialized_signatures not in [
-            openai_signatures.OPENAI_CHAT_SIGNATURE,
-            openai_signatures.OPENAI_CHAT_SIGNATURE_WITH_CONTENT_FORMAT_STRING,
-        ]:
+        if deserialized_signatures not in VALID_OPENAI_SIGNATURES:
             raise ValueError(
-                "Inference engine requires the model to be logged with openai_signatures.OPENAI_CHAT_SIGNATURE or "
-                "openai_signatures.OPENAI_CHAT_SIGNATURE_WITH_CONTENT_FORMAT_STRING. "
+                "Inference engine requires the model to be logged with one of the following signatures: "
+                f"{VALID_OPENAI_SIGNATURES}. Please log the model again with one of these supported signatures. "
                 f"Found signatures: {signatures_dict}. "
-                "Please log the model again with: signatures=openai_signatures.OPENAI_CHAT_SIGNATURE or "
-                "signatures=openai_signatures.OPENAI_CHAT_SIGNATURE_WITH_CONTENT_FORMAT_STRING"
             )

     @overload
@@ -1161,6 +1162,7 @@ class ModelVersion(lineage_node.LineageNode):
         service_compute_pool: str,
         image_repo: Optional[str] = None,
         ingress_enabled: bool = False,
+        min_instances: int = 0,
         max_instances: int = 1,
         cpu_requests: Optional[str] = None,
         memory_requests: Optional[str] = None,
@@ -1187,8 +1189,10 @@ class ModelVersion(lineage_node.LineageNode):
                 will be used.
             ingress_enabled: If true, creates a service endpoint associated with the service. User must have
                 BIND SERVICE ENDPOINT privilege on the account.
-            max_instances: The maximum number of inference service instances to run. The same value it set to
-                MIN_INSTANCES property of the service.
+            min_instances: The minimum number of instances for the inference service. The service will automatically
+                scale between min_instances and max_instances based on traffic and hardware utilization. If set to
+                0 (default), the service will automatically suspend after a period of inactivity.
+            max_instances: The maximum number of instances for the inference service.
             cpu_requests: The cpu limit for CPU based inference. Can be an integer, fractional or string values. If
                 None, we attempt to utilize all the vCPU of the node.
             memory_requests: The memory limit for CPU based inference. Can be an integer or a fractional value, but
@@ -1224,6 +1228,7 @@ class ModelVersion(lineage_node.LineageNode):
         service_compute_pool: str,
         image_repo: Optional[str] = None,
         ingress_enabled: bool = False,
+        min_instances: int = 0,
         max_instances: int = 1,
         cpu_requests: Optional[str] = None,
         memory_requests: Optional[str] = None,
@@ -1250,8 +1255,10 @@ class ModelVersion(lineage_node.LineageNode):
                 will be used.
             ingress_enabled: If true, creates a service endpoint associated with the service. User must have
                 BIND SERVICE ENDPOINT privilege on the account.
-            max_instances: The maximum number of inference service instances to run. The same value it set to
-                MIN_INSTANCES property of the service.
+            min_instances: The minimum number of instances for the inference service. The service will automatically
+                scale between min_instances and max_instances based on traffic and hardware utilization. If set to
+                0 (default), the service will automatically suspend after a period of inactivity.
+            max_instances: The maximum number of instances for the inference service.
             cpu_requests: The cpu limit for CPU based inference. Can be an integer, fractional or string values. If
                 None, we attempt to utilize all the vCPU of the node.
             memory_requests: The memory limit for CPU based inference. Can be an integer or a fractional value, but
@@ -1301,6 +1308,7 @@ class ModelVersion(lineage_node.LineageNode):
         service_compute_pool: str,
         image_repo: Optional[str] = None,
         ingress_enabled: bool = False,
+        min_instances: int = 0,
         max_instances: int = 1,
         cpu_requests: Optional[str] = None,
         memory_requests: Optional[str] = None,
@@ -1328,8 +1336,10 @@ class ModelVersion(lineage_node.LineageNode):
                 will be used.
             ingress_enabled: If true, creates a service endpoint associated with the service. User must have
                 BIND SERVICE ENDPOINT privilege on the account.
-            max_instances: The maximum number of inference service instances to run. The same value it set to
-                MIN_INSTANCES property of the service.
+            min_instances: The minimum number of instances for the inference service. The service will automatically
+                scale between min_instances and max_instances based on traffic and hardware utilization. If set to
+                0 (default), the service will automatically suspend after a period of inactivity.
+            max_instances: The maximum number of instances for the inference service.
             cpu_requests: The cpu limit for CPU based inference. Can be an integer, fractional or string values. If
                 None, we attempt to utilize all the vCPU of the node.
             memory_requests: The memory limit for CPU based inference. Can be an integer or a fractional value, but
@@ -1419,6 +1429,7 @@ class ModelVersion(lineage_node.LineageNode):
             service_compute_pool_name=sql_identifier.SqlIdentifier(service_compute_pool),
             image_repo_name=image_repo,
             ingress_enabled=ingress_enabled,
+            min_instances=min_instances,
             max_instances=max_instances,
             cpu_requests=cpu_requests,
             memory_requests=memory_requests,
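Based on the updated signature, a call that opts into scale-to-zero might look like the sketch below. Only the parameters visible in the hunks above are certain; the service name argument and all identifier values are placeholders:

```python
# Hypothetical usage of the updated create_service API; identifiers are placeholders.
mv.create_service(
    service_name="MY_DB.MY_SCHEMA.MY_SERVICE",  # assumed parameter, not shown in these hunks
    service_compute_pool="MY_POOL",
    ingress_enabled=True,
    min_instances=0,  # new in 1.25.x: 0 allows automatic suspension when idle
    max_instances=3,  # the service scales between min and max with load
)
```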
snowflake/ml/model/_client/ops/service_ops.py CHANGED
@@ -175,6 +175,7 @@ class ServiceOperator:
         service_compute_pool_name: sql_identifier.SqlIdentifier,
         image_repo_name: Optional[str],
         ingress_enabled: bool,
+        min_instances: int,
         max_instances: int,
         cpu_requests: Optional[str],
         memory_requests: Optional[str],
@@ -241,6 +242,7 @@ class ServiceOperator:
             service_name=service_name,
             inference_compute_pool_name=service_compute_pool_name,
             ingress_enabled=ingress_enabled,
+            min_instances=min_instances,
             max_instances=max_instances,
             cpu=cpu_requests,
             memory=memory_requests,
@@ -829,15 +831,13 @@ class ServiceOperator:
         service_seen_before = False

         while True:
-            # Check if async job has failed (but don't return on success - we need specific service status)
+            # Check if async job has completed
             if async_job.is_done():
                 try:
                     async_job.result()
-                    # Async job completed successfully, but we're waiting for a specific service status
-                    # This might mean the service completed and was cleaned up
-                    module_logger.debug(
-                        f"Async job completed but we're still waiting for {service_name} to reach {target_status.value}"
-                    )
+                    # Async job completed successfully - deployment is done
+                    module_logger.debug(f"Async job completed successfully, returning from wait for {service_name}")
+                    return
                 except Exception as e:
                     raise RuntimeError(f"Service deployment failed: {e}")

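The wait loop previously kept polling for a specific service status even after the async job had succeeded; it now returns as soon as the job completes without raising. A generic sketch of the revised loop shape, with a stand-in job handle rather than the Snowpark async job class:

```python
import time


class FakeAsyncJob:
    """Stand-in for an async job handle exposing is_done()/result()."""

    def __init__(self, ticks_until_done: int = 3) -> None:
        self._ticks = ticks_until_done

    def is_done(self) -> bool:
        self._ticks -= 1
        return self._ticks <= 0

    def result(self) -> str:
        return "ok"  # would raise if the underlying job failed


def wait_for_deployment(async_job: FakeAsyncJob, poll_interval: float = 0.01) -> None:
    while True:
        if async_job.is_done():
            try:
                async_job.result()  # raises if the job failed
                return              # new behavior: success ends the wait immediately
            except Exception as e:
                raise RuntimeError(f"Service deployment failed: {e}")
        time.sleep(poll_interval)


wait_for_deployment(FakeAsyncJob())
```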
snowflake/ml/model/_client/service/model_deployment_spec.py CHANGED
@@ -140,6 +140,7 @@ class ModelDeploymentSpec:
         service_database_name: Optional[sql_identifier.SqlIdentifier] = None,
         service_schema_name: Optional[sql_identifier.SqlIdentifier] = None,
         ingress_enabled: bool = True,
+        min_instances: int = 0,
         max_instances: int = 1,
         cpu: Optional[str] = None,
         memory: Optional[str] = None,
@@ -156,6 +157,7 @@ class ModelDeploymentSpec:
             service_database_name: Database name for the service.
             service_schema_name: Schema name for the service.
             ingress_enabled: Whether ingress is enabled.
+            min_instances: Minimum number of service instances.
             max_instances: Maximum number of service instances.
             cpu: CPU requirement.
             memory: Memory requirement.
@@ -187,6 +189,7 @@ class ModelDeploymentSpec:
             name=fq_service_name,
             compute_pool=inference_compute_pool_name.identifier(),
             ingress_enabled=ingress_enabled,
+            min_instances=min_instances,
             max_instances=max_instances,
             autocapture=autocapture,
             **self._inference_spec,
snowflake/ml/model/_client/service/model_deployment_spec_schema.py CHANGED
@@ -26,6 +26,7 @@ class Service(BaseModel):
     name: str
     compute_pool: str
     ingress_enabled: bool
+    min_instances: int
     max_instances: int
     cpu: Optional[str] = None
     memory: Optional[str] = None
snowflake/ml/model/models/huggingface_pipeline.py CHANGED
@@ -105,6 +105,7 @@ class HuggingFacePipelineModel(huggingface.TransformersPipeline):
         image_repo: Optional[str] = None,
         image_build_compute_pool: Optional[str] = None,
         ingress_enabled: bool = False,
+        min_instances: int = 0,
         max_instances: int = 1,
         cpu_requests: Optional[str] = None,
         memory_requests: Optional[str] = None,
@@ -133,6 +134,7 @@ class HuggingFacePipelineModel(huggingface.TransformersPipeline):
             image_build_compute_pool: The name of the compute pool used to build the model inference image. It uses
                 the service compute pool if None.
             ingress_enabled: Whether ingress is enabled. Defaults to False.
+            min_instances: Minimum number of instances. Defaults to 0.
             max_instances: Maximum number of instances. Defaults to 1.
             cpu_requests: CPU requests configuration. Defaults to None.
             memory_requests: Memory requests configuration. Defaults to None.
@@ -225,6 +227,7 @@ class HuggingFacePipelineModel(huggingface.TransformersPipeline):
             service_compute_pool_name=sql_identifier.SqlIdentifier(service_compute_pool),
             image_repo_name=image_repo,
             ingress_enabled=ingress_enabled,
+            min_instances=min_instances,
             max_instances=max_instances,
             cpu_requests=cpu_requests,
             memory_requests=memory_requests,
snowflake/ml/model/openai_signatures.py CHANGED
@@ -88,6 +88,96 @@ _OPENAI_CHAT_SIGNATURE_SPEC = core.ModelSignature(
     ],
 )

+_OPENAI_CHAT_SIGNATURE_WITH_PARAMS_SPEC = core.ModelSignature(
+    inputs=[
+        core.FeatureGroupSpec(
+            name="messages",
+            specs=[
+                core.FeatureGroupSpec(
+                    name="content",
+                    specs=[
+                        core.FeatureSpec(name="type", dtype=core.DataType.STRING),
+                        # Text prompts
+                        core.FeatureSpec(name="text", dtype=core.DataType.STRING),
+                        # Image URL prompts
+                        core.FeatureGroupSpec(
+                            name="image_url",
+                            specs=[
+                                # Base64 encoded image URL or image URL
+                                core.FeatureSpec(name="url", dtype=core.DataType.STRING),
+                                # Image detail level (e.g., "low", "high", "auto")
+                                core.FeatureSpec(name="detail", dtype=core.DataType.STRING),
+                            ],
+                        ),
+                        # Video URL prompts
+                        core.FeatureGroupSpec(
+                            name="video_url",
+                            specs=[
+                                # Base64 encoded video URL
+                                core.FeatureSpec(name="url", dtype=core.DataType.STRING),
+                            ],
+                        ),
+                        # Audio prompts
+                        core.FeatureGroupSpec(
+                            name="input_audio",
+                            specs=[
+                                core.FeatureSpec(name="data", dtype=core.DataType.STRING),
+                                core.FeatureSpec(name="format", dtype=core.DataType.STRING),
+                            ],
+                        ),
+                    ],
+                    shape=(-1,),
+                ),
+                core.FeatureSpec(name="name", dtype=core.DataType.STRING),
+                core.FeatureSpec(name="role", dtype=core.DataType.STRING),
+                core.FeatureSpec(name="title", dtype=core.DataType.STRING),
+            ],
+            shape=(-1,),
+        ),
+    ],
+    outputs=[
+        core.FeatureSpec(name="id", dtype=core.DataType.STRING),
+        core.FeatureSpec(name="object", dtype=core.DataType.STRING),
+        core.FeatureSpec(name="created", dtype=core.DataType.FLOAT),
+        core.FeatureSpec(name="model", dtype=core.DataType.STRING),
+        core.FeatureGroupSpec(
+            name="choices",
+            specs=[
+                core.FeatureSpec(name="index", dtype=core.DataType.INT32),
+                core.FeatureGroupSpec(
+                    name="message",
+                    specs=[
+                        core.FeatureSpec(name="content", dtype=core.DataType.STRING),
+                        core.FeatureSpec(name="name", dtype=core.DataType.STRING),
+                        core.FeatureSpec(name="role", dtype=core.DataType.STRING),
+                    ],
+                ),
+                core.FeatureSpec(name="logprobs", dtype=core.DataType.STRING),
+                core.FeatureSpec(name="finish_reason", dtype=core.DataType.STRING),
+            ],
+            shape=(-1,),
+        ),
+        core.FeatureGroupSpec(
+            name="usage",
+            specs=[
+                core.FeatureSpec(name="completion_tokens", dtype=core.DataType.INT32),
+                core.FeatureSpec(name="prompt_tokens", dtype=core.DataType.INT32),
+                core.FeatureSpec(name="total_tokens", dtype=core.DataType.INT32),
+            ],
+        ),
+    ],
+    params=[
+        core.ParamSpec(name="temperature", dtype=core.DataType.DOUBLE, default_value=1.0),
+        core.ParamSpec(name="max_completion_tokens", dtype=core.DataType.INT64, default_value=250),
+        core.ParamSpec(name="stop", dtype=core.DataType.STRING, default_value=""),
+        core.ParamSpec(name="n", dtype=core.DataType.INT32, default_value=1),
+        core.ParamSpec(name="stream", dtype=core.DataType.BOOL, default_value=False),
+        core.ParamSpec(name="top_p", dtype=core.DataType.DOUBLE, default_value=1.0),
+        core.ParamSpec(name="frequency_penalty", dtype=core.DataType.DOUBLE, default_value=0.0),
+        core.ParamSpec(name="presence_penalty", dtype=core.DataType.DOUBLE, default_value=0.0),
+    ],
+)
+
 _OPENAI_CHAT_SIGNATURE_SPEC_WITH_CONTENT_FORMAT_STRING = core.ModelSignature(
     inputs=[
         core.FeatureGroupSpec(
@@ -142,6 +232,62 @@ _OPENAI_CHAT_SIGNATURE_SPEC_WITH_CONTENT_FORMAT_STRING = core.ModelSignature(
     ],
 )

+_OPENAI_CHAT_SIGNATURE_WITH_PARAMS_SPEC_WITH_CONTENT_FORMAT_STRING = core.ModelSignature(
+    inputs=[
+        core.FeatureGroupSpec(
+            name="messages",
+            specs=[
+                core.FeatureSpec(name="content", dtype=core.DataType.STRING),
+                core.FeatureSpec(name="name", dtype=core.DataType.STRING),
+                core.FeatureSpec(name="role", dtype=core.DataType.STRING),
+                core.FeatureSpec(name="title", dtype=core.DataType.STRING),
+            ],
+            shape=(-1,),
+        ),
+    ],
+    outputs=[
+        core.FeatureSpec(name="id", dtype=core.DataType.STRING),
+        core.FeatureSpec(name="object", dtype=core.DataType.STRING),
+        core.FeatureSpec(name="created", dtype=core.DataType.FLOAT),
+        core.FeatureSpec(name="model", dtype=core.DataType.STRING),
+        core.FeatureGroupSpec(
+            name="choices",
+            specs=[
+                core.FeatureSpec(name="index", dtype=core.DataType.INT32),
+                core.FeatureGroupSpec(
+                    name="message",
+                    specs=[
+                        core.FeatureSpec(name="content", dtype=core.DataType.STRING),
+                        core.FeatureSpec(name="name", dtype=core.DataType.STRING),
+                        core.FeatureSpec(name="role", dtype=core.DataType.STRING),
+                    ],
+                ),
+                core.FeatureSpec(name="logprobs", dtype=core.DataType.STRING),
+                core.FeatureSpec(name="finish_reason", dtype=core.DataType.STRING),
+            ],
+            shape=(-1,),
+        ),
+        core.FeatureGroupSpec(
+            name="usage",
+            specs=[
+                core.FeatureSpec(name="completion_tokens", dtype=core.DataType.INT32),
+                core.FeatureSpec(name="prompt_tokens", dtype=core.DataType.INT32),
+                core.FeatureSpec(name="total_tokens", dtype=core.DataType.INT32),
+            ],
+        ),
+    ],
+    params=[
+        core.ParamSpec(name="temperature", dtype=core.DataType.DOUBLE, default_value=1.0),
+        core.ParamSpec(name="max_completion_tokens", dtype=core.DataType.INT64, default_value=250),
+        core.ParamSpec(name="stop", dtype=core.DataType.STRING, default_value=""),
+        core.ParamSpec(name="n", dtype=core.DataType.INT32, default_value=1),
+        core.ParamSpec(name="stream", dtype=core.DataType.BOOL, default_value=False),
+        core.ParamSpec(name="top_p", dtype=core.DataType.DOUBLE, default_value=1.0),
+        core.ParamSpec(name="frequency_penalty", dtype=core.DataType.DOUBLE, default_value=0.0),
+        core.ParamSpec(name="presence_penalty", dtype=core.DataType.DOUBLE, default_value=0.0),
+    ],
+)
+

 # Refer vLLM documentation: https://docs.vllm.ai/en/stable/serving/openai_compatible_server/#chat-template

@@ -152,3 +298,11 @@ OPENAI_CHAT_SIGNATURE_WITH_CONTENT_FORMAT_STRING = {"__call__": _OPENAI_CHAT_SIG
 # This is the default signature.
 # The content format allows vLLM to handle content parts like text, image, video, audio, file, etc.
 OPENAI_CHAT_SIGNATURE = {"__call__": _OPENAI_CHAT_SIGNATURE_SPEC}
+
+# Use this signature to leverage ParamSpec with the default ChatML template.
+OPENAI_CHAT_WITH_PARAMS_SIGNATURE = {"__call__": _OPENAI_CHAT_SIGNATURE_WITH_PARAMS_SPEC}
+
+# Use this signature to leverage ParamSpec with the content format string.
+OPENAI_CHAT_WITH_PARAMS_SIGNATURE_WITH_CONTENT_FORMAT_STRING = {
+    "__call__": _OPENAI_CHAT_SIGNATURE_WITH_PARAMS_SPEC_WITH_CONTENT_FORMAT_STRING
+}
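These two new module-level constants make the params-enabled specs selectable when logging a model. A hedged usage sketch: the `Registry`/`log_model` call shape follows the public registry API, while the session and model objects are placeholders:

```python
# Hypothetical: log a chat model with the new params-enabled signature so that
# temperature, top_p, etc. can be supplied as inference-time parameters.
from snowflake.ml.model import openai_signatures
from snowflake.ml.registry import Registry

registry = Registry(session=session)  # assumes an existing Snowpark session
mv = registry.log_model(
    my_chat_model,  # placeholder for the actual model object
    model_name="my_chat_model",
    version_name="v1",
    signatures=openai_signatures.OPENAI_CHAT_WITH_PARAMS_SIGNATURE,
)
```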
snowflake/ml/registry/_manager/model_parameter_reconciler.py CHANGED
@@ -193,12 +193,11 @@ class ModelParameterReconciler:
         if enable_explainability:
             if only_spcs or not is_warehouse_runnable:
                 raise ValueError(
-                    "`enable_explainability` cannot be set to True when the model is not runnable in WH "
-                    "or the target platforms include SPCS."
+                    "`enable_explainability` cannot be set to True when the model cannot run in Warehouse."
                 )
             elif has_both_platforms:
                 warnings.warn(
-                    ("Explain function will only be available for model deployed to warehouse."),
+                    ("Explain function will only be available for model deployed to Warehouse."),
                     category=UserWarning,
                     stacklevel=2,
                 )
snowflake/ml/version.py CHANGED
@@ -1,2 +1,2 @@
 # This is parsed by regex in conda recipe meta file. Make sure not to break it.
-VERSION = "1.24.0"
+VERSION = "1.25.1"
snowflake_ml_python-1.25.1.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: snowflake-ml-python
-Version: 1.24.0
+Version: 1.25.1
 Summary: The machine learning client library that is used for interacting with Snowflake to build machine learning solutions.
 Author-email: "Snowflake, Inc" <support@snowflake.com>
 License:
@@ -417,6 +417,43 @@ NOTE: Version 1.7.0 is used as example here. Please choose the the latest versio

 # Release History

+## 1.25.1
+
+### Bug Fixes
+
+* ML Job: Reverted changes related to the introduction of ML Job Definitions.
+
+## 1.25.0
+
+### New Features
+
+* ML Job: Added support for creating ML job definitions and launching jobs with different
+  arguments without re-uploading payloads.
+
+* Inference Autocapture (PuPr): The `create_service` API will now accept `autocapture` as a new argument to indicate
+  whether inference data will be captured.
+
+* Model serving: Introduced the `min_instances` field in the `mv.create_service()` and
+  `HuggingFacePipelineModel.log_model_and_create_service()` APIs (defaulting to 0). The service now launches
+  with `min_instances` instances and automatically scales between `min_instances` and `max_instances` based on
+  traffic and hardware utilization. When `min_instances` is set to 0, the service will automatically suspend
+  if no traffic is detected for a period of time.
+
+### Bug Fixes
+
+### Behavior Changes
+
+* Inference Autocapture (PuPr): `list_services()` now shows an `autocapture_enabled` column to indicate if a model
+  service has autocapture enabled.
+
+* Model serving: The `mv.create_service()` and `HuggingFacePipelineModel.log_model_and_create_service()` APIs now
+  include a `min_instances` field (defaulting to 0). When these APIs are called without specifying `min_instances`,
+  the system will now launch the service with 1 instance and enable auto scaling. This replaces the previous behavior,
+  where `min_instances` was automatically set to match `max_instances`, resulting in the immediate launch of the
+  maximum number of instances.
+
+### Deprecations
+
 ## 1.24.0

 ### New Features
@@ -520,7 +557,15 @@ x_df = pd.DataFrame.from_records(
     [
         {
             "messages": [
-                {"role": "system", "content": "Complete the sentence."},
+                {
+                    "role": "system",
+                    "content": [
+                        {
+                            "type": "text",
+                            "text": "Complete the sentence."
+                        },
+                    ]
+                },
                 {
                     "role": "user",
                     "content": [
snowflake_ml_python-1.25.1.dist-info/RECORD CHANGED
@@ -10,7 +10,7 @@ snowflake/cortex/_sse_client.py,sha256=sLYgqAfTOPADCnaWH2RWAJi8KbU_7gSRsTUDcDD5T
 snowflake/cortex/_summarize.py,sha256=7GH8zqfIdOiHA5w4b6EvJEKEWhaTrL4YA6iDGbn7BNM,1307
 snowflake/cortex/_translate.py,sha256=9ZGjvAnJFisbzJ_bXnt4pyug5UzhHJRXW8AhGQEersM,1652
 snowflake/cortex/_util.py,sha256=krNTpbkFLXwdFqy1bd0xi7ZmOzOHRnIfHdQCPiLZJxk,3288
-snowflake/ml/version.py,sha256=esv6fY3HPIokUy4DELkMBOWM2pRI2gTUZ-kkGwHiP-M,99
+snowflake/ml/version.py,sha256=oU0Z3v9Lg_mwDt9f5axBWDQv8y6QGuNh3xnDahFqq8w,99
 snowflake/ml/_internal/env.py,sha256=EY_2KVe8oR3LgKWdaeRb5rRU-NDNXJppPDsFJmMZUUY,265
 snowflake/ml/_internal/env_utils.py,sha256=Xx03pV_qEIVJJY--J3ZmnqK9Ugf0Os3O2vrF8xOyq_c,31500
 snowflake/ml/_internal/file_utils.py,sha256=7sA6loOeSfmGP4yx16P4usT9ZtRqG3ycnXu7_Tk7dOs,14206
@@ -53,19 +53,19 @@ snowflake/ml/_internal/utils/table_manager.py,sha256=Wf3JXLUzdCiffKF9PJj7edHY7us
 snowflake/ml/_internal/utils/temp_file_utils.py,sha256=eHyyvxHfj4Z3FIS6VWgNyw5bFjNi5cSGYmY1hzyqzwY,1534
 snowflake/ml/_internal/utils/url.py,sha256=V3Y5zwNhJouy_cyLTa2rogg5nQZ-Ag-7Rmq-qPPEjmg,1219
 snowflake/ml/data/__init__.py,sha256=nm5VhN98Lzxr4kb679kglQfqbDbHhd9zYsnFJiQiThg,351
-snowflake/ml/data/data_connector.py,sha256=ZAgnXWEmOjR_3tremS2gNgLen7Rzrt1Z3ZtkXJrZtCo,14154
-snowflake/ml/data/data_ingestor.py,sha256=0TFc8qo4TZwdHMaBUBTZ7T8kkZfLGVmStvEx9KrXPHU,1165
+snowflake/ml/data/data_connector.py,sha256=rzSW-z6YR5RQZH8aj7gbjyHhdwecCYKbrkk6-SReE68,14174
+snowflake/ml/data/data_ingestor.py,sha256=Fxy1wuw0-6dWjJhBn9o5ZhzXKIpaaM5Y6Ji9XZGO5g0,1203
 snowflake/ml/data/data_source.py,sha256=HjBO1xqTyJfAvEAGESUIdke0KvSj5S5-FcI2D2zgejI,512
 snowflake/ml/data/ingestor_utils.py,sha256=JOv7Kvs0DNhsXUjl940ZULDkeTjIcePCfQ9aL_NteV0,2721
 snowflake/ml/data/torch_utils.py,sha256=1IgXiqxLgUh0yyNqchOSps5gLqmMOglSctoifjJIDFI,3591
-snowflake/ml/data/_internal/arrow_ingestor.py,sha256=8QSHNrFfvOmlMHxbF0Uexnw0KBpfvqXuVKbqjxiLOFk,15901
+snowflake/ml/data/_internal/arrow_ingestor.py,sha256=0YfgMGDqq72O28nrV_LOgH-lpM1oljDhyn--BdNYKG0,16025
 snowflake/ml/dataset/__init__.py,sha256=nESj7YEI2u90Oxyit_hKCQMWb7N1BlEM3Ho2Fm0MfHo,274
 snowflake/ml/dataset/dataset.py,sha256=Uo99ZfAIpY9LZ4_gMsQfY_SwUpPnbfkuEcViHmSV6HA,21067
 snowflake/ml/dataset/dataset_factory.py,sha256=Fym4ICK-B1j6Om4ENwWxEvryq3ZKoCslBSZDBenmjOo,1615
 snowflake/ml/dataset/dataset_metadata.py,sha256=lcNvugBkP8YEkGMQqaV8SlHs5mwUKsUS8GgaPGNm6wM,4145
 snowflake/ml/dataset/dataset_reader.py,sha256=mZsG9HyWUGgfotrGkLrunyEsOm_659mH-Sn2OyG6A-Q,5036
 snowflake/ml/experiment/__init__.py,sha256=r7qdyPd3jwxzqvksim2ju5j_LrnYQrta0ZI6XpWUqmc,109
-snowflake/ml/experiment/_experiment_info.py,sha256=iaJ65x6nzBYJ5djleSOzBtMpZUJCUDlRpaDw0pu-dcU,2533
+snowflake/ml/experiment/_experiment_info.py,sha256=B47YwCfp7qr1ZNnvDXzjFuk69R4_Qa3a-nVs5gE1OXc,2592
 snowflake/ml/experiment/experiment_tracking.py,sha256=X4R4S6TjWkRB6F6RkPoFY4iOO44of4YMj_whcKoSayk,21841
 snowflake/ml/experiment/utils.py,sha256=5lanWEq6tgWnOMHCX4FnfBmpIQEIgH57Cz0YtpaAa2Y,830
 snowflake/ml/experiment/_client/artifact.py,sha256=R2WB4Y_kqv43BWLfXv8SEDINn1Bnevzgb-mH5LyvgGk,3035
@@ -116,7 +116,7 @@ snowflake/ml/fileset/stage_fs.py,sha256=SnkgCta6_5G6Ljl-Nzctr4yavhHUSlNKN3je0ojp
 snowflake/ml/jobs/__init__.py,sha256=JypKzxERpcn4yJ7FILA98Gl0sFDEGkAIQ35b1iSzaXg,741
 snowflake/ml/jobs/decorators.py,sha256=mQgdWvvCwD7q79cSFKZHKegXGh2j1u8WM64UD3lCKr4,3428
 snowflake/ml/jobs/job.py,sha256=wEeIk5RZhkaJ56Zb1cBxsTAUJIo4QyhpRWaZaiYBuGY,27697
-snowflake/ml/jobs/job_definition.py,sha256=f1lZoTdn1AulJTHstAN8yDd-8Cx4kv4A0KBm-TAxJq4,10377
+snowflake/ml/jobs/job_definition.py,sha256=95qK57PyrT65VRVEXRSKaG_VUkHiI2mJ7cpWMkTP9B4,10791
 snowflake/ml/jobs/manager.py,sha256=heuOXEn7Y5Gb5s2y55GEx6i9mr-Z2tTL-b0rPGYv3ao,20469
 snowflake/ml/jobs/_interop/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 snowflake/ml/jobs/_interop/data_utils.py,sha256=xUO5YlhUKFVCDtbjam5gP2lka3lfoknTLr7syNAVxK0,4074
@@ -128,7 +128,7 @@ snowflake/ml/jobs/_interop/results.py,sha256=nQ07XJ1BZEkPB4xa12pbGyaKqR8sWCoSzx0
 snowflake/ml/jobs/_interop/utils.py,sha256=TWFkUcAYmb-fpTwEL8idkk3XxlZ8vLz4X_gyS78PSi4,5552
 snowflake/ml/jobs/_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 snowflake/ml/jobs/_utils/constants.py,sha256=7KMhM-7KEursLjFBlj4xaQ4uGFJtjJ12rlx55hcBCQ0,4677
-snowflake/ml/jobs/_utils/feature_flags.py,sha256=dLWBVIjyB2vsa4Vtm7Yhty6DOi8Nn73_YSjuYf73Y7A,1669
+snowflake/ml/jobs/_utils/feature_flags.py,sha256=j1PvXFTc83ds6I1575X0xnfIz-qRlnUwna8ZhQN1pUw,4831
 snowflake/ml/jobs/_utils/function_payload_utils.py,sha256=4LBaStMdhRxcqwRkwFje-WwiEKRWnBfkaOYouF3N3Kg,1308
 snowflake/ml/jobs/_utils/payload_utils.py,sha256=X9nG3kKlxr-2EfnLXhLwHdXoAhQS_MaTPfXPydlE_JU,33824
 snowflake/ml/jobs/_utils/query_helper.py,sha256=DxezZzftVT7WZzf0uzEn0l6U7BFLNU4U4W_IRCzgbaI,1265
@@ -152,7 +152,7 @@ snowflake/ml/model/custom_model.py,sha256=sdyKhT-QNNtTeu3idu6BExZNVyjUD4YTU8cru3
 snowflake/ml/model/event_handler.py,sha256=pojleQVM9TPNeDvliTvon2Sfxqbf2WWxrOebo1SaEHo,7211
 snowflake/ml/model/inference_engine.py,sha256=L0nwySY2Qwp3JzuRpPS87r0--m3HTUNUgZXYyOPJjyk,66
 snowflake/ml/model/model_signature.py,sha256=ae1tkh3Rw9MzJSxmVT9kb0PwD3TANtKbWwp6b8-cItE,32847
-snowflake/ml/model/openai_signatures.py,sha256=wu7l9V6-fWNJoAdE3R25hYBAmVfND5oaWdsUXilxBDo,7329
+snowflake/ml/model/openai_signatures.py,sha256=ny3aAd301t3h_4Pb60MkrY0LZO6QxpyIlHYnjv120VQ,14755
 snowflake/ml/model/target_platform.py,sha256=H5d-wtuKQyVlq9x33vPtYZAlR5ka0ytcKRYgwlKl0bQ,390
 snowflake/ml/model/task.py,sha256=Zp5JaLB-YfX5p_HSaw81P3J7UnycQq5EMa87A35VOaQ,286
 snowflake/ml/model/type_hints.py,sha256=Xxa6b9ezbvXYvSIN5R4Zv6Dro4ZH74-eW4cno92VTJE,11475
@@ -160,15 +160,15 @@ snowflake/ml/model/volatility.py,sha256=qu-wqe9oKkRwXwE2qkKygxTWzUypQYEk3UjsqOGR
 snowflake/ml/model/_client/model/batch_inference_specs.py,sha256=6q3XzYWaO1CH-JqonJr12n6NuRhQDvPns_FAq3yvrN4,6114
 snowflake/ml/model/_client/model/inference_engine_utils.py,sha256=yPkdImi2qP1uG1WzLKCBZgXV-DiIBVpImEosIjYJk8Y,1958
 snowflake/ml/model/_client/model/model_impl.py,sha256=Yabrbir5vPMOnsVmQJ23YN7vqhi756Jcm6pfO8Aq92o,17469
-snowflake/ml/model/_client/model/model_version_impl.py,sha256=kJ9tG_MQJzmIXss4K7IISoB7uO9rkjcH8kef5ypBy2s,69613
+snowflake/ml/model/_client/model/model_version_impl.py,sha256=3R39U2xAWJdvZktJ_h1sAaxHmum7wd1dQP1dJE5PmbE,70403
 snowflake/ml/model/_client/ops/deployment_step.py,sha256=9kxKDr9xcD4KmVM-9O4_tm3ytkllQVoElJD793VI84Q,1428
 snowflake/ml/model/_client/ops/metadata_ops.py,sha256=qpK6PL3OyfuhyOmpvLCpHLy6vCxbZbp1HlEvakFGwv4,4884
 snowflake/ml/model/_client/ops/model_ops.py,sha256=s5-N9RlaJzndh0D1sQa4BnYb0N-FKYgx86O0d2H5mKg,53819
 snowflake/ml/model/_client/ops/param_utils.py,sha256=MPPerO8wYNYIuig4rFoG_YI5idD_dBlrWXgmAXcR2nM,5160
-snowflake/ml/model/_client/ops/service_ops.py,sha256=ZPZDJFEAaiQPS1Rni_cZ-mWCE_gxHixTV6cWmoTZZBE,48049
+snowflake/ml/model/_client/ops/service_ops.py,sha256=9eWK3R_iOCkqafR_bK2OpAy0fdpjqYAK447ozZZF_Lg,47910
 snowflake/ml/model/_client/service/import_model_spec_schema.py,sha256=SlEX1GiPlB8whMCmiwKUopnrGlm4fkQOQbTW2KyVTFU,554
-snowflake/ml/model/_client/service/model_deployment_spec.py,sha256=Qg7cR9jQTnbWHIV5FyxUZocxWH0nFEem9wdOt5osKIw,20149
-snowflake/ml/model/_client/service/model_deployment_spec_schema.py,sha256=2FCfhyg7F2XYVWKFybeEQ9Fq3goZ3XKmJdH_CuRqPpI,2649
+snowflake/ml/model/_client/service/model_deployment_spec.py,sha256=pRwwlD9h6V76fAOtFR1XgxXEv1fQjfdJCxBZgjgocE0,20286
+snowflake/ml/model/_client/service/model_deployment_spec_schema.py,sha256=axEX0gnUqnLav575xzpJ9lDWNwr9vyjyay7soo7Hwgk,2672
 snowflake/ml/model/_client/sql/_base.py,sha256=Qrm8M92g3MHb-QnSLUlbd8iVKCRxLhG_zr5M2qmXwJ8,1473
 snowflake/ml/model/_client/sql/model.py,sha256=nstZ8zR7MkXVEfhqLt7PWMik6dZr06nzq7VsF5NVNow,5840
 snowflake/ml/model/_client/sql/model_version.py,sha256=SOYr13YEq0mxgIatsSchOq0aKUgdPhKO3clRQ6AMa7U,24766
230
230
  snowflake/ml/model/_signatures/tensorflow_handler.py,sha256=_yrvMg-w_jJoYuyrGXKPX4Dv7Vt8z1e6xIKiWGuZcc4,5660
231
231
  snowflake/ml/model/_signatures/utils.py,sha256=hoc_UuMxfPWkVmoMEE7U-XNRcSgDPyRIdyDVK0JLcfE,21685
232
232
  snowflake/ml/model/models/huggingface.py,sha256=VO84lBizmSALntWCnK4O_eY_Cq2uzMooyHtfJXuFkew,13791
233
- snowflake/ml/model/models/huggingface_pipeline.py,sha256=yJ7NW97EW5GRtOPYCYuNjoPIgXbAIyOEKW9P1trt9vY,14226
233
+ snowflake/ml/model/models/huggingface_pipeline.py,sha256=tLTj1YJumNs_pdHGmA1Us5HIi8EYm38X6jqv2j85TMo,14378
234
234
  snowflake/ml/modeling/_internal/estimator_utils.py,sha256=dfPPWO-RHf5C3Tya3VQ4KEqoa32pm-WKwRrjzjDInLk,13956
235
235
  snowflake/ml/modeling/_internal/model_specifications.py,sha256=3wFMcKPCSoiEzU7Mx6RVem89BRlBBENpX__-Rd7GwdU,4851
236
236
  snowflake/ml/modeling/_internal/model_trainer.py,sha256=5Ck1lbdyzcd-TpzAxEyovIN9fjaaVIqugyMHXt0wzH0,971
@@ -459,14 +459,14 @@ snowflake/ml/monitoring/entities/model_monitor_config.py,sha256=auy9BD0IoyUpZPZX
 snowflake/ml/registry/__init__.py,sha256=XdPQK9ejYkSJVrSQ7HD3jKQO0hKq2mC4bPCB6qrtH3U,76
 snowflake/ml/registry/registry.py,sha256=_vtQCh4DmhnPusTKWJteRPJkDpLFEfG150cjED70sOA,34611
 snowflake/ml/registry/_manager/model_manager.py,sha256=splK5YGErt-eDIy6UbZAB3VKsGMZSJk2_MzfgrIQOhY,26306
-snowflake/ml/registry/_manager/model_parameter_reconciler.py,sha256=BTi8WNQCW1TiW8jd9LbR6mrIHE29ehocKSh6mVQX0vI,15325
+snowflake/ml/registry/_manager/model_parameter_reconciler.py,sha256=92YX67YMLmqbJN_emr49c3QWyFVGc1DUn6JvA3Y2qqo,15267
 snowflake/ml/utils/authentication.py,sha256=TQV3E8YDHAPXA3dS8JWDmb_Zm8P0d9c8kCexRI4nefo,3106
 snowflake/ml/utils/connection_params.py,sha256=NSBUgcs-DXPRHs1BKpxdSubbJx1yrFRlMPBp-bE3Ugc,8308
 snowflake/ml/utils/html_utils.py,sha256=4g1EPuD8EnOAK7BCYiY8Wp3ZrdDkNOcUDrDAbUYxLfs,9954
 snowflake/ml/utils/sparse.py,sha256=zLBNh-ynhGpKH5TFtopk0YLkHGvv0yq1q-sV59YQKgg,3819
 snowflake/ml/utils/sql_client.py,sha256=pSe2od6Pkh-8NwG3D-xqN76_uNf-ohOtVbT55HeQg1Y,668
-snowflake_ml_python-1.24.0.dist-info/licenses/LICENSE.txt,sha256=PdEp56Av5m3_kl21iFkVTX_EbHJKFGEdmYeIO1pL_Yk,11365
-snowflake_ml_python-1.24.0.dist-info/METADATA,sha256=sR-h6xi0sM7Z04GM-Jd3clq-_jE_I9fC-3VyPcUB8r0,105348
-snowflake_ml_python-1.24.0.dist-info/WHEEL,sha256=qELbo2s1Yzl39ZmrAibXA2jjPLUYfnVhUNTlyF1rq0Y,92
-snowflake_ml_python-1.24.0.dist-info/top_level.txt,sha256=TY0gFSHKDdZy3THb0FGomyikWQasEGldIR1O0HGOHVw,10
-snowflake_ml_python-1.24.0.dist-info/RECORD,,
+snowflake_ml_python-1.25.1.dist-info/licenses/LICENSE.txt,sha256=PdEp56Av5m3_kl21iFkVTX_EbHJKFGEdmYeIO1pL_Yk,11365
+snowflake_ml_python-1.25.1.dist-info/METADATA,sha256=brYA-SAFfm2yssPWtVtE1sSId2v5q28m3OKgujz1JsY,107142
+snowflake_ml_python-1.25.1.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+snowflake_ml_python-1.25.1.dist-info/top_level.txt,sha256=TY0gFSHKDdZy3THb0FGomyikWQasEGldIR1O0HGOHVw,10
+snowflake_ml_python-1.25.1.dist-info/RECORD,,
snowflake_ml_python-1.25.1.dist-info/WHEEL CHANGED
@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: setuptools (80.10.1)
+Generator: setuptools (80.10.2)
 Root-Is-Purelib: true
 Tag: py3-none-any
