mlrun 1.10.0rc16__py3-none-any.whl → 1.10.1rc4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mlrun might be problematic.
Files changed (101)
  1. mlrun/__init__.py +22 -2
  2. mlrun/artifacts/document.py +6 -1
  3. mlrun/artifacts/llm_prompt.py +21 -15
  4. mlrun/artifacts/model.py +3 -3
  5. mlrun/common/constants.py +9 -0
  6. mlrun/common/formatters/artifact.py +1 -0
  7. mlrun/common/model_monitoring/helpers.py +86 -0
  8. mlrun/common/schemas/__init__.py +2 -0
  9. mlrun/common/schemas/auth.py +2 -0
  10. mlrun/common/schemas/function.py +10 -0
  11. mlrun/common/schemas/hub.py +30 -18
  12. mlrun/common/schemas/model_monitoring/__init__.py +2 -0
  13. mlrun/common/schemas/model_monitoring/constants.py +30 -6
  14. mlrun/common/schemas/model_monitoring/functions.py +13 -4
  15. mlrun/common/schemas/model_monitoring/model_endpoints.py +11 -0
  16. mlrun/common/schemas/pipeline.py +1 -1
  17. mlrun/common/schemas/serving.py +3 -0
  18. mlrun/common/schemas/workflow.py +1 -0
  19. mlrun/common/secrets.py +22 -1
  20. mlrun/config.py +34 -21
  21. mlrun/datastore/__init__.py +11 -3
  22. mlrun/datastore/azure_blob.py +162 -47
  23. mlrun/datastore/base.py +265 -7
  24. mlrun/datastore/datastore.py +10 -5
  25. mlrun/datastore/datastore_profile.py +61 -5
  26. mlrun/datastore/model_provider/huggingface_provider.py +367 -0
  27. mlrun/datastore/model_provider/mock_model_provider.py +87 -0
  28. mlrun/datastore/model_provider/model_provider.py +211 -74
  29. mlrun/datastore/model_provider/openai_provider.py +243 -71
  30. mlrun/datastore/s3.py +24 -2
  31. mlrun/datastore/store_resources.py +4 -4
  32. mlrun/datastore/storeytargets.py +2 -3
  33. mlrun/datastore/utils.py +15 -3
  34. mlrun/db/base.py +27 -19
  35. mlrun/db/httpdb.py +57 -48
  36. mlrun/db/nopdb.py +25 -10
  37. mlrun/execution.py +55 -13
  38. mlrun/hub/__init__.py +15 -0
  39. mlrun/hub/module.py +181 -0
  40. mlrun/k8s_utils.py +105 -16
  41. mlrun/launcher/base.py +13 -6
  42. mlrun/launcher/local.py +2 -0
  43. mlrun/model.py +9 -3
  44. mlrun/model_monitoring/api.py +66 -27
  45. mlrun/model_monitoring/applications/__init__.py +1 -1
  46. mlrun/model_monitoring/applications/base.py +388 -138
  47. mlrun/model_monitoring/applications/context.py +2 -4
  48. mlrun/model_monitoring/applications/results.py +4 -7
  49. mlrun/model_monitoring/controller.py +239 -101
  50. mlrun/model_monitoring/db/_schedules.py +36 -13
  51. mlrun/model_monitoring/db/_stats.py +4 -3
  52. mlrun/model_monitoring/db/tsdb/base.py +29 -9
  53. mlrun/model_monitoring/db/tsdb/tdengine/schemas.py +4 -5
  54. mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py +154 -50
  55. mlrun/model_monitoring/db/tsdb/tdengine/writer_graph_steps.py +51 -0
  56. mlrun/model_monitoring/db/tsdb/v3io/stream_graph_steps.py +17 -4
  57. mlrun/model_monitoring/db/tsdb/v3io/v3io_connector.py +245 -51
  58. mlrun/model_monitoring/helpers.py +28 -5
  59. mlrun/model_monitoring/stream_processing.py +45 -14
  60. mlrun/model_monitoring/writer.py +220 -1
  61. mlrun/platforms/__init__.py +3 -2
  62. mlrun/platforms/iguazio.py +7 -3
  63. mlrun/projects/operations.py +16 -11
  64. mlrun/projects/pipelines.py +2 -2
  65. mlrun/projects/project.py +157 -69
  66. mlrun/run.py +97 -20
  67. mlrun/runtimes/__init__.py +18 -0
  68. mlrun/runtimes/base.py +14 -6
  69. mlrun/runtimes/daskjob.py +1 -0
  70. mlrun/runtimes/local.py +5 -2
  71. mlrun/runtimes/mounts.py +20 -2
  72. mlrun/runtimes/nuclio/__init__.py +1 -0
  73. mlrun/runtimes/nuclio/application/application.py +147 -17
  74. mlrun/runtimes/nuclio/function.py +72 -27
  75. mlrun/runtimes/nuclio/serving.py +102 -20
  76. mlrun/runtimes/pod.py +213 -21
  77. mlrun/runtimes/utils.py +49 -9
  78. mlrun/secrets.py +54 -13
  79. mlrun/serving/remote.py +79 -6
  80. mlrun/serving/routers.py +23 -41
  81. mlrun/serving/server.py +230 -40
  82. mlrun/serving/states.py +605 -232
  83. mlrun/serving/steps.py +62 -0
  84. mlrun/serving/system_steps.py +136 -81
  85. mlrun/serving/v2_serving.py +9 -10
  86. mlrun/utils/helpers.py +215 -83
  87. mlrun/utils/logger.py +3 -1
  88. mlrun/utils/notifications/notification/base.py +18 -0
  89. mlrun/utils/notifications/notification/git.py +2 -4
  90. mlrun/utils/notifications/notification/mail.py +38 -15
  91. mlrun/utils/notifications/notification/slack.py +2 -4
  92. mlrun/utils/notifications/notification/webhook.py +2 -5
  93. mlrun/utils/notifications/notification_pusher.py +1 -1
  94. mlrun/utils/version/version.json +2 -2
  95. {mlrun-1.10.0rc16.dist-info → mlrun-1.10.1rc4.dist-info}/METADATA +51 -50
  96. {mlrun-1.10.0rc16.dist-info → mlrun-1.10.1rc4.dist-info}/RECORD +100 -95
  97. mlrun/api/schemas/__init__.py +0 -259
  98. {mlrun-1.10.0rc16.dist-info → mlrun-1.10.1rc4.dist-info}/WHEEL +0 -0
  99. {mlrun-1.10.0rc16.dist-info → mlrun-1.10.1rc4.dist-info}/entry_points.txt +0 -0
  100. {mlrun-1.10.0rc16.dist-info → mlrun-1.10.1rc4.dist-info}/licenses/LICENSE +0 -0
  101. {mlrun-1.10.0rc16.dist-info → mlrun-1.10.1rc4.dist-info}/top_level.txt +0 -0
mlrun/datastore/model_provider/huggingface_provider.py (new file)
@@ -0,0 +1,367 @@
+ # Copyright 2025 Iguazio
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #   http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ import threading
+ from typing import TYPE_CHECKING, Any, Optional, Union
+
+ import mlrun
+ from mlrun.datastore.model_provider.model_provider import (
+     InvokeResponseFormat,
+     ModelProvider,
+     UsageResponseKeys,
+ )
+
+ if TYPE_CHECKING:
+     from transformers.pipelines.base import Pipeline
+     from transformers.pipelines.text_generation import ChatType
+
+
+ class HuggingFaceProvider(ModelProvider):
+     """
+     HuggingFaceProvider is a wrapper around the Hugging Face Transformers pipeline
+     that provides an interface for interacting with a wide range of Hugging Face models.
+
+     It supports synchronous operations, enabling flexible integration into various workflows.
+
+     This class extends the ModelProvider base class and implements Hugging Face-specific
+     functionality, including pipeline initialization, default text generation operations,
+     and custom operations tailored to the Hugging Face Transformers pipeline API.
+
+     Note: The pipeline object will download the model (if not already cached) and load it
+     into memory for inference. Ensure you have the required CPU/GPU and memory to use this operation.
+     """
+
+     # locks for threading use cases
+     _client_lock = threading.Lock()
+
+     def __init__(
+         self,
+         parent,
+         schema,
+         name,
+         endpoint="",
+         secrets: Optional[dict] = None,
+         default_invoke_kwargs: Optional[dict] = None,
+     ):
+         endpoint = endpoint or mlrun.mlconf.model_providers.huggingface_default_model
+         if schema != "huggingface":
+             raise mlrun.errors.MLRunInvalidArgumentError(
+                 "HuggingFaceProvider supports only 'huggingface' as the provider kind."
+             )
+         super().__init__(
+             parent=parent,
+             kind=schema,
+             name=name,
+             endpoint=endpoint,
+             secrets=secrets,
+             default_invoke_kwargs=default_invoke_kwargs,
+         )
+         self.options = self.get_client_options()
+         self._expected_operation_type = None
+         self._download_model()
+
+     @staticmethod
+     def _extract_string_output(response: list[dict]) -> str:
+         """
+         Extracts the first generated string from Hugging Face pipeline output.
+         """
+         if not isinstance(response, list) or len(response) == 0:
+             raise ValueError("Empty or invalid pipeline output")
+         if len(response) != 1:
+             raise mlrun.errors.MLRunInvalidArgumentError(
+                 "HuggingFaceProvider: extracting string from response is only supported for single-response outputs"
+             )
+         return response[0].get("generated_text")
+
+     @classmethod
+     def parse_endpoint_and_path(cls, endpoint, subpath) -> tuple[str, str]:
+         if endpoint and subpath:
+             endpoint = endpoint + subpath
+             # In Hugging Face, "/" in a model name is part of the name; `subpath` is not used.
+             subpath = ""
+         return endpoint, subpath
+
+     @property
+     def client(self) -> Any:
+         """
+         Lazily return the Hugging Face pipeline client.
+
+         If the client has not been initialized yet, it will be created
+         by calling `load_client`.
+         """
+         self.load_client()
+         return self._client
+
+     def _download_model(self):
+         """
+         Pre-downloads model files locally to prevent race conditions in multiprocessing.
+
+         Uses snapshot_download with local_dir_use_symlinks=False to ensure proper
+         file copying for safe concurrent access across multiple processes.
+
+         :raises ImportError: If the huggingface_hub package is not installed.
+         """
+         try:
+             from huggingface_hub import snapshot_download
+
+             # Download the model and tokenizer files directly to the cache.
+             snapshot_download(
+                 repo_id=self.model,
+                 local_dir_use_symlinks=False,
+                 token=self._get_secret_or_env("HF_TOKEN") or None,
+             )
+         except ImportError as exc:
+             raise ImportError("huggingface_hub package is not installed") from exc
+
+     def _response_handler(
+         self,
+         response: Union[str, list],
+         invoke_response_format: InvokeResponseFormat = InvokeResponseFormat.FULL,
+         messages: Union[str, list[str], "ChatType", list["ChatType"]] = None,
+         **kwargs,
+     ) -> Union[str, list, dict[str, Any]]:
+         """
+         Processes and formats the raw response from the Hugging Face pipeline according to the specified format.
+
+         The response should exclude the user's input (no repetition in the output).
+         This can be accomplished by invoking the pipeline with `return_full_text=False`.
+
+         :param response: The raw response from the Hugging Face pipeline, typically a list of dictionaries
+                          containing generated text sequences.
+         :param invoke_response_format: Determines how the response should be processed and returned. Options:
+
+             - STRING: Return only the main generated content as a string,
+               for single-answer responses.
+             - USAGE: Return a dictionary combining the string response with
+               token usage statistics:
+
+               .. code-block:: json
+
+                   {
+                       "answer": "<generated_text>",
+                       "usage": {
+                           "prompt_tokens": <int>,
+                           "completion_tokens": <int>,
+                           "total_tokens": <int>
+                       }
+                   }
+
+               Note: Token counts are estimated after answer generation and
+               may differ from the actual tokens generated by the model due to
+               internal decoding behavior and implementation details.
+
+             - FULL: Return the full raw response object.
+
+         :param messages: The original input messages used for token count estimation in USAGE mode.
+                          Can be a string, a list of strings, or chat-format messages.
+         :param kwargs: Additional parameters for response processing.
+
+         :return: The processed response in the format specified by `invoke_response_format`.
+                  Can be a string, a dictionary, or the original response object.
+
+         :raises MLRunInvalidArgumentError: If extracting the string response fails.
+         :raises MLRunRuntimeError: If applying the chat template to the model fails during token usage calculation.
+         """
+         if InvokeResponseFormat.is_str_response(invoke_response_format.value):
+             str_response = self._extract_string_output(response)
+             if invoke_response_format == InvokeResponseFormat.STRING:
+                 return str_response
+             if invoke_response_format == InvokeResponseFormat.USAGE:
+                 tokenizer = self.client.tokenizer
+                 if not isinstance(messages, str):
+                     try:
+                         messages = tokenizer.apply_chat_template(
+                             messages, tokenize=False, add_generation_prompt=True
+                         )
+                     except Exception as e:
+                         raise mlrun.errors.MLRunRuntimeError(
+                             f"Failed to apply chat template using the tokenizer for model '{self.model}'. "
+                             "This may indicate that the tokenizer does not support chat formatting, "
+                             "or that the input format is invalid. "
+                             f"Original error: {e}"
+                         )
+                 prompt_tokens = len(tokenizer.encode(messages))
+                 completion_tokens = len(tokenizer.encode(str_response))
+                 total_tokens = prompt_tokens + completion_tokens
+                 usage = {
+                     "prompt_tokens": prompt_tokens,
+                     "completion_tokens": completion_tokens,
+                     "total_tokens": total_tokens,
+                 }
+                 response = {
+                     UsageResponseKeys.ANSWER: str_response,
+                     UsageResponseKeys.USAGE: usage,
+                 }
+         return response
+
+     def load_client(self) -> None:
+         """
+         Initializes the Hugging Face pipeline using the provided options.
+
+         This method imports the `pipeline` function from the `transformers` package,
+         creates a pipeline instance with the specified task and model (from `self.options`),
+         and assigns it to `self._client`.
+
+         Note: Hugging Face pipelines are synchronous and do not support async invocation.
+
+         :raises ImportError: If the `transformers` package is not installed.
+         """
+         if self._client:
+             return
+         try:
+             from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline  # noqa
+             from transformers.pipelines.base import Pipeline  # noqa
+
+             self.options["model_kwargs"] = self.options.get("model_kwargs", {})
+             self.options["model_kwargs"]["local_files_only"] = True
+             with self._client_lock:
+                 self._client = pipeline(model=self.model, **self.options)
+             self._expected_operation_type = Pipeline
+         except ImportError as exc:
+             raise ImportError("transformers package is not installed") from exc
+
+     def get_client_options(self):
+         res = dict(
+             task=self._get_secret_or_env("HF_TASK") or "text-generation",
+             token=self._get_secret_or_env("HF_TOKEN"),
+             device=self._get_secret_or_env("HF_DEVICE"),
+             device_map=self._get_secret_or_env("HF_DEVICE_MAP"),
+             trust_remote_code=self._get_secret_or_env("HF_TRUST_REMOTE_CODE"),
+             model_kwargs=self._get_secret_or_env("HF_MODEL_KWARGS"),
+         )
+         return self._sanitize_options(res)
+
+     def custom_invoke(
+         self, operation: Optional["Pipeline"] = None, **invoke_kwargs
+     ) -> Union[list, dict, Any]:
+         """
+         Invokes a Hugging Face pipeline operation with the given keyword arguments.
+
+         This method provides the flexibility to use a custom pipeline object for specific tasks
+         (e.g., image classification, sentiment analysis).
+
+         The operation must be a Pipeline object from the transformers library that accepts keyword arguments.
+
+         Example:
+         ```python
+         from transformers import pipeline
+         from PIL import Image
+
+         # Using a custom pipeline for image classification
+         image = Image.open(image_path)
+         pipeline_object = pipeline("image-classification", model="microsoft/resnet-50")
+         result = hf_provider.custom_invoke(
+             pipeline_object,
+             inputs=image,
+         )
+         ```
+
+         :param operation: A Pipeline object from the transformers library.
+                           If not provided, defaults to the provider's configured pipeline.
+         :param invoke_kwargs: Keyword arguments to pass to the pipeline operation.
+                               These are merged with `default_invoke_kwargs` and may include
+                               parameters such as `inputs`, `max_length`, `temperature`, or task-specific options.
+
+         :return: The full response returned by the pipeline operation.
+                  The format depends on the pipeline task (a list for text generation,
+                  a dict for classification, etc.).
+
+         :raises MLRunInvalidArgumentError: If the operation is not a valid Pipeline object.
+         """
+         invoke_kwargs = self.get_invoke_kwargs(invoke_kwargs)
+         if operation:
+             if not isinstance(operation, self._expected_operation_type):
+                 raise mlrun.errors.MLRunInvalidArgumentError(
+                     "HuggingFace operation must inherit from 'Pipeline' object"
+                 )
+             return operation(**invoke_kwargs)
+         else:
+             return self.client(**invoke_kwargs)
+
+     def invoke(
+         self,
+         messages: Union[str, list[str], "ChatType", list["ChatType"]],
+         invoke_response_format: InvokeResponseFormat = InvokeResponseFormat.FULL,
+         **invoke_kwargs,
+     ) -> Union[str, list, dict[str, Any]]:
+         """
+         Hugging Face-specific implementation of model invocation using the synchronous pipeline client.
+         Invokes a Hugging Face model operation for text generation tasks.
+
+         Note: Ensure your environment has sufficient computational resources (CPU/GPU and memory) to run the model.
+
+         :param messages: Input for the text generation model. Can be provided in multiple formats:
+
+             - A single string: Direct text input for generation
+             - A list of strings: Multiple text inputs for batch processing
+             - Chat format: A list of dictionaries with "role" and "content" keys:
+
+               .. code-block:: json
+
+                   [
+                       {"role": "system", "content": "You are a helpful assistant."},
+                       {"role": "user", "content": "What is the capital of France?"}
+                   ]
+
+         :param invoke_response_format: Specifies the format of the returned response. Options:
+
+             - "string": Returns only the generated text content, extracted from a single response.
+             - "usage": Combines the generated text with metadata (e.g., token usage), returning a dictionary:
+
+               .. code-block:: json
+
+                   {
+                       "answer": "<generated_text>",
+                       "usage": {
+                           "prompt_tokens": <int>,
+                           "completion_tokens": <int>,
+                           "total_tokens": <int>
+                       }
+                   }
+
+               Note: For usage mode, the model tokenizer should support `apply_chat_template`.
+
+             - "full": Returns the raw response object from the Hugging Face model,
+               typically a list of generated sequences (dictionaries).
+               This format does not include token usage statistics.
+
+         :param invoke_kwargs: Additional keyword arguments passed to the Hugging Face pipeline.
+
+         :return: A string, dictionary, or list of model outputs, depending on `invoke_response_format`.
+
+         :raises MLRunInvalidArgumentError: If the pipeline task is not "text-generation", or if the response
+             contains multiple outputs when extracting string content.
+         :raises MLRunRuntimeError: If using "usage" response mode and the model tokenizer does not support
+             chat template formatting.
+         """
+         if self.client.task != "text-generation":
+             raise mlrun.errors.MLRunInvalidArgumentError(
+                 "HuggingFaceProvider.invoke supports text-generation task only"
+             )
+         if InvokeResponseFormat.is_str_response(invoke_response_format.value):
+             invoke_kwargs["return_full_text"] = False
+         response = self.custom_invoke(text_inputs=messages, **invoke_kwargs)
+         response = self._response_handler(
+             messages=messages,
+             response=response,
+             invoke_response_format=invoke_response_format,
+         )
+         return response
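
For orientation, here is a minimal usage sketch of the new provider. It is not taken from the diff: direct construction with `parent=None`, the model id, and the generation kwargs are illustrative assumptions (in practice the provider is typically resolved through mlrun's datastore layer), and note that construction triggers `_download_model()`, which downloads the model locally.

```python
from mlrun.datastore.model_provider.huggingface_provider import HuggingFaceProvider
from mlrun.datastore.model_provider.model_provider import (
    InvokeResponseFormat,
    UsageResponseKeys,
)

# Assumption: standalone construction with parent=None; downloads the model on init.
provider = HuggingFaceProvider(
    parent=None,
    schema="huggingface",
    name="my-hf-provider",
    endpoint="Qwen/Qwen2.5-0.5B-Instruct",  # any HF text-generation model id (illustrative)
    secrets={"HF_TASK": "text-generation"},
)

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What is the capital of France?"},
]

# USAGE mode returns {"answer": ..., "usage": {...}} with estimated token counts;
# max_new_tokens is a standard transformers pipeline kwarg forwarded via invoke_kwargs.
result = provider.invoke(
    messages,
    invoke_response_format=InvokeResponseFormat.USAGE,
    max_new_tokens=64,
)
print(result[UsageResponseKeys.ANSWER], result[UsageResponseKeys.USAGE]["total_tokens"])
```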

mlrun/datastore/model_provider/mock_model_provider.py (new file)
@@ -0,0 +1,87 @@
+ # Copyright 2023 Iguazio
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #   http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from typing import Any, Optional, Union
+
+ import mlrun
+ from mlrun.datastore.model_provider.model_provider import (
+     InvokeResponseFormat,
+     ModelProvider,
+     UsageResponseKeys,
+ )
+
+
+ class MockModelProvider(ModelProvider):
+     support_async = False
+
+     def __init__(
+         self,
+         parent,
+         kind,
+         name,
+         endpoint="",
+         secrets: Optional[dict] = None,
+         default_invoke_kwargs: Optional[dict] = None,
+     ):
+         super().__init__(
+             parent=parent, name=name, kind=kind, endpoint=endpoint, secrets=secrets
+         )
+         self.default_invoke_kwargs = default_invoke_kwargs or {}
+         self._client = None
+         self._async_client = None
+
+     @staticmethod
+     def _extract_string_output(response: Any) -> str:
+         """
+         Extracts string response from response object
+         """
+         pass
+
+     def load_client(self) -> None:
+         """
+         Initializes the SDK client for the model provider with the given keyword arguments
+         and assigns it to an instance attribute (e.g., self._client).
+
+         Subclasses should override this method to:
+         - Create and configure the provider-specific client instance.
+         - Assign the client instance to self._client.
+         """
+         pass
+
+     def invoke(
+         self,
+         messages: Union[list[dict], Any],
+         invoke_response_format: InvokeResponseFormat = InvokeResponseFormat.FULL,
+         **invoke_kwargs,
+     ) -> Union[str, dict[str, Any], Any]:
+         if invoke_response_format == InvokeResponseFormat.STRING:
+             return (
+                 "You are using a mock model provider, no actual inference is performed."
+             )
+         elif invoke_response_format == InvokeResponseFormat.FULL:
+             return {
+                 UsageResponseKeys.USAGE: {"prompt_tokens": 0, "completion_tokens": 0},
+                 UsageResponseKeys.ANSWER: "You are using a mock model provider, no actual inference is performed.",
+                 "extra": {},
+             }
+         elif invoke_response_format == InvokeResponseFormat.USAGE:
+             return {
+                 UsageResponseKeys.ANSWER: "You are using a mock model provider, no actual inference is performed.",
+                 UsageResponseKeys.USAGE: {"prompt_tokens": 0, "completion_tokens": 0},
+             }
+         else:
+             raise mlrun.errors.MLRunInvalidArgumentError(
+                 f"Unsupported invoke response format: {invoke_response_format}"
+             )
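
A sketch of how the mock might be exercised in a unit test, based only on the signatures visible above; `parent=None` and the `kind`/`name` values are arbitrary assumptions, not confirmed usage from the mlrun codebase.

```python
from mlrun.datastore.model_provider.mock_model_provider import MockModelProvider
from mlrun.datastore.model_provider.model_provider import (
    InvokeResponseFormat,
    UsageResponseKeys,
)

# Assumption: the ModelProvider base accepts parent=None for standalone use.
mock = MockModelProvider(parent=None, kind="mock", name="test-provider")

# USAGE mode returns the canned answer plus zeroed token counts, as defined above.
response = mock.invoke(
    [{"role": "user", "content": "anything"}],
    invoke_response_format=InvokeResponseFormat.USAGE,
)
assert response[UsageResponseKeys.USAGE]["prompt_tokens"] == 0
assert "mock model provider" in response[UsageResponseKeys.ANSWER]
```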