mistralai 1.9.11__py3-none-any.whl → 1.10.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (151)
  1. mistralai/_hooks/registration.py +5 -0
  2. mistralai/_hooks/tracing.py +75 -0
  3. mistralai/_version.py +2 -2
  4. mistralai/accesses.py +8 -8
  5. mistralai/agents.py +29 -17
  6. mistralai/chat.py +41 -29
  7. mistralai/classifiers.py +13 -1
  8. mistralai/conversations.py +294 -62
  9. mistralai/documents.py +19 -3
  10. mistralai/embeddings.py +13 -7
  11. mistralai/extra/README.md +1 -1
  12. mistralai/extra/mcp/auth.py +10 -11
  13. mistralai/extra/mcp/base.py +17 -16
  14. mistralai/extra/mcp/sse.py +13 -15
  15. mistralai/extra/mcp/stdio.py +5 -6
  16. mistralai/extra/observability/__init__.py +15 -0
  17. mistralai/extra/observability/otel.py +372 -0
  18. mistralai/extra/run/context.py +33 -43
  19. mistralai/extra/run/result.py +29 -30
  20. mistralai/extra/run/tools.py +34 -23
  21. mistralai/extra/struct_chat.py +15 -8
  22. mistralai/extra/utils/response_format.py +5 -3
  23. mistralai/files.py +6 -0
  24. mistralai/fim.py +17 -5
  25. mistralai/mistral_agents.py +229 -1
  26. mistralai/mistral_jobs.py +39 -13
  27. mistralai/models/__init__.py +99 -3
  28. mistralai/models/agent.py +15 -2
  29. mistralai/models/agentconversation.py +11 -3
  30. mistralai/models/agentcreationrequest.py +6 -2
  31. mistralai/models/agents_api_v1_agents_deleteop.py +16 -0
  32. mistralai/models/agents_api_v1_agents_getop.py +40 -3
  33. mistralai/models/agents_api_v1_agents_listop.py +72 -2
  34. mistralai/models/agents_api_v1_conversations_deleteop.py +18 -0
  35. mistralai/models/agents_api_v1_conversations_listop.py +39 -2
  36. mistralai/models/agentscompletionrequest.py +21 -6
  37. mistralai/models/agentscompletionstreamrequest.py +21 -6
  38. mistralai/models/agentupdaterequest.py +18 -2
  39. mistralai/models/audioencoding.py +13 -0
  40. mistralai/models/audioformat.py +19 -0
  41. mistralai/models/audiotranscriptionrequest.py +2 -0
  42. mistralai/models/batchjobin.py +26 -5
  43. mistralai/models/batchjobout.py +5 -0
  44. mistralai/models/batchrequest.py +48 -0
  45. mistralai/models/chatcompletionrequest.py +22 -5
  46. mistralai/models/chatcompletionstreamrequest.py +22 -5
  47. mistralai/models/classificationrequest.py +37 -3
  48. mistralai/models/conversationrequest.py +15 -4
  49. mistralai/models/conversationrestartrequest.py +50 -2
  50. mistralai/models/conversationrestartstreamrequest.py +50 -2
  51. mistralai/models/conversationstreamrequest.py +15 -4
  52. mistralai/models/documentout.py +26 -10
  53. mistralai/models/documentupdatein.py +24 -3
  54. mistralai/models/embeddingrequest.py +19 -11
  55. mistralai/models/files_api_routes_list_filesop.py +7 -0
  56. mistralai/models/fimcompletionrequest.py +8 -9
  57. mistralai/models/fimcompletionstreamrequest.py +8 -9
  58. mistralai/models/jobs_api_routes_batch_get_batch_jobop.py +40 -3
  59. mistralai/models/libraries_documents_list_v1op.py +15 -2
  60. mistralai/models/libraryout.py +10 -7
  61. mistralai/models/listfilesout.py +35 -4
  62. mistralai/models/modelcapabilities.py +13 -4
  63. mistralai/models/modelconversation.py +8 -2
  64. mistralai/models/ocrpageobject.py +26 -5
  65. mistralai/models/ocrrequest.py +17 -1
  66. mistralai/models/ocrtableobject.py +31 -0
  67. mistralai/models/prediction.py +4 -0
  68. mistralai/models/requestsource.py +7 -0
  69. mistralai/models/responseformat.py +4 -2
  70. mistralai/models/responseformats.py +0 -1
  71. mistralai/models/sharingdelete.py +36 -5
  72. mistralai/models/sharingin.py +36 -5
  73. mistralai/models/sharingout.py +3 -3
  74. mistralai/models/toolexecutiondeltaevent.py +13 -4
  75. mistralai/models/toolexecutiondoneevent.py +13 -4
  76. mistralai/models/toolexecutionentry.py +9 -4
  77. mistralai/models/toolexecutionstartedevent.py +13 -4
  78. mistralai/models/toolfilechunk.py +11 -4
  79. mistralai/models/toolreferencechunk.py +13 -4
  80. mistralai/models_.py +2 -14
  81. mistralai/ocr.py +18 -0
  82. mistralai/transcriptions.py +4 -4
  83. {mistralai-1.9.11.dist-info → mistralai-1.10.1.dist-info}/METADATA +162 -152
  84. {mistralai-1.9.11.dist-info → mistralai-1.10.1.dist-info}/RECORD +168 -144
  85. {mistralai-1.9.11.dist-info → mistralai-1.10.1.dist-info}/WHEEL +1 -1
  86. mistralai_azure/_version.py +3 -3
  87. mistralai_azure/basesdk.py +15 -5
  88. mistralai_azure/chat.py +59 -98
  89. mistralai_azure/models/__init__.py +50 -3
  90. mistralai_azure/models/chatcompletionrequest.py +16 -4
  91. mistralai_azure/models/chatcompletionstreamrequest.py +16 -4
  92. mistralai_azure/models/httpvalidationerror.py +11 -6
  93. mistralai_azure/models/mistralazureerror.py +26 -0
  94. mistralai_azure/models/no_response_error.py +13 -0
  95. mistralai_azure/models/prediction.py +4 -0
  96. mistralai_azure/models/responseformat.py +4 -2
  97. mistralai_azure/models/responseformats.py +0 -1
  98. mistralai_azure/models/responsevalidationerror.py +25 -0
  99. mistralai_azure/models/sdkerror.py +30 -14
  100. mistralai_azure/models/systemmessage.py +7 -3
  101. mistralai_azure/models/systemmessagecontentchunks.py +21 -0
  102. mistralai_azure/models/thinkchunk.py +35 -0
  103. mistralai_azure/ocr.py +15 -36
  104. mistralai_azure/utils/__init__.py +18 -5
  105. mistralai_azure/utils/eventstreaming.py +10 -0
  106. mistralai_azure/utils/serializers.py +3 -2
  107. mistralai_azure/utils/unmarshal_json_response.py +24 -0
  108. mistralai_gcp/_hooks/types.py +7 -0
  109. mistralai_gcp/_version.py +4 -4
  110. mistralai_gcp/basesdk.py +27 -25
  111. mistralai_gcp/chat.py +75 -98
  112. mistralai_gcp/fim.py +39 -74
  113. mistralai_gcp/httpclient.py +6 -16
  114. mistralai_gcp/models/__init__.py +321 -116
  115. mistralai_gcp/models/assistantmessage.py +1 -1
  116. mistralai_gcp/models/chatcompletionrequest.py +36 -7
  117. mistralai_gcp/models/chatcompletionresponse.py +6 -6
  118. mistralai_gcp/models/chatcompletionstreamrequest.py +36 -7
  119. mistralai_gcp/models/completionresponsestreamchoice.py +1 -1
  120. mistralai_gcp/models/deltamessage.py +1 -1
  121. mistralai_gcp/models/fimcompletionrequest.py +3 -9
  122. mistralai_gcp/models/fimcompletionresponse.py +6 -6
  123. mistralai_gcp/models/fimcompletionstreamrequest.py +3 -9
  124. mistralai_gcp/models/httpvalidationerror.py +11 -6
  125. mistralai_gcp/models/imageurl.py +1 -1
  126. mistralai_gcp/models/jsonschema.py +1 -1
  127. mistralai_gcp/models/mistralgcperror.py +26 -0
  128. mistralai_gcp/models/mistralpromptmode.py +8 -0
  129. mistralai_gcp/models/no_response_error.py +13 -0
  130. mistralai_gcp/models/prediction.py +4 -0
  131. mistralai_gcp/models/responseformat.py +5 -3
  132. mistralai_gcp/models/responseformats.py +0 -1
  133. mistralai_gcp/models/responsevalidationerror.py +25 -0
  134. mistralai_gcp/models/sdkerror.py +30 -14
  135. mistralai_gcp/models/systemmessage.py +7 -3
  136. mistralai_gcp/models/systemmessagecontentchunks.py +21 -0
  137. mistralai_gcp/models/thinkchunk.py +35 -0
  138. mistralai_gcp/models/toolmessage.py +1 -1
  139. mistralai_gcp/models/usageinfo.py +71 -8
  140. mistralai_gcp/models/usermessage.py +1 -1
  141. mistralai_gcp/sdk.py +12 -10
  142. mistralai_gcp/sdkconfiguration.py +0 -7
  143. mistralai_gcp/types/basemodel.py +3 -3
  144. mistralai_gcp/utils/__init__.py +143 -45
  145. mistralai_gcp/utils/datetimes.py +23 -0
  146. mistralai_gcp/utils/enums.py +67 -27
  147. mistralai_gcp/utils/eventstreaming.py +10 -0
  148. mistralai_gcp/utils/forms.py +49 -28
  149. mistralai_gcp/utils/serializers.py +33 -3
  150. mistralai_gcp/utils/unmarshal_json_response.py +24 -0
  151. {mistralai-1.9.11.dist-info → mistralai-1.10.1.dist-info}/licenses/LICENSE +0 -0
mistralai/extra/run/tools.py CHANGED
@@ -1,13 +1,11 @@
+import inspect
 import itertools
+import json
 import logging
 from dataclasses import dataclass
-import inspect
-
-from pydantic import Field, create_model
-from pydantic.fields import FieldInfo
-import json
-from typing import cast, Callable, Sequence, Any, ForwardRef, get_type_hints, Union
+from typing import Any, Callable, ForwardRef, Sequence, cast, get_type_hints
 
+import opentelemetry.semconv._incubating.attributes.gen_ai_attributes as gen_ai_attributes
 from griffe import (
     Docstring,
     DocstringSectionKind,
@@ -15,9 +13,13 @@ from griffe import (
     DocstringParameter,
     DocstringSection,
 )
+from opentelemetry import trace
+from pydantic import Field, create_model
+from pydantic.fields import FieldInfo
 
 from mistralai.extra.exceptions import RunException
 from mistralai.extra.mcp.base import MCPClientProtocol
+from mistralai.extra.observability.otel import GenAISpanEnum, MistralAIAttributes, set_available_attributes
 from mistralai.extra.run.result import RunOutputEntries
 from mistralai.models import (
     FunctionResultEntry,
@@ -51,7 +53,7 @@ class RunMCPTool:
     mcp_client: MCPClientProtocol
 
 
-RunTool = Union[RunFunction, RunCoroutine, RunMCPTool]
+RunTool = RunFunction | RunCoroutine | RunMCPTool
 
 
 def _get_function_description(docstring_sections: list[DocstringSection]) -> str:
@@ -191,22 +193,31 @@ async def create_function_result(
         if isinstance(function_call.arguments, str)
         else function_call.arguments
     )
-    try:
-        if isinstance(run_tool, RunFunction):
-            res = run_tool.callable(**arguments)
-        elif isinstance(run_tool, RunCoroutine):
-            res = await run_tool.awaitable(**arguments)
-        elif isinstance(run_tool, RunMCPTool):
-            res = await run_tool.mcp_client.execute_tool(function_call.name, arguments)
-    except Exception as e:
-        if continue_on_fn_error is True:
-            return FunctionResultEntry(
-                tool_call_id=function_call.tool_call_id,
-                result=f"Error while executing {function_call.name}: {str(e)}",
-            )
-        raise RunException(
-            f"Failed to execute tool {function_call.name} with arguments '{function_call.arguments}'"
-        ) from e
+    tracer = trace.get_tracer(__name__)
+    with tracer.start_as_current_span(GenAISpanEnum.function_call(function_call.name)) as span:
+        try:
+            if isinstance(run_tool, RunFunction):
+                res = run_tool.callable(**arguments)
+            elif isinstance(run_tool, RunCoroutine):
+                res = await run_tool.awaitable(**arguments)
+            elif isinstance(run_tool, RunMCPTool):
+                res = await run_tool.mcp_client.execute_tool(function_call.name, arguments)
+            function_call_attributes = {
+                gen_ai_attributes.GEN_AI_OPERATION_NAME: gen_ai_attributes.GenAiOperationNameValues.EXECUTE_TOOL.value,
+                gen_ai_attributes.GEN_AI_TOOL_CALL_ID: function_call.id,
+                MistralAIAttributes.MISTRAL_AI_TOOL_CALL_ARGUMENTS: str(function_call.arguments),
+                gen_ai_attributes.GEN_AI_TOOL_NAME: function_call.name
+            }
+            set_available_attributes(span, function_call_attributes)
+        except Exception as e:
+            if continue_on_fn_error is True:
+                return FunctionResultEntry(
+                    tool_call_id=function_call.tool_call_id,
+                    result=f"Error while executing {function_call.name}: {str(e)}",
+                )
+            raise RunException(
+                f"Failed to execute tool {function_call.name} with arguments '{function_call.arguments}'"
+            ) from e
 
     return FunctionResultEntry(
         tool_call_id=function_call.tool_call_id,
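The new span around tool execution uses whatever global OpenTelemetry tracer provider the host application has configured; without one, `trace.get_tracer(__name__)` returns a no-op tracer. A minimal sketch of wiring a provider up, assuming the optional opentelemetry-sdk package is installed (the console exporter is illustrative, not part of the mistralai SDK):

from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor

# Register a global tracer provider; the tracer.start_as_current_span(...) call
# shown above will then record real spans instead of no-op ones.
provider = TracerProvider()
provider.add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter()))
trace.set_tracer_provider(provider)
# Each RunFunction / RunCoroutine / RunMCPTool execution now emits a span
# carrying gen_ai.operation.name, gen_ai.tool.name and gen_ai.tool.call.id.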
mistralai/extra/struct_chat.py CHANGED
@@ -1,19 +1,26 @@
-from ..models import ChatCompletionResponse, ChatCompletionChoice, AssistantMessage
-from .utils.response_format import CustomPydanticModel, pydantic_model_from_json
-from typing import List, Optional, Type, Generic
-from pydantic import BaseModel
 import json
+from typing import Generic
+
+from ..models import AssistantMessage, ChatCompletionChoice, ChatCompletionResponse
+from .utils.response_format import CustomPydanticModel, pydantic_model_from_json
+
 
 class ParsedAssistantMessage(AssistantMessage, Generic[CustomPydanticModel]):
-    parsed: Optional[CustomPydanticModel]
+    parsed: CustomPydanticModel | None
+
 
 class ParsedChatCompletionChoice(ChatCompletionChoice, Generic[CustomPydanticModel]):
-    message: Optional[ParsedAssistantMessage[CustomPydanticModel]]  # type: ignore
+    message: ParsedAssistantMessage[CustomPydanticModel] | None  # type: ignore
+
 
 class ParsedChatCompletionResponse(ChatCompletionResponse, Generic[CustomPydanticModel]):
-    choices: Optional[List[ParsedChatCompletionChoice[CustomPydanticModel]]]  # type: ignore
+    choices: list[ParsedChatCompletionChoice[CustomPydanticModel]] | None  # type: ignore
+
 
-def convert_to_parsed_chat_completion_response(response: ChatCompletionResponse, response_format: Type[BaseModel]) -> ParsedChatCompletionResponse:
+def convert_to_parsed_chat_completion_response(
+    response: ChatCompletionResponse,
+    response_format: type[CustomPydanticModel],
+) -> ParsedChatCompletionResponse[CustomPydanticModel]:
     parsed_choices = []
 
     if response.choices:
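A hedged usage sketch of the retyped helper: a pydantic model supplies the target schema and the parsed payload comes back on `message.parsed`. The `response` argument is assumed to be a ChatCompletionResponse whose message content is JSON matching the model (for example from a structured-output chat call).

from pydantic import BaseModel

from mistralai.models import ChatCompletionResponse
from mistralai.extra.struct_chat import convert_to_parsed_chat_completion_response

class Book(BaseModel):
    title: str
    author: str

def extract_book(response: ChatCompletionResponse) -> Book | None:
    # The generic signature above means the result is typed as
    # ParsedChatCompletionResponse[Book], so `.parsed` is Book | None.
    parsed = convert_to_parsed_chat_completion_response(response, Book)
    if parsed.choices and parsed.choices[0].message:
        return parsed.choices[0].message.parsed
    return None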
mistralai/extra/utils/response_format.py CHANGED
@@ -1,5 +1,6 @@
+from typing import Any, TypeVar
+
 from pydantic import BaseModel
-from typing import TypeVar, Any, Type, Dict
 from ...models import JSONSchema, ResponseFormat
 from ._pydantic_helper import rec_strict_json_schema
 
@@ -7,7 +8,7 @@ CustomPydanticModel = TypeVar("CustomPydanticModel", bound=BaseModel)
 
 
 def response_format_from_pydantic_model(
-    model: Type[CustomPydanticModel],
+    model: type[CustomPydanticModel],
 ) -> ResponseFormat:
     """Generate a strict JSON schema from a pydantic model."""
     model_schema = rec_strict_json_schema(model.model_json_schema())
@@ -18,7 +19,8 @@
 
 
 def pydantic_model_from_json(
-    json_data: Dict[str, Any], pydantic_model: Type[CustomPydanticModel]
+    json_data: dict[str, Any],
+    pydantic_model: type[CustomPydanticModel],
 ) -> CustomPydanticModel:
     """Parse a JSON schema into a pydantic model."""
    return pydantic_model.model_validate(json_data)
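A short sketch of the two helpers with their new lowercase built-in annotations; the model and payload below are illustrative only.

from pydantic import BaseModel

from mistralai.extra.utils.response_format import (
    pydantic_model_from_json,
    response_format_from_pydantic_model,
)

class Address(BaseModel):
    street: str
    city: str

# ResponseFormat carrying a strict JSON schema derived from the pydantic model.
fmt = response_format_from_pydantic_model(Address)
# Round-trip a JSON payload back into the pydantic model.
addr = pydantic_model_from_json({"street": "1 Rue de la Paix", "city": "Paris"}, Address)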
mistralai/files.py CHANGED
@@ -212,6 +212,7 @@ class Files(BaseSDK):
         *,
         page: Optional[int] = 0,
         page_size: Optional[int] = 100,
+        include_total: Optional[bool] = True,
         sample_type: OptionalNullable[List[models.SampleType]] = UNSET,
         source: OptionalNullable[List[models.Source]] = UNSET,
         search: OptionalNullable[str] = UNSET,
@@ -227,6 +228,7 @@
 
         :param page:
         :param page_size:
+        :param include_total:
         :param sample_type:
         :param source:
         :param search:
@@ -249,6 +251,7 @@
         request = models.FilesAPIRoutesListFilesRequest(
             page=page,
             page_size=page_size,
+            include_total=include_total,
             sample_type=sample_type,
             source=source,
             search=search,
@@ -310,6 +313,7 @@
         *,
         page: Optional[int] = 0,
         page_size: Optional[int] = 100,
+        include_total: Optional[bool] = True,
         sample_type: OptionalNullable[List[models.SampleType]] = UNSET,
         source: OptionalNullable[List[models.Source]] = UNSET,
         search: OptionalNullable[str] = UNSET,
@@ -325,6 +329,7 @@
 
         :param page:
         :param page_size:
+        :param include_total:
         :param sample_type:
         :param source:
         :param search:
@@ -347,6 +352,7 @@
         request = models.FilesAPIRoutesListFilesRequest(
             page=page,
             page_size=page_size,
+            include_total=include_total,
             sample_type=sample_type,
             source=source,
             search=search,
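A hedged sketch of the new `include_total` flag on the file-listing call (sync variant shown); client construction and the API key follow the SDK's standard pattern, and the flag defaults to True per the signature above.

import os

from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
# include_total is new in 1.10.1; judging by the name and the reworked
# ListFilesOut model, it presumably toggles the total count in the response.
files_page = client.files.list(page=0, page_size=100, include_total=True)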
mistralai/fim.py CHANGED
@@ -6,7 +6,7 @@ from mistralai._hooks import HookContext
 from mistralai.types import OptionalNullable, UNSET
 from mistralai.utils import eventstreaming, get_security_from_env
 from mistralai.utils.unmarshal_json_response import unmarshal_json_response
-from typing import Any, Mapping, Optional, Union
+from typing import Any, Dict, Mapping, Optional, Union
 
 
 class Fim(BaseSDK):
@@ -28,6 +28,7 @@ class Fim(BaseSDK):
             ]
         ] = None,
         random_seed: OptionalNullable[int] = UNSET,
+        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
         suffix: OptionalNullable[str] = UNSET,
         min_tokens: OptionalNullable[int] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
@@ -39,7 +40,7 @@
 
         FIM completion.
 
-        :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest`
+        :param model: ID of the model with FIM to use.
         :param prompt: The text/code to complete.
         :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.
         :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
@@ -47,6 +48,7 @@ class Fim(BaseSDK):
         :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
         :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
         :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
+        :param metadata:
         :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.
         :param min_tokens: The minimum number of tokens to generate in the completion.
         :param retries: Override the default retry configuration for this method
@@ -72,6 +74,7 @@ class Fim(BaseSDK):
             stream=stream,
             stop=stop,
             random_seed=random_seed,
+            metadata=metadata,
             prompt=prompt,
             suffix=suffix,
             min_tokens=min_tokens,
@@ -152,6 +155,7 @@ class Fim(BaseSDK):
             ]
         ] = None,
         random_seed: OptionalNullable[int] = UNSET,
+        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
         suffix: OptionalNullable[str] = UNSET,
         min_tokens: OptionalNullable[int] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
@@ -163,7 +167,7 @@ class Fim(BaseSDK):
 
         FIM completion.
 
-        :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest`
+        :param model: ID of the model with FIM to use.
         :param prompt: The text/code to complete.
         :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.
         :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
@@ -171,6 +175,7 @@ class Fim(BaseSDK):
         :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
         :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
         :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
+        :param metadata:
         :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.
         :param min_tokens: The minimum number of tokens to generate in the completion.
         :param retries: Override the default retry configuration for this method
@@ -196,6 +201,7 @@ class Fim(BaseSDK):
             stream=stream,
             stop=stop,
             random_seed=random_seed,
+            metadata=metadata,
             prompt=prompt,
             suffix=suffix,
             min_tokens=min_tokens,
@@ -276,6 +282,7 @@ class Fim(BaseSDK):
             ]
         ] = None,
         random_seed: OptionalNullable[int] = UNSET,
+        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
         suffix: OptionalNullable[str] = UNSET,
         min_tokens: OptionalNullable[int] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
@@ -287,7 +294,7 @@ class Fim(BaseSDK):
 
         Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
 
-        :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest`
+        :param model: ID of the model with FIM to use.
         :param prompt: The text/code to complete.
         :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.
         :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
@@ -295,6 +302,7 @@ class Fim(BaseSDK):
         :param stream:
         :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
         :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
+        :param metadata:
         :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.
         :param min_tokens: The minimum number of tokens to generate in the completion.
         :param retries: Override the default retry configuration for this method
@@ -320,6 +328,7 @@ class Fim(BaseSDK):
             stream=stream,
             stop=stop,
             random_seed=random_seed,
+            metadata=metadata,
             prompt=prompt,
             suffix=suffix,
             min_tokens=min_tokens,
@@ -408,6 +417,7 @@ class Fim(BaseSDK):
             ]
         ] = None,
         random_seed: OptionalNullable[int] = UNSET,
+        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
         suffix: OptionalNullable[str] = UNSET,
         min_tokens: OptionalNullable[int] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
@@ -419,7 +429,7 @@ class Fim(BaseSDK):
 
         Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
 
-        :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest`
+        :param model: ID of the model with FIM to use.
         :param prompt: The text/code to complete.
         :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.
         :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
@@ -427,6 +437,7 @@ class Fim(BaseSDK):
         :param stream:
         :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
         :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
+        :param metadata:
         :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.
         :param min_tokens: The minimum number of tokens to generate in the completion.
         :param retries: Override the default retry configuration for this method
@@ -452,6 +463,7 @@ class Fim(BaseSDK):
             stream=stream,
             stop=stop,
             random_seed=random_seed,
+            metadata=metadata,
             prompt=prompt,
             suffix=suffix,
             min_tokens=min_tokens,
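And a sketch of the new `metadata` parameter on FIM completion; the model name and metadata keys are illustrative, and the client setup follows the SDK's standard pattern.

import os

from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
res = client.fim.complete(
    model="codestral-latest",
    prompt="def fibonacci(n: int) -> int:",
    suffix="return result",
    # metadata is new in 1.10.1 and accepts an arbitrary Dict[str, Any].
    metadata={"team": "sdk-docs", "purpose": "example"},
)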