mistralai 1.8.1__py3-none-any.whl → 1.9.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (111)
  1. mistralai/_hooks/types.py +7 -0
  2. mistralai/_version.py +3 -3
  3. mistralai/agents.py +16 -0
  4. mistralai/basesdk.py +12 -20
  5. mistralai/beta.py +1 -1
  6. mistralai/chat.py +16 -0
  7. mistralai/classifiers.py +8 -0
  8. mistralai/conversations.py +35 -15
  9. mistralai/embeddings.py +2 -0
  10. mistralai/extra/run/context.py +2 -4
  11. mistralai/files.py +12 -0
  12. mistralai/fim.py +4 -0
  13. mistralai/httpclient.py +6 -16
  14. mistralai/jobs.py +10 -0
  15. mistralai/mistral_agents.py +10 -0
  16. mistralai/mistral_jobs.py +8 -0
  17. mistralai/models/__init__.py +1357 -722
  18. mistralai/models/agent.py +1 -1
  19. mistralai/models/agentconversation.py +1 -1
  20. mistralai/models/agentcreationrequest.py +1 -1
  21. mistralai/models/agenthandoffentry.py +1 -1
  22. mistralai/models/agents_api_v1_conversations_getop.py +2 -0
  23. mistralai/models/agents_api_v1_conversations_historyop.py +2 -0
  24. mistralai/models/agents_api_v1_conversations_messagesop.py +2 -0
  25. mistralai/models/agents_api_v1_conversations_restart_streamop.py +2 -0
  26. mistralai/models/agents_api_v1_conversations_restartop.py +2 -0
  27. mistralai/models/agentscompletionrequest.py +13 -3
  28. mistralai/models/agentscompletionstreamrequest.py +13 -3
  29. mistralai/models/agentupdaterequest.py +1 -1
  30. mistralai/models/assistantmessage.py +1 -1
  31. mistralai/models/basemodelcard.py +8 -6
  32. mistralai/models/batchjobin.py +1 -1
  33. mistralai/models/batchjobout.py +1 -1
  34. mistralai/models/chatcompletionrequest.py +20 -3
  35. mistralai/models/chatcompletionstreamrequest.py +20 -3
  36. mistralai/models/classifierdetailedjobout.py +1 -1
  37. mistralai/models/classifierftmodelout.py +1 -1
  38. mistralai/models/classifierjobout.py +1 -1
  39. mistralai/models/classifiertargetin.py +1 -1
  40. mistralai/models/classifiertrainingparameters.py +1 -1
  41. mistralai/models/classifiertrainingparametersin.py +1 -1
  42. mistralai/models/completionargs.py +1 -1
  43. mistralai/models/completiondetailedjobout.py +1 -1
  44. mistralai/models/completionftmodelout.py +1 -1
  45. mistralai/models/completionjobout.py +1 -1
  46. mistralai/models/completionresponsestreamchoice.py +1 -1
  47. mistralai/models/completiontrainingparameters.py +1 -1
  48. mistralai/models/completiontrainingparametersin.py +1 -1
  49. mistralai/models/contentchunk.py +3 -0
  50. mistralai/models/conversationrequest.py +1 -1
  51. mistralai/models/conversationstreamrequest.py +1 -1
  52. mistralai/models/conversationusageinfo.py +1 -1
  53. mistralai/models/deltamessage.py +1 -1
  54. mistralai/models/documenturlchunk.py +1 -1
  55. mistralai/models/embeddingrequest.py +1 -1
  56. mistralai/models/eventout.py +1 -1
  57. mistralai/models/filechunk.py +23 -0
  58. mistralai/models/files_api_routes_list_filesop.py +1 -1
  59. mistralai/models/fileschema.py +1 -1
  60. mistralai/models/fimcompletionrequest.py +1 -1
  61. mistralai/models/fimcompletionstreamrequest.py +1 -1
  62. mistralai/models/ftmodelcard.py +9 -6
  63. mistralai/models/functioncallentry.py +1 -1
  64. mistralai/models/functionresultentry.py +1 -1
  65. mistralai/models/githubrepositoryin.py +1 -1
  66. mistralai/models/githubrepositoryout.py +1 -1
  67. mistralai/models/imageurl.py +1 -1
  68. mistralai/models/inputentries.py +21 -2
  69. mistralai/models/jobin.py +1 -1
  70. mistralai/models/jobmetadataout.py +1 -1
  71. mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py +1 -1
  72. mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py +1 -1
  73. mistralai/models/jsonschema.py +1 -1
  74. mistralai/models/legacyjobmetadataout.py +1 -1
  75. mistralai/models/messageinputentry.py +4 -4
  76. mistralai/models/messageoutputentry.py +1 -1
  77. mistralai/models/messageoutputevent.py +1 -1
  78. mistralai/models/metricout.py +1 -1
  79. mistralai/models/mistralpromptmode.py +8 -0
  80. mistralai/models/modelcapabilities.py +3 -0
  81. mistralai/models/modelconversation.py +1 -1
  82. mistralai/models/ocrimageobject.py +1 -1
  83. mistralai/models/ocrpageobject.py +1 -1
  84. mistralai/models/ocrrequest.py +5 -3
  85. mistralai/models/ocrresponse.py +1 -1
  86. mistralai/models/ocrusageinfo.py +1 -1
  87. mistralai/models/responseformat.py +1 -1
  88. mistralai/models/retrievefileout.py +1 -1
  89. mistralai/models/toolexecutionentry.py +1 -1
  90. mistralai/models/toolfilechunk.py +1 -1
  91. mistralai/models/toolmessage.py +1 -1
  92. mistralai/models/toolreferencechunk.py +1 -1
  93. mistralai/models/updateftmodelin.py +1 -1
  94. mistralai/models/uploadfileout.py +1 -1
  95. mistralai/models/usermessage.py +1 -1
  96. mistralai/models/wandbintegration.py +1 -1
  97. mistralai/models/wandbintegrationout.py +1 -1
  98. mistralai/models_.py +14 -2
  99. mistralai/ocr.py +2 -0
  100. mistralai/sdk.py +68 -40
  101. mistralai/sdkconfiguration.py +0 -7
  102. mistralai/types/basemodel.py +3 -3
  103. mistralai/utils/__init__.py +131 -45
  104. mistralai/utils/datetimes.py +23 -0
  105. mistralai/utils/enums.py +67 -27
  106. mistralai/utils/forms.py +49 -28
  107. mistralai/utils/serializers.py +32 -3
  108. {mistralai-1.8.1.dist-info → mistralai-1.9.1.dist-info}/METADATA +13 -6
  109. {mistralai-1.8.1.dist-info → mistralai-1.9.1.dist-info}/RECORD +111 -108
  110. {mistralai-1.8.1.dist-info → mistralai-1.9.1.dist-info}/LICENSE +0 -0
  111. {mistralai-1.8.1.dist-info → mistralai-1.9.1.dist-info}/WHEEL +0 -0
mistralai/_hooks/types.py CHANGED
@@ -3,10 +3,12 @@
3
3
  from abc import ABC, abstractmethod
4
4
  import httpx
5
5
  from mistralai.httpclient import HttpClient
6
+ from mistralai.sdkconfiguration import SDKConfiguration
6
7
  from typing import Any, Callable, List, Optional, Tuple, Union
7
8
 
8
9
 
9
10
  class HookContext:
11
+ config: SDKConfiguration
10
12
  base_url: str
11
13
  operation_id: str
12
14
  oauth2_scopes: Optional[List[str]] = None
@@ -14,11 +16,13 @@ class HookContext:
14
16
 
15
17
  def __init__(
16
18
  self,
19
+ config: SDKConfiguration,
17
20
  base_url: str,
18
21
  operation_id: str,
19
22
  oauth2_scopes: Optional[List[str]],
20
23
  security_source: Optional[Union[Any, Callable[[], Any]]],
21
24
  ):
25
+ self.config = config
22
26
  self.base_url = base_url
23
27
  self.operation_id = operation_id
24
28
  self.oauth2_scopes = oauth2_scopes
@@ -28,6 +32,7 @@ class HookContext:
28
32
  class BeforeRequestContext(HookContext):
29
33
  def __init__(self, hook_ctx: HookContext):
30
34
  super().__init__(
35
+ hook_ctx.config,
31
36
  hook_ctx.base_url,
32
37
  hook_ctx.operation_id,
33
38
  hook_ctx.oauth2_scopes,
@@ -38,6 +43,7 @@ class BeforeRequestContext(HookContext):
38
43
  class AfterSuccessContext(HookContext):
39
44
  def __init__(self, hook_ctx: HookContext):
40
45
  super().__init__(
46
+ hook_ctx.config,
41
47
  hook_ctx.base_url,
42
48
  hook_ctx.operation_id,
43
49
  hook_ctx.oauth2_scopes,
@@ -48,6 +54,7 @@ class AfterSuccessContext(HookContext):
48
54
  class AfterErrorContext(HookContext):
49
55
  def __init__(self, hook_ctx: HookContext):
50
56
  super().__init__(
57
+ hook_ctx.config,
51
58
  hook_ctx.base_url,
52
59
  hook_ctx.operation_id,
53
60
  hook_ctx.oauth2_scopes,
mistralai/_version.py CHANGED
@@ -3,10 +3,10 @@
3
3
  import importlib.metadata
4
4
 
5
5
  __title__: str = "mistralai"
6
- __version__: str = "1.8.1"
6
+ __version__: str = "1.9.1"
7
7
  __openapi_doc_version__: str = "1.0.0"
8
- __gen_version__: str = "2.548.6"
9
- __user_agent__: str = "speakeasy-sdk/python 1.8.1 2.548.6 1.0.0 mistralai"
8
+ __gen_version__: str = "2.634.2"
9
+ __user_agent__: str = "speakeasy-sdk/python 1.9.1 2.634.2 1.0.0 mistralai"
10
10
 
11
11
  try:
12
12
  if __package__ is not None:
mistralai/agents.py CHANGED
@@ -47,6 +47,7 @@ class Agents(BaseSDK):
47
47
  Union[models.Prediction, models.PredictionTypedDict]
48
48
  ] = None,
49
49
  parallel_tool_calls: Optional[bool] = None,
50
+ prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET,
50
51
  retries: OptionalNullable[utils.RetryConfig] = UNSET,
51
52
  server_url: Optional[str] = None,
52
53
  timeout_ms: Optional[int] = None,
@@ -68,6 +69,7 @@ class Agents(BaseSDK):
68
69
  :param n: Number of completions to return for each request, input tokens are only billed once.
69
70
  :param prediction:
70
71
  :param parallel_tool_calls:
72
+ :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.
71
73
  :param retries: Override the default retry configuration for this method
72
74
  :param server_url: Override the default server URL for this method
73
75
  :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -105,6 +107,7 @@ class Agents(BaseSDK):
105
107
  prediction, Optional[models.Prediction]
106
108
  ),
107
109
  parallel_tool_calls=parallel_tool_calls,
110
+ prompt_mode=prompt_mode,
108
111
  agent_id=agent_id,
109
112
  )
110
113
 
@@ -137,6 +140,7 @@ class Agents(BaseSDK):
137
140
 
138
141
  http_res = self.do_request(
139
142
  hook_ctx=HookContext(
143
+ config=self.sdk_configuration,
140
144
  base_url=base_url or "",
141
145
  operation_id="agents_completion_v1_agents_completions_post",
142
146
  oauth2_scopes=[],
@@ -213,6 +217,7 @@ class Agents(BaseSDK):
213
217
  Union[models.Prediction, models.PredictionTypedDict]
214
218
  ] = None,
215
219
  parallel_tool_calls: Optional[bool] = None,
220
+ prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET,
216
221
  retries: OptionalNullable[utils.RetryConfig] = UNSET,
217
222
  server_url: Optional[str] = None,
218
223
  timeout_ms: Optional[int] = None,
@@ -234,6 +239,7 @@ class Agents(BaseSDK):
234
239
  :param n: Number of completions to return for each request, input tokens are only billed once.
235
240
  :param prediction:
236
241
  :param parallel_tool_calls:
242
+ :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.
237
243
  :param retries: Override the default retry configuration for this method
238
244
  :param server_url: Override the default server URL for this method
239
245
  :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -271,6 +277,7 @@ class Agents(BaseSDK):
271
277
  prediction, Optional[models.Prediction]
272
278
  ),
273
279
  parallel_tool_calls=parallel_tool_calls,
280
+ prompt_mode=prompt_mode,
274
281
  agent_id=agent_id,
275
282
  )
276
283
 
@@ -303,6 +310,7 @@ class Agents(BaseSDK):
303
310
 
304
311
  http_res = await self.do_request_async(
305
312
  hook_ctx=HookContext(
313
+ config=self.sdk_configuration,
306
314
  base_url=base_url or "",
307
315
  operation_id="agents_completion_v1_agents_completions_post",
308
316
  oauth2_scopes=[],
@@ -379,6 +387,7 @@ class Agents(BaseSDK):
379
387
  Union[models.Prediction, models.PredictionTypedDict]
380
388
  ] = None,
381
389
  parallel_tool_calls: Optional[bool] = None,
390
+ prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET,
382
391
  retries: OptionalNullable[utils.RetryConfig] = UNSET,
383
392
  server_url: Optional[str] = None,
384
393
  timeout_ms: Optional[int] = None,
@@ -402,6 +411,7 @@ class Agents(BaseSDK):
402
411
  :param n: Number of completions to return for each request, input tokens are only billed once.
403
412
  :param prediction:
404
413
  :param parallel_tool_calls:
414
+ :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.
405
415
  :param retries: Override the default retry configuration for this method
406
416
  :param server_url: Override the default server URL for this method
407
417
  :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -439,6 +449,7 @@ class Agents(BaseSDK):
439
449
  prediction, Optional[models.Prediction]
440
450
  ),
441
451
  parallel_tool_calls=parallel_tool_calls,
452
+ prompt_mode=prompt_mode,
442
453
  agent_id=agent_id,
443
454
  )
444
455
 
@@ -471,6 +482,7 @@ class Agents(BaseSDK):
471
482
 
472
483
  http_res = self.do_request(
473
484
  hook_ctx=HookContext(
485
+ config=self.sdk_configuration,
474
486
  base_url=base_url or "",
475
487
  operation_id="stream_agents",
476
488
  oauth2_scopes=[],
@@ -553,6 +565,7 @@ class Agents(BaseSDK):
553
565
  Union[models.Prediction, models.PredictionTypedDict]
554
566
  ] = None,
555
567
  parallel_tool_calls: Optional[bool] = None,
568
+ prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET,
556
569
  retries: OptionalNullable[utils.RetryConfig] = UNSET,
557
570
  server_url: Optional[str] = None,
558
571
  timeout_ms: Optional[int] = None,
@@ -576,6 +589,7 @@ class Agents(BaseSDK):
576
589
  :param n: Number of completions to return for each request, input tokens are only billed once.
577
590
  :param prediction:
578
591
  :param parallel_tool_calls:
592
+ :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.
579
593
  :param retries: Override the default retry configuration for this method
580
594
  :param server_url: Override the default server URL for this method
581
595
  :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -613,6 +627,7 @@ class Agents(BaseSDK):
613
627
  prediction, Optional[models.Prediction]
614
628
  ),
615
629
  parallel_tool_calls=parallel_tool_calls,
630
+ prompt_mode=prompt_mode,
616
631
  agent_id=agent_id,
617
632
  )
618
633
 
@@ -645,6 +660,7 @@ class Agents(BaseSDK):
645
660
 
646
661
  http_res = await self.do_request_async(
647
662
  hook_ctx=HookContext(
663
+ config=self.sdk_configuration,
648
664
  base_url=base_url or "",
649
665
  operation_id="stream_agents",
650
666
  oauth2_scopes=[],
mistralai/basesdk.py CHANGED
@@ -218,12 +218,12 @@ class BaseSDK:
218
218
  client = self.sdk_configuration.client
219
219
  logger = self.sdk_configuration.debug_logger
220
220
 
221
+ hooks = self.sdk_configuration.__dict__["_hooks"]
222
+
221
223
  def do():
222
224
  http_res = None
223
225
  try:
224
- req = self.sdk_configuration.get_hooks().before_request(
225
- BeforeRequestContext(hook_ctx), request
226
- )
226
+ req = hooks.before_request(BeforeRequestContext(hook_ctx), request)
227
227
  logger.debug(
228
228
  "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s",
229
229
  req.method,
@@ -237,9 +237,7 @@ class BaseSDK:
237
237
 
238
238
  http_res = client.send(req, stream=stream)
239
239
  except Exception as e:
240
- _, e = self.sdk_configuration.get_hooks().after_error(
241
- AfterErrorContext(hook_ctx), None, e
242
- )
240
+ _, e = hooks.after_error(AfterErrorContext(hook_ctx), None, e)
243
241
  if e is not None:
244
242
  logger.debug("Request Exception", exc_info=True)
245
243
  raise e
@@ -257,7 +255,7 @@ class BaseSDK:
257
255
  )
258
256
 
259
257
  if utils.match_status_codes(error_status_codes, http_res.status_code):
260
- result, err = self.sdk_configuration.get_hooks().after_error(
258
+ result, err = hooks.after_error(
261
259
  AfterErrorContext(hook_ctx), http_res, None
262
260
  )
263
261
  if err is not None:
@@ -277,9 +275,7 @@ class BaseSDK:
277
275
  http_res = do()
278
276
 
279
277
  if not utils.match_status_codes(error_status_codes, http_res.status_code):
280
- http_res = self.sdk_configuration.get_hooks().after_success(
281
- AfterSuccessContext(hook_ctx), http_res
282
- )
278
+ http_res = hooks.after_success(AfterSuccessContext(hook_ctx), http_res)
283
279
 
284
280
  return http_res
285
281
 
@@ -294,12 +290,12 @@ class BaseSDK:
294
290
  client = self.sdk_configuration.async_client
295
291
  logger = self.sdk_configuration.debug_logger
296
292
 
293
+ hooks = self.sdk_configuration.__dict__["_hooks"]
294
+
297
295
  async def do():
298
296
  http_res = None
299
297
  try:
300
- req = self.sdk_configuration.get_hooks().before_request(
301
- BeforeRequestContext(hook_ctx), request
302
- )
298
+ req = hooks.before_request(BeforeRequestContext(hook_ctx), request)
303
299
  logger.debug(
304
300
  "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s",
305
301
  req.method,
@@ -313,9 +309,7 @@ class BaseSDK:
313
309
 
314
310
  http_res = await client.send(req, stream=stream)
315
311
  except Exception as e:
316
- _, e = self.sdk_configuration.get_hooks().after_error(
317
- AfterErrorContext(hook_ctx), None, e
318
- )
312
+ _, e = hooks.after_error(AfterErrorContext(hook_ctx), None, e)
319
313
  if e is not None:
320
314
  logger.debug("Request Exception", exc_info=True)
321
315
  raise e
@@ -333,7 +327,7 @@ class BaseSDK:
333
327
  )
334
328
 
335
329
  if utils.match_status_codes(error_status_codes, http_res.status_code):
336
- result, err = self.sdk_configuration.get_hooks().after_error(
330
+ result, err = hooks.after_error(
337
331
  AfterErrorContext(hook_ctx), http_res, None
338
332
  )
339
333
  if err is not None:
@@ -355,8 +349,6 @@ class BaseSDK:
355
349
  http_res = await do()
356
350
 
357
351
  if not utils.match_status_codes(error_status_codes, http_res.status_code):
358
- http_res = self.sdk_configuration.get_hooks().after_success(
359
- AfterSuccessContext(hook_ctx), http_res
360
- )
352
+ http_res = hooks.after_success(AfterSuccessContext(hook_ctx), http_res)
361
353
 
362
354
  return http_res
mistralai/beta.py CHANGED
@@ -8,7 +8,7 @@ from mistralai.mistral_agents import MistralAgents
8
8
 
9
9
  class Beta(BaseSDK):
10
10
  conversations: Conversations
11
- r"""(beta) Converstations API"""
11
+ r"""(beta) Conversations API"""
12
12
  agents: MistralAgents
13
13
  r"""(beta) Agents API"""
14
14
 
mistralai/chat.py CHANGED
@@ -123,6 +123,7 @@ class Chat(BaseSDK):
123
123
  Union[models.Prediction, models.PredictionTypedDict]
124
124
  ] = None,
125
125
  parallel_tool_calls: Optional[bool] = None,
126
+ prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET,
126
127
  safe_prompt: Optional[bool] = None,
127
128
  retries: OptionalNullable[utils.RetryConfig] = UNSET,
128
129
  server_url: Optional[str] = None,
@@ -147,6 +148,7 @@ class Chat(BaseSDK):
147
148
  :param n: Number of completions to return for each request, input tokens are only billed once.
148
149
  :param prediction:
149
150
  :param parallel_tool_calls:
151
+ :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.
150
152
  :param safe_prompt: Whether to inject a safety prompt before all conversations.
151
153
  :param retries: Override the default retry configuration for this method
152
154
  :param server_url: Override the default server URL for this method
@@ -186,6 +188,7 @@ class Chat(BaseSDK):
186
188
  prediction, Optional[models.Prediction]
187
189
  ),
188
190
  parallel_tool_calls=parallel_tool_calls,
191
+ prompt_mode=prompt_mode,
189
192
  safe_prompt=safe_prompt,
190
193
  )
191
194
 
@@ -218,6 +221,7 @@ class Chat(BaseSDK):
218
221
 
219
222
  http_res = self.do_request(
220
223
  hook_ctx=HookContext(
224
+ config=self.sdk_configuration,
221
225
  base_url=base_url or "",
222
226
  operation_id="chat_completion_v1_chat_completions_post",
223
227
  oauth2_scopes=[],
@@ -288,6 +292,7 @@ class Chat(BaseSDK):
288
292
  Union[models.Prediction, models.PredictionTypedDict]
289
293
  ] = None,
290
294
  parallel_tool_calls: Optional[bool] = None,
295
+ prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET,
291
296
  safe_prompt: Optional[bool] = None,
292
297
  retries: OptionalNullable[utils.RetryConfig] = UNSET,
293
298
  server_url: Optional[str] = None,
@@ -312,6 +317,7 @@ class Chat(BaseSDK):
312
317
  :param n: Number of completions to return for each request, input tokens are only billed once.
313
318
  :param prediction:
314
319
  :param parallel_tool_calls:
320
+ :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.
315
321
  :param safe_prompt: Whether to inject a safety prompt before all conversations.
316
322
  :param retries: Override the default retry configuration for this method
317
323
  :param server_url: Override the default server URL for this method
@@ -351,6 +357,7 @@ class Chat(BaseSDK):
351
357
  prediction, Optional[models.Prediction]
352
358
  ),
353
359
  parallel_tool_calls=parallel_tool_calls,
360
+ prompt_mode=prompt_mode,
354
361
  safe_prompt=safe_prompt,
355
362
  )
356
363
 
@@ -383,6 +390,7 @@ class Chat(BaseSDK):
383
390
 
384
391
  http_res = await self.do_request_async(
385
392
  hook_ctx=HookContext(
393
+ config=self.sdk_configuration,
386
394
  base_url=base_url or "",
387
395
  operation_id="chat_completion_v1_chat_completions_post",
388
396
  oauth2_scopes=[],
@@ -461,6 +469,7 @@ class Chat(BaseSDK):
461
469
  Union[models.Prediction, models.PredictionTypedDict]
462
470
  ] = None,
463
471
  parallel_tool_calls: Optional[bool] = None,
472
+ prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET,
464
473
  safe_prompt: Optional[bool] = None,
465
474
  retries: OptionalNullable[utils.RetryConfig] = UNSET,
466
475
  server_url: Optional[str] = None,
@@ -487,6 +496,7 @@ class Chat(BaseSDK):
487
496
  :param n: Number of completions to return for each request, input tokens are only billed once.
488
497
  :param prediction:
489
498
  :param parallel_tool_calls:
499
+ :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.
490
500
  :param safe_prompt: Whether to inject a safety prompt before all conversations.
491
501
  :param retries: Override the default retry configuration for this method
492
502
  :param server_url: Override the default server URL for this method
@@ -528,6 +538,7 @@ class Chat(BaseSDK):
528
538
  prediction, Optional[models.Prediction]
529
539
  ),
530
540
  parallel_tool_calls=parallel_tool_calls,
541
+ prompt_mode=prompt_mode,
531
542
  safe_prompt=safe_prompt,
532
543
  )
533
544
 
@@ -560,6 +571,7 @@ class Chat(BaseSDK):
560
571
 
561
572
  http_res = self.do_request(
562
573
  hook_ctx=HookContext(
574
+ config=self.sdk_configuration,
563
575
  base_url=base_url or "",
564
576
  operation_id="stream_chat",
565
577
  oauth2_scopes=[],
@@ -644,6 +656,7 @@ class Chat(BaseSDK):
644
656
  Union[models.Prediction, models.PredictionTypedDict]
645
657
  ] = None,
646
658
  parallel_tool_calls: Optional[bool] = None,
659
+ prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET,
647
660
  safe_prompt: Optional[bool] = None,
648
661
  retries: OptionalNullable[utils.RetryConfig] = UNSET,
649
662
  server_url: Optional[str] = None,
@@ -670,6 +683,7 @@ class Chat(BaseSDK):
670
683
  :param n: Number of completions to return for each request, input tokens are only billed once.
671
684
  :param prediction:
672
685
  :param parallel_tool_calls:
686
+ :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.
673
687
  :param safe_prompt: Whether to inject a safety prompt before all conversations.
674
688
  :param retries: Override the default retry configuration for this method
675
689
  :param server_url: Override the default server URL for this method
@@ -711,6 +725,7 @@ class Chat(BaseSDK):
711
725
  prediction, Optional[models.Prediction]
712
726
  ),
713
727
  parallel_tool_calls=parallel_tool_calls,
728
+ prompt_mode=prompt_mode,
714
729
  safe_prompt=safe_prompt,
715
730
  )
716
731
 
@@ -743,6 +758,7 @@ class Chat(BaseSDK):
743
758
 
744
759
  http_res = await self.do_request_async(
745
760
  hook_ctx=HookContext(
761
+ config=self.sdk_configuration,
746
762
  base_url=base_url or "",
747
763
  operation_id="stream_chat",
748
764
  oauth2_scopes=[],
mistralai/classifiers.py CHANGED
@@ -77,6 +77,7 @@ class Classifiers(BaseSDK):
77
77
 
78
78
  http_res = self.do_request(
79
79
  hook_ctx=HookContext(
80
+ config=self.sdk_configuration,
80
81
  base_url=base_url or "",
81
82
  operation_id="moderations_v1_moderations_post",
82
83
  oauth2_scopes=[],
@@ -183,6 +184,7 @@ class Classifiers(BaseSDK):
183
184
 
184
185
  http_res = await self.do_request_async(
185
186
  hook_ctx=HookContext(
187
+ config=self.sdk_configuration,
186
188
  base_url=base_url or "",
187
189
  operation_id="moderations_v1_moderations_post",
188
190
  oauth2_scopes=[],
@@ -289,6 +291,7 @@ class Classifiers(BaseSDK):
289
291
 
290
292
  http_res = self.do_request(
291
293
  hook_ctx=HookContext(
294
+ config=self.sdk_configuration,
292
295
  base_url=base_url or "",
293
296
  operation_id="chat_moderations_v1_chat_moderations_post",
294
297
  oauth2_scopes=[],
@@ -395,6 +398,7 @@ class Classifiers(BaseSDK):
395
398
 
396
399
  http_res = await self.do_request_async(
397
400
  hook_ctx=HookContext(
401
+ config=self.sdk_configuration,
398
402
  base_url=base_url or "",
399
403
  operation_id="chat_moderations_v1_chat_moderations_post",
400
404
  oauth2_scopes=[],
@@ -501,6 +505,7 @@ class Classifiers(BaseSDK):
501
505
 
502
506
  http_res = self.do_request(
503
507
  hook_ctx=HookContext(
508
+ config=self.sdk_configuration,
504
509
  base_url=base_url or "",
505
510
  operation_id="classifications_v1_classifications_post",
506
511
  oauth2_scopes=[],
@@ -607,6 +612,7 @@ class Classifiers(BaseSDK):
607
612
 
608
613
  http_res = await self.do_request_async(
609
614
  hook_ctx=HookContext(
615
+ config=self.sdk_configuration,
610
616
  base_url=base_url or "",
611
617
  operation_id="classifications_v1_classifications_post",
612
618
  oauth2_scopes=[],
@@ -710,6 +716,7 @@ class Classifiers(BaseSDK):
710
716
 
711
717
  http_res = self.do_request(
712
718
  hook_ctx=HookContext(
719
+ config=self.sdk_configuration,
713
720
  base_url=base_url or "",
714
721
  operation_id="chat_classifications_v1_chat_classifications_post",
715
722
  oauth2_scopes=[],
@@ -813,6 +820,7 @@ class Classifiers(BaseSDK):
813
820
 
814
821
  http_res = await self.do_request_async(
815
822
  hook_ctx=HookContext(
823
+ config=self.sdk_configuration,
816
824
  base_url=base_url or "",
817
825
  operation_id="chat_classifications_v1_chat_classifications_post",
818
826
  oauth2_scopes=[],