mistralai 1.9.10__py3-none-any.whl → 1.10.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86)
  1. mistralai/_hooks/registration.py +5 -0
  2. mistralai/_hooks/tracing.py +50 -0
  3. mistralai/_version.py +3 -3
  4. mistralai/accesses.py +51 -116
  5. mistralai/agents.py +58 -85
  6. mistralai/audio.py +8 -3
  7. mistralai/basesdk.py +15 -5
  8. mistralai/batch.py +6 -3
  9. mistralai/beta.py +10 -5
  10. mistralai/chat.py +70 -97
  11. mistralai/classifiers.py +57 -144
  12. mistralai/conversations.py +435 -412
  13. mistralai/documents.py +156 -359
  14. mistralai/embeddings.py +21 -42
  15. mistralai/extra/observability/__init__.py +15 -0
  16. mistralai/extra/observability/otel.py +393 -0
  17. mistralai/extra/run/tools.py +28 -16
  18. mistralai/files.py +53 -176
  19. mistralai/fim.py +46 -73
  20. mistralai/fine_tuning.py +6 -3
  21. mistralai/jobs.py +49 -158
  22. mistralai/libraries.py +71 -178
  23. mistralai/mistral_agents.py +298 -179
  24. mistralai/mistral_jobs.py +51 -138
  25. mistralai/models/__init__.py +94 -5
  26. mistralai/models/agent.py +15 -2
  27. mistralai/models/agentconversation.py +11 -3
  28. mistralai/models/agentcreationrequest.py +6 -2
  29. mistralai/models/agents_api_v1_agents_deleteop.py +16 -0
  30. mistralai/models/agents_api_v1_agents_getop.py +40 -3
  31. mistralai/models/agents_api_v1_agents_listop.py +72 -2
  32. mistralai/models/agents_api_v1_conversations_deleteop.py +18 -0
  33. mistralai/models/agents_api_v1_conversations_listop.py +39 -2
  34. mistralai/models/agentscompletionrequest.py +21 -6
  35. mistralai/models/agentscompletionstreamrequest.py +21 -6
  36. mistralai/models/agentupdaterequest.py +18 -2
  37. mistralai/models/audiotranscriptionrequest.py +2 -0
  38. mistralai/models/batchjobin.py +10 -0
  39. mistralai/models/chatcompletionrequest.py +22 -5
  40. mistralai/models/chatcompletionstreamrequest.py +22 -5
  41. mistralai/models/conversationrequest.py +15 -4
  42. mistralai/models/conversationrestartrequest.py +50 -2
  43. mistralai/models/conversationrestartstreamrequest.py +50 -2
  44. mistralai/models/conversationstreamrequest.py +15 -4
  45. mistralai/models/documentout.py +26 -10
  46. mistralai/models/documentupdatein.py +24 -3
  47. mistralai/models/embeddingrequest.py +8 -8
  48. mistralai/models/files_api_routes_list_filesop.py +7 -0
  49. mistralai/models/fimcompletionrequest.py +8 -9
  50. mistralai/models/fimcompletionstreamrequest.py +8 -9
  51. mistralai/models/httpvalidationerror.py +11 -6
  52. mistralai/models/libraries_documents_list_v1op.py +15 -2
  53. mistralai/models/libraryout.py +10 -7
  54. mistralai/models/listfilesout.py +35 -4
  55. mistralai/models/mistralerror.py +26 -0
  56. mistralai/models/modelcapabilities.py +13 -4
  57. mistralai/models/modelconversation.py +8 -2
  58. mistralai/models/no_response_error.py +13 -0
  59. mistralai/models/ocrpageobject.py +26 -5
  60. mistralai/models/ocrrequest.py +17 -1
  61. mistralai/models/ocrtableobject.py +31 -0
  62. mistralai/models/prediction.py +4 -0
  63. mistralai/models/requestsource.py +7 -0
  64. mistralai/models/responseformat.py +4 -2
  65. mistralai/models/responseformats.py +0 -1
  66. mistralai/models/responsevalidationerror.py +25 -0
  67. mistralai/models/sdkerror.py +30 -14
  68. mistralai/models/sharingdelete.py +36 -5
  69. mistralai/models/sharingin.py +36 -5
  70. mistralai/models/sharingout.py +3 -3
  71. mistralai/models/toolexecutiondeltaevent.py +13 -4
  72. mistralai/models/toolexecutiondoneevent.py +13 -4
  73. mistralai/models/toolexecutionentry.py +9 -4
  74. mistralai/models/toolexecutionstartedevent.py +13 -4
  75. mistralai/models_.py +67 -212
  76. mistralai/ocr.py +33 -36
  77. mistralai/sdk.py +15 -2
  78. mistralai/transcriptions.py +21 -60
  79. mistralai/utils/__init__.py +18 -5
  80. mistralai/utils/eventstreaming.py +10 -0
  81. mistralai/utils/serializers.py +3 -2
  82. mistralai/utils/unmarshal_json_response.py +24 -0
  83. {mistralai-1.9.10.dist-info → mistralai-1.10.0.dist-info}/METADATA +89 -40
  84. {mistralai-1.9.10.dist-info → mistralai-1.10.0.dist-info}/RECORD +86 -75
  85. {mistralai-1.9.10.dist-info → mistralai-1.10.0.dist-info}/WHEEL +1 -1
  86. {mistralai-1.9.10.dist-info → mistralai-1.10.0.dist-info/licenses}/LICENSE +0 -0
mistralai/agents.py CHANGED
@@ -5,7 +5,8 @@ from mistralai import models, utils
5
5
  from mistralai._hooks import HookContext
6
6
  from mistralai.types import OptionalNullable, UNSET
7
7
  from mistralai.utils import eventstreaming, get_security_from_env
8
- from typing import Any, List, Mapping, Optional, Union
8
+ from mistralai.utils.unmarshal_json_response import unmarshal_json_response
9
+ from typing import Any, Dict, List, Mapping, Optional, Union
9
10
 
10
11
 
11
12
  class Agents(BaseSDK):
@@ -28,6 +29,7 @@ class Agents(BaseSDK):
28
29
  ]
29
30
  ] = None,
30
31
  random_seed: OptionalNullable[int] = UNSET,
32
+ metadata: OptionalNullable[Dict[str, Any]] = UNSET,
31
33
  response_format: Optional[
32
34
  Union[models.ResponseFormat, models.ResponseFormatTypedDict]
33
35
  ] = None,
@@ -61,13 +63,14 @@ class Agents(BaseSDK):
61
63
  :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
62
64
  :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
63
65
  :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
64
- :param response_format:
66
+ :param metadata:
67
+ :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.
65
68
  :param tools:
66
69
  :param tool_choice:
67
- :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.
68
- :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
70
+ :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.
71
+ :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
69
72
  :param n: Number of completions to return for each request, input tokens are only billed once.
70
- :param prediction:
73
+ :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.
71
74
  :param parallel_tool_calls:
72
75
  :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.
73
76
  :param retries: Override the default retry configuration for this method
@@ -90,6 +93,7 @@ class Agents(BaseSDK):
90
93
  stream=stream,
91
94
  stop=stop,
92
95
  random_seed=random_seed,
96
+ metadata=metadata,
93
97
  messages=utils.get_pydantic_model(
94
98
  messages, List[models.AgentsCompletionRequestMessages]
95
99
  ),
@@ -155,31 +159,20 @@ class Agents(BaseSDK):
155
159
 
156
160
  response_data: Any = None
157
161
  if utils.match_response(http_res, "200", "application/json"):
158
- return utils.unmarshal_json(http_res.text, models.ChatCompletionResponse)
162
+ return unmarshal_json_response(models.ChatCompletionResponse, http_res)
159
163
  if utils.match_response(http_res, "422", "application/json"):
160
- response_data = utils.unmarshal_json(
161
- http_res.text, models.HTTPValidationErrorData
164
+ response_data = unmarshal_json_response(
165
+ models.HTTPValidationErrorData, http_res
162
166
  )
163
- raise models.HTTPValidationError(data=response_data)
167
+ raise models.HTTPValidationError(response_data, http_res)
164
168
  if utils.match_response(http_res, "4XX", "*"):
165
169
  http_res_text = utils.stream_to_text(http_res)
166
- raise models.SDKError(
167
- "API error occurred", http_res.status_code, http_res_text, http_res
168
- )
170
+ raise models.SDKError("API error occurred", http_res, http_res_text)
169
171
  if utils.match_response(http_res, "5XX", "*"):
170
172
  http_res_text = utils.stream_to_text(http_res)
171
- raise models.SDKError(
172
- "API error occurred", http_res.status_code, http_res_text, http_res
173
- )
173
+ raise models.SDKError("API error occurred", http_res, http_res_text)
174
174
 
175
- content_type = http_res.headers.get("Content-Type")
176
- http_res_text = utils.stream_to_text(http_res)
177
- raise models.SDKError(
178
- f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
179
- http_res.status_code,
180
- http_res_text,
181
- http_res,
182
- )
175
+ raise models.SDKError("Unexpected response received", http_res)
183
176
 
184
177
  async def complete_async(
185
178
  self,
@@ -198,6 +191,7 @@ class Agents(BaseSDK):
198
191
  ]
199
192
  ] = None,
200
193
  random_seed: OptionalNullable[int] = UNSET,
194
+ metadata: OptionalNullable[Dict[str, Any]] = UNSET,
201
195
  response_format: Optional[
202
196
  Union[models.ResponseFormat, models.ResponseFormatTypedDict]
203
197
  ] = None,
@@ -231,13 +225,14 @@ class Agents(BaseSDK):
231
225
  :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
232
226
  :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
233
227
  :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
234
- :param response_format:
228
+ :param metadata:
229
+ :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.
235
230
  :param tools:
236
231
  :param tool_choice:
237
- :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.
238
- :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
232
+ :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.
233
+ :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
239
234
  :param n: Number of completions to return for each request, input tokens are only billed once.
240
- :param prediction:
235
+ :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.
241
236
  :param parallel_tool_calls:
242
237
  :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.
243
238
  :param retries: Override the default retry configuration for this method
@@ -260,6 +255,7 @@ class Agents(BaseSDK):
260
255
  stream=stream,
261
256
  stop=stop,
262
257
  random_seed=random_seed,
258
+ metadata=metadata,
263
259
  messages=utils.get_pydantic_model(
264
260
  messages, List[models.AgentsCompletionRequestMessages]
265
261
  ),
@@ -325,31 +321,20 @@ class Agents(BaseSDK):
325
321
 
326
322
  response_data: Any = None
327
323
  if utils.match_response(http_res, "200", "application/json"):
328
- return utils.unmarshal_json(http_res.text, models.ChatCompletionResponse)
324
+ return unmarshal_json_response(models.ChatCompletionResponse, http_res)
329
325
  if utils.match_response(http_res, "422", "application/json"):
330
- response_data = utils.unmarshal_json(
331
- http_res.text, models.HTTPValidationErrorData
326
+ response_data = unmarshal_json_response(
327
+ models.HTTPValidationErrorData, http_res
332
328
  )
333
- raise models.HTTPValidationError(data=response_data)
329
+ raise models.HTTPValidationError(response_data, http_res)
334
330
  if utils.match_response(http_res, "4XX", "*"):
335
331
  http_res_text = await utils.stream_to_text_async(http_res)
336
- raise models.SDKError(
337
- "API error occurred", http_res.status_code, http_res_text, http_res
338
- )
332
+ raise models.SDKError("API error occurred", http_res, http_res_text)
339
333
  if utils.match_response(http_res, "5XX", "*"):
340
334
  http_res_text = await utils.stream_to_text_async(http_res)
341
- raise models.SDKError(
342
- "API error occurred", http_res.status_code, http_res_text, http_res
343
- )
335
+ raise models.SDKError("API error occurred", http_res, http_res_text)
344
336
 
345
- content_type = http_res.headers.get("Content-Type")
346
- http_res_text = await utils.stream_to_text_async(http_res)
347
- raise models.SDKError(
348
- f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
349
- http_res.status_code,
350
- http_res_text,
351
- http_res,
352
- )
337
+ raise models.SDKError("Unexpected response received", http_res)
353
338
 
354
339
  def stream(
355
340
  self,
@@ -368,6 +353,7 @@ class Agents(BaseSDK):
368
353
  ]
369
354
  ] = None,
370
355
  random_seed: OptionalNullable[int] = UNSET,
356
+ metadata: OptionalNullable[Dict[str, Any]] = UNSET,
371
357
  response_format: Optional[
372
358
  Union[models.ResponseFormat, models.ResponseFormatTypedDict]
373
359
  ] = None,
@@ -403,13 +389,14 @@ class Agents(BaseSDK):
403
389
  :param stream:
404
390
  :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
405
391
  :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
406
- :param response_format:
392
+ :param metadata:
393
+ :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.
407
394
  :param tools:
408
395
  :param tool_choice:
409
- :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.
410
- :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
396
+ :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.
397
+ :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
411
398
  :param n: Number of completions to return for each request, input tokens are only billed once.
412
- :param prediction:
399
+ :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.
413
400
  :param parallel_tool_calls:
414
401
  :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.
415
402
  :param retries: Override the default retry configuration for this method
@@ -432,6 +419,7 @@ class Agents(BaseSDK):
432
419
  stream=stream,
433
420
  stop=stop,
434
421
  random_seed=random_seed,
422
+ metadata=metadata,
435
423
  messages=utils.get_pydantic_model(
436
424
  messages, List[models.AgentsCompletionStreamRequestMessages]
437
425
  ),
@@ -502,32 +490,23 @@ class Agents(BaseSDK):
502
490
  http_res,
503
491
  lambda raw: utils.unmarshal_json(raw, models.CompletionEvent),
504
492
  sentinel="[DONE]",
493
+ client_ref=self,
505
494
  )
506
495
  if utils.match_response(http_res, "422", "application/json"):
507
496
  http_res_text = utils.stream_to_text(http_res)
508
- response_data = utils.unmarshal_json(
509
- http_res_text, models.HTTPValidationErrorData
497
+ response_data = unmarshal_json_response(
498
+ models.HTTPValidationErrorData, http_res, http_res_text
510
499
  )
511
- raise models.HTTPValidationError(data=response_data)
500
+ raise models.HTTPValidationError(response_data, http_res, http_res_text)
512
501
  if utils.match_response(http_res, "4XX", "*"):
513
502
  http_res_text = utils.stream_to_text(http_res)
514
- raise models.SDKError(
515
- "API error occurred", http_res.status_code, http_res_text, http_res
516
- )
503
+ raise models.SDKError("API error occurred", http_res, http_res_text)
517
504
  if utils.match_response(http_res, "5XX", "*"):
518
505
  http_res_text = utils.stream_to_text(http_res)
519
- raise models.SDKError(
520
- "API error occurred", http_res.status_code, http_res_text, http_res
521
- )
506
+ raise models.SDKError("API error occurred", http_res, http_res_text)
522
507
 
523
- content_type = http_res.headers.get("Content-Type")
524
508
  http_res_text = utils.stream_to_text(http_res)
525
- raise models.SDKError(
526
- f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
527
- http_res.status_code,
528
- http_res_text,
529
- http_res,
530
- )
509
+ raise models.SDKError("Unexpected response received", http_res, http_res_text)
531
510
 
532
511
  async def stream_async(
533
512
  self,
@@ -546,6 +525,7 @@ class Agents(BaseSDK):
546
525
  ]
547
526
  ] = None,
548
527
  random_seed: OptionalNullable[int] = UNSET,
528
+ metadata: OptionalNullable[Dict[str, Any]] = UNSET,
549
529
  response_format: Optional[
550
530
  Union[models.ResponseFormat, models.ResponseFormatTypedDict]
551
531
  ] = None,
@@ -581,13 +561,14 @@ class Agents(BaseSDK):
581
561
  :param stream:
582
562
  :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
583
563
  :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
584
- :param response_format:
564
+ :param metadata:
565
+ :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.
585
566
  :param tools:
586
567
  :param tool_choice:
587
- :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.
588
- :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
568
+ :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.
569
+ :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
589
570
  :param n: Number of completions to return for each request, input tokens are only billed once.
590
- :param prediction:
571
+ :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.
591
572
  :param parallel_tool_calls:
592
573
  :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.
593
574
  :param retries: Override the default retry configuration for this method
@@ -610,6 +591,7 @@ class Agents(BaseSDK):
610
591
  stream=stream,
611
592
  stop=stop,
612
593
  random_seed=random_seed,
594
+ metadata=metadata,
613
595
  messages=utils.get_pydantic_model(
614
596
  messages, List[models.AgentsCompletionStreamRequestMessages]
615
597
  ),
@@ -680,29 +662,20 @@ class Agents(BaseSDK):
680
662
  http_res,
681
663
  lambda raw: utils.unmarshal_json(raw, models.CompletionEvent),
682
664
  sentinel="[DONE]",
665
+ client_ref=self,
683
666
  )
684
667
  if utils.match_response(http_res, "422", "application/json"):
685
668
  http_res_text = await utils.stream_to_text_async(http_res)
686
- response_data = utils.unmarshal_json(
687
- http_res_text, models.HTTPValidationErrorData
669
+ response_data = unmarshal_json_response(
670
+ models.HTTPValidationErrorData, http_res, http_res_text
688
671
  )
689
- raise models.HTTPValidationError(data=response_data)
672
+ raise models.HTTPValidationError(response_data, http_res, http_res_text)
690
673
  if utils.match_response(http_res, "4XX", "*"):
691
674
  http_res_text = await utils.stream_to_text_async(http_res)
692
- raise models.SDKError(
693
- "API error occurred", http_res.status_code, http_res_text, http_res
694
- )
675
+ raise models.SDKError("API error occurred", http_res, http_res_text)
695
676
  if utils.match_response(http_res, "5XX", "*"):
696
677
  http_res_text = await utils.stream_to_text_async(http_res)
697
- raise models.SDKError(
698
- "API error occurred", http_res.status_code, http_res_text, http_res
699
- )
678
+ raise models.SDKError("API error occurred", http_res, http_res_text)
700
679
 
701
- content_type = http_res.headers.get("Content-Type")
702
680
  http_res_text = await utils.stream_to_text_async(http_res)
703
- raise models.SDKError(
704
- f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
705
- http_res.status_code,
706
- http_res_text,
707
- http_res,
708
- )
681
+ raise models.SDKError("Unexpected response received", http_res, http_res_text)
mistralai/audio.py CHANGED
@@ -3,16 +3,21 @@
3
3
  from .basesdk import BaseSDK
4
4
  from .sdkconfiguration import SDKConfiguration
5
5
  from mistralai.transcriptions import Transcriptions
6
+ from typing import Optional
6
7
 
7
8
 
8
9
  class Audio(BaseSDK):
9
10
  transcriptions: Transcriptions
10
11
  r"""API for audio transcription."""
11
12
 
12
- def __init__(self, sdk_config: SDKConfiguration) -> None:
13
- BaseSDK.__init__(self, sdk_config)
13
+ def __init__(
14
+ self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None
15
+ ) -> None:
16
+ BaseSDK.__init__(self, sdk_config, parent_ref=parent_ref)
14
17
  self.sdk_configuration = sdk_config
15
18
  self._init_sdks()
16
19
 
17
20
  def _init_sdks(self):
18
- self.transcriptions = Transcriptions(self.sdk_configuration)
21
+ self.transcriptions = Transcriptions(
22
+ self.sdk_configuration, parent_ref=self.parent_ref
23
+ )
mistralai/basesdk.py CHANGED
@@ -15,9 +15,19 @@ from urllib.parse import parse_qs, urlparse
15
15
 
16
16
  class BaseSDK:
17
17
  sdk_configuration: SDKConfiguration
18
+ parent_ref: Optional[object] = None
19
+ """
20
+ Reference to the root SDK instance, if any. This will prevent it from
21
+ being garbage collected while there are active streams.
22
+ """
18
23
 
19
- def __init__(self, sdk_config: SDKConfiguration) -> None:
24
+ def __init__(
25
+ self,
26
+ sdk_config: SDKConfiguration,
27
+ parent_ref: Optional[object] = None,
28
+ ) -> None:
20
29
  self.sdk_configuration = sdk_config
30
+ self.parent_ref = parent_ref
21
31
 
22
32
  def _get_url(self, base_url, url_variables):
23
33
  sdk_url, sdk_variables = self.sdk_configuration.get_server_details()
@@ -244,7 +254,7 @@ class BaseSDK:
244
254
 
245
255
  if http_res is None:
246
256
  logger.debug("Raising no response SDK error")
247
- raise models.SDKError("No response received")
257
+ raise models.NoResponseError("No response received")
248
258
 
249
259
  logger.debug(
250
260
  "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s",
@@ -265,7 +275,7 @@ class BaseSDK:
265
275
  http_res = result
266
276
  else:
267
277
  logger.debug("Raising unexpected SDK error")
268
- raise models.SDKError("Unexpected error occurred")
278
+ raise models.SDKError("Unexpected error occurred", http_res)
269
279
 
270
280
  return http_res
271
281
 
@@ -316,7 +326,7 @@ class BaseSDK:
316
326
 
317
327
  if http_res is None:
318
328
  logger.debug("Raising no response SDK error")
319
- raise models.SDKError("No response received")
329
+ raise models.NoResponseError("No response received")
320
330
 
321
331
  logger.debug(
322
332
  "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s",
@@ -337,7 +347,7 @@ class BaseSDK:
337
347
  http_res = result
338
348
  else:
339
349
  logger.debug("Raising unexpected SDK error")
340
- raise models.SDKError("Unexpected error occurred")
350
+ raise models.SDKError("Unexpected error occurred", http_res)
341
351
 
342
352
  return http_res
343
353
 
mistralai/batch.py CHANGED
@@ -3,15 +3,18 @@
3
3
  from .basesdk import BaseSDK
4
4
  from .sdkconfiguration import SDKConfiguration
5
5
  from mistralai.mistral_jobs import MistralJobs
6
+ from typing import Optional
6
7
 
7
8
 
8
9
  class Batch(BaseSDK):
9
10
  jobs: MistralJobs
10
11
 
11
- def __init__(self, sdk_config: SDKConfiguration) -> None:
12
- BaseSDK.__init__(self, sdk_config)
12
+ def __init__(
13
+ self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None
14
+ ) -> None:
15
+ BaseSDK.__init__(self, sdk_config, parent_ref=parent_ref)
13
16
  self.sdk_configuration = sdk_config
14
17
  self._init_sdks()
15
18
 
16
19
  def _init_sdks(self):
17
- self.jobs = MistralJobs(self.sdk_configuration)
20
+ self.jobs = MistralJobs(self.sdk_configuration, parent_ref=self.parent_ref)
mistralai/beta.py CHANGED
@@ -5,6 +5,7 @@ from .sdkconfiguration import SDKConfiguration
5
5
  from mistralai.conversations import Conversations
6
6
  from mistralai.libraries import Libraries
7
7
  from mistralai.mistral_agents import MistralAgents
8
+ from typing import Optional
8
9
 
9
10
 
10
11
  class Beta(BaseSDK):
@@ -15,12 +16,16 @@ class Beta(BaseSDK):
15
16
  libraries: Libraries
16
17
  r"""(beta) Libraries API to create and manage libraries - index your documents to enhance agent capabilities."""
17
18
 
18
- def __init__(self, sdk_config: SDKConfiguration) -> None:
19
- BaseSDK.__init__(self, sdk_config)
19
+ def __init__(
20
+ self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None
21
+ ) -> None:
22
+ BaseSDK.__init__(self, sdk_config, parent_ref=parent_ref)
20
23
  self.sdk_configuration = sdk_config
21
24
  self._init_sdks()
22
25
 
23
26
  def _init_sdks(self):
24
- self.conversations = Conversations(self.sdk_configuration)
25
- self.agents = MistralAgents(self.sdk_configuration)
26
- self.libraries = Libraries(self.sdk_configuration)
27
+ self.conversations = Conversations(
28
+ self.sdk_configuration, parent_ref=self.parent_ref
29
+ )
30
+ self.agents = MistralAgents(self.sdk_configuration, parent_ref=self.parent_ref)
31
+ self.libraries = Libraries(self.sdk_configuration, parent_ref=self.parent_ref)