mistralai 1.2.4__py3-none-any.whl → 1.2.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. mistralai/_hooks/custom_user_agent.py +7 -1
  2. mistralai/_version.py +1 -1
  3. mistralai/agents.py +17 -5
  4. mistralai/basesdk.py +25 -9
  5. mistralai/chat.py +25 -13
  6. mistralai/classifiers.py +17 -5
  7. mistralai/embeddings.py +9 -3
  8. mistralai/files.py +49 -13
  9. mistralai/fim.py +17 -5
  10. mistralai/jobs.py +41 -11
  11. mistralai/mistral_jobs.py +33 -9
  12. mistralai/models/chatcompletionrequest.py +2 -2
  13. mistralai/models/chatcompletionstreamrequest.py +2 -2
  14. mistralai/models/files_api_routes_upload_fileop.py +2 -6
  15. mistralai/models_.py +49 -13
  16. mistralai/sdkconfiguration.py +3 -3
  17. mistralai/utils/forms.py +4 -10
  18. mistralai/utils/requestbodies.py +1 -1
  19. {mistralai-1.2.4.dist-info → mistralai-1.2.6.dist-info}/METADATA +117 -86
  20. {mistralai-1.2.4.dist-info → mistralai-1.2.6.dist-info}/RECORD +41 -41
  21. {mistralai-1.2.4.dist-info → mistralai-1.2.6.dist-info}/WHEEL +1 -1
  22. mistralai_azure/_hooks/custom_user_agent.py +8 -2
  23. mistralai_azure/_version.py +1 -1
  24. mistralai_azure/basesdk.py +25 -9
  25. mistralai_azure/chat.py +25 -13
  26. mistralai_azure/models/chatcompletionrequest.py +2 -2
  27. mistralai_azure/models/chatcompletionstreamrequest.py +2 -2
  28. mistralai_azure/sdkconfiguration.py +3 -3
  29. mistralai_azure/utils/forms.py +4 -10
  30. mistralai_azure/utils/requestbodies.py +1 -1
  31. mistralai_gcp/_hooks/custom_user_agent.py +7 -1
  32. mistralai_gcp/_version.py +1 -1
  33. mistralai_gcp/basesdk.py +25 -9
  34. mistralai_gcp/chat.py +21 -9
  35. mistralai_gcp/fim.py +17 -5
  36. mistralai_gcp/models/chatcompletionrequest.py +1 -1
  37. mistralai_gcp/models/chatcompletionstreamrequest.py +1 -1
  38. mistralai_gcp/sdkconfiguration.py +3 -3
  39. mistralai_gcp/utils/forms.py +4 -10
  40. mistralai_gcp/utils/requestbodies.py +1 -1
  41. {mistralai-1.2.4.dist-info → mistralai-1.2.6.dist-info}/LICENSE +0 -0
mistralai_gcp/chat.py CHANGED
@@ -5,7 +5,7 @@ from mistralai_gcp import models, utils
 from mistralai_gcp._hooks import HookContext
 from mistralai_gcp.types import Nullable, OptionalNullable, UNSET
 from mistralai_gcp.utils import eventstreaming
-from typing import Any, List, Optional, Union
+from typing import Any, List, Mapping, Optional, Union


 class Chat(BaseSDK):
@@ -17,7 +17,7 @@ class Chat(BaseSDK):
         model: Nullable[str],
         messages: Union[List[models.Messages], List[models.MessagesTypedDict]],
         temperature: OptionalNullable[float] = UNSET,
-        top_p: Optional[float] = 1,
+        top_p: Optional[float] = None,
         max_tokens: OptionalNullable[int] = UNSET,
         stream: Optional[bool] = True,
         stop: Optional[Union[models.Stop, models.StopTypedDict]] = None,
@@ -40,6 +40,7 @@ class Chat(BaseSDK):
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
     ) -> Optional[eventstreaming.EventStream[models.CompletionEvent]]:
         r"""Stream chat completion

@@ -62,6 +63,7 @@ class Chat(BaseSDK):
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param http_headers: Additional headers to set or replace on requests.
         """
         base_url = None
         url_variables = None
@@ -92,7 +94,7 @@ class Chat(BaseSDK):
             n=n,
         )

-        req = self.build_request(
+        req = self._build_request(
             method="POST",
             path="/streamRawPredict",
             base_url=base_url,
@@ -103,6 +105,7 @@ class Chat(BaseSDK):
             request_has_query_params=True,
             user_agent_header="user-agent",
             accept_header_value="text/event-stream",
+            http_headers=http_headers,
             security=self.sdk_configuration.security,
             get_serialized_body=lambda: utils.serialize_request_body(
                 request, False, False, "json", models.ChatCompletionStreamRequest
@@ -162,7 +165,7 @@ class Chat(BaseSDK):
         model: Nullable[str],
         messages: Union[List[models.Messages], List[models.MessagesTypedDict]],
         temperature: OptionalNullable[float] = UNSET,
-        top_p: Optional[float] = 1,
+        top_p: Optional[float] = None,
         max_tokens: OptionalNullable[int] = UNSET,
         stream: Optional[bool] = True,
         stop: Optional[Union[models.Stop, models.StopTypedDict]] = None,
@@ -185,6 +188,7 @@ class Chat(BaseSDK):
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
     ) -> Optional[eventstreaming.EventStreamAsync[models.CompletionEvent]]:
         r"""Stream chat completion

@@ -207,6 +211,7 @@ class Chat(BaseSDK):
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param http_headers: Additional headers to set or replace on requests.
         """
         base_url = None
         url_variables = None
@@ -237,7 +242,7 @@ class Chat(BaseSDK):
             n=n,
         )

-        req = self.build_request_async(
+        req = self._build_request_async(
             method="POST",
             path="/streamRawPredict",
             base_url=base_url,
@@ -248,6 +253,7 @@ class Chat(BaseSDK):
             request_has_query_params=True,
             user_agent_header="user-agent",
             accept_header_value="text/event-stream",
+            http_headers=http_headers,
             security=self.sdk_configuration.security,
             get_serialized_body=lambda: utils.serialize_request_body(
                 request, False, False, "json", models.ChatCompletionStreamRequest
@@ -310,7 +316,7 @@ class Chat(BaseSDK):
             List[models.ChatCompletionRequestMessagesTypedDict],
         ],
         temperature: OptionalNullable[float] = UNSET,
-        top_p: Optional[float] = 1,
+        top_p: Optional[float] = None,
         max_tokens: OptionalNullable[int] = UNSET,
         stream: Optional[bool] = False,
         stop: Optional[
@@ -338,6 +344,7 @@ class Chat(BaseSDK):
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
     ) -> Optional[models.ChatCompletionResponse]:
         r"""Chat Completion

@@ -358,6 +365,7 @@ class Chat(BaseSDK):
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param http_headers: Additional headers to set or replace on requests.
         """
         base_url = None
         url_variables = None
@@ -390,7 +398,7 @@ class Chat(BaseSDK):
             n=n,
         )

-        req = self.build_request(
+        req = self._build_request(
             method="POST",
             path="/rawPredict",
             base_url=base_url,
@@ -401,6 +409,7 @@ class Chat(BaseSDK):
             request_has_query_params=True,
             user_agent_header="user-agent",
             accept_header_value="application/json",
+            http_headers=http_headers,
             security=self.sdk_configuration.security,
             get_serialized_body=lambda: utils.serialize_request_body(
                 request, False, False, "json", models.ChatCompletionRequest
@@ -459,7 +468,7 @@ class Chat(BaseSDK):
             List[models.ChatCompletionRequestMessagesTypedDict],
         ],
         temperature: OptionalNullable[float] = UNSET,
-        top_p: Optional[float] = 1,
+        top_p: Optional[float] = None,
         max_tokens: OptionalNullable[int] = UNSET,
         stream: Optional[bool] = False,
         stop: Optional[
@@ -487,6 +496,7 @@ class Chat(BaseSDK):
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
     ) -> Optional[models.ChatCompletionResponse]:
         r"""Chat Completion

@@ -507,6 +517,7 @@ class Chat(BaseSDK):
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param http_headers: Additional headers to set or replace on requests.
         """
         base_url = None
         url_variables = None
@@ -539,7 +550,7 @@ class Chat(BaseSDK):
             n=n,
         )

-        req = self.build_request_async(
+        req = self._build_request_async(
             method="POST",
             path="/rawPredict",
             base_url=base_url,
@@ -550,6 +561,7 @@ class Chat(BaseSDK):
             request_has_query_params=True,
             user_agent_header="user-agent",
             accept_header_value="application/json",
+            http_headers=http_headers,
             security=self.sdk_configuration.security,
             get_serialized_body=lambda: utils.serialize_request_body(
                 request, False, False, "json", models.ChatCompletionRequest
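
Every public chat method now accepts an optional http_headers mapping, threaded through to the (now private) _build_request helpers so callers can set or replace headers on a single request. A minimal usage sketch; the client class name, region, project, model, and header values are illustrative assumptions, only the http_headers keyword comes from this diff:

    from mistralai_gcp import MistralGoogleCloud

    client = MistralGoogleCloud(region="europe-west4", project_id="my-project")

    res = client.chat.complete(
        model="mistral-large-2407",
        messages=[{"role": "user", "content": "Hello"}],
        # New in 1.2.6: per-request headers, set or replaced on top of defaults
        http_headers={"x-request-id": "abc-123"},
    )
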
mistralai_gcp/fim.py CHANGED
@@ -5,7 +5,7 @@ from mistralai_gcp import models, utils
 from mistralai_gcp._hooks import HookContext
 from mistralai_gcp.types import Nullable, OptionalNullable, UNSET
 from mistralai_gcp.utils import eventstreaming
-from typing import Any, Optional, Union
+from typing import Any, Mapping, Optional, Union


 class Fim(BaseSDK):
@@ -32,6 +32,7 @@ class Fim(BaseSDK):
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
     ) -> Optional[eventstreaming.EventStream[models.CompletionEvent]]:
         r"""Stream fim completion

@@ -50,6 +51,7 @@ class Fim(BaseSDK):
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param http_headers: Additional headers to set or replace on requests.
         """
         base_url = None
         url_variables = None
@@ -72,7 +74,7 @@ class Fim(BaseSDK):
             min_tokens=min_tokens,
         )

-        req = self.build_request(
+        req = self._build_request(
             method="POST",
             path="/streamRawPredict#fim",
             base_url=base_url,
@@ -83,6 +85,7 @@ class Fim(BaseSDK):
             request_has_query_params=True,
             user_agent_header="user-agent",
             accept_header_value="text/event-stream",
+            http_headers=http_headers,
             security=self.sdk_configuration.security,
             get_serialized_body=lambda: utils.serialize_request_body(
                 request, False, False, "json", models.FIMCompletionStreamRequest
@@ -157,6 +160,7 @@ class Fim(BaseSDK):
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
     ) -> Optional[eventstreaming.EventStreamAsync[models.CompletionEvent]]:
         r"""Stream fim completion

@@ -175,6 +179,7 @@ class Fim(BaseSDK):
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param http_headers: Additional headers to set or replace on requests.
         """
         base_url = None
         url_variables = None
@@ -197,7 +202,7 @@ class Fim(BaseSDK):
             min_tokens=min_tokens,
         )

-        req = self.build_request_async(
+        req = self._build_request_async(
             method="POST",
             path="/streamRawPredict#fim",
             base_url=base_url,
@@ -208,6 +213,7 @@ class Fim(BaseSDK):
             request_has_query_params=True,
             user_agent_header="user-agent",
             accept_header_value="text/event-stream",
+            http_headers=http_headers,
             security=self.sdk_configuration.security,
             get_serialized_body=lambda: utils.serialize_request_body(
                 request, False, False, "json", models.FIMCompletionStreamRequest
@@ -282,6 +288,7 @@ class Fim(BaseSDK):
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
     ) -> Optional[models.FIMCompletionResponse]:
         r"""Fim Completion

@@ -300,6 +307,7 @@ class Fim(BaseSDK):
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param http_headers: Additional headers to set or replace on requests.
         """
         base_url = None
         url_variables = None
@@ -322,7 +330,7 @@ class Fim(BaseSDK):
             min_tokens=min_tokens,
         )

-        req = self.build_request(
+        req = self._build_request(
             method="POST",
             path="/rawPredict#fim",
             base_url=base_url,
@@ -333,6 +341,7 @@ class Fim(BaseSDK):
             request_has_query_params=True,
             user_agent_header="user-agent",
             accept_header_value="application/json",
+            http_headers=http_headers,
             security=self.sdk_configuration.security,
             get_serialized_body=lambda: utils.serialize_request_body(
                 request, False, False, "json", models.FIMCompletionRequest
@@ -403,6 +412,7 @@ class Fim(BaseSDK):
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
     ) -> Optional[models.FIMCompletionResponse]:
         r"""Fim Completion

@@ -421,6 +431,7 @@ class Fim(BaseSDK):
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param http_headers: Additional headers to set or replace on requests.
         """
         base_url = None
         url_variables = None
@@ -443,7 +454,7 @@ class Fim(BaseSDK):
             min_tokens=min_tokens,
         )

-        req = self.build_request_async(
+        req = self._build_request_async(
             method="POST",
             path="/rawPredict#fim",
             base_url=base_url,
@@ -454,6 +465,7 @@ class Fim(BaseSDK):
             request_has_query_params=True,
             user_agent_header="user-agent",
             accept_header_value="application/json",
+            http_headers=http_headers,
             security=self.sdk_configuration.security,
             get_serialized_body=lambda: utils.serialize_request_body(
                 request, False, False, "json", models.FIMCompletionRequest
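
The same http_headers parameter lands on all four Fim methods (sync/async, complete/stream). A sketch of a streaming FIM call with an extra header; the model name, prompt, and the event-access path are assumptions based on the types shown in this diff, only the http_headers keyword is new here:

    # Assumes a configured MistralGoogleCloud client as in the chat sketch above.
    stream = client.fim.stream(
        model="codestral-2405",
        prompt="def fibonacci(n):",
        suffix="return result",
        http_headers={"x-request-id": "fim-001"},
    )
    if stream is not None:
        # EventStream[models.CompletionEvent]; each event wraps a completion chunk
        for event in stream:
            print(event.data.choices[0].delta.content, end="")
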
mistralai_gcp/models/chatcompletionrequest.py CHANGED
@@ -105,7 +105,7 @@ class ChatCompletionRequest(BaseModel):
     temperature: OptionalNullable[float] = UNSET
     r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value."""

-    top_p: Optional[float] = 1
+    top_p: Optional[float] = None
     r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both."""

     max_tokens: OptionalNullable[int] = UNSET
mistralai_gcp/models/chatcompletionstreamrequest.py CHANGED
@@ -100,7 +100,7 @@ class ChatCompletionStreamRequest(BaseModel):
     temperature: OptionalNullable[float] = UNSET
     r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value."""

-    top_p: Optional[float] = 1
+    top_p: Optional[float] = None
     r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both."""

     max_tokens: OptionalNullable[int] = UNSET
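
In both request models, top_p now defaults to None instead of 1, so a request that does not set it defers to the server-side default rather than pinning nucleus sampling to 1. A quick way to observe the new default (a sketch; assumes the models can be constructed directly, with placeholder model and message values):

    from mistralai_gcp import models

    req = models.ChatCompletionRequest(
        model="mistral-large-2407",
        messages=[{"role": "user", "content": "Hi"}],
    )
    # 1.2.4 would have populated top_p=1 here; 1.2.6 leaves it unset
    assert req.top_p is None
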
mistralai_gcp/sdkconfiguration.py CHANGED
@@ -28,9 +28,9 @@ class SDKConfiguration:
     server: Optional[str] = ""
     language: str = "python"
     openapi_doc_version: str = "0.0.2"
-    sdk_version: str = "1.2.3"
-    gen_version: str = "2.470.1"
-    user_agent: str = "speakeasy-sdk/python 1.2.3 2.470.1 0.0.2 mistralai-gcp"
+    sdk_version: str = "1.2.6"
+    gen_version: str = "2.486.1"
+    user_agent: str = "speakeasy-sdk/python 1.2.6 2.486.1 0.0.2 mistralai-gcp"
     retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET)
     timeout_ms: Optional[int] = None

mistralai_gcp/utils/forms.py CHANGED
@@ -109,13 +109,12 @@ def serialize_multipart_form(
        if not field_metadata:
            continue

-        f_name = field.alias if field.alias is not None else name
+        f_name = field.alias if field.alias else name

        if field_metadata.file:
            file_fields: Dict[str, FieldInfo] = val.__class__.model_fields

            file_name = ""
-            field_name = ""
            content = None
            content_type = None

@@ -131,20 +130,15 @@ def serialize_multipart_form(
                elif file_field_name == "content_type":
                    content_type = getattr(val, file_field_name, None)
                else:
-                    field_name = (
-                        file_field.alias
-                        if file_field.alias is not None
-                        else file_field_name
-                    )
                    file_name = getattr(val, file_field_name)

-            if field_name == "" or file_name == "" or content is None:
+            if file_name == "" or content is None:
                raise ValueError("invalid multipart/form-data file")

            if content_type is not None:
-                files[field_name] = (file_name, content, content_type)
+                files[f_name] = (file_name, content, content_type)
            else:
-                files[field_name] = (file_name, content)
+                files[f_name] = (file_name, content)
        elif field_metadata.json:
            files[f_name] = (
                None,
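
The forms.py change keys the uploaded file part by the outer field's alias (f_name), matching how JSON parts are already keyed, instead of a field_name recovered from the inner file model, and drops the now-unneeded bookkeeping. A self-contained sketch of the new keying rule (simplified; names are illustrative, not the SDK's exact internals):

    from typing import Optional

    def multipart_file_entry(
        alias: Optional[str],
        name: str,
        file_name: str,
        content: bytes,
        content_type: Optional[str] = None,
    ):
        # The part name comes from the outer field: its alias if truthy, else
        # the field name. Note `if field.alias` also falls back on an
        # empty-string alias, where the old `is not None` check did not.
        f_name = alias if alias else name
        if file_name == "" or content is None:
            raise ValueError("invalid multipart/form-data file")
        if content_type is not None:
            return {f_name: (file_name, content, content_type)}
        return {f_name: (file_name, content)}

    # e.g. {'file': ('data.jsonl', b'{}')} -- the shape httpx expects for files=
    print(multipart_file_entry(None, "file", "data.jsonl", b"{}"))
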
mistralai_gcp/utils/requestbodies.py CHANGED
@@ -23,7 +23,7 @@ SERIALIZATION_METHOD_TO_CONTENT_TYPE = {

 @dataclass
 class SerializedRequestBody:
-    media_type: str
+    media_type: Optional[str] = None
     content: Optional[Any] = None
     data: Optional[Any] = None
     files: Optional[Any] = None
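
Making media_type optional with a None default means a SerializedRequestBody can now represent a request with no body at all, since a dataclass field without a default must be supplied at construction time. A standalone reproduction of the before/after behavior, mirroring the 1.2.6 definition shown above:

    from dataclasses import dataclass
    from typing import Any, Optional

    @dataclass
    class SerializedRequestBody:
        media_type: Optional[str] = None  # required (no default) in 1.2.4
        content: Optional[Any] = None
        data: Optional[Any] = None
        files: Optional[Any] = None

    body = SerializedRequestBody()  # TypeError under 1.2.4; valid in 1.2.6
    assert body.media_type is None
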