mistralai 1.5.0__py3-none-any.whl → 1.5.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
mistralai/_version.py CHANGED
@@ -3,10 +3,10 @@
 import importlib.metadata
 
 __title__: str = "mistralai"
-__version__: str = "1.5.0"
+__version__: str = "1.5.1"
 __openapi_doc_version__: str = "0.0.2"
 __gen_version__: str = "2.497.0"
-__user_agent__: str = "speakeasy-sdk/python 1.5.0 2.497.0 0.0.2 mistralai"
+__user_agent__: str = "speakeasy-sdk/python 1.5.1 2.497.0 0.0.2 mistralai"
 
 try:
     if __package__ is not None:
mistralai/chat.py CHANGED
@@ -3,7 +3,7 @@
 from .basesdk import BaseSDK
 from mistralai import models, utils
 from mistralai._hooks import HookContext
-from mistralai.types import Nullable, OptionalNullable, UNSET
+from mistralai.types import OptionalNullable, UNSET
 from mistralai.utils import eventstreaming, get_security_from_env
 from typing import Any, List, Mapping, Optional, Union
 
@@ -96,7 +96,7 @@ class Chat(BaseSDK):
     def complete(
         self,
         *,
-        model: Nullable[str],
+        model: str,
         messages: Union[List[models.Messages], List[models.MessagesTypedDict]],
         temperature: OptionalNullable[float] = UNSET,
         top_p: Optional[float] = None,
@@ -253,7 +253,7 @@ class Chat(BaseSDK):
     async def complete_async(
         self,
         *,
-        model: Nullable[str],
+        model: str,
         messages: Union[List[models.Messages], List[models.MessagesTypedDict]],
         temperature: OptionalNullable[float] = UNSET,
         top_p: Optional[float] = None,
@@ -410,7 +410,7 @@ class Chat(BaseSDK):
     def stream(
         self,
         *,
-        model: Nullable[str],
+        model: str,
         messages: Union[
             List[models.ChatCompletionStreamRequestMessages],
             List[models.ChatCompletionStreamRequestMessagesTypedDict],
@@ -585,7 +585,7 @@ class Chat(BaseSDK):
     async def stream_async(
         self,
         *,
-        model: Nullable[str],
+        model: str,
         messages: Union[
             List[models.ChatCompletionStreamRequestMessages],
             List[models.ChatCompletionStreamRequestMessagesTypedDict],
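Net effect for callers: `model` is now a plain required `str` on all four chat methods instead of `Nullable[str]`, so passing `None` is no longer type-correct. A minimal sketch of a 1.5.1 call (assumes an API key in the environment; the model ID is illustrative):

```python
import os

from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

# `model` must be a real string now; `model=None` no longer type-checks.
res = client.chat.complete(
    model="mistral-small-latest",  # illustrative model ID
    messages=[{"role": "user", "content": "Say hello."}],
)
print(res.choices[0].message.content)  # shape per the SDK's ChatCompletionResponse
```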
mistralai/classifiers.py CHANGED
@@ -3,7 +3,7 @@
 from .basesdk import BaseSDK
 from mistralai import models, utils
 from mistralai._hooks import HookContext
-from mistralai.types import Nullable, OptionalNullable, UNSET
+from mistralai.types import OptionalNullable, UNSET
 from mistralai.utils import get_security_from_env
 from typing import Any, Mapping, Optional, Union
 
@@ -14,11 +14,11 @@ class Classifiers(BaseSDK):
     def moderate(
         self,
         *,
+        model: str,
         inputs: Union[
             models.ClassificationRequestInputs,
             models.ClassificationRequestInputsTypedDict,
         ],
-        model: OptionalNullable[str] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
@@ -26,8 +26,8 @@ class Classifiers(BaseSDK):
     ) -> models.ClassificationResponse:
         r"""Moderations
 
+        :param model: ID of the model to use.
         :param inputs: Text to classify.
-        :param model:
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -42,8 +42,8 @@ class Classifiers(BaseSDK):
             base_url = server_url
 
         request = models.ClassificationRequest(
-            inputs=inputs,
             model=model,
+            inputs=inputs,
         )
 
         req = self._build_request(
@@ -115,11 +115,11 @@ class Classifiers(BaseSDK):
     async def moderate_async(
         self,
         *,
+        model: str,
         inputs: Union[
             models.ClassificationRequestInputs,
             models.ClassificationRequestInputsTypedDict,
         ],
-        model: OptionalNullable[str] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
@@ -127,8 +127,8 @@ class Classifiers(BaseSDK):
     ) -> models.ClassificationResponse:
         r"""Moderations
 
+        :param model: ID of the model to use.
         :param inputs: Text to classify.
-        :param model:
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -143,8 +143,8 @@ class Classifiers(BaseSDK):
             base_url = server_url
 
         request = models.ClassificationRequest(
-            inputs=inputs,
             model=model,
+            inputs=inputs,
         )
 
         req = self._build_request_async(
@@ -216,11 +216,12 @@ class Classifiers(BaseSDK):
     def moderate_chat(
         self,
         *,
+        model: str,
         inputs: Union[
-            models.ChatClassificationRequestInputs,
-            models.ChatClassificationRequestInputsTypedDict,
+            models.ChatModerationRequestInputs,
+            models.ChatModerationRequestInputsTypedDict,
         ],
-        model: Nullable[str],
+        truncate_for_context_length: Optional[bool] = False,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
@@ -228,8 +229,9 @@ class Classifiers(BaseSDK):
     ) -> models.ClassificationResponse:
         r"""Moderations Chat
 
-        :param inputs: Chat to classify
         :param model:
+        :param inputs: Chat to classify
+        :param truncate_for_context_length:
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -243,11 +245,10 @@ class Classifiers(BaseSDK):
         if server_url is not None:
             base_url = server_url
 
-        request = models.ChatClassificationRequest(
-            inputs=utils.get_pydantic_model(
-                inputs, models.ChatClassificationRequestInputs
-            ),
+        request = models.ChatModerationRequest(
             model=model,
+            inputs=utils.get_pydantic_model(inputs, models.ChatModerationRequestInputs),
+            truncate_for_context_length=truncate_for_context_length,
         )
 
         req = self._build_request(
@@ -264,7 +265,7 @@ class Classifiers(BaseSDK):
             http_headers=http_headers,
             security=self.sdk_configuration.security,
             get_serialized_body=lambda: utils.serialize_request_body(
-                request, False, False, "json", models.ChatClassificationRequest
+                request, False, False, "json", models.ChatModerationRequest
             ),
             timeout_ms=timeout_ms,
         )
@@ -319,11 +320,12 @@ class Classifiers(BaseSDK):
     async def moderate_chat_async(
         self,
         *,
+        model: str,
         inputs: Union[
-            models.ChatClassificationRequestInputs,
-            models.ChatClassificationRequestInputsTypedDict,
+            models.ChatModerationRequestInputs,
+            models.ChatModerationRequestInputsTypedDict,
         ],
-        model: Nullable[str],
+        truncate_for_context_length: Optional[bool] = False,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
@@ -331,8 +333,9 @@ class Classifiers(BaseSDK):
     ) -> models.ClassificationResponse:
         r"""Moderations Chat
 
-        :param inputs: Chat to classify
         :param model:
+        :param inputs: Chat to classify
+        :param truncate_for_context_length:
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -346,11 +349,10 @@ class Classifiers(BaseSDK):
         if server_url is not None:
             base_url = server_url
 
-        request = models.ChatClassificationRequest(
-            inputs=utils.get_pydantic_model(
-                inputs, models.ChatClassificationRequestInputs
-            ),
+        request = models.ChatModerationRequest(
             model=model,
+            inputs=utils.get_pydantic_model(inputs, models.ChatModerationRequestInputs),
+            truncate_for_context_length=truncate_for_context_length,
         )
 
         req = self._build_request_async(
@@ -367,7 +369,7 @@ class Classifiers(BaseSDK):
             http_headers=http_headers,
             security=self.sdk_configuration.security,
             get_serialized_body=lambda: utils.serialize_request_body(
-                request, False, False, "json", models.ChatClassificationRequest
+                request, False, False, "json", models.ChatModerationRequest
             ),
             timeout_ms=timeout_ms,
         )
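For `moderate` and `moderate_chat`, `model` moves to the front of the signature and becomes required, and `moderate_chat` gains a `truncate_for_context_length` flag. A sketch of the new call shape (all arguments are keyword-only because of the `*`; the model ID is illustrative):

```python
res = client.classifiers.moderate_chat(
    model="mistral-moderation-latest",  # illustrative model ID
    inputs=[  # one conversation: a list of role/content messages
        {"role": "user", "content": "How do I pick a strong password?"},
        {"role": "assistant", "content": "Use a long random passphrase."},
    ],
    truncate_for_context_length=True,  # new in 1.5.1; defaults to False
)
print(res)  # models.ClassificationResponse
```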
mistralai/embeddings.py CHANGED
@@ -16,7 +16,6 @@ class Embeddings(BaseSDK):
         *,
         inputs: Union[models.Inputs, models.InputsTypedDict],
         model: Optional[str] = "mistral-embed",
-        encoding_format: OptionalNullable[str] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
@@ -28,7 +27,6 @@ class Embeddings(BaseSDK):
 
         :param inputs: Text to embed.
         :param model: ID of the model to use.
-        :param encoding_format: The format to return the embeddings in.
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -43,9 +41,8 @@ class Embeddings(BaseSDK):
             base_url = server_url
 
         request = models.EmbeddingRequest(
-            inputs=inputs,
             model=model,
-            encoding_format=encoding_format,
+            inputs=inputs,
         )
 
         req = self._build_request(
@@ -119,7 +116,6 @@ class Embeddings(BaseSDK):
         *,
         inputs: Union[models.Inputs, models.InputsTypedDict],
         model: Optional[str] = "mistral-embed",
-        encoding_format: OptionalNullable[str] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
@@ -131,7 +127,6 @@ class Embeddings(BaseSDK):
 
         :param inputs: Text to embed.
         :param model: ID of the model to use.
-        :param encoding_format: The format to return the embeddings in.
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -146,9 +141,8 @@ class Embeddings(BaseSDK):
             base_url = server_url
 
         request = models.EmbeddingRequest(
-            inputs=inputs,
             model=model,
-            encoding_format=encoding_format,
+            inputs=inputs,
         )
 
         req = self._build_request_async(
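The `encoding_format` parameter is removed outright, so any code still passing it will now raise a `TypeError`. A sketch of the surviving call shape (the `def` line is outside these hunks; `create` is assumed from the published SDK surface):

```python
emb = client.embeddings.create(
    model="mistral-embed",  # still the default; shown for clarity
    inputs=["Embed this sentence.", "And this one."],
    # encoding_format="float",  # removed in 1.5.1; passing it raises TypeError
)
print(len(emb.data))  # one embedding per input, per the SDK's EmbeddingResponse
```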
mistralai/fim.py CHANGED
@@ -3,7 +3,7 @@
 from .basesdk import BaseSDK
 from mistralai import models, utils
 from mistralai._hooks import HookContext
-from mistralai.types import Nullable, OptionalNullable, UNSET
+from mistralai.types import OptionalNullable, UNSET
 from mistralai.utils import eventstreaming, get_security_from_env
 from typing import Any, Mapping, Optional, Union
 
@@ -14,7 +14,7 @@ class Fim(BaseSDK):
     def complete(
         self,
         *,
-        model: Nullable[str],
+        model: str,
         prompt: str,
         temperature: OptionalNullable[float] = UNSET,
         top_p: Optional[float] = 1,
@@ -143,7 +143,7 @@ class Fim(BaseSDK):
     async def complete_async(
         self,
         *,
-        model: Nullable[str],
+        model: str,
         prompt: str,
         temperature: OptionalNullable[float] = UNSET,
         top_p: Optional[float] = 1,
@@ -272,7 +272,7 @@ class Fim(BaseSDK):
     def stream(
         self,
         *,
-        model: Nullable[str],
+        model: str,
         prompt: str,
         temperature: OptionalNullable[float] = UNSET,
         top_p: Optional[float] = 1,
@@ -407,7 +407,7 @@ class Fim(BaseSDK):
     async def stream_async(
         self,
         *,
-        model: Nullable[str],
+        model: str,
         prompt: str,
         temperature: OptionalNullable[float] = UNSET,
         top_p: Optional[float] = 1,
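The same `Nullable[str]` to `str` tightening applies to all four FIM methods. A minimal sketch (model ID illustrative; `suffix` is part of the published FIM API, not shown in these hunks):

```python
res = client.fim.complete(
    model="codestral-latest",  # illustrative code-completion model ID
    prompt="def fibonacci(n):",
    suffix="return result",  # optional in the published API; not in this diff
)
```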
mistralai/models/__init__.py CHANGED
@@ -39,16 +39,6 @@ from .batchjobin import BatchJobIn, BatchJobInTypedDict
 from .batchjobout import BatchJobOut, BatchJobOutObject, BatchJobOutTypedDict
 from .batchjobsout import BatchJobsOut, BatchJobsOutObject, BatchJobsOutTypedDict
 from .batchjobstatus import BatchJobStatus
-from .chatclassificationrequest import (
-    ChatClassificationRequest,
-    ChatClassificationRequestInputs,
-    ChatClassificationRequestInputsTypedDict,
-    ChatClassificationRequestTypedDict,
-    One,
-    OneTypedDict,
-    Two,
-    TwoTypedDict,
-)
 from .chatcompletionchoice import (
     ChatCompletionChoice,
     ChatCompletionChoiceTypedDict,
@@ -78,6 +68,16 @@ from .chatcompletionstreamrequest import (
     ChatCompletionStreamRequestToolChoiceTypedDict,
     ChatCompletionStreamRequestTypedDict,
 )
+from .chatmoderationrequest import (
+    ChatModerationRequest,
+    ChatModerationRequestInputs,
+    ChatModerationRequestInputsTypedDict,
+    ChatModerationRequestTypedDict,
+    One,
+    OneTypedDict,
+    Two,
+    TwoTypedDict,
+)
 from .checkpointout import CheckpointOut, CheckpointOutTypedDict
 from .classificationobject import ClassificationObject, ClassificationObjectTypedDict
 from .classificationrequest import (
@@ -115,6 +115,7 @@ from .detailedjobout import (
     DetailedJobOutStatus,
     DetailedJobOutTypedDict,
 )
+from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict
 from .embeddingrequest import (
     EmbeddingRequest,
     EmbeddingRequestTypedDict,
@@ -274,7 +275,13 @@ from .listfilesout import ListFilesOut, ListFilesOutTypedDict
 from .metricout import MetricOut, MetricOutTypedDict
 from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict
 from .modellist import Data, DataTypedDict, ModelList, ModelListTypedDict
-from .prediction import Prediction, PredictionType, PredictionTypedDict
+from .ocrimageobject import OCRImageObject, OCRImageObjectTypedDict
+from .ocrpagedimensions import OCRPageDimensions, OCRPageDimensionsTypedDict
+from .ocrpageobject import OCRPageObject, OCRPageObjectTypedDict
+from .ocrrequest import Document, DocumentTypedDict, OCRRequest, OCRRequestTypedDict
+from .ocrresponse import OCRResponse, OCRResponseTypedDict
+from .ocrusageinfo import OCRUsageInfo, OCRUsageInfoTypedDict
+from .prediction import Prediction, PredictionTypedDict
 from .referencechunk import ReferenceChunk, ReferenceChunkType, ReferenceChunkTypedDict
 from .responseformat import ResponseFormat, ResponseFormatTypedDict
 from .responseformats import ResponseFormats
@@ -386,10 +393,6 @@ __all__ = [
     "BatchJobsOut",
     "BatchJobsOutObject",
     "BatchJobsOutTypedDict",
-    "ChatClassificationRequest",
-    "ChatClassificationRequestInputs",
-    "ChatClassificationRequestInputsTypedDict",
-    "ChatClassificationRequestTypedDict",
     "ChatCompletionChoice",
     "ChatCompletionChoiceTypedDict",
     "ChatCompletionRequest",
@@ -406,6 +409,10 @@ __all__ = [
     "ChatCompletionStreamRequestToolChoice",
     "ChatCompletionStreamRequestToolChoiceTypedDict",
     "ChatCompletionStreamRequestTypedDict",
+    "ChatModerationRequest",
+    "ChatModerationRequestInputs",
+    "ChatModerationRequestInputsTypedDict",
+    "ChatModerationRequestTypedDict",
     "CheckpointOut",
     "CheckpointOutTypedDict",
     "ClassificationObject",
@@ -445,6 +452,10 @@ __all__ = [
     "DetailedJobOutRepositoriesTypedDict",
     "DetailedJobOutStatus",
     "DetailedJobOutTypedDict",
+    "Document",
+    "DocumentTypedDict",
+    "DocumentURLChunk",
+    "DocumentURLChunkTypedDict",
     "EmbeddingRequest",
     "EmbeddingRequestTypedDict",
     "EmbeddingResponse",
@@ -568,11 +579,22 @@ __all__ = [
     "ModelCapabilitiesTypedDict",
     "ModelList",
     "ModelListTypedDict",
+    "OCRImageObject",
+    "OCRImageObjectTypedDict",
+    "OCRPageDimensions",
+    "OCRPageDimensionsTypedDict",
+    "OCRPageObject",
+    "OCRPageObjectTypedDict",
+    "OCRRequest",
+    "OCRRequestTypedDict",
+    "OCRResponse",
+    "OCRResponseTypedDict",
+    "OCRUsageInfo",
+    "OCRUsageInfoTypedDict",
     "Object",
     "One",
     "OneTypedDict",
     "Prediction",
-    "PredictionType",
     "PredictionTypedDict",
     "QueryParamStatus",
     "ReferenceChunk",
mistralai/models/assistantmessage.py CHANGED
@@ -26,6 +26,7 @@ class AssistantMessageTypedDict(TypedDict):
     content: NotRequired[Nullable[AssistantMessageContentTypedDict]]
     tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]]
     prefix: NotRequired[bool]
+    r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message."""
     role: NotRequired[AssistantMessageRole]
 
 
@@ -35,6 +36,7 @@ class AssistantMessage(BaseModel):
     tool_calls: OptionalNullable[List[ToolCall]] = UNSET
 
     prefix: Optional[bool] = False
+    r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message."""
 
     role: Optional[AssistantMessageRole] = "assistant"
 
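The change here is documentation only, but the behavior it documents is worth a sketch: a trailing assistant message with `prefix=True` forces the reply to begin with that content (model ID illustrative):

```python
res = client.chat.complete(
    model="mistral-small-latest",  # illustrative model ID
    messages=[
        {"role": "user", "content": "Write a haiku about the sea."},
        # The prefix message conditions the reply to start with this text.
        {"role": "assistant", "content": "Salt wind at dawn,", "prefix": True},
    ],
)
```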
mistralai/models/chatcompletionrequest.py CHANGED
@@ -59,7 +59,7 @@ ChatCompletionRequestToolChoice = TypeAliasType(
 
 
 class ChatCompletionRequestTypedDict(TypedDict):
-    model: Nullable[str]
+    model: str
     r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
     messages: List[MessagesTypedDict]
     r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""
@@ -90,7 +90,7 @@ class ChatCompletionRequestTypedDict(TypedDict):
 
 
 class ChatCompletionRequest(BaseModel):
-    model: Nullable[str]
+    model: str
     r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
 
     messages: List[Messages]
@@ -152,14 +152,7 @@ class ChatCompletionRequest(BaseModel):
             "prediction",
             "safe_prompt",
         ]
-        nullable_fields = [
-            "model",
-            "temperature",
-            "max_tokens",
-            "random_seed",
-            "tools",
-            "n",
-        ]
+        nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"]
         null_default_fields = []
 
         serialized = handler(self)
mistralai/models/chatcompletionstreamrequest.py CHANGED
@@ -63,7 +63,7 @@ ChatCompletionStreamRequestToolChoice = TypeAliasType(
 
 
 class ChatCompletionStreamRequestTypedDict(TypedDict):
-    model: Nullable[str]
+    model: str
     r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
     messages: List[ChatCompletionStreamRequestMessagesTypedDict]
     r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""
@@ -93,7 +93,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict):
 
 
 class ChatCompletionStreamRequest(BaseModel):
-    model: Nullable[str]
+    model: str
     r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
 
     messages: List[ChatCompletionStreamRequestMessages]
@@ -154,14 +154,7 @@ class ChatCompletionStreamRequest(BaseModel):
             "prediction",
             "safe_prompt",
         ]
-        nullable_fields = [
-            "model",
-            "temperature",
-            "max_tokens",
-            "random_seed",
-            "tools",
-            "n",
-        ]
+        nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"]
         null_default_fields = []
 
         serialized = handler(self)
mistralai/models/chatmoderationrequest.py ADDED
@@ -0,0 +1,86 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
+from .systemmessage import SystemMessage, SystemMessageTypedDict
+from .toolmessage import ToolMessage, ToolMessageTypedDict
+from .usermessage import UserMessage, UserMessageTypedDict
+from mistralai.types import BaseModel
+from mistralai.utils import get_discriminator
+import pydantic
+from pydantic import Discriminator, Tag
+from typing import List, Optional, Union
+from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict
+
+
+TwoTypedDict = TypeAliasType(
+    "TwoTypedDict",
+    Union[
+        SystemMessageTypedDict,
+        UserMessageTypedDict,
+        AssistantMessageTypedDict,
+        ToolMessageTypedDict,
+    ],
+)
+
+
+Two = Annotated[
+    Union[
+        Annotated[AssistantMessage, Tag("assistant")],
+        Annotated[SystemMessage, Tag("system")],
+        Annotated[ToolMessage, Tag("tool")],
+        Annotated[UserMessage, Tag("user")],
+    ],
+    Discriminator(lambda m: get_discriminator(m, "role", "role")),
+]
+
+
+OneTypedDict = TypeAliasType(
+    "OneTypedDict",
+    Union[
+        SystemMessageTypedDict,
+        UserMessageTypedDict,
+        AssistantMessageTypedDict,
+        ToolMessageTypedDict,
+    ],
+)
+
+
+One = Annotated[
+    Union[
+        Annotated[AssistantMessage, Tag("assistant")],
+        Annotated[SystemMessage, Tag("system")],
+        Annotated[ToolMessage, Tag("tool")],
+        Annotated[UserMessage, Tag("user")],
+    ],
+    Discriminator(lambda m: get_discriminator(m, "role", "role")),
+]
+
+
+ChatModerationRequestInputsTypedDict = TypeAliasType(
+    "ChatModerationRequestInputsTypedDict",
+    Union[List[OneTypedDict], List[List[TwoTypedDict]]],
+)
+r"""Chat to classify"""
+
+
+ChatModerationRequestInputs = TypeAliasType(
+    "ChatModerationRequestInputs", Union[List[One], List[List[Two]]]
+)
+r"""Chat to classify"""
+
+
+class ChatModerationRequestTypedDict(TypedDict):
+    model: str
+    inputs: ChatModerationRequestInputsTypedDict
+    r"""Chat to classify"""
+    truncate_for_context_length: NotRequired[bool]
+
+
+class ChatModerationRequest(BaseModel):
+    model: str
+
+    inputs: Annotated[ChatModerationRequestInputs, pydantic.Field(alias="input")]
+    r"""Chat to classify"""
+
+    truncate_for_context_length: Optional[bool] = False
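Because `inputs` carries a pydantic alias, the serialized payload uses the wire key `input`. A sketch of constructing the new model directly (model ID illustrative; message dicts validate into the role-discriminated union shown above):

```python
from mistralai import models

req = models.ChatModerationRequest(
    model="mistral-moderation-latest",  # illustrative model ID
    inputs=[{"role": "user", "content": "Message to screen."}],
)
# The alias means the serialized body says "input", not "inputs".
print(req.model_dump(by_alias=True))
```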
mistralai/models/classificationrequest.py CHANGED
@@ -1,11 +1,10 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
+from mistralai.types import BaseModel
 import pydantic
-from pydantic import model_serializer
 from typing import List, Union
-from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict
+from typing_extensions import Annotated, TypeAliasType, TypedDict
 
 
 ClassificationRequestInputsTypedDict = TypeAliasType(
@@ -21,43 +20,15 @@ r"""Text to classify."""
 
 
 class ClassificationRequestTypedDict(TypedDict):
+    model: str
+    r"""ID of the model to use."""
     inputs: ClassificationRequestInputsTypedDict
     r"""Text to classify."""
-    model: NotRequired[Nullable[str]]
 
 
 class ClassificationRequest(BaseModel):
+    model: str
+    r"""ID of the model to use."""
+
     inputs: Annotated[ClassificationRequestInputs, pydantic.Field(alias="input")]
     r"""Text to classify."""
-
-    model: OptionalNullable[str] = UNSET
-
-    @model_serializer(mode="wrap")
-    def serialize_model(self, handler):
-        optional_fields = ["model"]
-        nullable_fields = ["model"]
-        null_default_fields = []
-
-        serialized = handler(self)
-
-        m = {}
-
-        for n, f in self.model_fields.items():
-            k = f.alias or n
-            val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
-
-        return m
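With the custom serializer gone, `ClassificationRequest` is a plain model whose `model` field is required rather than an UNSET-defaulted optional. The corresponding raw-text moderation call now looks like this (a sketch; model ID illustrative, response attributes per the SDK's ClassificationResponse):

```python
res = client.classifiers.moderate(
    model="mistral-moderation-latest",  # illustrative model ID
    inputs=["Some text to screen."],  # serialized to the wire key "input"
)
for item in res.results:  # one ClassificationObject per input, per the SDK models
    print(item.categories)
```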