mistralai 1.10.0__py3-none-any.whl → 1.10.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (98)
  1. mistralai/_hooks/tracing.py +28 -3
  2. mistralai/_version.py +2 -2
  3. mistralai/classifiers.py +13 -1
  4. mistralai/embeddings.py +7 -1
  5. mistralai/extra/README.md +1 -1
  6. mistralai/extra/mcp/auth.py +10 -11
  7. mistralai/extra/mcp/base.py +17 -16
  8. mistralai/extra/mcp/sse.py +13 -15
  9. mistralai/extra/mcp/stdio.py +5 -6
  10. mistralai/extra/observability/otel.py +47 -68
  11. mistralai/extra/run/context.py +33 -43
  12. mistralai/extra/run/result.py +29 -30
  13. mistralai/extra/run/tools.py +8 -9
  14. mistralai/extra/struct_chat.py +15 -8
  15. mistralai/extra/utils/response_format.py +5 -3
  16. mistralai/mistral_jobs.py +31 -5
  17. mistralai/models/__init__.py +30 -1
  18. mistralai/models/agents_api_v1_agents_listop.py +1 -1
  19. mistralai/models/agents_api_v1_conversations_listop.py +1 -1
  20. mistralai/models/audioencoding.py +13 -0
  21. mistralai/models/audioformat.py +19 -0
  22. mistralai/models/batchjobin.py +17 -6
  23. mistralai/models/batchjobout.py +5 -0
  24. mistralai/models/batchrequest.py +48 -0
  25. mistralai/models/classificationrequest.py +37 -3
  26. mistralai/models/embeddingrequest.py +11 -3
  27. mistralai/models/jobs_api_routes_batch_get_batch_jobop.py +40 -3
  28. mistralai/models/toolfilechunk.py +11 -4
  29. mistralai/models/toolreferencechunk.py +13 -4
  30. {mistralai-1.10.0.dist-info → mistralai-1.10.1.dist-info}/METADATA +142 -150
  31. {mistralai-1.10.0.dist-info → mistralai-1.10.1.dist-info}/RECORD +122 -105
  32. {mistralai-1.10.0.dist-info → mistralai-1.10.1.dist-info}/WHEEL +1 -1
  33. mistralai_azure/_version.py +3 -3
  34. mistralai_azure/basesdk.py +15 -5
  35. mistralai_azure/chat.py +59 -98
  36. mistralai_azure/models/__init__.py +50 -3
  37. mistralai_azure/models/chatcompletionrequest.py +16 -4
  38. mistralai_azure/models/chatcompletionstreamrequest.py +16 -4
  39. mistralai_azure/models/httpvalidationerror.py +11 -6
  40. mistralai_azure/models/mistralazureerror.py +26 -0
  41. mistralai_azure/models/no_response_error.py +13 -0
  42. mistralai_azure/models/prediction.py +4 -0
  43. mistralai_azure/models/responseformat.py +4 -2
  44. mistralai_azure/models/responseformats.py +0 -1
  45. mistralai_azure/models/responsevalidationerror.py +25 -0
  46. mistralai_azure/models/sdkerror.py +30 -14
  47. mistralai_azure/models/systemmessage.py +7 -3
  48. mistralai_azure/models/systemmessagecontentchunks.py +21 -0
  49. mistralai_azure/models/thinkchunk.py +35 -0
  50. mistralai_azure/ocr.py +15 -36
  51. mistralai_azure/utils/__init__.py +18 -5
  52. mistralai_azure/utils/eventstreaming.py +10 -0
  53. mistralai_azure/utils/serializers.py +3 -2
  54. mistralai_azure/utils/unmarshal_json_response.py +24 -0
  55. mistralai_gcp/_hooks/types.py +7 -0
  56. mistralai_gcp/_version.py +4 -4
  57. mistralai_gcp/basesdk.py +27 -25
  58. mistralai_gcp/chat.py +75 -98
  59. mistralai_gcp/fim.py +39 -74
  60. mistralai_gcp/httpclient.py +6 -16
  61. mistralai_gcp/models/__init__.py +321 -116
  62. mistralai_gcp/models/assistantmessage.py +1 -1
  63. mistralai_gcp/models/chatcompletionrequest.py +36 -7
  64. mistralai_gcp/models/chatcompletionresponse.py +6 -6
  65. mistralai_gcp/models/chatcompletionstreamrequest.py +36 -7
  66. mistralai_gcp/models/completionresponsestreamchoice.py +1 -1
  67. mistralai_gcp/models/deltamessage.py +1 -1
  68. mistralai_gcp/models/fimcompletionrequest.py +3 -9
  69. mistralai_gcp/models/fimcompletionresponse.py +6 -6
  70. mistralai_gcp/models/fimcompletionstreamrequest.py +3 -9
  71. mistralai_gcp/models/httpvalidationerror.py +11 -6
  72. mistralai_gcp/models/imageurl.py +1 -1
  73. mistralai_gcp/models/jsonschema.py +1 -1
  74. mistralai_gcp/models/mistralgcperror.py +26 -0
  75. mistralai_gcp/models/mistralpromptmode.py +8 -0
  76. mistralai_gcp/models/no_response_error.py +13 -0
  77. mistralai_gcp/models/prediction.py +4 -0
  78. mistralai_gcp/models/responseformat.py +5 -3
  79. mistralai_gcp/models/responseformats.py +0 -1
  80. mistralai_gcp/models/responsevalidationerror.py +25 -0
  81. mistralai_gcp/models/sdkerror.py +30 -14
  82. mistralai_gcp/models/systemmessage.py +7 -3
  83. mistralai_gcp/models/systemmessagecontentchunks.py +21 -0
  84. mistralai_gcp/models/thinkchunk.py +35 -0
  85. mistralai_gcp/models/toolmessage.py +1 -1
  86. mistralai_gcp/models/usageinfo.py +71 -8
  87. mistralai_gcp/models/usermessage.py +1 -1
  88. mistralai_gcp/sdk.py +12 -10
  89. mistralai_gcp/sdkconfiguration.py +0 -7
  90. mistralai_gcp/types/basemodel.py +3 -3
  91. mistralai_gcp/utils/__init__.py +143 -45
  92. mistralai_gcp/utils/datetimes.py +23 -0
  93. mistralai_gcp/utils/enums.py +67 -27
  94. mistralai_gcp/utils/eventstreaming.py +10 -0
  95. mistralai_gcp/utils/forms.py +49 -28
  96. mistralai_gcp/utils/serializers.py +33 -3
  97. mistralai_gcp/utils/unmarshal_json_response.py +24 -0
  98. {mistralai-1.10.0.dist-info → mistralai-1.10.1.dist-info}/licenses/LICENSE +0 -0
mistralai_gcp/models/chatcompletionstreamrequest.py

@@ -2,6 +2,7 @@
 
 from __future__ import annotations
 from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
+from .mistralpromptmode import MistralPromptMode
 from .prediction import Prediction, PredictionTypedDict
 from .responseformat import ResponseFormat, ResponseFormatTypedDict
 from .systemmessage import SystemMessage, SystemMessageTypedDict
@@ -17,8 +18,9 @@ from mistralai_gcp.types import (
     UNSET,
     UNSET_SENTINEL,
 )
-from mistralai_gcp.utils import get_discriminator
+from mistralai_gcp.utils import get_discriminator, validate_open_enum
 from pydantic import Discriminator, Tag, model_serializer
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional, Union
 from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict
 
@@ -57,11 +59,13 @@ ChatCompletionStreamRequestToolChoiceTypedDict = TypeAliasType(
     "ChatCompletionStreamRequestToolChoiceTypedDict",
     Union[ToolChoiceTypedDict, ToolChoiceEnum],
 )
+r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool."""
 
 
 ChatCompletionStreamRequestToolChoice = TypeAliasType(
     "ChatCompletionStreamRequestToolChoice", Union[ToolChoice, ToolChoiceEnum]
 )
+r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool."""
 
 
 class ChatCompletionStreamRequestTypedDict(TypedDict):
@@ -81,16 +85,23 @@ class ChatCompletionStreamRequestTypedDict(TypedDict):
     random_seed: NotRequired[Nullable[int]]
     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
     response_format: NotRequired[ResponseFormatTypedDict]
+    r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide."""
     tools: NotRequired[Nullable[List[ToolTypedDict]]]
+    r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for."""
     tool_choice: NotRequired[ChatCompletionStreamRequestToolChoiceTypedDict]
+    r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool."""
     presence_penalty: NotRequired[float]
-    r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative."""
+    r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative."""
     frequency_penalty: NotRequired[float]
-    r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""
+    r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""
     n: NotRequired[Nullable[int]]
     r"""Number of completions to return for each request, input tokens are only billed once."""
     prediction: NotRequired[PredictionTypedDict]
+    r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content."""
     parallel_tool_calls: NotRequired[bool]
+    r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel."""
+    prompt_mode: NotRequired[Nullable[MistralPromptMode]]
+    r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""
 
 
 class ChatCompletionStreamRequest(BaseModel):
@@ -118,23 +129,33 @@ class ChatCompletionStreamRequest(BaseModel):
     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
 
     response_format: Optional[ResponseFormat] = None
+    r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide."""
 
     tools: OptionalNullable[List[Tool]] = UNSET
+    r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for."""
 
     tool_choice: Optional[ChatCompletionStreamRequestToolChoice] = None
+    r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool."""
 
     presence_penalty: Optional[float] = None
-    r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative."""
+    r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative."""
 
     frequency_penalty: Optional[float] = None
-    r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""
+    r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""
 
     n: OptionalNullable[int] = UNSET
     r"""Number of completions to return for each request, input tokens are only billed once."""
 
     prediction: Optional[Prediction] = None
+    r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content."""
 
     parallel_tool_calls: Optional[bool] = None
+    r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel."""
+
+    prompt_mode: Annotated[
+        OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False))
+    ] = UNSET
+    r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
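Taken together, these hunks give the stream request an open-enum toggle for the reasoning system prompt. A minimal sketch of the new field in use — the model name is illustrative, and dict-shaped messages are assumed to be coerced into the message models as usual for these generated SDKs:

```python
from mistralai_gcp.models import ChatCompletionStreamRequest

req = ChatCompletionStreamRequest(
    model="mistral-small-latest",  # illustrative model name
    messages=[{"role": "user", "content": "Why is the sky blue?"}],
    prompt_mode="reasoning",  # new in 1.10.1; omit or pass None to disable
)
print(req.prompt_mode)  # reasoning
```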
@@ -153,15 +174,23 @@ class ChatCompletionStreamRequest(BaseModel):
             "n",
             "prediction",
             "parallel_tool_calls",
+            "prompt_mode",
+        ]
+        nullable_fields = [
+            "temperature",
+            "max_tokens",
+            "random_seed",
+            "tools",
+            "n",
+            "prompt_mode",
         ]
-        nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"]
         null_default_fields = []
 
         serialized = handler(self)
 
         m = {}
 
-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
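The recurring `self.model_fields` → `type(self).model_fields` rewrite in these serializers tracks a Pydantic deprecation: Pydantic 2.11 began warning when `model_fields` is read from an instance instead of the class. A standalone illustration of the supported spelling:

```python
from pydantic import BaseModel


class Point(BaseModel):
    x: int
    y: int


p = Point(x=1, y=2)
# p.model_fields emits a DeprecationWarning on Pydantic >= 2.11;
# reading it from the class gives the same mapping without the warning.
print(type(p).model_fields.keys())  # dict_keys(['x', 'y'])
```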
mistralai_gcp/models/completionresponsestreamchoice.py

@@ -38,7 +38,7 @@ class CompletionResponseStreamChoice(BaseModel):
 
         m = {}
 
-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai_gcp/models/deltamessage.py

@@ -46,7 +46,7 @@ class DeltaMessage(BaseModel):
 
         m = {}
 
-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai_gcp/models/fimcompletionrequest.py

@@ -27,10 +27,7 @@ r"""Stop generation if this token is detected. Or if one of these tokens is dete
 
 class FIMCompletionRequestTypedDict(TypedDict):
     model: str
-    r"""ID of the model to use. Only compatible for now with:
-    - `codestral-2405`
-    - `codestral-latest`
-    """
+    r"""ID of the model with FIM to use."""
     prompt: str
     r"""The text/code to complete."""
     temperature: NotRequired[Nullable[float]]
@@ -53,10 +50,7 @@ class FIMCompletionRequestTypedDict(TypedDict):
 
 class FIMCompletionRequest(BaseModel):
     model: str
-    r"""ID of the model to use. Only compatible for now with:
-    - `codestral-2405`
-    - `codestral-latest`
-    """
+    r"""ID of the model with FIM to use."""
 
     prompt: str
     r"""The text/code to complete."""
@@ -110,7 +104,7 @@
 
         m = {}
 
-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai_gcp/models/fimcompletionresponse.py

@@ -4,8 +4,8 @@ from __future__ import annotations
 from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict
 from .usageinfo import UsageInfo, UsageInfoTypedDict
 from mistralai_gcp.types import BaseModel
-from typing import List, Optional
-from typing_extensions import NotRequired, TypedDict
+from typing import List
+from typing_extensions import TypedDict
 
 
 class FIMCompletionResponseTypedDict(TypedDict):
@@ -13,8 +13,8 @@ class FIMCompletionResponseTypedDict(TypedDict):
     object: str
     model: str
     usage: UsageInfoTypedDict
-    created: NotRequired[int]
-    choices: NotRequired[List[ChatCompletionChoiceTypedDict]]
+    created: int
+    choices: List[ChatCompletionChoiceTypedDict]
 
 
 class FIMCompletionResponse(BaseModel):
@@ -26,6 +26,6 @@
 
     usage: UsageInfo
 
-    created: Optional[int] = None
+    created: int
 
-    choices: Optional[List[ChatCompletionChoice]] = None
+    choices: List[ChatCompletionChoice]
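Note that `created` and `choices` move from optional to required on `FIMCompletionResponse`, so payloads missing either now fail validation instead of yielding `None`. A sketch, assuming `id` is the model's remaining required field and that `UsageInfo` still accepts the three token counts:

```python
import pydantic

from mistralai_gcp.models import FIMCompletionResponse

try:
    FIMCompletionResponse(
        id="cmpl-123",  # illustrative values
        object="text_completion",
        model="codestral-latest",
        usage={"prompt_tokens": 5, "completion_tokens": 7, "total_tokens": 12},
    )
except pydantic.ValidationError as exc:
    print(exc)  # reports the now-required `created` and `choices` fields
```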
mistralai_gcp/models/fimcompletionstreamrequest.py

@@ -27,10 +27,7 @@ r"""Stop generation if this token is detected. Or if one of these tokens is dete
 
 class FIMCompletionStreamRequestTypedDict(TypedDict):
     model: str
-    r"""ID of the model to use. Only compatible for now with:
-    - `codestral-2405`
-    - `codestral-latest`
-    """
+    r"""ID of the model with FIM to use."""
     prompt: str
     r"""The text/code to complete."""
     temperature: NotRequired[Nullable[float]]
@@ -52,10 +49,7 @@
 
 class FIMCompletionStreamRequest(BaseModel):
     model: str
-    r"""ID of the model to use. Only compatible for now with:
-    - `codestral-2405`
-    - `codestral-latest`
-    """
+    r"""ID of the model with FIM to use."""
 
     prompt: str
     r"""The text/code to complete."""
@@ -108,7 +102,7 @@
 
         m = {}
 
-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai_gcp/models/httpvalidationerror.py

@@ -2,7 +2,8 @@
 
 from __future__ import annotations
 from .validationerror import ValidationError
-from mistralai_gcp import utils
+import httpx
+from mistralai_gcp.models import MistralGcpError
 from mistralai_gcp.types import BaseModel
 from typing import List, Optional
 
@@ -11,11 +12,15 @@ class HTTPValidationErrorData(BaseModel):
     detail: Optional[List[ValidationError]] = None
 
 
-class HTTPValidationError(Exception):
+class HTTPValidationError(MistralGcpError):
     data: HTTPValidationErrorData
 
-    def __init__(self, data: HTTPValidationErrorData):
+    def __init__(
+        self,
+        data: HTTPValidationErrorData,
+        raw_response: httpx.Response,
+        body: Optional[str] = None,
+    ):
+        message = body or raw_response.text
+        super().__init__(message, raw_response, body)
         self.data = data
-
-    def __str__(self) -> str:
-        return utils.marshal_json(self.data, HTTPValidationErrorData)
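Because `HTTPValidationError` now derives from `MistralGcpError`, a handler gets the HTTP context (status code, headers, raw response) alongside the parsed `data`. A hedged sketch of the catch-site pattern; the API call itself is elided, only the error surface comes from this diff:

```python
from mistralai_gcp.models import HTTPValidationError, MistralGcpError

try:
    ...  # some mistralai_gcp API call
except HTTPValidationError as err:
    print(err.status_code, err.data.detail)  # structured 422 details
except MistralGcpError as err:
    print(err.status_code, err.body)  # any other HTTP error response
```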
mistralai_gcp/models/imageurl.py

@@ -32,7 +32,7 @@ class ImageURL(BaseModel):
 
         m = {}
 
-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai_gcp/models/jsonschema.py

@@ -40,7 +40,7 @@ class JSONSchema(BaseModel):
 
         m = {}
 
-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai_gcp/models/mistralgcperror.py (new file)

@@ -0,0 +1,26 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+import httpx
+from typing import Optional
+
+
+class MistralGcpError(Exception):
+    """The base class for all HTTP error responses."""
+
+    message: str
+    status_code: int
+    body: str
+    headers: httpx.Headers
+    raw_response: httpx.Response
+
+    def __init__(
+        self, message: str, raw_response: httpx.Response, body: Optional[str] = None
+    ):
+        self.message = message
+        self.status_code = raw_response.status_code
+        self.body = body if body is not None else raw_response.text
+        self.headers = raw_response.headers
+        self.raw_response = raw_response
+
+    def __str__(self):
+        return self.message
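Everything on the new base class is derived from the `httpx.Response` passed to the constructor, which makes it easy to exercise directly; a minimal sketch (the URL is a placeholder):

```python
import httpx

from mistralai_gcp.models import MistralGcpError

resp = httpx.Response(
    429,
    text="rate limited",
    headers={"Retry-After": "2"},
    request=httpx.Request("POST", "https://example.invalid/v1/chat"),
)
err = MistralGcpError("Too many requests", resp)
print(err.status_code)                 # 429
print(err.body)                        # rate limited
print(err.headers.get("Retry-After"))  # 2
print(err)                             # Too many requests
```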
mistralai_gcp/models/mistralpromptmode.py (new file)

@@ -0,0 +1,8 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from mistralai_gcp.types import UnrecognizedStr
+from typing import Literal, Union
+
+
+MistralPromptMode = Union[Literal["reasoning"], UnrecognizedStr]
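`MistralPromptMode` is an open enum: `"reasoning"` is the only documented member, while `UnrecognizedStr` lets values added server-side round-trip through the SDK instead of failing validation (the request model pairs this with `validate_open_enum` above). For example:

```python
from mistralai_gcp.models import MistralPromptMode

mode: MistralPromptMode = "reasoning"  # the documented member today
print(mode)
```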
mistralai_gcp/models/no_response_error.py (new file)

@@ -0,0 +1,13 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+class NoResponseError(Exception):
+    """Error raised when no HTTP response is received from the server."""
+
+    message: str
+
+    def __init__(self, message: str = "No response received"):
+        self.message = message
+        super().__init__(message)
+
+    def __str__(self):
+        return self.message
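`NoResponseError` is the one error that cannot carry a response, so it deliberately sits outside the `MistralGcpError` hierarchy. A quick sketch of the default message (assuming it is re-exported from `mistralai_gcp.models` like its siblings):

```python
from mistralai_gcp.models import NoResponseError

try:
    raise NoResponseError()
except NoResponseError as err:
    print(err)  # No response received
```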
mistralai_gcp/models/prediction.py

@@ -10,11 +10,15 @@ from typing_extensions import Annotated, NotRequired, TypedDict
 
 
 class PredictionTypedDict(TypedDict):
+    r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content."""
+
     type: Literal["content"]
     content: NotRequired[str]
 
 
 class Prediction(BaseModel):
+    r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content."""
+
     TYPE: Annotated[
         Annotated[
             Optional[Literal["content"]], AfterValidator(validate_const("content"))
mistralai_gcp/models/responseformat.py

@@ -16,14 +16,16 @@ from typing_extensions import NotRequired, TypedDict
 
 
 class ResponseFormatTypedDict(TypedDict):
+    r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide."""
+
     type: NotRequired[ResponseFormats]
-    r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message."""
     json_schema: NotRequired[Nullable[JSONSchemaTypedDict]]
 
 
 class ResponseFormat(BaseModel):
+    r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide."""
+
     type: Optional[ResponseFormats] = None
-    r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message."""
 
     json_schema: OptionalNullable[JSONSchema] = UNSET
 
@@ -37,7 +39,7 @@ class ResponseFormat(BaseModel):
 
         m = {}
 
-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mistralai_gcp/models/responseformats.py

@@ -5,4 +5,3 @@ from typing import Literal
 
 
 ResponseFormats = Literal["text", "json_object", "json_schema"]
-r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message."""
mistralai_gcp/models/responsevalidationerror.py (new file)

@@ -0,0 +1,25 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+import httpx
+from typing import Optional
+
+from mistralai_gcp.models import MistralGcpError
+
+
+class ResponseValidationError(MistralGcpError):
+    """Error raised when there is a type mismatch between the response data and the expected Pydantic model."""
+
+    def __init__(
+        self,
+        message: str,
+        raw_response: httpx.Response,
+        cause: Exception,
+        body: Optional[str] = None,
+    ):
+        message = f"{message}: {cause}"
+        super().__init__(message, raw_response, body)
+
+    @property
+    def cause(self):
+        """Normally the Pydantic ValidationError"""
+        return self.__cause__
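`cause` is a property over `__cause__`, so it is populated by exception chaining (`raise ... from ...`) rather than stored on the instance. A sketch with a stand-in for the usual `pydantic.ValidationError`:

```python
import httpx

from mistralai_gcp.models import ResponseValidationError

resp = httpx.Response(
    200, text="{}", request=httpx.Request("GET", "https://example.invalid")
)
try:
    try:
        raise ValueError("unexpected shape")  # stand-in for pydantic.ValidationError
    except ValueError as inner:
        raise ResponseValidationError(
            "Response validation failed", resp, inner
        ) from inner
except ResponseValidationError as err:
    print(err)        # Response validation failed: unexpected shape
    print(err.cause)  # unexpected shape
```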
mistralai_gcp/models/sdkerror.py

@@ -1,22 +1,38 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
-from dataclasses import dataclass
-from typing import Optional
 import httpx
+from typing import Optional
+
+from mistralai_gcp.models import MistralGcpError
+
+MAX_MESSAGE_LEN = 10_000
+
+
+class SDKError(MistralGcpError):
+    """The fallback error class if no more specific error class is matched."""
+
+    def __init__(
+        self, message: str, raw_response: httpx.Response, body: Optional[str] = None
+    ):
+        body_display = body or raw_response.text or '""'
 
+        if message:
+            message += ": "
+        message += f"Status {raw_response.status_code}"
 
-@dataclass
-class SDKError(Exception):
-    """Represents an error returned by the API."""
+        headers = raw_response.headers
+        content_type = headers.get("content-type", '""')
+        if content_type != "application/json":
+            if " " in content_type:
+                content_type = f'"{content_type}"'
+            message += f" Content-Type {content_type}"
 
-    message: str
-    status_code: int = -1
-    body: str = ""
-    raw_response: Optional[httpx.Response] = None
+        if len(body_display) > MAX_MESSAGE_LEN:
+            truncated = body_display[:MAX_MESSAGE_LEN]
+            remaining = len(body_display) - MAX_MESSAGE_LEN
+            body_display = f"{truncated}...and {remaining} more chars"
 
-    def __str__(self):
-        body = ""
-        if len(self.body) > 0:
-            body = f"\n{self.body}"
+        message += f". Body: {body_display}"
+        message = message.strip()
 
-        return f"{self.message}: Status {self.status_code}{body}"
+        super().__init__(message, raw_response, body)
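The rewritten constructor folds the status code, a non-JSON content type, and a truncated body into the message up front, instead of formatting lazily in `__str__`. A minimal sketch of the resulting message:

```python
import httpx

from mistralai_gcp.models import SDKError

resp = httpx.Response(
    500,
    headers={"content-type": "text/plain; charset=utf-8"},
    text="upstream exploded",
    request=httpx.Request("POST", "https://example.invalid"),
)
err = SDKError("API error occurred", resp)
print(err)
# API error occurred: Status 500 Content-Type "text/plain; charset=utf-8". Body: upstream exploded
```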
mistralai_gcp/models/systemmessage.py

@@ -1,19 +1,23 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from .textchunk import TextChunk, TextChunkTypedDict
+from .systemmessagecontentchunks import (
+    SystemMessageContentChunks,
+    SystemMessageContentChunksTypedDict,
+)
 from mistralai_gcp.types import BaseModel
 from typing import List, Literal, Optional, Union
 from typing_extensions import NotRequired, TypeAliasType, TypedDict
 
 
 SystemMessageContentTypedDict = TypeAliasType(
-    "SystemMessageContentTypedDict", Union[str, List[TextChunkTypedDict]]
+    "SystemMessageContentTypedDict",
+    Union[str, List[SystemMessageContentChunksTypedDict]],
 )
 
 
 SystemMessageContent = TypeAliasType(
-    "SystemMessageContent", Union[str, List[TextChunk]]
+    "SystemMessageContent", Union[str, List[SystemMessageContentChunks]]
 )
 
 
mistralai_gcp/models/systemmessagecontentchunks.py (new file)

@@ -0,0 +1,21 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from .textchunk import TextChunk, TextChunkTypedDict
+from .thinkchunk import ThinkChunk, ThinkChunkTypedDict
+from mistralai_gcp.utils import get_discriminator
+from pydantic import Discriminator, Tag
+from typing import Union
+from typing_extensions import Annotated, TypeAliasType
+
+
+SystemMessageContentChunksTypedDict = TypeAliasType(
+    "SystemMessageContentChunksTypedDict",
+    Union[TextChunkTypedDict, ThinkChunkTypedDict],
+)
+
+
+SystemMessageContentChunks = Annotated[
+    Union[Annotated[TextChunk, Tag("text")], Annotated[ThinkChunk, Tag("thinking")]],
+    Discriminator(lambda m: get_discriminator(m, "type", "type")),
+]
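The `Tag`/`Discriminator` pair routes each chunk to the right model by its `type` key, which is what lets system message content now mix text and thinking chunks. A hedged sketch (assuming `SystemMessage` coerces dict-shaped chunks as these generated models usually do):

```python
from mistralai_gcp.models import SystemMessage

msg = SystemMessage(
    content=[
        {"type": "text", "text": "You are a careful assistant."},
        {"type": "thinking", "thinking": [{"type": "text", "text": "plan first"}]},
    ]
)
print([type(c).__name__ for c in msg.content])  # ['TextChunk', 'ThinkChunk']
```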
mistralai_gcp/models/thinkchunk.py (new file)

@@ -0,0 +1,35 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict
+from .textchunk import TextChunk, TextChunkTypedDict
+from mistralai_gcp.types import BaseModel
+from typing import List, Literal, Optional, Union
+from typing_extensions import NotRequired, TypeAliasType, TypedDict
+
+
+ThinkingTypedDict = TypeAliasType(
+    "ThinkingTypedDict", Union[ReferenceChunkTypedDict, TextChunkTypedDict]
+)
+
+
+Thinking = TypeAliasType("Thinking", Union[ReferenceChunk, TextChunk])
+
+
+ThinkChunkType = Literal["thinking"]
+
+
+class ThinkChunkTypedDict(TypedDict):
+    thinking: List[ThinkingTypedDict]
+    closed: NotRequired[bool]
+    r"""Whether the thinking chunk is closed or not. Currently only used for prefixing."""
+    type: NotRequired[ThinkChunkType]
+
+
+class ThinkChunk(BaseModel):
+    thinking: List[Thinking]
+
+    closed: Optional[bool] = None
+    r"""Whether the thinking chunk is closed or not. Currently only used for prefixing."""
+
+    type: Optional[ThinkChunkType] = "thinking"
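`ThinkChunk` itself is a plain model: `type` defaults to `"thinking"`, `closed` is only set when prefixing, and `thinking` holds the nested reference/text chunks:

```python
from mistralai_gcp.models import ThinkChunk

chunk = ThinkChunk(thinking=[{"type": "text", "text": "restate the problem"}])
print(chunk.type)    # thinking
print(chunk.closed)  # None
```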
mistralai_gcp/models/toolmessage.py

@@ -51,7 +51,7 @@ class ToolMessage(BaseModel):
 
         m = {}
 
-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)