mistralai 1.0.0rc1__py3-none-any.whl → 1.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (114)
  1. mistralai/agents.py +434 -0
  2. mistralai/basesdk.py +43 -6
  3. mistralai/chat.py +29 -34
  4. mistralai/client.py +1 -1
  5. mistralai/embeddings.py +4 -4
  6. mistralai/files.py +10 -10
  7. mistralai/fim.py +17 -18
  8. mistralai/fine_tuning.py +10 -849
  9. mistralai/jobs.py +844 -0
  10. mistralai/models/__init__.py +6 -4
  11. mistralai/models/agentscompletionrequest.py +96 -0
  12. mistralai/models/agentscompletionstreamrequest.py +92 -0
  13. mistralai/models/assistantmessage.py +4 -9
  14. mistralai/models/chatcompletionchoice.py +4 -15
  15. mistralai/models/chatcompletionrequest.py +25 -30
  16. mistralai/models/chatcompletionstreamrequest.py +25 -30
  17. mistralai/models/completionresponsestreamchoice.py +4 -9
  18. mistralai/models/delete_model_v1_models_model_id_deleteop.py +2 -0
  19. mistralai/models/deltamessage.py +7 -12
  20. mistralai/models/detailedjobout.py +4 -9
  21. mistralai/models/embeddingrequest.py +12 -17
  22. mistralai/models/eventout.py +4 -9
  23. mistralai/models/fileschema.py +4 -9
  24. mistralai/models/fimcompletionrequest.py +19 -24
  25. mistralai/models/fimcompletionstreamrequest.py +19 -24
  26. mistralai/models/ftmodelout.py +4 -9
  27. mistralai/models/functioncall.py +9 -3
  28. mistralai/models/githubrepositoryin.py +4 -9
  29. mistralai/models/githubrepositoryout.py +4 -9
  30. mistralai/models/httpvalidationerror.py +1 -1
  31. mistralai/models/jobin.py +4 -9
  32. mistralai/models/jobmetadataout.py +4 -9
  33. mistralai/models/jobout.py +4 -9
  34. mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py +2 -0
  35. mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py +1 -59
  36. mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py +4 -9
  37. mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py +2 -0
  38. mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py +2 -0
  39. mistralai/models/legacyjobmetadataout.py +4 -9
  40. mistralai/models/metricout.py +4 -9
  41. mistralai/models/modelcard.py +4 -9
  42. mistralai/models/retrieve_model_v1_models_model_id_getop.py +2 -0
  43. mistralai/models/retrievefileout.py +4 -9
  44. mistralai/models/security.py +4 -4
  45. mistralai/models/systemmessage.py +6 -6
  46. mistralai/models/toolmessage.py +4 -9
  47. mistralai/models/trainingparameters.py +4 -9
  48. mistralai/models/trainingparametersin.py +4 -9
  49. mistralai/models/updateftmodelin.py +4 -9
  50. mistralai/models/uploadfileout.py +4 -9
  51. mistralai/models/usermessage.py +6 -6
  52. mistralai/models/validationerror.py +6 -6
  53. mistralai/models/wandbintegration.py +4 -9
  54. mistralai/models/wandbintegrationout.py +4 -9
  55. mistralai/models_.py +24 -24
  56. mistralai/sdk.py +14 -6
  57. mistralai/sdkconfiguration.py +5 -4
  58. mistralai/types/basemodel.py +10 -6
  59. mistralai/utils/__init__.py +4 -0
  60. mistralai/utils/eventstreaming.py +8 -9
  61. mistralai/utils/logger.py +16 -0
  62. mistralai/utils/retries.py +2 -2
  63. mistralai/utils/security.py +5 -2
  64. {mistralai-1.0.0rc1.dist-info → mistralai-1.0.1.dist-info}/METADATA +153 -69
  65. {mistralai-1.0.0rc1.dist-info → mistralai-1.0.1.dist-info}/RECORD +114 -107
  66. mistralai_azure/basesdk.py +42 -4
  67. mistralai_azure/chat.py +15 -20
  68. mistralai_azure/models/__init__.py +3 -3
  69. mistralai_azure/models/assistantmessage.py +4 -9
  70. mistralai_azure/models/chatcompletionchoice.py +4 -15
  71. mistralai_azure/models/chatcompletionrequest.py +21 -26
  72. mistralai_azure/models/chatcompletionstreamrequest.py +21 -26
  73. mistralai_azure/models/completionresponsestreamchoice.py +4 -9
  74. mistralai_azure/models/deltamessage.py +7 -12
  75. mistralai_azure/models/functioncall.py +9 -3
  76. mistralai_azure/models/httpvalidationerror.py +1 -1
  77. mistralai_azure/models/systemmessage.py +6 -6
  78. mistralai_azure/models/toolmessage.py +4 -9
  79. mistralai_azure/models/usermessage.py +6 -6
  80. mistralai_azure/models/validationerror.py +6 -6
  81. mistralai_azure/sdk.py +7 -2
  82. mistralai_azure/sdkconfiguration.py +5 -4
  83. mistralai_azure/types/basemodel.py +10 -6
  84. mistralai_azure/utils/__init__.py +4 -0
  85. mistralai_azure/utils/eventstreaming.py +8 -9
  86. mistralai_azure/utils/logger.py +16 -0
  87. mistralai_azure/utils/retries.py +2 -2
  88. mistralai_gcp/basesdk.py +42 -4
  89. mistralai_gcp/chat.py +12 -17
  90. mistralai_gcp/fim.py +12 -13
  91. mistralai_gcp/models/__init__.py +3 -3
  92. mistralai_gcp/models/assistantmessage.py +4 -9
  93. mistralai_gcp/models/chatcompletionchoice.py +4 -15
  94. mistralai_gcp/models/chatcompletionrequest.py +23 -28
  95. mistralai_gcp/models/chatcompletionstreamrequest.py +23 -28
  96. mistralai_gcp/models/completionresponsestreamchoice.py +4 -9
  97. mistralai_gcp/models/deltamessage.py +7 -12
  98. mistralai_gcp/models/fimcompletionrequest.py +19 -24
  99. mistralai_gcp/models/fimcompletionstreamrequest.py +19 -24
  100. mistralai_gcp/models/functioncall.py +9 -3
  101. mistralai_gcp/models/httpvalidationerror.py +1 -1
  102. mistralai_gcp/models/systemmessage.py +6 -6
  103. mistralai_gcp/models/toolmessage.py +4 -9
  104. mistralai_gcp/models/usermessage.py +6 -6
  105. mistralai_gcp/models/validationerror.py +6 -6
  106. mistralai_gcp/sdk.py +9 -0
  107. mistralai_gcp/sdkconfiguration.py +5 -4
  108. mistralai_gcp/types/basemodel.py +10 -6
  109. mistralai_gcp/utils/__init__.py +4 -0
  110. mistralai_gcp/utils/eventstreaming.py +8 -9
  111. mistralai_gcp/utils/logger.py +16 -0
  112. mistralai_gcp/utils/retries.py +2 -2
  113. {mistralai-1.0.0rc1.dist-info → mistralai-1.0.1.dist-info}/LICENSE +0 -0
  114. {mistralai-1.0.0rc1.dist-info → mistralai-1.0.1.dist-info}/WHEEL +0 -0
mistralai_azure/sdk.py CHANGED
@@ -3,8 +3,7 @@
 from typing import Any, Callable, Dict, Optional, Union
 
 import httpx
-import mistralai_azure.utils as utils
-from mistralai_azure import models
+from mistralai_azure import models, utils
 from mistralai_azure._hooks import SDKHooks
 from mistralai_azure.chat import Chat
 from mistralai_azure.types import Nullable
@@ -12,6 +11,7 @@ from mistralai_azure.types import Nullable
 from .basesdk import BaseSDK
 from .httpclient import AsyncHttpClient, HttpClient
 from .sdkconfiguration import SDKConfiguration
+from .utils.logger import Logger, NoOpLogger
 from .utils.retries import RetryConfig
 
 
@@ -29,6 +29,7 @@ class MistralAzure(BaseSDK):
         client: Optional[HttpClient] = None,
         async_client: Optional[AsyncHttpClient] = None,
         retry_config: Optional[Nullable[RetryConfig]] = None,
+        debug_logger: Optional[Logger] = None,
     ) -> None:
         r"""Instantiates the SDK configuring it with the provided parameters.
 
@@ -60,6 +61,9 @@ class MistralAzure(BaseSDK):
             type(async_client), AsyncHttpClient
         ), "The provided async_client must implement the AsyncHttpClient protocol."
 
+        if debug_logger is None:
+            debug_logger = NoOpLogger()
+
         security: Any = None
         if callable(azure_api_key):
             security = lambda: models.Security( # pylint: disable=unnecessary-lambda-assignment
@@ -81,6 +85,7 @@ class MistralAzure(BaseSDK):
                 server_url=server_url,
                 server=None,
                 retry_config=retry_config,
+                debug_logger=debug_logger,
             ),
         )
 
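For users of the Azure SDK, the practical effect of this change is a new optional `debug_logger` constructor argument; when it is omitted, the `NoOpLogger` keeps the client silent as before. A minimal sketch of opting in, assuming valid credentials and an endpoint (the key and endpoint values are placeholders, and `azure_endpoint` is assumed to be the constructor parameter name):

```python
import logging

from mistralai_azure import MistralAzure

logging.basicConfig(level=logging.DEBUG)

sdk = MistralAzure(
    azure_api_key="AZURE_API_KEY",  # placeholder credential
    azure_endpoint="https://example.inference.azure.com",  # placeholder endpoint (assumed parameter name)
    debug_logger=logging.getLogger("mistralai_azure"),  # any object with a logging-style .debug()
)
```
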
mistralai_azure/sdkconfiguration.py CHANGED
@@ -3,7 +3,7 @@
 
 from ._hooks import SDKHooks
 from .httpclient import AsyncHttpClient, HttpClient
-from .utils import RetryConfig, remove_suffix
+from .utils import Logger, RetryConfig, remove_suffix
 from dataclasses import dataclass
 from mistralai_azure import models
 from mistralai_azure.types import OptionalNullable, UNSET
@@ -23,14 +23,15 @@ SERVERS = {
 class SDKConfiguration:
     client: HttpClient
     async_client: AsyncHttpClient
+    debug_logger: Logger
     security: Optional[Union[models.Security,Callable[[], models.Security]]] = None
     server_url: Optional[str] = ""
     server: Optional[str] = ""
     language: str = "python"
     openapi_doc_version: str = "0.0.2"
-    sdk_version: str = "1.0.0rc1"
-    gen_version: str = "2.382.2"
-    user_agent: str = "speakeasy-sdk/python 1.0.0rc1 2.382.2 0.0.2 mistralai_azure"
+    sdk_version: str = "1.0.0-rc.4"
+    gen_version: str = "2.390.6"
+    user_agent: str = "speakeasy-sdk/python 1.0.0-rc.4 2.390.6 0.0.2 mistralai_azure"
     retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET)
     timeout_ms: Optional[int] = None
 
mistralai_azure/types/basemodel.py CHANGED
@@ -2,8 +2,8 @@
 
 from pydantic import ConfigDict, model_serializer
 from pydantic import BaseModel as PydanticBaseModel
-from typing import Literal, Optional, TypeVar, Union, NewType
-from typing_extensions import TypeAliasType
+from typing import TYPE_CHECKING, Literal, Optional, TypeVar, Union, NewType
+from typing_extensions import TypeAliasType, TypeAlias
 
 
 class BaseModel(PydanticBaseModel):
@@ -26,10 +26,14 @@ UNSET_SENTINEL = "~?~unset~?~sentinel~?~"
 
 
 T = TypeVar("T")
-Nullable = TypeAliasType("Nullable", Union[T, None], type_params=(T,))
-OptionalNullable = TypeAliasType(
-    "OptionalNullable", Union[Optional[Nullable[T]], Unset], type_params=(T,)
-)
+if TYPE_CHECKING:
+    Nullable: TypeAlias = Union[T, None]
+    OptionalNullable: TypeAlias = Union[Optional[Nullable[T]], Unset]
+else:
+    Nullable = TypeAliasType("Nullable", Union[T, None], type_params=(T,))
+    OptionalNullable = TypeAliasType(
+        "OptionalNullable", Union[Optional[Nullable[T]], Unset], type_params=(T,)
+    )
 
 UnrecognizedInt = NewType("UnrecognizedInt", int)
 UnrecognizedStr = NewType("UnrecognizedStr", str)
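The `TYPE_CHECKING` split matters mostly for static analysis: type checkers now see `Nullable` and `OptionalNullable` as plain `TypeAlias` unions, while the runtime keeps the `TypeAliasType` definitions. A brief sketch of how the aliases read from calling code (variable names here are purely illustrative):

```python
from mistralai_azure.types import Nullable, OptionalNullable, UNSET

# Nullable[str] is Union[str, None]: the field is always sent, but may be explicitly null.
model: Nullable[str] = None

# OptionalNullable[int] additionally allows UNSET, meaning "do not send the field at all".
max_tokens: OptionalNullable[int] = UNSET
```
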
mistralai_azure/utils/__init__.py CHANGED
@@ -34,6 +34,7 @@ from .serializers import (
 )
 from .url import generate_url, template_url, remove_suffix
 from .values import get_global_from_env, match_content_type, match_status_codes, match_response
+from .logger import Logger, get_body_content, NoOpLogger
 
 __all__ = [
     "BackoffStrategy",
@@ -41,6 +42,7 @@ __all__ = [
     "find_metadata",
     "FormMetadata",
     "generate_url",
+    "get_body_content",
     "get_discriminator",
     "get_global_from_env",
     "get_headers",
@@ -49,11 +51,13 @@ __all__ = [
     "get_response_headers",
     "get_security",
     "HeaderMetadata",
+    "Logger",
     "marshal_json",
     "match_content_type",
     "match_status_codes",
     "match_response",
     "MultipartFormMetadata",
+    "NoOpLogger",
     "OpenEnumMeta",
     "PathParamMetadata",
     "QueryParamMetadata",
mistralai_azure/utils/eventstreaming.py CHANGED
@@ -147,15 +147,14 @@ def _parse_event(
                 data = data[:-1]
             event.data = data
 
-            if (
-                data.isnumeric()
-                or data == "true"
-                or data == "false"
-                or data == "null"
-                or data.startswith("{")
-                or data.startswith("[")
-                or data.startswith('"')
-            ):
+            data_is_primitive = (
+                data.isnumeric() or data == "true" or data == "false" or data == "null"
+            )
+            data_is_json = (
+                data.startswith("{") or data.startswith("[") or data.startswith('"')
+            )
+
+            if data_is_primitive or data_is_json:
                 try:
                     event.data = json.loads(data)
                 except Exception:
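The refactor is behavior-preserving: an SSE `data:` payload is only passed to `json.loads` when it looks like a JSON document or primitive, and anything else (or anything that fails to parse) stays a plain string. A standalone illustration of that predicate, not the SDK's internal function:

```python
import json

def decode_sse_data(data: str):
    # Mirrors the check above: JSON-looking payloads are decoded, the rest kept as text.
    data_is_primitive = data.isnumeric() or data in ("true", "false", "null")
    data_is_json = data.startswith("{") or data.startswith("[") or data.startswith('"')
    if data_is_primitive or data_is_json:
        try:
            return json.loads(data)
        except Exception:
            return data
    return data

print(decode_sse_data('{"content": "Hello"}'))  # -> {'content': 'Hello'}
print(decode_sse_data("DONE"))                  # -> 'DONE' (left as a string)
```
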
mistralai_azure/utils/logger.py ADDED
@@ -0,0 +1,16 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+import httpx
+from typing import Any, Protocol
+
+class Logger(Protocol):
+    def debug(self, msg: str, *args: Any, **kwargs: Any) -> None:
+        pass
+
+class NoOpLogger:
+    def debug(self, msg: str, *args: Any, **kwargs: Any) -> None:
+        pass
+
+def get_body_content(req: httpx.Request) -> str:
+    return "<streaming body>" if not hasattr(req, "_content") else str(req.content)
+
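Because `Logger` is a structural `Protocol`, anything exposing a compatible `.debug(msg, *args, **kwargs)` method satisfies it, including `logging.Logger` from the standard library. A hedged sketch of a custom implementation that simply captures messages (the class name is illustrative):

```python
from typing import Any, List

class CapturingLogger:
    """Collects the SDK's debug lines instead of printing them."""

    def __init__(self) -> None:
        self.records: List[str] = []

    def debug(self, msg: str, *args: Any, **kwargs: Any) -> None:
        # The SDK uses %-style lazy formatting, e.g. logger.debug("URL: %s", url).
        self.records.append(msg % args if args else msg)
```
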
mistralai_azure/utils/retries.py CHANGED
@@ -76,7 +76,7 @@ def retry(func, retries: Retries):
 
                     status_major = res.status_code / 100
 
-                    if status_major >= code_range and status_major < code_range + 1:
+                    if code_range <= status_major < code_range + 1:
                         raise TemporaryError(res)
                 else:
                     parsed_code = int(code)
@@ -125,7 +125,7 @@ async def retry_async(func, retries: Retries):
 
                     status_major = res.status_code / 100
 
-                    if status_major >= code_range and status_major < code_range + 1:
+                    if code_range <= status_major < code_range + 1:
                         raise TemporaryError(res)
                 else:
                     parsed_code = int(code)
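The chained comparison is a pure readability change. For a wildcard retry code such as `"5XX"` (assumed here to be the usual Speakeasy convention), `code_range` is 5 and the check matches any 5xx status. A quick standalone check of the equivalence:

```python
def in_code_range(status_code: int, code_range: int) -> bool:
    status_major = status_code / 100
    # Identical to: status_major >= code_range and status_major < code_range + 1
    return code_range <= status_major < code_range + 1

assert in_code_range(503, 5)      # 5 <= 5.03 < 6
assert not in_code_range(429, 5)  # 4.29 falls outside the 5xx band
```
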
mistralai_gcp/basesdk.py CHANGED
@@ -2,10 +2,9 @@
 
 from .sdkconfiguration import SDKConfiguration
 import httpx
-from mistralai_gcp import models
+from mistralai_gcp import models, utils
 from mistralai_gcp._hooks import AfterErrorContext, AfterSuccessContext, BeforeRequestContext
-import mistralai_gcp.utils as utils
-from mistralai_gcp.utils import RetryConfig, SerializedRequestBody
+from mistralai_gcp.utils import RetryConfig, SerializedRequestBody, get_body_content
 from typing import Callable, List, Optional, Tuple
 
 class BaseSDK:
@@ -117,6 +116,7 @@ class BaseSDK:
         retry_config: Optional[Tuple[RetryConfig, List[str]]] = None,
     ) -> httpx.Response:
         client = self.sdk_configuration.client
+        logger = self.sdk_configuration.debug_logger
 
         def do():
             http_res = None
@@ -124,26 +124,45 @@ class BaseSDK:
                 req = self.sdk_configuration.get_hooks().before_request(
                     BeforeRequestContext(hook_ctx), request
                 )
+                logger.debug(
+                    "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s",
+                    req.method,
+                    req.url,
+                    req.headers,
+                    get_body_content(req)
+                )
                 http_res = client.send(req, stream=stream)
             except Exception as e:
                 _, e = self.sdk_configuration.get_hooks().after_error(
                     AfterErrorContext(hook_ctx), None, e
                 )
                 if e is not None:
+                    logger.debug("Request Exception", exc_info=True)
                     raise e
 
             if http_res is None:
+                logger.debug("Raising no response SDK error")
                 raise models.SDKError("No response received")
 
+            logger.debug(
+                "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s",
+                http_res.status_code,
+                http_res.url,
+                http_res.headers,
+                "<streaming response>" if stream else http_res.text
+            )
+
             if utils.match_status_codes(error_status_codes, http_res.status_code):
                 result, err = self.sdk_configuration.get_hooks().after_error(
                     AfterErrorContext(hook_ctx), http_res, None
                 )
                 if err is not None:
+                    logger.debug("Request Exception", exc_info=True)
                     raise err
                 if result is not None:
                     http_res = result
                 else:
+                    logger.debug("Raising unexpected SDK error")
                     raise models.SDKError("Unexpected error occurred")
 
             return http_res
@@ -169,33 +188,52 @@ class BaseSDK:
         retry_config: Optional[Tuple[RetryConfig, List[str]]] = None,
     ) -> httpx.Response:
         client = self.sdk_configuration.async_client
-
+        logger = self.sdk_configuration.debug_logger
         async def do():
             http_res = None
             try:
                 req = self.sdk_configuration.get_hooks().before_request(
                     BeforeRequestContext(hook_ctx), request
                 )
+                logger.debug(
+                    "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s",
+                    req.method,
+                    req.url,
+                    req.headers,
+                    get_body_content(req)
+                )
                 http_res = await client.send(req, stream=stream)
             except Exception as e:
                 _, e = self.sdk_configuration.get_hooks().after_error(
                     AfterErrorContext(hook_ctx), None, e
                 )
                 if e is not None:
+                    logger.debug("Request Exception", exc_info=True)
                     raise e
 
             if http_res is None:
+                logger.debug("Raising no response SDK error")
                 raise models.SDKError("No response received")
 
+            logger.debug(
+                "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s",
+                http_res.status_code,
+                http_res.url,
+                http_res.headers,
+                "<streaming response>" if stream else http_res.text
+            )
+
             if utils.match_status_codes(error_status_codes, http_res.status_code):
                 result, err = self.sdk_configuration.get_hooks().after_error(
                     AfterErrorContext(hook_ctx), http_res, None
                 )
                 if err is not None:
+                    logger.debug("Request Exception", exc_info=True)
                     raise err
                 if result is not None:
                     http_res = result
                 else:
+                    logger.debug("Raising unexpected SDK error")
                     raise models.SDKError("Unexpected error occurred")
 
             return http_res
mistralai_gcp/chat.py CHANGED
@@ -1,10 +1,9 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from .basesdk import BaseSDK
-from mistralai_gcp import models
+from mistralai_gcp import models, utils
 from mistralai_gcp._hooks import HookContext
-from mistralai_gcp.types import OptionalNullable, UNSET
-import mistralai_gcp.utils as utils
+from mistralai_gcp.types import Nullable, OptionalNullable, UNSET
 from mistralai_gcp.utils import eventstreaming
 from typing import Any, AsyncGenerator, Generator, List, Optional, Union
 
@@ -14,8 +13,8 @@ class Chat(BaseSDK):
 
     def stream(
         self, *,
+        model: Nullable[str],
         messages: Union[List[models.Messages], List[models.MessagesTypedDict]],
-        model: OptionalNullable[str] = UNSET,
         temperature: Optional[float] = 0.7,
         top_p: Optional[float] = 1,
         max_tokens: OptionalNullable[int] = UNSET,
@@ -34,8 +33,8 @@ class Chat(BaseSDK):
 
         Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
 
-        :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content.
         :param model: ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions.
+        :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content.
         :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
         :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
         :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.
@@ -127,8 +126,8 @@ class Chat(BaseSDK):
 
     async def stream_async(
         self, *,
+        model: Nullable[str],
         messages: Union[List[models.Messages], List[models.MessagesTypedDict]],
-        model: OptionalNullable[str] = UNSET,
         temperature: Optional[float] = 0.7,
         top_p: Optional[float] = 1,
         max_tokens: OptionalNullable[int] = UNSET,
@@ -147,8 +146,8 @@ class Chat(BaseSDK):
 
         Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
 
-        :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content.
         :param model: ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions.
+        :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content.
         :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
         :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
        :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.
@@ -238,10 +237,10 @@
 
 
 
-    def create(
+    def complete(
         self, *,
+        model: Nullable[str],
         messages: Union[List[models.ChatCompletionRequestMessages], List[models.ChatCompletionRequestMessagesTypedDict]],
-        model: OptionalNullable[str] = UNSET,
         temperature: Optional[float] = 0.7,
         top_p: Optional[float] = 1,
         max_tokens: OptionalNullable[int] = UNSET,
@@ -258,10 +257,8 @@
     ) -> Optional[models.ChatCompletionResponse]:
         r"""Chat Completion
 
-        Chat Completion
-
-        :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content.
         :param model: ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions.
+        :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content.
         :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
         :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
         :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.
@@ -350,10 +347,10 @@
 
 
 
-    async def create_async(
+    async def complete_async(
         self, *,
+        model: Nullable[str],
         messages: Union[List[models.ChatCompletionRequestMessages], List[models.ChatCompletionRequestMessagesTypedDict]],
-        model: OptionalNullable[str] = UNSET,
         temperature: Optional[float] = 0.7,
         top_p: Optional[float] = 1,
         max_tokens: OptionalNullable[int] = UNSET,
@@ -370,10 +367,8 @@
     ) -> Optional[models.ChatCompletionResponse]:
         r"""Chat Completion
 
-        Chat Completion
-
-        :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content.
         :param model: ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions.
+        :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content.
         :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
         :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
         :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.
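For callers of the GCP SDK, the visible changes are the rename of `chat.create`/`chat.create_async` to `chat.complete`/`chat.complete_async` and `model` becoming a required keyword argument. A rough usage sketch, assuming an already constructed mistralai_gcp client named `sdk` and an illustrative model name:

```python
res = sdk.chat.complete(
    model="mistral-large",  # illustrative; use a model available to your project
    messages=[{"role": "user", "content": "Write a haiku about diffs."}],
)
if res is not None and res.choices:
    print(res.choices[0].message.content)
```
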
mistralai_gcp/fim.py CHANGED
@@ -1,10 +1,9 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from .basesdk import BaseSDK
-from mistralai_gcp import models
+from mistralai_gcp import models, utils
 from mistralai_gcp._hooks import HookContext
-from mistralai_gcp.types import OptionalNullable, UNSET
-import mistralai_gcp.utils as utils
+from mistralai_gcp.types import Nullable, OptionalNullable, UNSET
 from mistralai_gcp.utils import eventstreaming
 from typing import Any, AsyncGenerator, Generator, Optional, Union
 
@@ -14,8 +13,8 @@ class Fim(BaseSDK):
 
     def stream(
         self, *,
+        model: Nullable[str],
         prompt: str,
-        model: OptionalNullable[str] = UNSET,
         temperature: Optional[float] = 0.7,
         top_p: Optional[float] = 1,
         max_tokens: OptionalNullable[int] = UNSET,
@@ -32,8 +31,8 @@
 
         Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
 
-        :param prompt: The text/code to complete.
         :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest`
+        :param prompt: The text/code to complete.
         :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
         :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
         :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.
@@ -121,8 +120,8 @@ class Fim(BaseSDK):
 
     async def stream_async(
         self, *,
+        model: Nullable[str],
         prompt: str,
-        model: OptionalNullable[str] = UNSET,
         temperature: Optional[float] = 0.7,
         top_p: Optional[float] = 1,
         max_tokens: OptionalNullable[int] = UNSET,
@@ -139,8 +138,8 @@
 
         Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
 
-        :param prompt: The text/code to complete.
         :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest`
+        :param prompt: The text/code to complete.
         :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
         :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
         :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.
@@ -226,10 +225,10 @@ class Fim(BaseSDK):
 
 
 
-    def create(
+    def complete(
         self, *,
+        model: Nullable[str],
         prompt: str,
-        model: OptionalNullable[str] = UNSET,
         temperature: Optional[float] = 0.7,
         top_p: Optional[float] = 1,
         max_tokens: OptionalNullable[int] = UNSET,
@@ -246,8 +245,8 @@
 
         FIM completion.
 
-        :param prompt: The text/code to complete.
         :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest`
+        :param prompt: The text/code to complete.
         :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
         :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
         :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.
@@ -332,10 +331,10 @@ class Fim(BaseSDK):
 
 
 
-    async def create_async(
+    async def complete_async(
         self, *,
+        model: Nullable[str],
         prompt: str,
-        model: OptionalNullable[str] = UNSET,
         temperature: Optional[float] = 0.7,
         top_p: Optional[float] = 1,
         max_tokens: OptionalNullable[int] = UNSET,
@@ -352,8 +351,8 @@
 
         FIM completion.
 
-        :param prompt: The text/code to complete.
         :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest`
+        :param prompt: The text/code to complete.
         :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
         :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
         :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.
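The FIM client follows the same pattern: `fim.create`/`fim.create_async` are now `fim.complete`/`fim.complete_async`, again with `model` required. A hedged sketch with the same assumed `sdk` client:

```python
res = sdk.fim.complete(
    model="codestral-latest",  # per the docstring, codestral-2405 or codestral-latest
    prompt="def fibonacci(n: int) -> int:",
)
if res is not None and res.choices:
    print(res.choices[0].message.content)
```
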
mistralai_gcp/models/__init__.py CHANGED
@@ -1,7 +1,7 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from .assistantmessage import AssistantMessage, AssistantMessageRole, AssistantMessageTypedDict
-from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceFinishReason, ChatCompletionChoiceTypedDict, Message, MessageTypedDict
+from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceFinishReason, ChatCompletionChoiceTypedDict
 from .chatcompletionrequest import ChatCompletionRequest, ChatCompletionRequestMessages, ChatCompletionRequestMessagesTypedDict, ChatCompletionRequestStop, ChatCompletionRequestStopTypedDict, ChatCompletionRequestToolChoice, ChatCompletionRequestTypedDict
 from .chatcompletionresponse import ChatCompletionResponse, ChatCompletionResponseTypedDict
 from .chatcompletionstreamrequest import ChatCompletionStreamRequest, ChatCompletionStreamRequestTypedDict, Messages, MessagesTypedDict, Stop, StopTypedDict, ToolChoice
@@ -14,7 +14,7 @@ from .fimcompletionrequest import FIMCompletionRequest, FIMCompletionRequestStop
 from .fimcompletionresponse import FIMCompletionResponse, FIMCompletionResponseTypedDict
 from .fimcompletionstreamrequest import FIMCompletionStreamRequest, FIMCompletionStreamRequestStop, FIMCompletionStreamRequestStopTypedDict, FIMCompletionStreamRequestTypedDict
 from .function import Function, FunctionTypedDict
-from .functioncall import FunctionCall, FunctionCallTypedDict
+from .functioncall import Arguments, ArgumentsTypedDict, FunctionCall, FunctionCallTypedDict
 from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData
 from .responseformat import ResponseFormat, ResponseFormatTypedDict, ResponseFormats
 from .sdkerror import SDKError
@@ -28,4 +28,4 @@ from .usageinfo import UsageInfo, UsageInfoTypedDict
 from .usermessage import UserMessage, UserMessageContent, UserMessageContentTypedDict, UserMessageRole, UserMessageTypedDict
 from .validationerror import Loc, LocTypedDict, ValidationError, ValidationErrorTypedDict
 
-__all__ = ["AssistantMessage", "AssistantMessageRole", "AssistantMessageTypedDict", "ChatCompletionChoice", "ChatCompletionChoiceFinishReason", "ChatCompletionChoiceTypedDict", "ChatCompletionRequest", "ChatCompletionRequestMessages", "ChatCompletionRequestMessagesTypedDict", "ChatCompletionRequestStop", "ChatCompletionRequestStopTypedDict", "ChatCompletionRequestToolChoice", "ChatCompletionRequestTypedDict", "ChatCompletionResponse", "ChatCompletionResponseTypedDict", "ChatCompletionStreamRequest", "ChatCompletionStreamRequestTypedDict", "CompletionChunk", "CompletionChunkTypedDict", "CompletionEvent", "CompletionEventTypedDict", "CompletionResponseStreamChoice", "CompletionResponseStreamChoiceTypedDict", "Content", "ContentChunk", "ContentChunkTypedDict", "ContentTypedDict", "DeltaMessage", "DeltaMessageTypedDict", "FIMCompletionRequest", "FIMCompletionRequestStop", "FIMCompletionRequestStopTypedDict", "FIMCompletionRequestTypedDict", "FIMCompletionResponse", "FIMCompletionResponseTypedDict", "FIMCompletionStreamRequest", "FIMCompletionStreamRequestStop", "FIMCompletionStreamRequestStopTypedDict", "FIMCompletionStreamRequestTypedDict", "FinishReason", "Function", "FunctionCall", "FunctionCallTypedDict", "FunctionTypedDict", "HTTPValidationError", "HTTPValidationErrorData", "Loc", "LocTypedDict", "Message", "MessageTypedDict", "Messages", "MessagesTypedDict", "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", "Role", "SDKError", "Security", "SecurityTypedDict", "Stop", "StopTypedDict", "SystemMessage", "SystemMessageTypedDict", "TextChunk", "TextChunkTypedDict", "Tool", "ToolCall", "ToolCallTypedDict", "ToolChoice", "ToolMessage", "ToolMessageRole", "ToolMessageTypedDict", "ToolTypedDict", "UsageInfo", "UsageInfoTypedDict", "UserMessage", "UserMessageContent", "UserMessageContentTypedDict", "UserMessageRole", "UserMessageTypedDict", "ValidationError", "ValidationErrorTypedDict"]
+__all__ = ["Arguments", "ArgumentsTypedDict", "AssistantMessage", "AssistantMessageRole", "AssistantMessageTypedDict", "ChatCompletionChoice", "ChatCompletionChoiceFinishReason", "ChatCompletionChoiceTypedDict", "ChatCompletionRequest", "ChatCompletionRequestMessages", "ChatCompletionRequestMessagesTypedDict", "ChatCompletionRequestStop", "ChatCompletionRequestStopTypedDict", "ChatCompletionRequestToolChoice", "ChatCompletionRequestTypedDict", "ChatCompletionResponse", "ChatCompletionResponseTypedDict", "ChatCompletionStreamRequest", "ChatCompletionStreamRequestTypedDict", "CompletionChunk", "CompletionChunkTypedDict", "CompletionEvent", "CompletionEventTypedDict", "CompletionResponseStreamChoice", "CompletionResponseStreamChoiceTypedDict", "Content", "ContentChunk", "ContentChunkTypedDict", "ContentTypedDict", "DeltaMessage", "DeltaMessageTypedDict", "FIMCompletionRequest", "FIMCompletionRequestStop", "FIMCompletionRequestStopTypedDict", "FIMCompletionRequestTypedDict", "FIMCompletionResponse", "FIMCompletionResponseTypedDict", "FIMCompletionStreamRequest", "FIMCompletionStreamRequestStop", "FIMCompletionStreamRequestStopTypedDict", "FIMCompletionStreamRequestTypedDict", "FinishReason", "Function", "FunctionCall", "FunctionCallTypedDict", "FunctionTypedDict", "HTTPValidationError", "HTTPValidationErrorData", "Loc", "LocTypedDict", "Messages", "MessagesTypedDict", "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", "Role", "SDKError", "Security", "SecurityTypedDict", "Stop", "StopTypedDict", "SystemMessage", "SystemMessageTypedDict", "TextChunk", "TextChunkTypedDict", "Tool", "ToolCall", "ToolCallTypedDict", "ToolChoice", "ToolMessage", "ToolMessageRole", "ToolMessageTypedDict", "ToolTypedDict", "UsageInfo", "UsageInfoTypedDict", "UserMessage", "UserMessageContent", "UserMessageContentTypedDict", "UserMessageRole", "UserMessageTypedDict", "ValidationError", "ValidationErrorTypedDict"]
mistralai_gcp/models/assistantmessage.py CHANGED
@@ -39,18 +39,13 @@ class AssistantMessage(BaseModel):
             k = f.alias or n
             val = serialized.get(k)
 
+            optional_nullable = k in optional_fields and k in nullable_fields
+            is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member
+
             if val is not None and val != UNSET_SENTINEL:
                 m[k] = val
             elif val != UNSET_SENTINEL and (
-                not k in optional_fields
-                or (
-                    k in optional_fields
-                    and k in nullable_fields
-                    and (
-                        self.__pydantic_fields_set__.intersection({n})
-                        or k in null_default_fields
-                    ) # pylint: disable=no-member
-                )
+                not k in optional_fields or (optional_nullable and is_set)
             ):
                 m[k] = val
 
mistralai_gcp/models/chatcompletionchoice.py CHANGED
@@ -2,14 +2,9 @@
 
 from __future__ import annotations
 from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
-from .systemmessage import SystemMessage, SystemMessageTypedDict
-from .toolmessage import ToolMessage, ToolMessageTypedDict
-from .usermessage import UserMessage, UserMessageTypedDict
 from mistralai_gcp.types import BaseModel
-from mistralai_gcp.utils import get_discriminator
-from pydantic import Discriminator, Tag
-from typing import Literal, Optional, TypedDict, Union
-from typing_extensions import Annotated, NotRequired
+from typing import Literal, Optional, TypedDict
+from typing_extensions import NotRequired
 
 
 ChatCompletionChoiceFinishReason = Literal["stop", "length", "model_length", "error", "tool_calls"]
@@ -17,17 +12,11 @@ ChatCompletionChoiceFinishReason = Literal["stop", "length", "model_length", "er
 class ChatCompletionChoiceTypedDict(TypedDict):
     index: int
     finish_reason: ChatCompletionChoiceFinishReason
-    message: NotRequired[MessageTypedDict]
+    message: NotRequired[AssistantMessageTypedDict]
 
 
 class ChatCompletionChoice(BaseModel):
     index: int
     finish_reason: ChatCompletionChoiceFinishReason
-    message: Optional[Message] = None
+    message: Optional[AssistantMessage] = None
 
-
-MessageTypedDict = Union[SystemMessageTypedDict, UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict]
-
-
-Message = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))]
-
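Since `ChatCompletionChoice.message` is now typed as an `AssistantMessage` rather than a discriminated union over every message role, response-handling code no longer needs to narrow the type before reading assistant fields. A short sketch, reusing the hypothetical `res` from the chat example above:

```python
if res is not None and res.choices:
    message = res.choices[0].message  # Optional[AssistantMessage]
    if message is not None:
        print(message.content)
        print(message.tool_calls)  # tool calls, if the model produced any
```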