mistralai 1.0.3__py3-none-any.whl → 1.2.0__py3-none-any.whl

This diff compares publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.
Files changed (230)
  1. mistralai/__init__.py +4 -0
  2. mistralai/_hooks/sdkhooks.py +23 -4
  3. mistralai/_hooks/types.py +27 -9
  4. mistralai/_version.py +12 -0
  5. mistralai/agents.py +334 -164
  6. mistralai/basesdk.py +90 -5
  7. mistralai/batch.py +17 -0
  8. mistralai/chat.py +316 -166
  9. mistralai/classifiers.py +396 -0
  10. mistralai/embeddings.py +79 -55
  11. mistralai/files.py +487 -194
  12. mistralai/fim.py +206 -132
  13. mistralai/fine_tuning.py +3 -2
  14. mistralai/jobs.py +392 -263
  15. mistralai/mistral_jobs.py +733 -0
  16. mistralai/models/__init__.py +593 -50
  17. mistralai/models/agentscompletionrequest.py +70 -17
  18. mistralai/models/agentscompletionstreamrequest.py +72 -17
  19. mistralai/models/apiendpoint.py +9 -0
  20. mistralai/models/archiveftmodelout.py +15 -5
  21. mistralai/models/assistantmessage.py +22 -10
  22. mistralai/models/{modelcard.py → basemodelcard.py} +53 -14
  23. mistralai/models/batcherror.py +17 -0
  24. mistralai/models/batchjobin.py +58 -0
  25. mistralai/models/batchjobout.py +117 -0
  26. mistralai/models/batchjobsout.py +30 -0
  27. mistralai/models/batchjobstatus.py +15 -0
  28. mistralai/models/chatclassificationrequest.py +104 -0
  29. mistralai/models/chatcompletionchoice.py +13 -6
  30. mistralai/models/chatcompletionrequest.py +86 -21
  31. mistralai/models/chatcompletionresponse.py +8 -4
  32. mistralai/models/chatcompletionstreamrequest.py +88 -21
  33. mistralai/models/checkpointout.py +4 -3
  34. mistralai/models/classificationobject.py +21 -0
  35. mistralai/models/classificationrequest.py +59 -0
  36. mistralai/models/classificationresponse.py +21 -0
  37. mistralai/models/completionchunk.py +12 -5
  38. mistralai/models/completionevent.py +2 -3
  39. mistralai/models/completionresponsestreamchoice.py +22 -8
  40. mistralai/models/contentchunk.py +13 -10
  41. mistralai/models/delete_model_v1_models_model_id_deleteop.py +5 -5
  42. mistralai/models/deletefileout.py +4 -3
  43. mistralai/models/deletemodelout.py +5 -4
  44. mistralai/models/deltamessage.py +23 -11
  45. mistralai/models/detailedjobout.py +70 -12
  46. mistralai/models/embeddingrequest.py +14 -9
  47. mistralai/models/embeddingresponse.py +7 -3
  48. mistralai/models/embeddingresponsedata.py +5 -4
  49. mistralai/models/eventout.py +11 -6
  50. mistralai/models/filepurpose.py +8 -0
  51. mistralai/models/files_api_routes_delete_fileop.py +5 -5
  52. mistralai/models/files_api_routes_download_fileop.py +16 -0
  53. mistralai/models/files_api_routes_list_filesop.py +96 -0
  54. mistralai/models/files_api_routes_retrieve_fileop.py +5 -5
  55. mistralai/models/files_api_routes_upload_fileop.py +33 -14
  56. mistralai/models/fileschema.py +22 -15
  57. mistralai/models/fimcompletionrequest.py +44 -16
  58. mistralai/models/fimcompletionresponse.py +8 -4
  59. mistralai/models/fimcompletionstreamrequest.py +44 -16
  60. mistralai/models/finetuneablemodel.py +7 -1
  61. mistralai/models/ftmodelcapabilitiesout.py +6 -4
  62. mistralai/models/ftmodelcard.py +121 -0
  63. mistralai/models/ftmodelout.py +39 -9
  64. mistralai/models/function.py +5 -4
  65. mistralai/models/functioncall.py +4 -3
  66. mistralai/models/functionname.py +17 -0
  67. mistralai/models/githubrepositoryin.py +24 -7
  68. mistralai/models/githubrepositoryout.py +24 -7
  69. mistralai/models/httpvalidationerror.py +1 -3
  70. mistralai/models/imageurl.py +47 -0
  71. mistralai/models/imageurlchunk.py +38 -0
  72. mistralai/models/jobin.py +24 -7
  73. mistralai/models/jobmetadataout.py +32 -8
  74. mistralai/models/jobout.py +65 -12
  75. mistralai/models/jobs_api_routes_batch_cancel_batch_jobop.py +16 -0
  76. mistralai/models/jobs_api_routes_batch_get_batch_jobop.py +16 -0
  77. mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py +95 -0
  78. mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py +5 -5
  79. mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py +5 -5
  80. mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py +3 -2
  81. mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py +5 -5
  82. mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py +85 -18
  83. mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py +5 -5
  84. mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py +5 -5
  85. mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py +10 -6
  86. mistralai/models/jobsout.py +13 -5
  87. mistralai/models/legacyjobmetadataout.py +55 -9
  88. mistralai/models/listfilesout.py +7 -3
  89. mistralai/models/metricout.py +12 -8
  90. mistralai/models/modelcapabilities.py +9 -4
  91. mistralai/models/modellist.py +21 -7
  92. mistralai/models/responseformat.py +7 -8
  93. mistralai/models/responseformats.py +8 -0
  94. mistralai/models/retrieve_model_v1_models_model_id_getop.py +25 -6
  95. mistralai/models/retrievefileout.py +25 -15
  96. mistralai/models/sampletype.py +6 -2
  97. mistralai/models/security.py +14 -5
  98. mistralai/models/source.py +3 -2
  99. mistralai/models/systemmessage.py +10 -9
  100. mistralai/models/textchunk.py +14 -5
  101. mistralai/models/tool.py +10 -9
  102. mistralai/models/toolcall.py +10 -8
  103. mistralai/models/toolchoice.py +29 -0
  104. mistralai/models/toolchoiceenum.py +7 -0
  105. mistralai/models/toolmessage.py +13 -6
  106. mistralai/models/tooltypes.py +8 -0
  107. mistralai/models/trainingfile.py +4 -4
  108. mistralai/models/trainingparameters.py +34 -8
  109. mistralai/models/trainingparametersin.py +36 -10
  110. mistralai/models/unarchiveftmodelout.py +15 -5
  111. mistralai/models/updateftmodelin.py +9 -6
  112. mistralai/models/uploadfileout.py +22 -15
  113. mistralai/models/usageinfo.py +4 -3
  114. mistralai/models/usermessage.py +42 -10
  115. mistralai/models/validationerror.py +5 -3
  116. mistralai/models/wandbintegration.py +23 -7
  117. mistralai/models/wandbintegrationout.py +23 -8
  118. mistralai/models_.py +416 -294
  119. mistralai/sdk.py +31 -19
  120. mistralai/sdkconfiguration.py +9 -11
  121. mistralai/utils/__init__.py +14 -1
  122. mistralai/utils/annotations.py +13 -2
  123. mistralai/utils/logger.py +4 -1
  124. mistralai/utils/retries.py +2 -1
  125. mistralai/utils/security.py +13 -6
  126. mistralai/utils/serializers.py +25 -0
  127. {mistralai-1.0.3.dist-info → mistralai-1.2.0.dist-info}/METADATA +171 -66
  128. mistralai-1.2.0.dist-info/RECORD +276 -0
  129. {mistralai-1.0.3.dist-info → mistralai-1.2.0.dist-info}/WHEEL +1 -1
  130. mistralai_azure/__init__.py +4 -0
  131. mistralai_azure/_hooks/sdkhooks.py +23 -4
  132. mistralai_azure/_hooks/types.py +27 -9
  133. mistralai_azure/_version.py +12 -0
  134. mistralai_azure/basesdk.py +91 -6
  135. mistralai_azure/chat.py +308 -166
  136. mistralai_azure/models/__init__.py +164 -16
  137. mistralai_azure/models/assistantmessage.py +29 -11
  138. mistralai_azure/models/chatcompletionchoice.py +15 -6
  139. mistralai_azure/models/chatcompletionrequest.py +94 -22
  140. mistralai_azure/models/chatcompletionresponse.py +8 -4
  141. mistralai_azure/models/chatcompletionstreamrequest.py +96 -22
  142. mistralai_azure/models/completionchunk.py +12 -5
  143. mistralai_azure/models/completionevent.py +2 -3
  144. mistralai_azure/models/completionresponsestreamchoice.py +19 -8
  145. mistralai_azure/models/contentchunk.py +4 -11
  146. mistralai_azure/models/deltamessage.py +30 -12
  147. mistralai_azure/models/function.py +5 -4
  148. mistralai_azure/models/functioncall.py +4 -3
  149. mistralai_azure/models/functionname.py +17 -0
  150. mistralai_azure/models/httpvalidationerror.py +1 -3
  151. mistralai_azure/models/responseformat.py +7 -8
  152. mistralai_azure/models/responseformats.py +8 -0
  153. mistralai_azure/models/security.py +13 -5
  154. mistralai_azure/models/systemmessage.py +10 -9
  155. mistralai_azure/models/textchunk.py +14 -5
  156. mistralai_azure/models/tool.py +10 -9
  157. mistralai_azure/models/toolcall.py +10 -8
  158. mistralai_azure/models/toolchoice.py +29 -0
  159. mistralai_azure/models/toolchoiceenum.py +7 -0
  160. mistralai_azure/models/toolmessage.py +20 -7
  161. mistralai_azure/models/tooltypes.py +8 -0
  162. mistralai_azure/models/usageinfo.py +4 -3
  163. mistralai_azure/models/usermessage.py +42 -10
  164. mistralai_azure/models/validationerror.py +5 -3
  165. mistralai_azure/sdkconfiguration.py +9 -11
  166. mistralai_azure/utils/__init__.py +16 -3
  167. mistralai_azure/utils/annotations.py +13 -2
  168. mistralai_azure/utils/forms.py +10 -9
  169. mistralai_azure/utils/headers.py +8 -8
  170. mistralai_azure/utils/logger.py +6 -0
  171. mistralai_azure/utils/queryparams.py +16 -14
  172. mistralai_azure/utils/retries.py +2 -1
  173. mistralai_azure/utils/security.py +12 -6
  174. mistralai_azure/utils/serializers.py +42 -8
  175. mistralai_azure/utils/url.py +13 -8
  176. mistralai_azure/utils/values.py +6 -0
  177. mistralai_gcp/__init__.py +4 -0
  178. mistralai_gcp/_hooks/sdkhooks.py +23 -4
  179. mistralai_gcp/_hooks/types.py +27 -9
  180. mistralai_gcp/_version.py +12 -0
  181. mistralai_gcp/basesdk.py +91 -6
  182. mistralai_gcp/chat.py +308 -166
  183. mistralai_gcp/fim.py +198 -132
  184. mistralai_gcp/models/__init__.py +186 -18
  185. mistralai_gcp/models/assistantmessage.py +29 -11
  186. mistralai_gcp/models/chatcompletionchoice.py +15 -6
  187. mistralai_gcp/models/chatcompletionrequest.py +91 -22
  188. mistralai_gcp/models/chatcompletionresponse.py +8 -4
  189. mistralai_gcp/models/chatcompletionstreamrequest.py +93 -22
  190. mistralai_gcp/models/completionchunk.py +12 -5
  191. mistralai_gcp/models/completionevent.py +2 -3
  192. mistralai_gcp/models/completionresponsestreamchoice.py +19 -8
  193. mistralai_gcp/models/contentchunk.py +4 -11
  194. mistralai_gcp/models/deltamessage.py +30 -12
  195. mistralai_gcp/models/fimcompletionrequest.py +51 -17
  196. mistralai_gcp/models/fimcompletionresponse.py +8 -4
  197. mistralai_gcp/models/fimcompletionstreamrequest.py +51 -17
  198. mistralai_gcp/models/function.py +5 -4
  199. mistralai_gcp/models/functioncall.py +4 -3
  200. mistralai_gcp/models/functionname.py +17 -0
  201. mistralai_gcp/models/httpvalidationerror.py +1 -3
  202. mistralai_gcp/models/responseformat.py +7 -8
  203. mistralai_gcp/models/responseformats.py +8 -0
  204. mistralai_gcp/models/security.py +13 -5
  205. mistralai_gcp/models/systemmessage.py +10 -9
  206. mistralai_gcp/models/textchunk.py +14 -5
  207. mistralai_gcp/models/tool.py +10 -9
  208. mistralai_gcp/models/toolcall.py +10 -8
  209. mistralai_gcp/models/toolchoice.py +29 -0
  210. mistralai_gcp/models/toolchoiceenum.py +7 -0
  211. mistralai_gcp/models/toolmessage.py +20 -7
  212. mistralai_gcp/models/tooltypes.py +8 -0
  213. mistralai_gcp/models/usageinfo.py +4 -3
  214. mistralai_gcp/models/usermessage.py +42 -10
  215. mistralai_gcp/models/validationerror.py +5 -3
  216. mistralai_gcp/sdk.py +6 -7
  217. mistralai_gcp/sdkconfiguration.py +9 -11
  218. mistralai_gcp/utils/__init__.py +16 -3
  219. mistralai_gcp/utils/annotations.py +13 -2
  220. mistralai_gcp/utils/forms.py +10 -9
  221. mistralai_gcp/utils/headers.py +8 -8
  222. mistralai_gcp/utils/logger.py +6 -0
  223. mistralai_gcp/utils/queryparams.py +16 -14
  224. mistralai_gcp/utils/retries.py +2 -1
  225. mistralai_gcp/utils/security.py +12 -6
  226. mistralai_gcp/utils/serializers.py +42 -8
  227. mistralai_gcp/utils/url.py +13 -8
  228. mistralai_gcp/utils/values.py +6 -0
  229. mistralai-1.0.3.dist-info/RECORD +0 -236
  230. {mistralai-1.0.3.dist-info → mistralai-1.2.0.dist-info}/LICENSE +0 -0

mistralai/models/fimcompletionrequest.py

@@ -3,8 +3,8 @@
 from __future__ import annotations
 from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
 from pydantic import model_serializer
-from typing import List, Optional, TypedDict, Union
-from typing_extensions import NotRequired
+from typing import List, Optional, Union
+from typing_extensions import NotRequired, TypedDict


 FIMCompletionRequestStopTypedDict = Union[str, List[str]]
@@ -23,14 +23,12 @@ class FIMCompletionRequestTypedDict(TypedDict):
     """
     prompt: str
     r"""The text/code to complete."""
-    temperature: NotRequired[float]
-    r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both."""
+    temperature: NotRequired[Nullable[float]]
+    r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value."""
     top_p: NotRequired[float]
     r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both."""
     max_tokens: NotRequired[Nullable[int]]
     r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length."""
-    min_tokens: NotRequired[Nullable[int]]
-    r"""The minimum number of tokens to generate in the completion."""
     stream: NotRequired[bool]
     r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON."""
     stop: NotRequired[FIMCompletionRequestStopTypedDict]
@@ -39,7 +37,9 @@ class FIMCompletionRequestTypedDict(TypedDict):
     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
     suffix: NotRequired[Nullable[str]]
     r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`."""
-
+    min_tokens: NotRequired[Nullable[int]]
+    r"""The minimum number of tokens to generate in the completion."""
+

 class FIMCompletionRequest(BaseModel):
     model: Nullable[str]
@@ -47,29 +47,54 @@ class FIMCompletionRequest(BaseModel):
     - `codestral-2405`
     - `codestral-latest`
     """
+
     prompt: str
     r"""The text/code to complete."""
-    temperature: Optional[float] = 0.7
-    r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both."""
+
+    temperature: OptionalNullable[float] = UNSET
+    r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value."""
+
     top_p: Optional[float] = 1
     r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both."""
+
     max_tokens: OptionalNullable[int] = UNSET
     r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length."""
-    min_tokens: OptionalNullable[int] = UNSET
-    r"""The minimum number of tokens to generate in the completion."""
+
     stream: Optional[bool] = False
     r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON."""
+
     stop: Optional[FIMCompletionRequestStop] = None
     r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
+
     random_seed: OptionalNullable[int] = UNSET
     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
+
     suffix: OptionalNullable[str] = UNSET
     r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`."""
-
+
+    min_tokens: OptionalNullable[int] = UNSET
+    r"""The minimum number of tokens to generate in the completion."""
+
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "suffix"]
-        nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "suffix"]
+        optional_fields = [
+            "temperature",
+            "top_p",
+            "max_tokens",
+            "stream",
+            "stop",
+            "random_seed",
+            "suffix",
+            "min_tokens",
+        ]
+        nullable_fields = [
+            "model",
+            "temperature",
+            "max_tokens",
+            "random_seed",
+            "suffix",
+            "min_tokens",
+        ]
         null_default_fields = []

         serialized = handler(self)
@@ -79,9 +104,13 @@ class FIMCompletionRequest(BaseModel):
         for n, f in self.model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
+            serialized.pop(k, None)

             optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields)  # pylint: disable=no-member
+            is_set = (
+                self.__pydantic_fields_set__.intersection({n})
+                or k in null_default_fields
+            )  # pylint: disable=no-member

             if val is not None and val != UNSET_SENTINEL:
                 m[k] = val
@@ -91,4 +120,3 @@ class FIMCompletionRequest(BaseModel):
                 m[k] = val

         return m
-
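
Beyond the import and layout cleanups, the substantive change here is that `temperature` moved from a hard-coded client-side default (`Optional[float] = 0.7`) to `OptionalNullable[float] = UNSET`: an unset temperature is now omitted from the serialized request, and per the new docstring the model-specific default from the `/models` endpoint applies. A minimal sketch of the difference, using only the models shown above and standard Pydantic serialization:

from mistralai.models import FIMCompletionRequest

# 1.0.3 always sent temperature=0.7 unless overridden; in 1.2.0 an unset
# temperature is dropped by serialize_model, deferring to the server-side,
# model-specific default.
req = FIMCompletionRequest(model="codestral-latest", prompt="def add(a, b):")
assert "temperature" not in req.model_dump(by_alias=True)

# An explicit value is still serialized as before.
req = FIMCompletionRequest(
    model="codestral-latest", prompt="def add(a, b):", temperature=0.2
)
assert req.model_dump(by_alias=True)["temperature"] == 0.2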

mistralai/models/fimcompletionresponse.py

@@ -4,8 +4,8 @@ from __future__ import annotations
 from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict
 from .usageinfo import UsageInfo, UsageInfoTypedDict
 from mistralai.types import BaseModel
-from typing import List, Optional, TypedDict
-from typing_extensions import NotRequired
+from typing import List, Optional
+from typing_extensions import NotRequired, TypedDict


 class FIMCompletionResponseTypedDict(TypedDict):
@@ -15,13 +15,17 @@ class FIMCompletionResponseTypedDict(TypedDict):
     usage: UsageInfoTypedDict
     created: NotRequired[int]
     choices: NotRequired[List[ChatCompletionChoiceTypedDict]]
-
+

 class FIMCompletionResponse(BaseModel):
     id: str
+
     object: str
+
     model: str
+
     usage: UsageInfo
+
     created: Optional[int] = None
+
     choices: Optional[List[ChatCompletionChoice]] = None
-

mistralai/models/fimcompletionstreamrequest.py

@@ -3,8 +3,8 @@
 from __future__ import annotations
 from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
 from pydantic import model_serializer
-from typing import List, Optional, TypedDict, Union
-from typing_extensions import NotRequired
+from typing import List, Optional, Union
+from typing_extensions import NotRequired, TypedDict


 FIMCompletionStreamRequestStopTypedDict = Union[str, List[str]]
@@ -23,14 +23,12 @@ class FIMCompletionStreamRequestTypedDict(TypedDict):
     """
     prompt: str
     r"""The text/code to complete."""
-    temperature: NotRequired[float]
-    r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both."""
+    temperature: NotRequired[Nullable[float]]
+    r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value."""
     top_p: NotRequired[float]
     r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both."""
     max_tokens: NotRequired[Nullable[int]]
     r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length."""
-    min_tokens: NotRequired[Nullable[int]]
-    r"""The minimum number of tokens to generate in the completion."""
     stream: NotRequired[bool]
     stop: NotRequired[FIMCompletionStreamRequestStopTypedDict]
     r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
@@ -38,7 +36,9 @@ class FIMCompletionStreamRequestTypedDict(TypedDict):
     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
     suffix: NotRequired[Nullable[str]]
     r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`."""
-
+    min_tokens: NotRequired[Nullable[int]]
+    r"""The minimum number of tokens to generate in the completion."""
+

 class FIMCompletionStreamRequest(BaseModel):
     model: Nullable[str]
@@ -46,28 +46,53 @@ class FIMCompletionStreamRequest(BaseModel):
     - `codestral-2405`
     - `codestral-latest`
     """
+
     prompt: str
     r"""The text/code to complete."""
-    temperature: Optional[float] = 0.7
-    r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both."""
+
+    temperature: OptionalNullable[float] = UNSET
+    r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value."""
+
     top_p: Optional[float] = 1
     r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both."""
+
     max_tokens: OptionalNullable[int] = UNSET
     r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length."""
-    min_tokens: OptionalNullable[int] = UNSET
-    r"""The minimum number of tokens to generate in the completion."""
+
     stream: Optional[bool] = True
+
     stop: Optional[FIMCompletionStreamRequestStop] = None
     r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
+
     random_seed: OptionalNullable[int] = UNSET
     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
+
     suffix: OptionalNullable[str] = UNSET
     r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`."""
-
+
+    min_tokens: OptionalNullable[int] = UNSET
+    r"""The minimum number of tokens to generate in the completion."""
+
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "suffix"]
-        nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "suffix"]
+        optional_fields = [
+            "temperature",
+            "top_p",
+            "max_tokens",
+            "stream",
+            "stop",
+            "random_seed",
+            "suffix",
+            "min_tokens",
+        ]
+        nullable_fields = [
+            "model",
+            "temperature",
+            "max_tokens",
+            "random_seed",
+            "suffix",
+            "min_tokens",
+        ]
         null_default_fields = []

         serialized = handler(self)
@@ -77,9 +102,13 @@ class FIMCompletionStreamRequest(BaseModel):
         for n, f in self.model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
+            serialized.pop(k, None)

             optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields)  # pylint: disable=no-member
+            is_set = (
+                self.__pydantic_fields_set__.intersection({n})
+                or k in null_default_fields
+            )  # pylint: disable=no-member

             if val is not None and val != UNSET_SENTINEL:
                 m[k] = val
@@ -89,4 +118,3 @@ class FIMCompletionStreamRequest(BaseModel):
                 m[k] = val

         return m
-
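
FIMCompletionStreamRequest gets the same temperature and min_tokens treatment, with `stream` defaulting to `True`. For context, a hedged usage sketch of the streaming entry point that builds this request; the method and attribute names follow the 1.x SDK (`client.fim.stream`, `CompletionEvent.data`), and the prompt/suffix values are made up:

import os
from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

# fim.stream constructs a FIMCompletionStreamRequest (stream=True) and
# yields CompletionEvent objects wrapping CompletionChunk payloads.
res = client.fim.stream(
    model="codestral-latest",
    prompt="def fibonacci(n: int) -> int:",
    suffix="return fibonacci(n - 1) + fibonacci(n - 2)",
)
for event in res:
    delta = event.data.choices[0].delta.content
    if delta:
        print(delta, end="")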

mistralai/models/finetuneablemodel.py

@@ -4,5 +4,11 @@ from __future__ import annotations
 from typing import Literal


-FineTuneableModel = Literal["open-mistral-7b", "mistral-small-latest", "codestral-latest", "mistral-large-latest", "open-mistral-nemo"]
+FineTuneableModel = Literal[
+    "open-mistral-7b",
+    "mistral-small-latest",
+    "codestral-latest",
+    "mistral-large-latest",
+    "open-mistral-nemo",
+]
 r"""The name of the model to fine-tune."""

mistralai/models/ftmodelcapabilitiesout.py

@@ -2,8 +2,8 @@

 from __future__ import annotations
 from mistralai.types import BaseModel
-from typing import Optional, TypedDict
-from typing_extensions import NotRequired
+from typing import Optional
+from typing_extensions import NotRequired, TypedDict


 class FTModelCapabilitiesOutTypedDict(TypedDict):
@@ -11,11 +11,13 @@ class FTModelCapabilitiesOutTypedDict(TypedDict):
     completion_fim: NotRequired[bool]
     function_calling: NotRequired[bool]
     fine_tuning: NotRequired[bool]
-
+

 class FTModelCapabilitiesOut(BaseModel):
     completion_chat: Optional[bool] = True
+
     completion_fim: Optional[bool] = False
+
     function_calling: Optional[bool] = False
+
     fine_tuning: Optional[bool] = False
-

mistralai/models/ftmodelcard.py

@@ -0,0 +1,121 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict
+from datetime import datetime
+from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
+from mistralai.utils import validate_const
+import pydantic
+from pydantic import model_serializer
+from pydantic.functional_validators import AfterValidator
+from typing import List, Literal, Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+FTModelCardType = Literal["fine-tuned"]
+
+
+class FTModelCardTypedDict(TypedDict):
+    r"""Extra fields for fine-tuned models."""
+
+    id: str
+    capabilities: ModelCapabilitiesTypedDict
+    job: str
+    root: str
+    object: NotRequired[str]
+    created: NotRequired[int]
+    owned_by: NotRequired[str]
+    name: NotRequired[Nullable[str]]
+    description: NotRequired[Nullable[str]]
+    max_context_length: NotRequired[int]
+    aliases: NotRequired[List[str]]
+    deprecation: NotRequired[Nullable[datetime]]
+    default_model_temperature: NotRequired[Nullable[float]]
+    type: FTModelCardType
+    archived: NotRequired[bool]
+
+
+class FTModelCard(BaseModel):
+    r"""Extra fields for fine-tuned models."""
+
+    id: str
+
+    capabilities: ModelCapabilities
+
+    job: str
+
+    root: str
+
+    object: Optional[str] = "model"
+
+    created: Optional[int] = None
+
+    owned_by: Optional[str] = "mistralai"
+
+    name: OptionalNullable[str] = UNSET
+
+    description: OptionalNullable[str] = UNSET
+
+    max_context_length: Optional[int] = 32768
+
+    aliases: Optional[List[str]] = None
+
+    deprecation: OptionalNullable[datetime] = UNSET
+
+    default_model_temperature: OptionalNullable[float] = UNSET
+
+    TYPE: Annotated[
+        Annotated[
+            Optional[FTModelCardType], AfterValidator(validate_const("fine-tuned"))
+        ],
+        pydantic.Field(alias="type"),
+    ] = "fine-tuned"
+
+    archived: Optional[bool] = False
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = [
+            "object",
+            "created",
+            "owned_by",
+            "name",
+            "description",
+            "max_context_length",
+            "aliases",
+            "deprecation",
+            "default_model_temperature",
+            "type",
+            "archived",
+        ]
+        nullable_fields = [
+            "name",
+            "description",
+            "deprecation",
+            "default_model_temperature",
+        ]
+        null_default_fields = []
+
+        serialized = handler(self)
+
+        m = {}
+
+        for n, f in self.model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+            serialized.pop(k, None)
+
+            optional_nullable = k in optional_fields and k in nullable_fields
+            is_set = (
+                self.__pydantic_fields_set__.intersection({n})
+                or k in null_default_fields
+            )  # pylint: disable=no-member
+
+            if val is not None and val != UNSET_SENTINEL:
+                m[k] = val
+            elif val != UNSET_SENTINEL and (
+                not k in optional_fields or (optional_nullable and is_set)
+            ):
+                m[k] = val
+
+        return m
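
ftmodelcard.py is new in 1.2.0 and shows the `validate_const` pattern that replaces the old `Final[...]  # type: ignore` constants across this release: the `type` field still defaults to its literal value, but a mismatched input is now rejected at validation time rather than silently accepted. A sketch, assuming `ModelCapabilities` has all-optional fields (as `FTModelCapabilitiesOut` above suggests) and using made-up identifiers:

import pydantic
from mistralai.models import FTModelCard, ModelCapabilities

card = FTModelCard(
    id="ft:open-mistral-7b:example:0000",  # hypothetical fine-tuned model id
    capabilities=ModelCapabilities(),
    job="00000000-0000-0000-0000-000000000000",  # hypothetical job id
    root="open-mistral-7b",
)
assert card.TYPE == "fine-tuned"  # defaulted constant, serialized under "type"

try:
    FTModelCard.model_validate(
        {"id": "x", "capabilities": {}, "job": "j", "root": "r", "type": "base"}
    )
except pydantic.ValidationError:
    print("validate_const rejects any 'type' other than 'fine-tuned'")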

mistralai/models/ftmodelout.py

@@ -1,16 +1,22 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from .ftmodelcapabilitiesout import FTModelCapabilitiesOut, FTModelCapabilitiesOutTypedDict
+from .ftmodelcapabilitiesout import (
+    FTModelCapabilitiesOut,
+    FTModelCapabilitiesOutTypedDict,
+)
 from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
+from mistralai.utils import validate_const
 import pydantic
 from pydantic import model_serializer
-from typing import Final, List, Literal, Optional, TypedDict
-from typing_extensions import Annotated, NotRequired
+from pydantic.functional_validators import AfterValidator
+from typing import List, Literal, Optional
+from typing_extensions import Annotated, NotRequired, TypedDict


 FTModelOutObject = Literal["model"]

+
 class FTModelOutTypedDict(TypedDict):
     id: str
     created: int
@@ -19,29 +25,50 @@ class FTModelOutTypedDict(TypedDict):
     archived: bool
     capabilities: FTModelCapabilitiesOutTypedDict
     job: str
+    object: FTModelOutObject
     name: NotRequired[Nullable[str]]
     description: NotRequired[Nullable[str]]
     max_context_length: NotRequired[int]
     aliases: NotRequired[List[str]]
-
+

 class FTModelOut(BaseModel):
     id: str
+
     created: int
+
     owned_by: str
+
     root: str
+
     archived: bool
+
     capabilities: FTModelCapabilitiesOut
+
     job: str
-    OBJECT: Annotated[Final[Optional[FTModelOutObject]], pydantic.Field(alias="object")] = "model"  # type: ignore
+
+    OBJECT: Annotated[
+        Annotated[Optional[FTModelOutObject], AfterValidator(validate_const("model"))],
+        pydantic.Field(alias="object"),
+    ] = "model"
+
     name: OptionalNullable[str] = UNSET
+
     description: OptionalNullable[str] = UNSET
+
     max_context_length: Optional[int] = 32768
+
     aliases: Optional[List[str]] = None
-
+
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["object", "name", "description", "max_context_length", "aliases"]
+        optional_fields = [
+            "object",
+            "name",
+            "description",
+            "max_context_length",
+            "aliases",
+        ]
         nullable_fields = ["name", "description"]
         null_default_fields = []

@@ -52,9 +79,13 @@ class FTModelOut(BaseModel):
         for n, f in self.model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
+            serialized.pop(k, None)

             optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields)  # pylint: disable=no-member
+            is_set = (
+                self.__pydantic_fields_set__.intersection({n})
+                or k in null_default_fields
+            )  # pylint: disable=no-member

             if val is not None and val != UNSET_SENTINEL:
                 m[k] = val
@@ -64,4 +95,3 @@ class FTModelOut(BaseModel):
                 m[k] = val

         return m
-

mistralai/models/function.py

@@ -2,18 +2,19 @@

 from __future__ import annotations
 from mistralai.types import BaseModel
-from typing import Any, Dict, Optional, TypedDict
-from typing_extensions import NotRequired
+from typing import Any, Dict, Optional
+from typing_extensions import NotRequired, TypedDict


 class FunctionTypedDict(TypedDict):
     name: str
     parameters: Dict[str, Any]
     description: NotRequired[str]
-
+

 class Function(BaseModel):
     name: str
+
     parameters: Dict[str, Any]
+
     description: Optional[str] = ""
-

mistralai/models/functioncall.py

@@ -2,7 +2,8 @@

 from __future__ import annotations
 from mistralai.types import BaseModel
-from typing import Any, Dict, TypedDict, Union
+from typing import Any, Dict, Union
+from typing_extensions import TypedDict


 ArgumentsTypedDict = Union[Dict[str, Any], str]
@@ -14,9 +15,9 @@ Arguments = Union[Dict[str, Any], str]
 class FunctionCallTypedDict(TypedDict):
     name: str
     arguments: ArgumentsTypedDict
-
+

 class FunctionCall(BaseModel):
     name: str
+
     arguments: Arguments
-

mistralai/models/functionname.py

@@ -0,0 +1,17 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from mistralai.types import BaseModel
+from typing_extensions import TypedDict
+
+
+class FunctionNameTypedDict(TypedDict):
+    r"""this restriction of `Function` is used to select a specific function to call"""
+
+    name: str
+
+
+class FunctionName(BaseModel):
+    r"""this restriction of `Function` is used to select a specific function to call"""
+
+    name: str
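
functionname.py backs the new `ToolChoice` model (item 103 in the file list), which lets a request pin tool use to one named function instead of only the coarse `ToolChoiceEnum` values. A hedged sketch, assuming ToolChoice pairs a tool `type` with a `FunctionName` as the sibling files suggest; the tool name is made up:

from mistralai.models import FunctionName, ToolChoice

# Force the model to call exactly this function rather than choosing freely;
# the object is meant to be passed as tool_choice on a chat completion
# request alongside the matching Tool definition.
choice = ToolChoice(
    type="function",
    function=FunctionName(name="get_current_weather"),  # hypothetical tool
)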

mistralai/models/githubrepositoryin.py

@@ -2,30 +2,44 @@

 from __future__ import annotations
 from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
+from mistralai.utils import validate_const
 import pydantic
 from pydantic import model_serializer
-from typing import Final, Literal, Optional, TypedDict
-from typing_extensions import Annotated, NotRequired
+from pydantic.functional_validators import AfterValidator
+from typing import Literal, Optional
+from typing_extensions import Annotated, NotRequired, TypedDict


 GithubRepositoryInType = Literal["github"]

+
 class GithubRepositoryInTypedDict(TypedDict):
     name: str
     owner: str
     token: str
+    type: GithubRepositoryInType
     ref: NotRequired[Nullable[str]]
     weight: NotRequired[float]
-
+

 class GithubRepositoryIn(BaseModel):
     name: str
+
     owner: str
+
     token: str
-    TYPE: Annotated[Final[Optional[GithubRepositoryInType]], pydantic.Field(alias="type")] = "github"  # type: ignore
+
+    TYPE: Annotated[
+        Annotated[
+            Optional[GithubRepositoryInType], AfterValidator(validate_const("github"))
+        ],
+        pydantic.Field(alias="type"),
+    ] = "github"
+
     ref: OptionalNullable[str] = UNSET
+
     weight: Optional[float] = 1
-
+
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
         optional_fields = ["type", "ref", "weight"]
@@ -39,9 +53,13 @@ class GithubRepositoryIn(BaseModel):
         for n, f in self.model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
+            serialized.pop(k, None)

             optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields)  # pylint: disable=no-member
+            is_set = (
+                self.__pydantic_fields_set__.intersection({n})
+                or k in null_default_fields
+            )  # pylint: disable=no-member

             if val is not None and val != UNSET_SENTINEL:
                 m[k] = val
@@ -51,4 +69,3 @@ class GithubRepositoryIn(BaseModel):
                 m[k] = val

         return m
-
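
The same validate_const migration applies here: the `github` constant on `TYPE` is now enforced at validation time, while `weight` keeps its default of 1. A minimal construction sketch with placeholder repository coordinates and token:

from mistralai.models import GithubRepositoryIn

repo = GithubRepositoryIn(
    name="my-training-data",  # placeholder repository name
    owner="my-org",           # placeholder owner
    token="<github-token>",   # placeholder access token
)
assert repo.TYPE == "github" and repo.weight == 1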