mistralai 1.1.0__py3-none-any.whl → 1.2.0__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.
Files changed (186)
  1. mistralai/__init__.py +4 -0
  2. mistralai/_version.py +12 -0
  3. mistralai/agents.py +56 -22
  4. mistralai/batch.py +17 -0
  5. mistralai/chat.py +64 -30
  6. mistralai/classifiers.py +396 -0
  7. mistralai/embeddings.py +10 -6
  8. mistralai/files.py +252 -19
  9. mistralai/fim.py +40 -30
  10. mistralai/jobs.py +40 -20
  11. mistralai/mistral_jobs.py +733 -0
  12. mistralai/models/__init__.py +108 -18
  13. mistralai/models/agentscompletionrequest.py +27 -10
  14. mistralai/models/agentscompletionstreamrequest.py +27 -10
  15. mistralai/models/apiendpoint.py +9 -0
  16. mistralai/models/archiveftmodelout.py +11 -5
  17. mistralai/models/assistantmessage.py +11 -6
  18. mistralai/models/basemodelcard.py +22 -6
  19. mistralai/models/batcherror.py +17 -0
  20. mistralai/models/batchjobin.py +58 -0
  21. mistralai/models/batchjobout.py +117 -0
  22. mistralai/models/batchjobsout.py +30 -0
  23. mistralai/models/batchjobstatus.py +15 -0
  24. mistralai/models/chatclassificationrequest.py +104 -0
  25. mistralai/models/chatcompletionchoice.py +9 -4
  26. mistralai/models/chatcompletionrequest.py +32 -13
  27. mistralai/models/chatcompletionresponse.py +2 -2
  28. mistralai/models/chatcompletionstreamrequest.py +32 -13
  29. mistralai/models/checkpointout.py +1 -1
  30. mistralai/models/classificationobject.py +21 -0
  31. mistralai/models/classificationrequest.py +59 -0
  32. mistralai/models/classificationresponse.py +21 -0
  33. mistralai/models/completionchunk.py +2 -2
  34. mistralai/models/completionevent.py +1 -1
  35. mistralai/models/completionresponsestreamchoice.py +11 -5
  36. mistralai/models/delete_model_v1_models_model_id_deleteop.py +1 -2
  37. mistralai/models/deletefileout.py +1 -1
  38. mistralai/models/deletemodelout.py +2 -2
  39. mistralai/models/deltamessage.py +14 -7
  40. mistralai/models/detailedjobout.py +11 -5
  41. mistralai/models/embeddingrequest.py +5 -5
  42. mistralai/models/embeddingresponse.py +2 -1
  43. mistralai/models/embeddingresponsedata.py +2 -2
  44. mistralai/models/eventout.py +2 -2
  45. mistralai/models/filepurpose.py +8 -0
  46. mistralai/models/files_api_routes_delete_fileop.py +1 -2
  47. mistralai/models/files_api_routes_download_fileop.py +16 -0
  48. mistralai/models/files_api_routes_list_filesop.py +96 -0
  49. mistralai/models/files_api_routes_retrieve_fileop.py +1 -2
  50. mistralai/models/files_api_routes_upload_fileop.py +9 -9
  51. mistralai/models/fileschema.py +7 -21
  52. mistralai/models/fimcompletionrequest.py +20 -13
  53. mistralai/models/fimcompletionresponse.py +2 -2
  54. mistralai/models/fimcompletionstreamrequest.py +20 -13
  55. mistralai/models/ftmodelcapabilitiesout.py +2 -2
  56. mistralai/models/ftmodelcard.py +24 -6
  57. mistralai/models/ftmodelout.py +9 -5
  58. mistralai/models/function.py +2 -2
  59. mistralai/models/functioncall.py +2 -1
  60. mistralai/models/functionname.py +1 -1
  61. mistralai/models/githubrepositoryin.py +11 -5
  62. mistralai/models/githubrepositoryout.py +11 -5
  63. mistralai/models/httpvalidationerror.py +0 -2
  64. mistralai/models/imageurl.py +1 -2
  65. mistralai/models/imageurlchunk.py +11 -5
  66. mistralai/models/jobin.py +2 -2
  67. mistralai/models/jobmetadataout.py +1 -2
  68. mistralai/models/jobout.py +10 -5
  69. mistralai/models/jobs_api_routes_batch_cancel_batch_jobop.py +16 -0
  70. mistralai/models/jobs_api_routes_batch_get_batch_jobop.py +16 -0
  71. mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py +95 -0
  72. mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py +1 -2
  73. mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py +1 -2
  74. mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py +1 -2
  75. mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py +2 -2
  76. mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py +1 -2
  77. mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py +1 -2
  78. mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py +1 -2
  79. mistralai/models/jobsout.py +9 -5
  80. mistralai/models/legacyjobmetadataout.py +12 -5
  81. mistralai/models/listfilesout.py +5 -1
  82. mistralai/models/metricout.py +1 -2
  83. mistralai/models/modelcapabilities.py +2 -2
  84. mistralai/models/modellist.py +2 -2
  85. mistralai/models/responseformat.py +2 -2
  86. mistralai/models/retrieve_model_v1_models_model_id_getop.py +2 -2
  87. mistralai/models/retrievefileout.py +10 -21
  88. mistralai/models/sampletype.py +6 -2
  89. mistralai/models/security.py +2 -2
  90. mistralai/models/source.py +3 -2
  91. mistralai/models/systemmessage.py +6 -6
  92. mistralai/models/textchunk.py +9 -5
  93. mistralai/models/tool.py +2 -2
  94. mistralai/models/toolcall.py +2 -2
  95. mistralai/models/toolchoice.py +2 -2
  96. mistralai/models/toolmessage.py +2 -2
  97. mistralai/models/trainingfile.py +2 -2
  98. mistralai/models/trainingparameters.py +7 -2
  99. mistralai/models/trainingparametersin.py +7 -2
  100. mistralai/models/unarchiveftmodelout.py +11 -5
  101. mistralai/models/updateftmodelin.py +1 -2
  102. mistralai/models/uploadfileout.py +7 -21
  103. mistralai/models/usageinfo.py +1 -1
  104. mistralai/models/usermessage.py +36 -5
  105. mistralai/models/validationerror.py +2 -1
  106. mistralai/models/wandbintegration.py +11 -5
  107. mistralai/models/wandbintegrationout.py +12 -6
  108. mistralai/models_.py +48 -24
  109. mistralai/sdk.py +7 -0
  110. mistralai/sdkconfiguration.py +7 -7
  111. mistralai/utils/__init__.py +8 -0
  112. mistralai/utils/annotations.py +13 -2
  113. mistralai/utils/serializers.py +25 -0
  114. {mistralai-1.1.0.dist-info → mistralai-1.2.0.dist-info}/METADATA +90 -14
  115. mistralai-1.2.0.dist-info/RECORD +276 -0
  116. {mistralai-1.1.0.dist-info → mistralai-1.2.0.dist-info}/WHEEL +1 -1
  117. mistralai_azure/__init__.py +4 -0
  118. mistralai_azure/_version.py +12 -0
  119. mistralai_azure/chat.py +64 -30
  120. mistralai_azure/models/__init__.py +9 -3
  121. mistralai_azure/models/assistantmessage.py +11 -6
  122. mistralai_azure/models/chatcompletionchoice.py +10 -5
  123. mistralai_azure/models/chatcompletionrequest.py +32 -13
  124. mistralai_azure/models/chatcompletionresponse.py +2 -2
  125. mistralai_azure/models/chatcompletionstreamrequest.py +32 -13
  126. mistralai_azure/models/completionchunk.py +2 -2
  127. mistralai_azure/models/completionevent.py +1 -1
  128. mistralai_azure/models/completionresponsestreamchoice.py +9 -4
  129. mistralai_azure/models/deltamessage.py +14 -7
  130. mistralai_azure/models/function.py +2 -2
  131. mistralai_azure/models/functioncall.py +2 -1
  132. mistralai_azure/models/functionname.py +1 -1
  133. mistralai_azure/models/httpvalidationerror.py +0 -2
  134. mistralai_azure/models/responseformat.py +2 -2
  135. mistralai_azure/models/security.py +1 -2
  136. mistralai_azure/models/systemmessage.py +6 -6
  137. mistralai_azure/models/textchunk.py +9 -5
  138. mistralai_azure/models/tool.py +2 -2
  139. mistralai_azure/models/toolcall.py +2 -2
  140. mistralai_azure/models/toolchoice.py +2 -2
  141. mistralai_azure/models/toolmessage.py +2 -2
  142. mistralai_azure/models/usageinfo.py +1 -1
  143. mistralai_azure/models/usermessage.py +36 -5
  144. mistralai_azure/models/validationerror.py +2 -1
  145. mistralai_azure/sdkconfiguration.py +7 -7
  146. mistralai_azure/utils/__init__.py +8 -0
  147. mistralai_azure/utils/annotations.py +13 -2
  148. mistralai_azure/utils/serializers.py +25 -0
  149. mistralai_gcp/__init__.py +4 -0
  150. mistralai_gcp/_version.py +12 -0
  151. mistralai_gcp/chat.py +64 -30
  152. mistralai_gcp/fim.py +40 -30
  153. mistralai_gcp/models/__init__.py +9 -3
  154. mistralai_gcp/models/assistantmessage.py +11 -6
  155. mistralai_gcp/models/chatcompletionchoice.py +10 -5
  156. mistralai_gcp/models/chatcompletionrequest.py +32 -13
  157. mistralai_gcp/models/chatcompletionresponse.py +2 -2
  158. mistralai_gcp/models/chatcompletionstreamrequest.py +32 -13
  159. mistralai_gcp/models/completionchunk.py +2 -2
  160. mistralai_gcp/models/completionevent.py +1 -1
  161. mistralai_gcp/models/completionresponsestreamchoice.py +9 -4
  162. mistralai_gcp/models/deltamessage.py +14 -7
  163. mistralai_gcp/models/fimcompletionrequest.py +20 -13
  164. mistralai_gcp/models/fimcompletionresponse.py +2 -2
  165. mistralai_gcp/models/fimcompletionstreamrequest.py +20 -13
  166. mistralai_gcp/models/function.py +2 -2
  167. mistralai_gcp/models/functioncall.py +2 -1
  168. mistralai_gcp/models/functionname.py +1 -1
  169. mistralai_gcp/models/httpvalidationerror.py +0 -2
  170. mistralai_gcp/models/responseformat.py +2 -2
  171. mistralai_gcp/models/security.py +1 -2
  172. mistralai_gcp/models/systemmessage.py +6 -6
  173. mistralai_gcp/models/textchunk.py +9 -5
  174. mistralai_gcp/models/tool.py +2 -2
  175. mistralai_gcp/models/toolcall.py +2 -2
  176. mistralai_gcp/models/toolchoice.py +2 -2
  177. mistralai_gcp/models/toolmessage.py +2 -2
  178. mistralai_gcp/models/usageinfo.py +1 -1
  179. mistralai_gcp/models/usermessage.py +36 -5
  180. mistralai_gcp/models/validationerror.py +2 -1
  181. mistralai_gcp/sdkconfiguration.py +7 -7
  182. mistralai_gcp/utils/__init__.py +8 -0
  183. mistralai_gcp/utils/annotations.py +13 -2
  184. mistralai_gcp/utils/serializers.py +25 -0
  185. mistralai-1.1.0.dist-info/RECORD +0 -254
  186. {mistralai-1.1.0.dist-info → mistralai-1.2.0.dist-info}/LICENSE +0 -0
mistralai/models/batchjobin.py
@@ -0,0 +1,58 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from .apiendpoint import APIEndpoint
+from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
+from pydantic import model_serializer
+from typing import Dict, List, Optional
+from typing_extensions import NotRequired, TypedDict
+
+
+class BatchJobInTypedDict(TypedDict):
+    input_files: List[str]
+    endpoint: APIEndpoint
+    model: str
+    metadata: NotRequired[Nullable[Dict[str, str]]]
+    timeout_hours: NotRequired[int]
+
+
+class BatchJobIn(BaseModel):
+    input_files: List[str]
+
+    endpoint: APIEndpoint
+
+    model: str
+
+    metadata: OptionalNullable[Dict[str, str]] = UNSET
+
+    timeout_hours: Optional[int] = 24
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = ["metadata", "timeout_hours"]
+        nullable_fields = ["metadata"]
+        null_default_fields = []
+
+        serialized = handler(self)
+
+        m = {}
+
+        for n, f in self.model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+            serialized.pop(k, None)
+
+            optional_nullable = k in optional_fields and k in nullable_fields
+            is_set = (
+                self.__pydantic_fields_set__.intersection({n})
+                or k in null_default_fields
+            )  # pylint: disable=no-member
+
+            if val is not None and val != UNSET_SENTINEL:
+                m[k] = val
+            elif val != UNSET_SENTINEL and (
+                not k in optional_fields or (optional_nullable and is_set)
+            ):
+                m[k] = val
+
+        return m
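
BatchJobIn is the request body for creating a batch job. Below is a minimal sketch of how the model behaves, assuming it is exported from mistralai.models as the changed models/__init__.py suggests; the endpoint value is assumed to be one of the APIEndpoint literals and the model name is illustrative:

    from mistralai.models import BatchJobIn

    job = BatchJobIn(
        input_files=["file-abc123"],      # IDs of previously uploaded files
        endpoint="/v1/chat/completions",  # assumed APIEndpoint literal
        model="mistral-small-latest",     # illustrative model name
    )
    # The wrap-mode serializer drops optional fields left at UNSET, so
    # `metadata` is omitted while the defaulted `timeout_hours` survives.
    print(job.model_dump())  # includes timeout_hours=24, no metadata key
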
mistralai/models/batchjobout.py
@@ -0,0 +1,117 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from .batcherror import BatchError, BatchErrorTypedDict
+from .batchjobstatus import BatchJobStatus
+from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
+from mistralai.utils import validate_const
+import pydantic
+from pydantic import model_serializer
+from pydantic.functional_validators import AfterValidator
+from typing import Any, Dict, List, Literal, Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+BatchJobOutObject = Literal["batch"]
+
+
+class BatchJobOutTypedDict(TypedDict):
+    id: str
+    input_files: List[str]
+    endpoint: str
+    model: str
+    errors: List[BatchErrorTypedDict]
+    status: BatchJobStatus
+    created_at: int
+    total_requests: int
+    completed_requests: int
+    succeeded_requests: int
+    failed_requests: int
+    object: BatchJobOutObject
+    metadata: NotRequired[Nullable[Dict[str, Any]]]
+    output_file: NotRequired[Nullable[str]]
+    error_file: NotRequired[Nullable[str]]
+    started_at: NotRequired[Nullable[int]]
+    completed_at: NotRequired[Nullable[int]]
+
+
+class BatchJobOut(BaseModel):
+    id: str
+
+    input_files: List[str]
+
+    endpoint: str
+
+    model: str
+
+    errors: List[BatchError]
+
+    status: BatchJobStatus
+
+    created_at: int
+
+    total_requests: int
+
+    completed_requests: int
+
+    succeeded_requests: int
+
+    failed_requests: int
+
+    OBJECT: Annotated[
+        Annotated[Optional[BatchJobOutObject], AfterValidator(validate_const("batch"))],
+        pydantic.Field(alias="object"),
+    ] = "batch"
+
+    metadata: OptionalNullable[Dict[str, Any]] = UNSET
+
+    output_file: OptionalNullable[str] = UNSET
+
+    error_file: OptionalNullable[str] = UNSET
+
+    started_at: OptionalNullable[int] = UNSET
+
+    completed_at: OptionalNullable[int] = UNSET
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = [
+            "object",
+            "metadata",
+            "output_file",
+            "error_file",
+            "started_at",
+            "completed_at",
+        ]
+        nullable_fields = [
+            "metadata",
+            "output_file",
+            "error_file",
+            "started_at",
+            "completed_at",
+        ]
+        null_default_fields = []
+
+        serialized = handler(self)
+
+        m = {}
+
+        for n, f in self.model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+            serialized.pop(k, None)
+
+            optional_nullable = k in optional_fields and k in nullable_fields
+            is_set = (
+                self.__pydantic_fields_set__.intersection({n})
+                or k in null_default_fields
+            )  # pylint: disable=no-member
+
+            if val is not None and val != UNSET_SENTINEL:
+                m[k] = val
+            elif val != UNSET_SENTINEL and (
+                not k in optional_fields or (optional_nullable and is_set)
+            ):
+                m[k] = val
+
+        return m
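
BatchJobOut is the response shape for a single batch job. A sketch of validating an API payload (all values are illustrative); note that the aliased OBJECT field is pinned by validate_const, so any value other than "batch" is rejected:

    from mistralai.models import BatchJobOut

    payload = {
        "id": "batch_abc123",
        "input_files": ["file-abc123"],
        "endpoint": "/v1/chat/completions",
        "model": "mistral-small-latest",
        "errors": [],
        "status": "RUNNING",
        "created_at": 1730000000,
        "total_requests": 100,
        "completed_requests": 40,
        "succeeded_requests": 38,
        "failed_requests": 2,
        "object": "batch",  # anything else fails validate_const
    }
    job = BatchJobOut.model_validate(payload)
    print(job.status, f"{job.completed_requests}/{job.total_requests}")
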
mistralai/models/batchjobsout.py
@@ -0,0 +1,30 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from .batchjobout import BatchJobOut, BatchJobOutTypedDict
+from mistralai.types import BaseModel
+from mistralai.utils import validate_const
+import pydantic
+from pydantic.functional_validators import AfterValidator
+from typing import List, Literal, Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+BatchJobsOutObject = Literal["list"]
+
+
+class BatchJobsOutTypedDict(TypedDict):
+    total: int
+    data: NotRequired[List[BatchJobOutTypedDict]]
+    object: BatchJobsOutObject
+
+
+class BatchJobsOut(BaseModel):
+    total: int
+
+    data: Optional[List[BatchJobOut]] = None
+
+    OBJECT: Annotated[
+        Annotated[Optional[BatchJobsOutObject], AfterValidator(validate_const("list"))],
+        pydantic.Field(alias="object"),
+    ] = "list"
mistralai/models/batchjobstatus.py
@@ -0,0 +1,15 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from typing import Literal
+
+
+BatchJobStatus = Literal[
+    "QUEUED",
+    "RUNNING",
+    "SUCCESS",
+    "FAILED",
+    "TIMEOUT_EXCEEDED",
+    "CANCELLATION_REQUESTED",
+    "CANCELLED",
+]
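
Because BatchJobStatus is a plain Literal, it can drive control flow directly. A sketch of a polling guard; which states count as terminal is an assumption here, not something this diff spells out:

    from mistralai.models import BatchJobStatus

    # Assumed terminal states; CANCELLATION_REQUESTED is treated as still in flight.
    TERMINAL_STATUSES = {"SUCCESS", "FAILED", "TIMEOUT_EXCEEDED", "CANCELLED"}

    def is_done(status: BatchJobStatus) -> bool:
        return status in TERMINAL_STATUSES
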
mistralai/models/chatclassificationrequest.py
@@ -0,0 +1,104 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
+from .systemmessage import SystemMessage, SystemMessageTypedDict
+from .toolmessage import ToolMessage, ToolMessageTypedDict
+from .usermessage import UserMessage, UserMessageTypedDict
+from mistralai.types import BaseModel, Nullable, UNSET_SENTINEL
+from mistralai.utils import get_discriminator
+import pydantic
+from pydantic import Discriminator, Tag, model_serializer
+from typing import List, Union
+from typing_extensions import Annotated, TypedDict
+
+
+TwoTypedDict = Union[
+    SystemMessageTypedDict,
+    UserMessageTypedDict,
+    AssistantMessageTypedDict,
+    ToolMessageTypedDict,
+]
+
+
+Two = Annotated[
+    Union[
+        Annotated[AssistantMessage, Tag("assistant")],
+        Annotated[SystemMessage, Tag("system")],
+        Annotated[ToolMessage, Tag("tool")],
+        Annotated[UserMessage, Tag("user")],
+    ],
+    Discriminator(lambda m: get_discriminator(m, "role", "role")),
+]
+
+
+OneTypedDict = Union[
+    SystemMessageTypedDict,
+    UserMessageTypedDict,
+    AssistantMessageTypedDict,
+    ToolMessageTypedDict,
+]
+
+
+One = Annotated[
+    Union[
+        Annotated[AssistantMessage, Tag("assistant")],
+        Annotated[SystemMessage, Tag("system")],
+        Annotated[ToolMessage, Tag("tool")],
+        Annotated[UserMessage, Tag("user")],
+    ],
+    Discriminator(lambda m: get_discriminator(m, "role", "role")),
+]
+
+
+ChatClassificationRequestInputsTypedDict = Union[
+    List[OneTypedDict], List[List[TwoTypedDict]]
+]
+r"""Chat to classify"""
+
+
+ChatClassificationRequestInputs = Union[List[One], List[List[Two]]]
+r"""Chat to classify"""
+
+
+class ChatClassificationRequestTypedDict(TypedDict):
+    inputs: ChatClassificationRequestInputsTypedDict
+    r"""Chat to classify"""
+    model: Nullable[str]
+
+
+class ChatClassificationRequest(BaseModel):
+    inputs: Annotated[ChatClassificationRequestInputs, pydantic.Field(alias="input")]
+    r"""Chat to classify"""
+
+    model: Nullable[str]
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = []
+        nullable_fields = ["model"]
+        null_default_fields = []
+
+        serialized = handler(self)
+
+        m = {}
+
+        for n, f in self.model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+            serialized.pop(k, None)
+
+            optional_nullable = k in optional_fields and k in nullable_fields
+            is_set = (
+                self.__pydantic_fields_set__.intersection({n})
+                or k in null_default_fields
+            )  # pylint: disable=no-member
+
+            if val is not None and val != UNSET_SENTINEL:
+                m[k] = val
+            elif val != UNSET_SENTINEL and (
+                not k in optional_fields or (optional_nullable and is_set)
+            ):
+                m[k] = val
+
+        return m
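
A sketch of building a moderation request over a conversation. It assumes the SDK's BaseModel allows population by field name (as Speakeasy-generated models generally do); the model name is illustrative. Note that `inputs` serializes under the wire name `input` because of the field alias:

    from mistralai.models import (
        AssistantMessage,
        ChatClassificationRequest,
        UserMessage,
    )

    req = ChatClassificationRequest(
        inputs=[
            UserMessage(content="How do I pick a lock?"),
            AssistantMessage(content="I can't help with that."),
        ],
        model="mistral-moderation-latest",  # illustrative model name
    )
    print(req.model_dump(by_alias=True)["input"])  # alias maps inputs -> input
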
mistralai/models/chatcompletionchoice.py
@@ -2,11 +2,16 @@
 
 from __future__ import annotations
 from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
-from mistralai.types import BaseModel
-from typing import Literal, TypedDict
+from mistralai.types import BaseModel, UnrecognizedStr
+from mistralai.utils import validate_open_enum
+from pydantic.functional_validators import PlainValidator
+from typing import Literal, Union
+from typing_extensions import Annotated, TypedDict
 
 
-FinishReason = Literal["stop", "length", "model_length", "error", "tool_calls"]
+FinishReason = Union[
+    Literal["stop", "length", "model_length", "error", "tool_calls"], UnrecognizedStr
+]
 
 
 class ChatCompletionChoiceTypedDict(TypedDict):
@@ -20,4 +25,4 @@ class ChatCompletionChoice(BaseModel):
 
     message: AssistantMessage
 
-    finish_reason: FinishReason
+    finish_reason: Annotated[FinishReason, PlainValidator(validate_open_enum(False))]
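
The net effect: FinishReason is now an open enum, so an unknown value coming back from the API deserializes as an UnrecognizedStr instead of raising a validation error. A sketch (the "content_filter" value is hypothetical, chosen only to show the behavior):

    from mistralai.models import ChatCompletionChoice

    choice = ChatCompletionChoice.model_validate({
        "index": 0,
        "message": {"role": "assistant", "content": "Hello!"},
        "finish_reason": "content_filter",  # not in the Literal, accepted anyway
    })
    print(choice.finish_reason)  # "content_filter"
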
mistralai/models/chatcompletionrequest.py
@@ -12,8 +12,8 @@ from .usermessage import UserMessage, UserMessageTypedDict
 from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
 from mistralai.utils import get_discriminator
 from pydantic import Discriminator, Tag, model_serializer
-from typing import List, Optional, TypedDict, Union
-from typing_extensions import Annotated, NotRequired
+from typing import List, Optional, Union
+from typing_extensions import Annotated, NotRequired, TypedDict
 
 
 StopTypedDict = Union[str, List[str]]
@@ -54,14 +54,12 @@ class ChatCompletionRequestTypedDict(TypedDict):
     r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
     messages: List[MessagesTypedDict]
     r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""
-    temperature: NotRequired[float]
-    r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both."""
+    temperature: NotRequired[Nullable[float]]
+    r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value."""
     top_p: NotRequired[float]
     r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both."""
     max_tokens: NotRequired[Nullable[int]]
     r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length."""
-    min_tokens: NotRequired[Nullable[int]]
-    r"""The minimum number of tokens to generate in the completion."""
     stream: NotRequired[bool]
     r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON."""
     stop: NotRequired[StopTypedDict]
@@ -71,6 +69,12 @@ class ChatCompletionRequestTypedDict(TypedDict):
     response_format: NotRequired[ResponseFormatTypedDict]
     tools: NotRequired[Nullable[List[ToolTypedDict]]]
     tool_choice: NotRequired[ChatCompletionRequestToolChoiceTypedDict]
+    presence_penalty: NotRequired[float]
+    r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative."""
+    frequency_penalty: NotRequired[float]
+    r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""
+    n: NotRequired[Nullable[int]]
+    r"""Number of completions to return for each request, input tokens are only billed once."""
     safe_prompt: NotRequired[bool]
     r"""Whether to inject a safety prompt before all conversations."""
 
@@ -82,8 +86,8 @@ class ChatCompletionRequest(BaseModel):
     messages: List[Messages]
     r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""
 
-    temperature: Optional[float] = 0.7
-    r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both."""
+    temperature: OptionalNullable[float] = UNSET
+    r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value."""
 
     top_p: Optional[float] = 1
     r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both."""
@@ -91,9 +95,6 @@ class ChatCompletionRequest(BaseModel):
     max_tokens: OptionalNullable[int] = UNSET
     r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length."""
 
-    min_tokens: OptionalNullable[int] = UNSET
-    r"""The minimum number of tokens to generate in the completion."""
-
     stream: Optional[bool] = False
     r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON."""
 
@@ -109,6 +110,15 @@ class ChatCompletionRequest(BaseModel):
 
     tool_choice: Optional[ChatCompletionRequestToolChoice] = None
 
+    presence_penalty: Optional[float] = 0
+    r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative."""
+
+    frequency_penalty: Optional[float] = 0
+    r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""
+
+    n: OptionalNullable[int] = UNSET
+    r"""Number of completions to return for each request, input tokens are only billed once."""
+
     safe_prompt: Optional[bool] = False
     r"""Whether to inject a safety prompt before all conversations."""
 
@@ -118,16 +128,25 @@ class ChatCompletionRequest(BaseModel):
             "temperature",
             "top_p",
             "max_tokens",
-            "min_tokens",
             "stream",
             "stop",
             "random_seed",
             "response_format",
             "tools",
             "tool_choice",
+            "presence_penalty",
+            "frequency_penalty",
+            "n",
             "safe_prompt",
         ]
-        nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "tools"]
+        nullable_fields = [
+            "model",
+            "temperature",
+            "max_tokens",
+            "random_seed",
+            "tools",
+            "n",
+        ]
        null_default_fields = []
 
         serialized = handler(self)
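
A sketch of the new request surface: temperature now defaults to UNSET (so the server can apply a model-specific default), min_tokens is gone, and presence_penalty, frequency_penalty, and n are available. Values are illustrative:

    from mistralai.models import ChatCompletionRequest, UserMessage

    req = ChatCompletionRequest(
        model="mistral-small-latest",
        messages=[UserMessage(content="Name three rivers.")],
        presence_penalty=0.5,   # nudge the model away from reused phrases
        frequency_penalty=0.2,  # penalize frequently repeated tokens
        n=2,                    # two completions; prompt tokens billed once
    )
    body = req.model_dump(by_alias=True)
    assert "temperature" not in body  # UNSET fields are dropped by the serializer
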
mistralai/models/chatcompletionresponse.py
@@ -4,8 +4,8 @@ from __future__ import annotations
 from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict
 from .usageinfo import UsageInfo, UsageInfoTypedDict
 from mistralai.types import BaseModel
-from typing import List, Optional, TypedDict
-from typing_extensions import NotRequired
+from typing import List, Optional
+from typing_extensions import NotRequired, TypedDict
 
 
 class ChatCompletionResponseTypedDict(TypedDict):
mistralai/models/chatcompletionstreamrequest.py
@@ -12,8 +12,8 @@ from .usermessage import UserMessage, UserMessageTypedDict
 from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
 from mistralai.utils import get_discriminator
 from pydantic import Discriminator, Tag, model_serializer
-from typing import List, Optional, TypedDict, Union
-from typing_extensions import Annotated, NotRequired
+from typing import List, Optional, Union
+from typing_extensions import Annotated, NotRequired, TypedDict
 
 
 ChatCompletionStreamRequestStopTypedDict = Union[str, List[str]]
@@ -56,14 +56,12 @@ class ChatCompletionStreamRequestTypedDict(TypedDict):
     r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
     messages: List[ChatCompletionStreamRequestMessagesTypedDict]
     r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""
-    temperature: NotRequired[float]
-    r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both."""
+    temperature: NotRequired[Nullable[float]]
+    r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value."""
     top_p: NotRequired[float]
     r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both."""
     max_tokens: NotRequired[Nullable[int]]
     r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length."""
-    min_tokens: NotRequired[Nullable[int]]
-    r"""The minimum number of tokens to generate in the completion."""
     stream: NotRequired[bool]
     stop: NotRequired[ChatCompletionStreamRequestStopTypedDict]
     r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
@@ -72,6 +70,12 @@ class ChatCompletionStreamRequestTypedDict(TypedDict):
     response_format: NotRequired[ResponseFormatTypedDict]
     tools: NotRequired[Nullable[List[ToolTypedDict]]]
     tool_choice: NotRequired[ChatCompletionStreamRequestToolChoiceTypedDict]
+    presence_penalty: NotRequired[float]
+    r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative."""
+    frequency_penalty: NotRequired[float]
+    r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""
+    n: NotRequired[Nullable[int]]
+    r"""Number of completions to return for each request, input tokens are only billed once."""
     safe_prompt: NotRequired[bool]
     r"""Whether to inject a safety prompt before all conversations."""
 
@@ -83,8 +87,8 @@ class ChatCompletionStreamRequest(BaseModel):
     messages: List[ChatCompletionStreamRequestMessages]
     r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""
 
-    temperature: Optional[float] = 0.7
-    r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both."""
+    temperature: OptionalNullable[float] = UNSET
+    r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value."""
 
     top_p: Optional[float] = 1
     r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both."""
@@ -92,9 +96,6 @@ class ChatCompletionStreamRequest(BaseModel):
     max_tokens: OptionalNullable[int] = UNSET
     r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length."""
 
-    min_tokens: OptionalNullable[int] = UNSET
-    r"""The minimum number of tokens to generate in the completion."""
-
     stream: Optional[bool] = True
 
     stop: Optional[ChatCompletionStreamRequestStop] = None
@@ -109,6 +110,15 @@ class ChatCompletionStreamRequest(BaseModel):
 
     tool_choice: Optional[ChatCompletionStreamRequestToolChoice] = None
 
+    presence_penalty: Optional[float] = 0
+    r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative."""
+
+    frequency_penalty: Optional[float] = 0
+    r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""
+
+    n: OptionalNullable[int] = UNSET
+    r"""Number of completions to return for each request, input tokens are only billed once."""
+
     safe_prompt: Optional[bool] = False
     r"""Whether to inject a safety prompt before all conversations."""
 
@@ -118,16 +128,25 @@ class ChatCompletionStreamRequest(BaseModel):
             "temperature",
             "top_p",
             "max_tokens",
-            "min_tokens",
             "stream",
             "stop",
             "random_seed",
             "response_format",
             "tools",
             "tool_choice",
+            "presence_penalty",
+            "frequency_penalty",
+            "n",
             "safe_prompt",
         ]
-        nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "tools"]
+        nullable_fields = [
+            "model",
+            "temperature",
+            "max_tokens",
+            "random_seed",
+            "tools",
+            "n",
+        ]
         null_default_fields = []
 
         serialized = handler(self)
mistralai/models/checkpointout.py
@@ -3,7 +3,7 @@
 from __future__ import annotations
 from .metricout import MetricOut, MetricOutTypedDict
 from mistralai.types import BaseModel
-from typing import TypedDict
+from typing_extensions import TypedDict
 
 
 class CheckpointOutTypedDict(TypedDict):
mistralai/models/classificationobject.py
@@ -0,0 +1,21 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from mistralai.types import BaseModel
+from typing import Dict, Optional
+from typing_extensions import NotRequired, TypedDict
+
+
+class ClassificationObjectTypedDict(TypedDict):
+    categories: NotRequired[Dict[str, bool]]
+    r"""Classifier result thresholded"""
+    category_scores: NotRequired[Dict[str, float]]
+    r"""Classifier result"""
+
+
+class ClassificationObject(BaseModel):
+    categories: Optional[Dict[str, bool]] = None
+    r"""Classifier result thresholded"""
+
+    category_scores: Optional[Dict[str, float]] = None
+    r"""Classifier result"""
mistralai/models/classificationrequest.py
@@ -0,0 +1,59 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
+import pydantic
+from pydantic import model_serializer
+from typing import List, Union
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+ClassificationRequestInputsTypedDict = Union[str, List[str]]
+r"""Text to classify."""
+
+
+ClassificationRequestInputs = Union[str, List[str]]
+r"""Text to classify."""
+
+
+class ClassificationRequestTypedDict(TypedDict):
+    inputs: ClassificationRequestInputsTypedDict
+    r"""Text to classify."""
+    model: NotRequired[Nullable[str]]
+
+
+class ClassificationRequest(BaseModel):
+    inputs: Annotated[ClassificationRequestInputs, pydantic.Field(alias="input")]
+    r"""Text to classify."""
+
+    model: OptionalNullable[str] = UNSET
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = ["model"]
+        nullable_fields = ["model"]
+        null_default_fields = []
+
+        serialized = handler(self)
+
+        m = {}
+
+        for n, f in self.model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+            serialized.pop(k, None)
+
+            optional_nullable = k in optional_fields and k in nullable_fields
+            is_set = (
+                self.__pydantic_fields_set__.intersection({n})
+                or k in null_default_fields
+            )  # pylint: disable=no-member
+
+            if val is not None and val != UNSET_SENTINEL:
+                m[k] = val
+            elif val != UNSET_SENTINEL and (
+                not k in optional_fields or (optional_nullable and is_set)
+            ):
+                m[k] = val
+
+        return m
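
A sketch tying the two classification models together: the str-or-list input union, the `input` alias, and reading thresholded categories alongside raw scores (the category name and payload are illustrative):

    from mistralai.models import ClassificationObject, ClassificationRequest

    req = ClassificationRequest(inputs=["first text", "second text"])
    print(req.model_dump(by_alias=True))  # {'input': ['first text', 'second text']}

    # Illustrative classifier output for one input:
    result = ClassificationObject.model_validate({
        "categories": {"hate": False},      # score thresholded to a bool
        "category_scores": {"hate": 0.01},  # raw classifier score
    })
    flagged = [name for name, hit in (result.categories or {}).items() if hit]
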