mistralai-1.9.10-py3-none-any.whl → mistralai-1.10.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86)
  1. mistralai/_hooks/registration.py +5 -0
  2. mistralai/_hooks/tracing.py +50 -0
  3. mistralai/_version.py +3 -3
  4. mistralai/accesses.py +51 -116
  5. mistralai/agents.py +58 -85
  6. mistralai/audio.py +8 -3
  7. mistralai/basesdk.py +15 -5
  8. mistralai/batch.py +6 -3
  9. mistralai/beta.py +10 -5
  10. mistralai/chat.py +70 -97
  11. mistralai/classifiers.py +57 -144
  12. mistralai/conversations.py +435 -412
  13. mistralai/documents.py +156 -359
  14. mistralai/embeddings.py +21 -42
  15. mistralai/extra/observability/__init__.py +15 -0
  16. mistralai/extra/observability/otel.py +393 -0
  17. mistralai/extra/run/tools.py +28 -16
  18. mistralai/files.py +53 -176
  19. mistralai/fim.py +46 -73
  20. mistralai/fine_tuning.py +6 -3
  21. mistralai/jobs.py +49 -158
  22. mistralai/libraries.py +71 -178
  23. mistralai/mistral_agents.py +298 -179
  24. mistralai/mistral_jobs.py +51 -138
  25. mistralai/models/__init__.py +94 -5
  26. mistralai/models/agent.py +15 -2
  27. mistralai/models/agentconversation.py +11 -3
  28. mistralai/models/agentcreationrequest.py +6 -2
  29. mistralai/models/agents_api_v1_agents_deleteop.py +16 -0
  30. mistralai/models/agents_api_v1_agents_getop.py +40 -3
  31. mistralai/models/agents_api_v1_agents_listop.py +72 -2
  32. mistralai/models/agents_api_v1_conversations_deleteop.py +18 -0
  33. mistralai/models/agents_api_v1_conversations_listop.py +39 -2
  34. mistralai/models/agentscompletionrequest.py +21 -6
  35. mistralai/models/agentscompletionstreamrequest.py +21 -6
  36. mistralai/models/agentupdaterequest.py +18 -2
  37. mistralai/models/audiotranscriptionrequest.py +2 -0
  38. mistralai/models/batchjobin.py +10 -0
  39. mistralai/models/chatcompletionrequest.py +22 -5
  40. mistralai/models/chatcompletionstreamrequest.py +22 -5
  41. mistralai/models/conversationrequest.py +15 -4
  42. mistralai/models/conversationrestartrequest.py +50 -2
  43. mistralai/models/conversationrestartstreamrequest.py +50 -2
  44. mistralai/models/conversationstreamrequest.py +15 -4
  45. mistralai/models/documentout.py +26 -10
  46. mistralai/models/documentupdatein.py +24 -3
  47. mistralai/models/embeddingrequest.py +8 -8
  48. mistralai/models/files_api_routes_list_filesop.py +7 -0
  49. mistralai/models/fimcompletionrequest.py +8 -9
  50. mistralai/models/fimcompletionstreamrequest.py +8 -9
  51. mistralai/models/httpvalidationerror.py +11 -6
  52. mistralai/models/libraries_documents_list_v1op.py +15 -2
  53. mistralai/models/libraryout.py +10 -7
  54. mistralai/models/listfilesout.py +35 -4
  55. mistralai/models/mistralerror.py +26 -0
  56. mistralai/models/modelcapabilities.py +13 -4
  57. mistralai/models/modelconversation.py +8 -2
  58. mistralai/models/no_response_error.py +13 -0
  59. mistralai/models/ocrpageobject.py +26 -5
  60. mistralai/models/ocrrequest.py +17 -1
  61. mistralai/models/ocrtableobject.py +31 -0
  62. mistralai/models/prediction.py +4 -0
  63. mistralai/models/requestsource.py +7 -0
  64. mistralai/models/responseformat.py +4 -2
  65. mistralai/models/responseformats.py +0 -1
  66. mistralai/models/responsevalidationerror.py +25 -0
  67. mistralai/models/sdkerror.py +30 -14
  68. mistralai/models/sharingdelete.py +36 -5
  69. mistralai/models/sharingin.py +36 -5
  70. mistralai/models/sharingout.py +3 -3
  71. mistralai/models/toolexecutiondeltaevent.py +13 -4
  72. mistralai/models/toolexecutiondoneevent.py +13 -4
  73. mistralai/models/toolexecutionentry.py +9 -4
  74. mistralai/models/toolexecutionstartedevent.py +13 -4
  75. mistralai/models_.py +67 -212
  76. mistralai/ocr.py +33 -36
  77. mistralai/sdk.py +15 -2
  78. mistralai/transcriptions.py +21 -60
  79. mistralai/utils/__init__.py +18 -5
  80. mistralai/utils/eventstreaming.py +10 -0
  81. mistralai/utils/serializers.py +3 -2
  82. mistralai/utils/unmarshal_json_response.py +24 -0
  83. {mistralai-1.9.10.dist-info → mistralai-1.10.0.dist-info}/METADATA +89 -40
  84. {mistralai-1.9.10.dist-info → mistralai-1.10.0.dist-info}/RECORD +86 -75
  85. {mistralai-1.9.10.dist-info → mistralai-1.10.0.dist-info}/WHEEL +1 -1
  86. {mistralai-1.9.10.dist-info → mistralai-1.10.0.dist-info/licenses}/LICENSE +0 -0
mistralai/models/agent.py CHANGED
@@ -12,7 +12,7 @@ from datetime import datetime
12
12
  from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
13
13
  from mistralai.utils import get_discriminator
14
14
  from pydantic import Discriminator, Tag, model_serializer
15
- from typing import List, Literal, Optional, Union
15
+ from typing import Any, Dict, List, Literal, Optional, Union
16
16
  from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict
17
17
 
18
18
 
@@ -50,8 +50,11 @@ class AgentTypedDict(TypedDict):
50
50
  name: str
51
51
  id: str
52
52
  version: int
53
+ versions: List[int]
53
54
  created_at: datetime
54
55
  updated_at: datetime
56
+ deployment_chat: bool
57
+ source: str
55
58
  instructions: NotRequired[Nullable[str]]
56
59
  r"""Instruction prompt the model will follow during the conversation."""
57
60
  tools: NotRequired[List[AgentToolsTypedDict]]
@@ -60,6 +63,7 @@ class AgentTypedDict(TypedDict):
60
63
  r"""White-listed arguments from the completion API"""
61
64
  description: NotRequired[Nullable[str]]
62
65
  handoffs: NotRequired[Nullable[List[str]]]
66
+ metadata: NotRequired[Nullable[Dict[str, Any]]]
63
67
  object: NotRequired[AgentObject]
64
68
 
65
69
 
@@ -72,10 +76,16 @@ class Agent(BaseModel):
72
76
 
73
77
  version: int
74
78
 
79
+ versions: List[int]
80
+
75
81
  created_at: datetime
76
82
 
77
83
  updated_at: datetime
78
84
 
85
+ deployment_chat: bool
86
+
87
+ source: str
88
+
79
89
  instructions: OptionalNullable[str] = UNSET
80
90
  r"""Instruction prompt the model will follow during the conversation."""
81
91
 
@@ -89,6 +99,8 @@ class Agent(BaseModel):
89
99
 
90
100
  handoffs: OptionalNullable[List[str]] = UNSET
91
101
 
102
+ metadata: OptionalNullable[Dict[str, Any]] = UNSET
103
+
92
104
  object: Optional[AgentObject] = "agent"
93
105
 
94
106
  @model_serializer(mode="wrap")
@@ -99,9 +111,10 @@ class Agent(BaseModel):
99
111
  "completion_args",
100
112
  "description",
101
113
  "handoffs",
114
+ "metadata",
102
115
  "object",
103
116
  ]
104
- nullable_fields = ["instructions", "description", "handoffs"]
117
+ nullable_fields = ["instructions", "description", "handoffs", "metadata"]
105
118
  null_default_fields = []
106
119
 
107
120
  serialized = handler(self)
@@ -4,7 +4,7 @@ from __future__ import annotations
4
4
  from datetime import datetime
5
5
  from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
6
6
  from pydantic import model_serializer
7
- from typing import Literal, Optional
7
+ from typing import Any, Dict, Literal, Optional
8
8
  from typing_extensions import NotRequired, TypedDict
9
9
 
10
10
 
@@ -20,7 +20,10 @@ class AgentConversationTypedDict(TypedDict):
20
20
  r"""Name given to the conversation."""
21
21
  description: NotRequired[Nullable[str]]
22
22
  r"""Description of the what the conversation is about."""
23
+ metadata: NotRequired[Nullable[Dict[str, Any]]]
24
+ r"""Custom metadata for the conversation."""
23
25
  object: NotRequired[AgentConversationObject]
26
+ agent_version: NotRequired[Nullable[int]]
24
27
 
25
28
 
26
29
  class AgentConversation(BaseModel):
@@ -38,12 +41,17 @@ class AgentConversation(BaseModel):
38
41
  description: OptionalNullable[str] = UNSET
39
42
  r"""Description of the what the conversation is about."""
40
43
 
44
+ metadata: OptionalNullable[Dict[str, Any]] = UNSET
45
+ r"""Custom metadata for the conversation."""
46
+
41
47
  object: Optional[AgentConversationObject] = "conversation"
42
48
 
49
+ agent_version: OptionalNullable[int] = UNSET
50
+
43
51
  @model_serializer(mode="wrap")
44
52
  def serialize_model(self, handler):
45
- optional_fields = ["name", "description", "object"]
46
- nullable_fields = ["name", "description"]
53
+ optional_fields = ["name", "description", "metadata", "object", "agent_version"]
54
+ nullable_fields = ["name", "description", "metadata", "agent_version"]
47
55
  null_default_fields = []
48
56
 
49
57
  serialized = handler(self)
@@ -11,7 +11,7 @@ from .websearchtool import WebSearchTool, WebSearchToolTypedDict
11
11
  from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
12
12
  from mistralai.utils import get_discriminator
13
13
  from pydantic import Discriminator, Tag, model_serializer
14
- from typing import List, Optional, Union
14
+ from typing import Any, Dict, List, Optional, Union
15
15
  from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict
16
16
 
17
17
 
@@ -52,6 +52,7 @@ class AgentCreationRequestTypedDict(TypedDict):
52
52
  r"""White-listed arguments from the completion API"""
53
53
  description: NotRequired[Nullable[str]]
54
54
  handoffs: NotRequired[Nullable[List[str]]]
55
+ metadata: NotRequired[Nullable[Dict[str, Any]]]
55
56
 
56
57
 
57
58
  class AgentCreationRequest(BaseModel):
@@ -72,6 +73,8 @@ class AgentCreationRequest(BaseModel):
72
73
 
73
74
  handoffs: OptionalNullable[List[str]] = UNSET
74
75
 
76
+ metadata: OptionalNullable[Dict[str, Any]] = UNSET
77
+
75
78
  @model_serializer(mode="wrap")
76
79
  def serialize_model(self, handler):
77
80
  optional_fields = [
@@ -80,8 +83,9 @@ class AgentCreationRequest(BaseModel):
80
83
  "completion_args",
81
84
  "description",
82
85
  "handoffs",
86
+ "metadata",
83
87
  ]
84
- nullable_fields = ["instructions", "description", "handoffs"]
88
+ nullable_fields = ["instructions", "description", "handoffs", "metadata"]
85
89
  null_default_fields = []
86
90
 
87
91
  serialized = handler(self)
@@ -0,0 +1,16 @@
1
+ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
2
+
3
+ from __future__ import annotations
4
+ from mistralai.types import BaseModel
5
+ from mistralai.utils import FieldMetadata, PathParamMetadata
6
+ from typing_extensions import Annotated, TypedDict
7
+
8
+
9
+ class AgentsAPIV1AgentsDeleteRequestTypedDict(TypedDict):
10
+ agent_id: str
11
+
12
+
13
+ class AgentsAPIV1AgentsDeleteRequest(BaseModel):
14
+ agent_id: Annotated[
15
+ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
16
+ ]
@@ -1,16 +1,53 @@
1
1
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
2
2
 
3
3
  from __future__ import annotations
4
- from mistralai.types import BaseModel
5
- from mistralai.utils import FieldMetadata, PathParamMetadata
6
- from typing_extensions import Annotated, TypedDict
4
+ from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
5
+ from mistralai.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata
6
+ from pydantic import model_serializer
7
+ from typing_extensions import Annotated, NotRequired, TypedDict
7
8
 
8
9
 
9
10
  class AgentsAPIV1AgentsGetRequestTypedDict(TypedDict):
10
11
  agent_id: str
12
+ agent_version: NotRequired[Nullable[int]]
11
13
 
12
14
 
13
15
  class AgentsAPIV1AgentsGetRequest(BaseModel):
14
16
  agent_id: Annotated[
15
17
  str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
16
18
  ]
19
+
20
+ agent_version: Annotated[
21
+ OptionalNullable[int],
22
+ FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
23
+ ] = UNSET
24
+
25
+ @model_serializer(mode="wrap")
26
+ def serialize_model(self, handler):
27
+ optional_fields = ["agent_version"]
28
+ nullable_fields = ["agent_version"]
29
+ null_default_fields = []
30
+
31
+ serialized = handler(self)
32
+
33
+ m = {}
34
+
35
+ for n, f in type(self).model_fields.items():
36
+ k = f.alias or n
37
+ val = serialized.get(k)
38
+ serialized.pop(k, None)
39
+
40
+ optional_nullable = k in optional_fields and k in nullable_fields
41
+ is_set = (
42
+ self.__pydantic_fields_set__.intersection({n})
43
+ or k in null_default_fields
44
+ ) # pylint: disable=no-member
45
+
46
+ if val is not None and val != UNSET_SENTINEL:
47
+ m[k] = val
48
+ elif val != UNSET_SENTINEL and (
49
+ not k in optional_fields or (optional_nullable and is_set)
50
+ ):
51
+ m[k] = val
52
+
53
+ return m
@@ -1,15 +1,22 @@
1
1
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
2
2
 
3
3
  from __future__ import annotations
4
- from mistralai.types import BaseModel
4
+ from .requestsource import RequestSource
5
+ from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
5
6
  from mistralai.utils import FieldMetadata, QueryParamMetadata
6
- from typing import Optional
7
+ from pydantic import model_serializer
8
+ from typing import Any, Dict, List, Optional
7
9
  from typing_extensions import Annotated, NotRequired, TypedDict
8
10
 
9
11
 
10
12
  class AgentsAPIV1AgentsListRequestTypedDict(TypedDict):
11
13
  page: NotRequired[int]
12
14
  page_size: NotRequired[int]
15
+ deployment_chat: NotRequired[Nullable[bool]]
16
+ sources: NotRequired[Nullable[List[RequestSource]]]
17
+ name: NotRequired[Nullable[str]]
18
+ id: NotRequired[Nullable[str]]
19
+ metadata: NotRequired[Nullable[Dict[str, Any]]]
13
20
 
14
21
 
15
22
  class AgentsAPIV1AgentsListRequest(BaseModel):
@@ -22,3 +29,66 @@ class AgentsAPIV1AgentsListRequest(BaseModel):
22
29
  Optional[int],
23
30
  FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
24
31
  ] = 20
32
+
33
+ deployment_chat: Annotated[
34
+ OptionalNullable[bool],
35
+ FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
36
+ ] = UNSET
37
+
38
+ sources: Annotated[
39
+ OptionalNullable[List[RequestSource]],
40
+ FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
41
+ ] = UNSET
42
+
43
+ name: Annotated[
44
+ OptionalNullable[str],
45
+ FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
46
+ ] = UNSET
47
+
48
+ id: Annotated[
49
+ OptionalNullable[str],
50
+ FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
51
+ ] = UNSET
52
+
53
+ metadata: Annotated[
54
+ OptionalNullable[Dict[str, Any]],
55
+ FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
56
+ ] = UNSET
57
+
58
+ @model_serializer(mode="wrap")
59
+ def serialize_model(self, handler):
60
+ optional_fields = [
61
+ "page",
62
+ "page_size",
63
+ "deployment_chat",
64
+ "sources",
65
+ "name",
66
+ "id",
67
+ "metadata",
68
+ ]
69
+ nullable_fields = ["deployment_chat", "sources", "name", "id", "metadata"]
70
+ null_default_fields = []
71
+
72
+ serialized = handler(self)
73
+
74
+ m = {}
75
+
76
+ for n, f in type(self).model_fields.items():
77
+ k = f.alias or n
78
+ val = serialized.get(k)
79
+ serialized.pop(k, None)
80
+
81
+ optional_nullable = k in optional_fields and k in nullable_fields
82
+ is_set = (
83
+ self.__pydantic_fields_set__.intersection({n})
84
+ or k in null_default_fields
85
+ ) # pylint: disable=no-member
86
+
87
+ if val is not None and val != UNSET_SENTINEL:
88
+ m[k] = val
89
+ elif val != UNSET_SENTINEL and (
90
+ not k in optional_fields or (optional_nullable and is_set)
91
+ ):
92
+ m[k] = val
93
+
94
+ return m
@@ -0,0 +1,18 @@
1
+ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
2
+
3
+ from __future__ import annotations
4
+ from mistralai.types import BaseModel
5
+ from mistralai.utils import FieldMetadata, PathParamMetadata
6
+ from typing_extensions import Annotated, TypedDict
7
+
8
+
9
+ class AgentsAPIV1ConversationsDeleteRequestTypedDict(TypedDict):
10
+ conversation_id: str
11
+ r"""ID of the conversation from which we are fetching metadata."""
12
+
13
+
14
+ class AgentsAPIV1ConversationsDeleteRequest(BaseModel):
15
+ conversation_id: Annotated[
16
+ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
17
+ ]
18
+ r"""ID of the conversation from which we are fetching metadata."""
@@ -3,15 +3,17 @@
3
3
  from __future__ import annotations
4
4
  from .agentconversation import AgentConversation, AgentConversationTypedDict
5
5
  from .modelconversation import ModelConversation, ModelConversationTypedDict
6
- from mistralai.types import BaseModel
6
+ from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
7
7
  from mistralai.utils import FieldMetadata, QueryParamMetadata
8
- from typing import Optional, Union
8
+ from pydantic import model_serializer
9
+ from typing import Any, Dict, Optional, Union
9
10
  from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict
10
11
 
11
12
 
12
13
  class AgentsAPIV1ConversationsListRequestTypedDict(TypedDict):
13
14
  page: NotRequired[int]
14
15
  page_size: NotRequired[int]
16
+ metadata: NotRequired[Nullable[Dict[str, Any]]]
15
17
 
16
18
 
17
19
  class AgentsAPIV1ConversationsListRequest(BaseModel):
@@ -25,6 +27,41 @@ class AgentsAPIV1ConversationsListRequest(BaseModel):
25
27
  FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
26
28
  ] = 100
27
29
 
30
+ metadata: Annotated[
31
+ OptionalNullable[Dict[str, Any]],
32
+ FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
33
+ ] = UNSET
34
+
35
+ @model_serializer(mode="wrap")
36
+ def serialize_model(self, handler):
37
+ optional_fields = ["page", "page_size", "metadata"]
38
+ nullable_fields = ["metadata"]
39
+ null_default_fields = []
40
+
41
+ serialized = handler(self)
42
+
43
+ m = {}
44
+
45
+ for n, f in type(self).model_fields.items():
46
+ k = f.alias or n
47
+ val = serialized.get(k)
48
+ serialized.pop(k, None)
49
+
50
+ optional_nullable = k in optional_fields and k in nullable_fields
51
+ is_set = (
52
+ self.__pydantic_fields_set__.intersection({n})
53
+ or k in null_default_fields
54
+ ) # pylint: disable=no-member
55
+
56
+ if val is not None and val != UNSET_SENTINEL:
57
+ m[k] = val
58
+ elif val != UNSET_SENTINEL and (
59
+ not k in optional_fields or (optional_nullable and is_set)
60
+ ):
61
+ m[k] = val
62
+
63
+ return m
64
+
28
65
 
29
66
  ResponseBodyTypedDict = TypeAliasType(
30
67
  "ResponseBodyTypedDict",
@@ -15,7 +15,7 @@ from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_
15
15
  from mistralai.utils import get_discriminator, validate_open_enum
16
16
  from pydantic import Discriminator, Tag, model_serializer
17
17
  from pydantic.functional_validators import PlainValidator
18
- from typing import List, Optional, Union
18
+ from typing import Any, Dict, List, Optional, Union
19
19
  from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict
20
20
 
21
21
 
@@ -77,16 +77,19 @@ class AgentsCompletionRequestTypedDict(TypedDict):
77
77
  r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
78
78
  random_seed: NotRequired[Nullable[int]]
79
79
  r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
80
+ metadata: NotRequired[Nullable[Dict[str, Any]]]
80
81
  response_format: NotRequired[ResponseFormatTypedDict]
82
+ r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide."""
81
83
  tools: NotRequired[Nullable[List[ToolTypedDict]]]
82
84
  tool_choice: NotRequired[AgentsCompletionRequestToolChoiceTypedDict]
83
85
  presence_penalty: NotRequired[float]
84
- r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative."""
86
+ r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative."""
85
87
  frequency_penalty: NotRequired[float]
86
- r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""
88
+ r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""
87
89
  n: NotRequired[Nullable[int]]
88
90
  r"""Number of completions to return for each request, input tokens are only billed once."""
89
91
  prediction: NotRequired[PredictionTypedDict]
92
+ r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content."""
90
93
  parallel_tool_calls: NotRequired[bool]
91
94
  prompt_mode: NotRequired[Nullable[MistralPromptMode]]
92
95
  r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""
@@ -111,22 +114,26 @@ class AgentsCompletionRequest(BaseModel):
111
114
  random_seed: OptionalNullable[int] = UNSET
112
115
  r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
113
116
 
117
+ metadata: OptionalNullable[Dict[str, Any]] = UNSET
118
+
114
119
  response_format: Optional[ResponseFormat] = None
120
+ r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide."""
115
121
 
116
122
  tools: OptionalNullable[List[Tool]] = UNSET
117
123
 
118
124
  tool_choice: Optional[AgentsCompletionRequestToolChoice] = None
119
125
 
120
126
  presence_penalty: Optional[float] = None
121
- r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative."""
127
+ r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative."""
122
128
 
123
129
  frequency_penalty: Optional[float] = None
124
- r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""
130
+ r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""
125
131
 
126
132
  n: OptionalNullable[int] = UNSET
127
133
  r"""Number of completions to return for each request, input tokens are only billed once."""
128
134
 
129
135
  prediction: Optional[Prediction] = None
136
+ r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content."""
130
137
 
131
138
  parallel_tool_calls: Optional[bool] = None
132
139
 
@@ -142,6 +149,7 @@ class AgentsCompletionRequest(BaseModel):
142
149
  "stream",
143
150
  "stop",
144
151
  "random_seed",
152
+ "metadata",
145
153
  "response_format",
146
154
  "tools",
147
155
  "tool_choice",
@@ -152,7 +160,14 @@ class AgentsCompletionRequest(BaseModel):
152
160
  "parallel_tool_calls",
153
161
  "prompt_mode",
154
162
  ]
155
- nullable_fields = ["max_tokens", "random_seed", "tools", "n", "prompt_mode"]
163
+ nullable_fields = [
164
+ "max_tokens",
165
+ "random_seed",
166
+ "metadata",
167
+ "tools",
168
+ "n",
169
+ "prompt_mode",
170
+ ]
156
171
  null_default_fields = []
157
172
 
158
173
  serialized = handler(self)
@@ -15,7 +15,7 @@ from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_
15
15
  from mistralai.utils import get_discriminator, validate_open_enum
16
16
  from pydantic import Discriminator, Tag, model_serializer
17
17
  from pydantic.functional_validators import PlainValidator
18
- from typing import List, Optional, Union
18
+ from typing import Any, Dict, List, Optional, Union
19
19
  from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict
20
20
 
21
21
 
@@ -76,16 +76,19 @@ class AgentsCompletionStreamRequestTypedDict(TypedDict):
76
76
  r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
77
77
  random_seed: NotRequired[Nullable[int]]
78
78
  r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
79
+ metadata: NotRequired[Nullable[Dict[str, Any]]]
79
80
  response_format: NotRequired[ResponseFormatTypedDict]
81
+ r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide."""
80
82
  tools: NotRequired[Nullable[List[ToolTypedDict]]]
81
83
  tool_choice: NotRequired[AgentsCompletionStreamRequestToolChoiceTypedDict]
82
84
  presence_penalty: NotRequired[float]
83
- r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative."""
85
+ r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative."""
84
86
  frequency_penalty: NotRequired[float]
85
- r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""
87
+ r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""
86
88
  n: NotRequired[Nullable[int]]
87
89
  r"""Number of completions to return for each request, input tokens are only billed once."""
88
90
  prediction: NotRequired[PredictionTypedDict]
91
+ r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content."""
89
92
  parallel_tool_calls: NotRequired[bool]
90
93
  prompt_mode: NotRequired[Nullable[MistralPromptMode]]
91
94
  r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""
@@ -109,22 +112,26 @@ class AgentsCompletionStreamRequest(BaseModel):
109
112
  random_seed: OptionalNullable[int] = UNSET
110
113
  r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
111
114
 
115
+ metadata: OptionalNullable[Dict[str, Any]] = UNSET
116
+
112
117
  response_format: Optional[ResponseFormat] = None
118
+ r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide."""
113
119
 
114
120
  tools: OptionalNullable[List[Tool]] = UNSET
115
121
 
116
122
  tool_choice: Optional[AgentsCompletionStreamRequestToolChoice] = None
117
123
 
118
124
  presence_penalty: Optional[float] = None
119
- r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative."""
125
+ r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative."""
120
126
 
121
127
  frequency_penalty: Optional[float] = None
122
- r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""
128
+ r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""
123
129
 
124
130
  n: OptionalNullable[int] = UNSET
125
131
  r"""Number of completions to return for each request, input tokens are only billed once."""
126
132
 
127
133
  prediction: Optional[Prediction] = None
134
+ r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content."""
128
135
 
129
136
  parallel_tool_calls: Optional[bool] = None
130
137
 
@@ -140,6 +147,7 @@ class AgentsCompletionStreamRequest(BaseModel):
140
147
  "stream",
141
148
  "stop",
142
149
  "random_seed",
150
+ "metadata",
143
151
  "response_format",
144
152
  "tools",
145
153
  "tool_choice",
@@ -150,7 +158,14 @@ class AgentsCompletionStreamRequest(BaseModel):
150
158
  "parallel_tool_calls",
151
159
  "prompt_mode",
152
160
  ]
153
- nullable_fields = ["max_tokens", "random_seed", "tools", "n", "prompt_mode"]
161
+ nullable_fields = [
162
+ "max_tokens",
163
+ "random_seed",
164
+ "metadata",
165
+ "tools",
166
+ "n",
167
+ "prompt_mode",
168
+ ]
154
169
  null_default_fields = []
155
170
 
156
171
  serialized = handler(self)
@@ -11,7 +11,7 @@ from .websearchtool import WebSearchTool, WebSearchToolTypedDict
11
11
  from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
12
12
  from mistralai.utils import get_discriminator
13
13
  from pydantic import Discriminator, Tag, model_serializer
14
- from typing import List, Optional, Union
14
+ from typing import Any, Dict, List, Optional, Union
15
15
  from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict
16
16
 
17
17
 
@@ -52,6 +52,8 @@ class AgentUpdateRequestTypedDict(TypedDict):
52
52
  name: NotRequired[Nullable[str]]
53
53
  description: NotRequired[Nullable[str]]
54
54
  handoffs: NotRequired[Nullable[List[str]]]
55
+ deployment_chat: NotRequired[Nullable[bool]]
56
+ metadata: NotRequired[Nullable[Dict[str, Any]]]
55
57
 
56
58
 
57
59
  class AgentUpdateRequest(BaseModel):
@@ -72,6 +74,10 @@ class AgentUpdateRequest(BaseModel):
72
74
 
73
75
  handoffs: OptionalNullable[List[str]] = UNSET
74
76
 
77
+ deployment_chat: OptionalNullable[bool] = UNSET
78
+
79
+ metadata: OptionalNullable[Dict[str, Any]] = UNSET
80
+
75
81
  @model_serializer(mode="wrap")
76
82
  def serialize_model(self, handler):
77
83
  optional_fields = [
@@ -82,8 +88,18 @@ class AgentUpdateRequest(BaseModel):
82
88
  "name",
83
89
  "description",
84
90
  "handoffs",
91
+ "deployment_chat",
92
+ "metadata",
93
+ ]
94
+ nullable_fields = [
95
+ "instructions",
96
+ "model",
97
+ "name",
98
+ "description",
99
+ "handoffs",
100
+ "deployment_chat",
101
+ "metadata",
85
102
  ]
86
- nullable_fields = ["instructions", "model", "name", "description", "handoffs"]
87
103
  null_default_fields = []
88
104
 
89
105
  serialized = handler(self)
@@ -14,6 +14,7 @@ from typing_extensions import Annotated, NotRequired, TypedDict
14
14
 
15
15
  class AudioTranscriptionRequestTypedDict(TypedDict):
16
16
  model: str
17
+ r"""ID of the model to be used."""
17
18
  file: NotRequired[FileTypedDict]
18
19
  file_url: NotRequired[Nullable[str]]
19
20
  r"""Url of a file to be transcribed"""
@@ -29,6 +30,7 @@ class AudioTranscriptionRequestTypedDict(TypedDict):
29
30
 
30
31
  class AudioTranscriptionRequest(BaseModel):
31
32
  model: Annotated[str, FieldMetadata(multipart=True)]
33
+ r"""ID of the model to be used."""
32
34
 
33
35
  file: Annotated[
34
36
  Optional[File], FieldMetadata(multipart=MultipartFormMetadata(file=True))
@@ -12,25 +12,35 @@ from typing_extensions import Annotated, NotRequired, TypedDict
12
12
 
13
13
  class BatchJobInTypedDict(TypedDict):
14
14
  input_files: List[str]
15
+ r"""The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the body request for the batch inference in a \"body\" field. An example of such file is the following: ```json {\"custom_id\": \"0\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French cheese?\"}]}} {\"custom_id\": \"1\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French wine?\"}]}} ```"""
15
16
  endpoint: APIEndpoint
16
17
  model: NotRequired[Nullable[str]]
18
+ r"""The model to be used for batch inference."""
17
19
  agent_id: NotRequired[Nullable[str]]
20
+ r"""In case you want to use a specific agent from the **deprecated** agents api for batch inference, you can specify the agent ID here."""
18
21
  metadata: NotRequired[Nullable[Dict[str, str]]]
22
+ r"""The metadata of your choice to be associated with the batch inference job."""
19
23
  timeout_hours: NotRequired[int]
24
+ r"""The timeout in hours for the batch inference job."""
20
25
 
21
26
 
22
27
  class BatchJobIn(BaseModel):
23
28
  input_files: List[str]
29
+ r"""The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the body request for the batch inference in a \"body\" field. An example of such file is the following: ```json {\"custom_id\": \"0\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French cheese?\"}]}} {\"custom_id\": \"1\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French wine?\"}]}} ```"""
24
30
 
25
31
  endpoint: Annotated[APIEndpoint, PlainValidator(validate_open_enum(False))]
26
32
 
27
33
  model: OptionalNullable[str] = UNSET
34
+ r"""The model to be used for batch inference."""
28
35
 
29
36
  agent_id: OptionalNullable[str] = UNSET
37
+ r"""In case you want to use a specific agent from the **deprecated** agents api for batch inference, you can specify the agent ID here."""
30
38
 
31
39
  metadata: OptionalNullable[Dict[str, str]] = UNSET
40
+ r"""The metadata of your choice to be associated with the batch inference job."""
32
41
 
33
42
  timeout_hours: Optional[int] = 24
43
+ r"""The timeout in hours for the batch inference job."""
34
44
 
35
45
  @model_serializer(mode="wrap")
36
46
  def serialize_model(self, handler):