mistralai 1.7.1__py3-none-any.whl → 1.8.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86)
  1. mistralai/_version.py +2 -2
  2. mistralai/beta.py +20 -0
  3. mistralai/conversations.py +2657 -0
  4. mistralai/extra/__init__.py +10 -2
  5. mistralai/extra/exceptions.py +14 -0
  6. mistralai/extra/mcp/__init__.py +0 -0
  7. mistralai/extra/mcp/auth.py +166 -0
  8. mistralai/extra/mcp/base.py +155 -0
  9. mistralai/extra/mcp/sse.py +165 -0
  10. mistralai/extra/mcp/stdio.py +22 -0
  11. mistralai/extra/run/__init__.py +0 -0
  12. mistralai/extra/run/context.py +295 -0
  13. mistralai/extra/run/result.py +212 -0
  14. mistralai/extra/run/tools.py +225 -0
  15. mistralai/extra/run/utils.py +36 -0
  16. mistralai/extra/tests/test_struct_chat.py +1 -1
  17. mistralai/mistral_agents.py +1158 -0
  18. mistralai/models/__init__.py +470 -1
  19. mistralai/models/agent.py +129 -0
  20. mistralai/models/agentconversation.py +71 -0
  21. mistralai/models/agentcreationrequest.py +109 -0
  22. mistralai/models/agenthandoffdoneevent.py +33 -0
  23. mistralai/models/agenthandoffentry.py +75 -0
  24. mistralai/models/agenthandoffstartedevent.py +33 -0
  25. mistralai/models/agents_api_v1_agents_getop.py +16 -0
  26. mistralai/models/agents_api_v1_agents_listop.py +24 -0
  27. mistralai/models/agents_api_v1_agents_update_versionop.py +21 -0
  28. mistralai/models/agents_api_v1_agents_updateop.py +23 -0
  29. mistralai/models/agents_api_v1_conversations_append_streamop.py +28 -0
  30. mistralai/models/agents_api_v1_conversations_appendop.py +28 -0
  31. mistralai/models/agents_api_v1_conversations_getop.py +33 -0
  32. mistralai/models/agents_api_v1_conversations_historyop.py +16 -0
  33. mistralai/models/agents_api_v1_conversations_listop.py +37 -0
  34. mistralai/models/agents_api_v1_conversations_messagesop.py +16 -0
  35. mistralai/models/agents_api_v1_conversations_restart_streamop.py +26 -0
  36. mistralai/models/agents_api_v1_conversations_restartop.py +26 -0
  37. mistralai/models/agentupdaterequest.py +111 -0
  38. mistralai/models/builtinconnectors.py +13 -0
  39. mistralai/models/codeinterpretertool.py +17 -0
  40. mistralai/models/completionargs.py +100 -0
  41. mistralai/models/completionargsstop.py +13 -0
  42. mistralai/models/completionjobout.py +3 -3
  43. mistralai/models/conversationappendrequest.py +35 -0
  44. mistralai/models/conversationappendstreamrequest.py +37 -0
  45. mistralai/models/conversationevents.py +72 -0
  46. mistralai/models/conversationhistory.py +58 -0
  47. mistralai/models/conversationinputs.py +14 -0
  48. mistralai/models/conversationmessages.py +28 -0
  49. mistralai/models/conversationrequest.py +133 -0
  50. mistralai/models/conversationresponse.py +51 -0
  51. mistralai/models/conversationrestartrequest.py +42 -0
  52. mistralai/models/conversationrestartstreamrequest.py +44 -0
  53. mistralai/models/conversationstreamrequest.py +135 -0
  54. mistralai/models/conversationusageinfo.py +63 -0
  55. mistralai/models/documentlibrarytool.py +22 -0
  56. mistralai/models/functioncallentry.py +76 -0
  57. mistralai/models/functioncallentryarguments.py +15 -0
  58. mistralai/models/functioncallevent.py +36 -0
  59. mistralai/models/functionresultentry.py +69 -0
  60. mistralai/models/functiontool.py +21 -0
  61. mistralai/models/imagegenerationtool.py +17 -0
  62. mistralai/models/inputentries.py +18 -0
  63. mistralai/models/messageentries.py +18 -0
  64. mistralai/models/messageinputcontentchunks.py +26 -0
  65. mistralai/models/messageinputentry.py +89 -0
  66. mistralai/models/messageoutputcontentchunks.py +30 -0
  67. mistralai/models/messageoutputentry.py +100 -0
  68. mistralai/models/messageoutputevent.py +93 -0
  69. mistralai/models/modelconversation.py +127 -0
  70. mistralai/models/outputcontentchunks.py +30 -0
  71. mistralai/models/responsedoneevent.py +25 -0
  72. mistralai/models/responseerrorevent.py +27 -0
  73. mistralai/models/responsestartedevent.py +24 -0
  74. mistralai/models/ssetypes.py +18 -0
  75. mistralai/models/toolexecutiondoneevent.py +34 -0
  76. mistralai/models/toolexecutionentry.py +70 -0
  77. mistralai/models/toolexecutionstartedevent.py +31 -0
  78. mistralai/models/toolfilechunk.py +61 -0
  79. mistralai/models/toolreferencechunk.py +61 -0
  80. mistralai/models/websearchpremiumtool.py +17 -0
  81. mistralai/models/websearchtool.py +17 -0
  82. mistralai/sdk.py +3 -0
  83. {mistralai-1.7.1.dist-info → mistralai-1.8.0.dist-info}/METADATA +42 -7
  84. {mistralai-1.7.1.dist-info → mistralai-1.8.0.dist-info}/RECORD +86 -10
  85. {mistralai-1.7.1.dist-info → mistralai-1.8.0.dist-info}/LICENSE +0 -0
  86. {mistralai-1.7.1.dist-info → mistralai-1.8.0.dist-info}/WHEEL +0 -0
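Taken together, the new modules add a beta Agents & Conversations surface to the SDK: agent CRUD (mistral_agents.py and the agent*.py models), conversation start/append/restart with streamed events (conversations.py and the conversation*.py models), built-in connector tools, and MCP client helpers under mistralai/extra/mcp. A rough usage sketch follows; the client attributes and method names are assumptions inferred from the new beta.py, mistral_agents.py and conversations.py modules, not something this diff shows directly — only the model schemas appear verbatim in the hunks below.

# Hypothetical sketch of the new beta Agents/Conversations surface in 1.8.0.
# Client attribute and method names here are assumptions, not taken from this diff.
import os

from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

# Create an agent that can use a built-in connector (see builtinconnectors.py below).
agent = client.beta.agents.create(
    model="mistral-medium-latest",  # assumed model name, not taken from this diff
    name="release-notes-helper",
    instructions="Answer questions about Python package releases.",
    tools=[{"type": "web_search"}],
)

# Start a conversation, then append a follow-up turn to it.
started = client.beta.conversations.start(
    agent_id=agent.id,
    inputs="What is new in mistralai 1.8.0?",
)
followup = client.beta.conversations.append(
    conversation_id=started.conversation_id,
    inputs="Condense that into one sentence.",
)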
mistralai/models/agents_api_v1_conversations_listop.py
@@ -0,0 +1,37 @@
+ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+ from __future__ import annotations
+ from .agentconversation import AgentConversation, AgentConversationTypedDict
+ from .modelconversation import ModelConversation, ModelConversationTypedDict
+ from mistralai.types import BaseModel
+ from mistralai.utils import FieldMetadata, QueryParamMetadata
+ from typing import Optional, Union
+ from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict
+
+
+ class AgentsAPIV1ConversationsListRequestTypedDict(TypedDict):
+     page: NotRequired[int]
+     page_size: NotRequired[int]
+
+
+ class AgentsAPIV1ConversationsListRequest(BaseModel):
+     page: Annotated[
+         Optional[int],
+         FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
+     ] = 0
+
+     page_size: Annotated[
+         Optional[int],
+         FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
+     ] = 100
+
+
+ ResponseBodyTypedDict = TypeAliasType(
+     "ResponseBodyTypedDict",
+     Union[AgentConversationTypedDict, ModelConversationTypedDict],
+ )
+
+
+ ResponseBody = TypeAliasType(
+     "ResponseBody", Union[AgentConversation, ModelConversation]
+ )
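As a small illustration, paginating the conversation list builds this request model; a minimal sketch using only the classes above (the endpoint wrapper that consumes it lives in conversations.py and is not part of this hunk):

# Sketch: building the list-conversations request model defined above.
from mistralai.models.agents_api_v1_conversations_listop import (
    AgentsAPIV1ConversationsListRequest,
)

req = AgentsAPIV1ConversationsListRequest(page=1, page_size=50)
# Omitted fields fall back to the generated defaults (page=0, page_size=100);
# both are sent as form-style, exploded query parameters per the FieldMetadata above.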
mistralai/models/agents_api_v1_conversations_messagesop.py
@@ -0,0 +1,16 @@
+ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+ from __future__ import annotations
+ from mistralai.types import BaseModel
+ from mistralai.utils import FieldMetadata, PathParamMetadata
+ from typing_extensions import Annotated, TypedDict
+
+
+ class AgentsAPIV1ConversationsMessagesRequestTypedDict(TypedDict):
+     conversation_id: str
+
+
+ class AgentsAPIV1ConversationsMessagesRequest(BaseModel):
+     conversation_id: Annotated[
+         str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
+     ]
mistralai/models/agents_api_v1_conversations_restart_streamop.py
@@ -0,0 +1,26 @@
+ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+ from __future__ import annotations
+ from .conversationrestartstreamrequest import (
+     ConversationRestartStreamRequest,
+     ConversationRestartStreamRequestTypedDict,
+ )
+ from mistralai.types import BaseModel
+ from mistralai.utils import FieldMetadata, PathParamMetadata, RequestMetadata
+ from typing_extensions import Annotated, TypedDict
+
+
+ class AgentsAPIV1ConversationsRestartStreamRequestTypedDict(TypedDict):
+     conversation_id: str
+     conversation_restart_stream_request: ConversationRestartStreamRequestTypedDict
+
+
+ class AgentsAPIV1ConversationsRestartStreamRequest(BaseModel):
+     conversation_id: Annotated[
+         str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
+     ]
+
+     conversation_restart_stream_request: Annotated[
+         ConversationRestartStreamRequest,
+         FieldMetadata(request=RequestMetadata(media_type="application/json")),
+     ]
mistralai/models/agents_api_v1_conversations_restartop.py
@@ -0,0 +1,26 @@
+ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+ from __future__ import annotations
+ from .conversationrestartrequest import (
+     ConversationRestartRequest,
+     ConversationRestartRequestTypedDict,
+ )
+ from mistralai.types import BaseModel
+ from mistralai.utils import FieldMetadata, PathParamMetadata, RequestMetadata
+ from typing_extensions import Annotated, TypedDict
+
+
+ class AgentsAPIV1ConversationsRestartRequestTypedDict(TypedDict):
+     conversation_id: str
+     conversation_restart_request: ConversationRestartRequestTypedDict
+
+
+ class AgentsAPIV1ConversationsRestartRequest(BaseModel):
+     conversation_id: Annotated[
+         str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
+     ]
+
+     conversation_restart_request: Annotated[
+         ConversationRestartRequest,
+         FieldMetadata(request=RequestMetadata(media_type="application/json")),
+     ]
mistralai/models/agentupdaterequest.py
@@ -0,0 +1,111 @@
+ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+ from __future__ import annotations
+ from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict
+ from .completionargs import CompletionArgs, CompletionArgsTypedDict
+ from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict
+ from .functiontool import FunctionTool, FunctionToolTypedDict
+ from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict
+ from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict
+ from .websearchtool import WebSearchTool, WebSearchToolTypedDict
+ from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
+ from mistralai.utils import get_discriminator
+ from pydantic import Discriminator, Tag, model_serializer
+ from typing import List, Optional, Union
+ from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict
+
+
+ AgentUpdateRequestToolsTypedDict = TypeAliasType(
+     "AgentUpdateRequestToolsTypedDict",
+     Union[
+         WebSearchToolTypedDict,
+         WebSearchPremiumToolTypedDict,
+         CodeInterpreterToolTypedDict,
+         ImageGenerationToolTypedDict,
+         FunctionToolTypedDict,
+         DocumentLibraryToolTypedDict,
+     ],
+ )
+
+
+ AgentUpdateRequestTools = Annotated[
+     Union[
+         Annotated[CodeInterpreterTool, Tag("code_interpreter")],
+         Annotated[DocumentLibraryTool, Tag("document_library")],
+         Annotated[FunctionTool, Tag("function")],
+         Annotated[ImageGenerationTool, Tag("image_generation")],
+         Annotated[WebSearchTool, Tag("web_search")],
+         Annotated[WebSearchPremiumTool, Tag("web_search_premium")],
+     ],
+     Discriminator(lambda m: get_discriminator(m, "type", "type")),
+ ]
+
+
+ class AgentUpdateRequestTypedDict(TypedDict):
+     instructions: NotRequired[Nullable[str]]
+     r"""Instruction prompt the model will follow during the conversation."""
+     tools: NotRequired[List[AgentUpdateRequestToolsTypedDict]]
+     r"""List of tools which are available to the model during the conversation."""
+     completion_args: NotRequired[CompletionArgsTypedDict]
+     r"""White-listed arguments from the completion API"""
+     model: NotRequired[Nullable[str]]
+     name: NotRequired[Nullable[str]]
+     description: NotRequired[Nullable[str]]
+     handoffs: NotRequired[Nullable[List[str]]]
+
+
+ class AgentUpdateRequest(BaseModel):
+     instructions: OptionalNullable[str] = UNSET
+     r"""Instruction prompt the model will follow during the conversation."""
+
+     tools: Optional[List[AgentUpdateRequestTools]] = None
+     r"""List of tools which are available to the model during the conversation."""
+
+     completion_args: Optional[CompletionArgs] = None
+     r"""White-listed arguments from the completion API"""
+
+     model: OptionalNullable[str] = UNSET
+
+     name: OptionalNullable[str] = UNSET
+
+     description: OptionalNullable[str] = UNSET
+
+     handoffs: OptionalNullable[List[str]] = UNSET
+
+     @model_serializer(mode="wrap")
+     def serialize_model(self, handler):
+         optional_fields = [
+             "instructions",
+             "tools",
+             "completion_args",
+             "model",
+             "name",
+             "description",
+             "handoffs",
+         ]
+         nullable_fields = ["instructions", "model", "name", "description", "handoffs"]
+         null_default_fields = []
+
+         serialized = handler(self)
+
+         m = {}
+
+         for n, f in self.model_fields.items():
+             k = f.alias or n
+             val = serialized.get(k)
+             serialized.pop(k, None)
+
+             optional_nullable = k in optional_fields and k in nullable_fields
+             is_set = (
+                 self.__pydantic_fields_set__.intersection({n})
+                 or k in null_default_fields
+             )  # pylint: disable=no-member
+
+             if val is not None and val != UNSET_SENTINEL:
+                 m[k] = val
+             elif val != UNSET_SENTINEL and (
+                 not k in optional_fields or (optional_nullable and is_set)
+             ):
+                 m[k] = val
+
+         return m
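The UNSET/None split above decides what a partial update actually sends: fields left at UNSET are dropped, while nullable fields explicitly set to None are kept as null. A rough sketch (assuming WebSearchTool defaults its type tag the same way CodeInterpreterTool does further down):

# Sketch: partial agent update; only provided fields reach the payload.
from mistralai.models.agentupdaterequest import AgentUpdateRequest
from mistralai.models.websearchtool import WebSearchTool

update = AgentUpdateRequest(
    instructions="Always answer in French.",
    tools=[WebSearchTool()],  # matched to the "web_search" tag by the Discriminator
    description=None,         # explicitly None -> kept and serialized as null
)

payload = update.model_dump()
# Fields left at UNSET (model, name, handoffs) are dropped by serialize_model,
# so `payload` carries roughly:
# {"instructions": "Always answer in French.",
#  "tools": [{"type": "web_search"}], "description": None}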
mistralai/models/builtinconnectors.py
@@ -0,0 +1,13 @@
+ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+ from __future__ import annotations
+ from typing import Literal
+
+
+ BuiltInConnectors = Literal[
+     "web_search",
+     "web_search_premium",
+     "code_interpreter",
+     "image_generation",
+     "document_library",
+ ]
mistralai/models/codeinterpretertool.py
@@ -0,0 +1,17 @@
+ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+ from __future__ import annotations
+ from mistralai.types import BaseModel
+ from typing import Literal, Optional
+ from typing_extensions import NotRequired, TypedDict
+
+
+ CodeInterpreterToolType = Literal["code_interpreter"]
+
+
+ class CodeInterpreterToolTypedDict(TypedDict):
+     type: NotRequired[CodeInterpreterToolType]
+
+
+ class CodeInterpreterTool(BaseModel):
+     type: Optional[CodeInterpreterToolType] = "code_interpreter"
mistralai/models/completionargs.py
@@ -0,0 +1,100 @@
+ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+ from __future__ import annotations
+ from .completionargsstop import CompletionArgsStop, CompletionArgsStopTypedDict
+ from .prediction import Prediction, PredictionTypedDict
+ from .responseformat import ResponseFormat, ResponseFormatTypedDict
+ from .toolchoiceenum import ToolChoiceEnum
+ from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
+ from pydantic import model_serializer
+ from typing import Optional
+ from typing_extensions import NotRequired, TypedDict
+
+
+ class CompletionArgsTypedDict(TypedDict):
+     r"""White-listed arguments from the completion API"""
+
+     stop: NotRequired[Nullable[CompletionArgsStopTypedDict]]
+     presence_penalty: NotRequired[Nullable[float]]
+     frequency_penalty: NotRequired[Nullable[float]]
+     temperature: NotRequired[float]
+     top_p: NotRequired[Nullable[float]]
+     max_tokens: NotRequired[Nullable[int]]
+     random_seed: NotRequired[Nullable[int]]
+     prediction: NotRequired[Nullable[PredictionTypedDict]]
+     response_format: NotRequired[Nullable[ResponseFormatTypedDict]]
+     tool_choice: NotRequired[ToolChoiceEnum]
+
+
+ class CompletionArgs(BaseModel):
+     r"""White-listed arguments from the completion API"""
+
+     stop: OptionalNullable[CompletionArgsStop] = UNSET
+
+     presence_penalty: OptionalNullable[float] = UNSET
+
+     frequency_penalty: OptionalNullable[float] = UNSET
+
+     temperature: Optional[float] = 0.3
+
+     top_p: OptionalNullable[float] = UNSET
+
+     max_tokens: OptionalNullable[int] = UNSET
+
+     random_seed: OptionalNullable[int] = UNSET
+
+     prediction: OptionalNullable[Prediction] = UNSET
+
+     response_format: OptionalNullable[ResponseFormat] = UNSET
+
+     tool_choice: Optional[ToolChoiceEnum] = None
+
+     @model_serializer(mode="wrap")
+     def serialize_model(self, handler):
+         optional_fields = [
+             "stop",
+             "presence_penalty",
+             "frequency_penalty",
+             "temperature",
+             "top_p",
+             "max_tokens",
+             "random_seed",
+             "prediction",
+             "response_format",
+             "tool_choice",
+         ]
+         nullable_fields = [
+             "stop",
+             "presence_penalty",
+             "frequency_penalty",
+             "top_p",
+             "max_tokens",
+             "random_seed",
+             "prediction",
+             "response_format",
+         ]
+         null_default_fields = []
+
+         serialized = handler(self)
+
+         m = {}
+
+         for n, f in self.model_fields.items():
+             k = f.alias or n
+             val = serialized.get(k)
+             serialized.pop(k, None)
+
+             optional_nullable = k in optional_fields and k in nullable_fields
+             is_set = (
+                 self.__pydantic_fields_set__.intersection({n})
+                 or k in null_default_fields
+             )  # pylint: disable=no-member
+
+             if val is not None and val != UNSET_SENTINEL:
+                 m[k] = val
+             elif val != UNSET_SENTINEL and (
+                 not k in optional_fields or (optional_nullable and is_set)
+             ):
+                 m[k] = val
+
+         return m
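The custom serializer gives this model the same UNSET/None behaviour; a short sketch of what ends up in the payload:

# Sketch: only explicitly provided completion arguments survive serialization.
from mistralai.models.completionargs import CompletionArgs

args = CompletionArgs(temperature=0.7, max_tokens=512, random_seed=None)

payload = args.model_dump()
# UNSET fields (stop, top_p, prediction, response_format, ...) are omitted,
# while the explicit None on random_seed is kept as null:
# {"temperature": 0.7, "max_tokens": 512, "random_seed": None}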
mistralai/models/completionargsstop.py
@@ -0,0 +1,13 @@
+ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+ from __future__ import annotations
+ from typing import List, Union
+ from typing_extensions import TypeAliasType
+
+
+ CompletionArgsStopTypedDict = TypeAliasType(
+     "CompletionArgsStopTypedDict", Union[str, List[str]]
+ )
+
+
+ CompletionArgsStop = TypeAliasType("CompletionArgsStop", Union[str, List[str]])
mistralai/models/completionjobout.py
@@ -28,7 +28,7 @@ Status = Literal[
  ]
  r"""The current status of the fine-tuning job."""

- Object = Literal["job"]
+ CompletionJobOutObject = Literal["job"]
  r"""The object type of the fine-tuning job."""

  IntegrationsTypedDict = WandbIntegrationOutTypedDict
@@ -63,7 +63,7 @@ class CompletionJobOutTypedDict(TypedDict):
      hyperparameters: CompletionTrainingParametersTypedDict
      validation_files: NotRequired[Nullable[List[str]]]
      r"""A list containing the IDs of uploaded files that contain validation data."""
-     object: NotRequired[Object]
+     object: NotRequired[CompletionJobOutObject]
      r"""The object type of the fine-tuning job."""
      fine_tuned_model: NotRequired[Nullable[str]]
      r"""The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running."""
@@ -105,7 +105,7 @@ class CompletionJobOut(BaseModel):
      validation_files: OptionalNullable[List[str]] = UNSET
      r"""A list containing the IDs of uploaded files that contain validation data."""

-     object: Optional[Object] = "job"
+     object: Optional[CompletionJobOutObject] = "job"
      r"""The object type of the fine-tuning job."""

      fine_tuned_model: OptionalNullable[str] = UNSET
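This hunk only renames the `Object` type alias to `CompletionJobOutObject`; code that imported the old name directly from this module needs a one-line update, e.g.:

# Before (1.7.1) this module exported the alias as `Object`:
#     from mistralai.models.completionjobout import Object
# In 1.8.0 the same Literal["job"] alias is imported as:
from mistralai.models.completionjobout import CompletionJobOutObject

job_object: CompletionJobOutObject = "job"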
mistralai/models/conversationappendrequest.py
@@ -0,0 +1,35 @@
+ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+ from __future__ import annotations
+ from .completionargs import CompletionArgs, CompletionArgsTypedDict
+ from .conversationinputs import ConversationInputs, ConversationInputsTypedDict
+ from mistralai.types import BaseModel
+ from typing import Literal, Optional
+ from typing_extensions import NotRequired, TypedDict
+
+
+ ConversationAppendRequestHandoffExecution = Literal["client", "server"]
+
+
+ class ConversationAppendRequestTypedDict(TypedDict):
+     inputs: ConversationInputsTypedDict
+     stream: NotRequired[bool]
+     store: NotRequired[bool]
+     r"""Whether to store the results into our servers or not."""
+     handoff_execution: NotRequired[ConversationAppendRequestHandoffExecution]
+     completion_args: NotRequired[CompletionArgsTypedDict]
+     r"""White-listed arguments from the completion API"""
+
+
+ class ConversationAppendRequest(BaseModel):
+     inputs: ConversationInputs
+
+     stream: Optional[bool] = False
+
+     store: Optional[bool] = True
+     r"""Whether to store the results into our servers or not."""
+
+     handoff_execution: Optional[ConversationAppendRequestHandoffExecution] = "server"
+
+     completion_args: Optional[CompletionArgs] = None
+     r"""White-listed arguments from the completion API"""
mistralai/models/conversationappendstreamrequest.py
@@ -0,0 +1,37 @@
+ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+ from __future__ import annotations
+ from .completionargs import CompletionArgs, CompletionArgsTypedDict
+ from .conversationinputs import ConversationInputs, ConversationInputsTypedDict
+ from mistralai.types import BaseModel
+ from typing import Literal, Optional
+ from typing_extensions import NotRequired, TypedDict
+
+
+ ConversationAppendStreamRequestHandoffExecution = Literal["client", "server"]
+
+
+ class ConversationAppendStreamRequestTypedDict(TypedDict):
+     inputs: ConversationInputsTypedDict
+     stream: NotRequired[bool]
+     store: NotRequired[bool]
+     r"""Whether to store the results into our servers or not."""
+     handoff_execution: NotRequired[ConversationAppendStreamRequestHandoffExecution]
+     completion_args: NotRequired[CompletionArgsTypedDict]
+     r"""White-listed arguments from the completion API"""
+
+
+ class ConversationAppendStreamRequest(BaseModel):
+     inputs: ConversationInputs
+
+     stream: Optional[bool] = True
+
+     store: Optional[bool] = True
+     r"""Whether to store the results into our servers or not."""
+
+     handoff_execution: Optional[ConversationAppendStreamRequestHandoffExecution] = (
+         "server"
+     )
+
+     completion_args: Optional[CompletionArgs] = None
+     r"""White-listed arguments from the completion API"""
mistralai/models/conversationevents.py
@@ -0,0 +1,72 @@
+ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+ from __future__ import annotations
+ from .agenthandoffdoneevent import AgentHandoffDoneEvent, AgentHandoffDoneEventTypedDict
+ from .agenthandoffstartedevent import (
+     AgentHandoffStartedEvent,
+     AgentHandoffStartedEventTypedDict,
+ )
+ from .functioncallevent import FunctionCallEvent, FunctionCallEventTypedDict
+ from .messageoutputevent import MessageOutputEvent, MessageOutputEventTypedDict
+ from .responsedoneevent import ResponseDoneEvent, ResponseDoneEventTypedDict
+ from .responseerrorevent import ResponseErrorEvent, ResponseErrorEventTypedDict
+ from .responsestartedevent import ResponseStartedEvent, ResponseStartedEventTypedDict
+ from .ssetypes import SSETypes
+ from .toolexecutiondoneevent import (
+     ToolExecutionDoneEvent,
+     ToolExecutionDoneEventTypedDict,
+ )
+ from .toolexecutionstartedevent import (
+     ToolExecutionStartedEvent,
+     ToolExecutionStartedEventTypedDict,
+ )
+ from mistralai.types import BaseModel
+ from mistralai.utils import get_discriminator
+ from pydantic import Discriminator, Tag
+ from typing import Union
+ from typing_extensions import Annotated, TypeAliasType, TypedDict
+
+
+ ConversationEventsDataTypedDict = TypeAliasType(
+     "ConversationEventsDataTypedDict",
+     Union[
+         ResponseStartedEventTypedDict,
+         ResponseDoneEventTypedDict,
+         ResponseErrorEventTypedDict,
+         ToolExecutionStartedEventTypedDict,
+         ToolExecutionDoneEventTypedDict,
+         AgentHandoffStartedEventTypedDict,
+         AgentHandoffDoneEventTypedDict,
+         FunctionCallEventTypedDict,
+         MessageOutputEventTypedDict,
+     ],
+ )
+
+
+ ConversationEventsData = Annotated[
+     Union[
+         Annotated[AgentHandoffDoneEvent, Tag("agent.handoff.done")],
+         Annotated[AgentHandoffStartedEvent, Tag("agent.handoff.started")],
+         Annotated[ResponseDoneEvent, Tag("conversation.response.done")],
+         Annotated[ResponseErrorEvent, Tag("conversation.response.error")],
+         Annotated[ResponseStartedEvent, Tag("conversation.response.started")],
+         Annotated[FunctionCallEvent, Tag("function.call.delta")],
+         Annotated[MessageOutputEvent, Tag("message.output.delta")],
+         Annotated[ToolExecutionDoneEvent, Tag("tool.execution.done")],
+         Annotated[ToolExecutionStartedEvent, Tag("tool.execution.started")],
+     ],
+     Discriminator(lambda m: get_discriminator(m, "type", "type")),
+ ]
+
+
+ class ConversationEventsTypedDict(TypedDict):
+     event: SSETypes
+     r"""Server side events sent when streaming a conversation response."""
+     data: ConversationEventsDataTypedDict
+
+
+ class ConversationEvents(BaseModel):
+     event: SSETypes
+     r"""Server side events sent when streaming a conversation response."""
+
+     data: ConversationEventsData
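When consuming a stream, the Discriminator resolves `data` to one concrete event class by its `type` tag, so handlers can simply branch on the parsed type. A sketch (what each event actually carries is an assumption here, since the event models live in other files):

# Sketch: dispatching on the typed `data` union of a streamed conversation event.
from mistralai.models.conversationevents import ConversationEvents
from mistralai.models.messageoutputevent import MessageOutputEvent
from mistralai.models.responsedoneevent import ResponseDoneEvent


def handle(event: ConversationEvents) -> None:
    data = event.data
    if isinstance(data, MessageOutputEvent):
        # incremental assistant output ("message.output.delta")
        ...
    elif isinstance(data, ResponseDoneEvent):
        # the response finished ("conversation.response.done")
        ...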
mistralai/models/conversationhistory.py
@@ -0,0 +1,58 @@
+ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+ from __future__ import annotations
+ from .agenthandoffentry import AgentHandoffEntry, AgentHandoffEntryTypedDict
+ from .functioncallentry import FunctionCallEntry, FunctionCallEntryTypedDict
+ from .functionresultentry import FunctionResultEntry, FunctionResultEntryTypedDict
+ from .messageinputentry import MessageInputEntry, MessageInputEntryTypedDict
+ from .messageoutputentry import MessageOutputEntry, MessageOutputEntryTypedDict
+ from .toolexecutionentry import ToolExecutionEntry, ToolExecutionEntryTypedDict
+ from mistralai.types import BaseModel
+ from typing import List, Literal, Optional, Union
+ from typing_extensions import NotRequired, TypeAliasType, TypedDict
+
+
+ ConversationHistoryObject = Literal["conversation.history"]
+
+ EntriesTypedDict = TypeAliasType(
+     "EntriesTypedDict",
+     Union[
+         MessageInputEntryTypedDict,
+         FunctionResultEntryTypedDict,
+         ToolExecutionEntryTypedDict,
+         FunctionCallEntryTypedDict,
+         MessageOutputEntryTypedDict,
+         AgentHandoffEntryTypedDict,
+     ],
+ )
+
+
+ Entries = TypeAliasType(
+     "Entries",
+     Union[
+         MessageInputEntry,
+         FunctionResultEntry,
+         ToolExecutionEntry,
+         FunctionCallEntry,
+         MessageOutputEntry,
+         AgentHandoffEntry,
+     ],
+ )
+
+
+ class ConversationHistoryTypedDict(TypedDict):
+     r"""Retrieve all entries in a conversation."""
+
+     conversation_id: str
+     entries: List[EntriesTypedDict]
+     object: NotRequired[ConversationHistoryObject]
+
+
+ class ConversationHistory(BaseModel):
+     r"""Retrieve all entries in a conversation."""
+
+     conversation_id: str
+
+     entries: List[Entries]
+
+     object: Optional[ConversationHistoryObject] = "conversation.history"
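For example, reducing a fetched history to just its message entries is a matter of filtering the `Entries` union; a small sketch:

# Sketch: keeping only the message entries from a conversation history.
from typing import List, Union

from mistralai.models.conversationhistory import ConversationHistory
from mistralai.models.messageinputentry import MessageInputEntry
from mistralai.models.messageoutputentry import MessageOutputEntry


def messages_only(
    history: ConversationHistory,
) -> List[Union[MessageInputEntry, MessageOutputEntry]]:
    return [
        entry
        for entry in history.entries
        if isinstance(entry, (MessageInputEntry, MessageOutputEntry))
    ]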
mistralai/models/conversationinputs.py
@@ -0,0 +1,14 @@
+ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+ from __future__ import annotations
+ from .inputentries import InputEntries, InputEntriesTypedDict
+ from typing import List, Union
+ from typing_extensions import TypeAliasType
+
+
+ ConversationInputsTypedDict = TypeAliasType(
+     "ConversationInputsTypedDict", Union[str, List[InputEntriesTypedDict]]
+ )
+
+
+ ConversationInputs = TypeAliasType("ConversationInputs", Union[str, List[InputEntries]])
mistralai/models/conversationmessages.py
@@ -0,0 +1,28 @@
+ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+ from __future__ import annotations
+ from .messageentries import MessageEntries, MessageEntriesTypedDict
+ from mistralai.types import BaseModel
+ from typing import List, Literal, Optional
+ from typing_extensions import NotRequired, TypedDict
+
+
+ ConversationMessagesObject = Literal["conversation.messages"]
+
+
+ class ConversationMessagesTypedDict(TypedDict):
+     r"""Similar to the conversation history but only keep the messages"""
+
+     conversation_id: str
+     messages: List[MessageEntriesTypedDict]
+     object: NotRequired[ConversationMessagesObject]
+
+
+ class ConversationMessages(BaseModel):
+     r"""Similar to the conversation history but only keep the messages"""
+
+     conversation_id: str
+
+     messages: List[MessageEntries]
+
+     object: Optional[ConversationMessagesObject] = "conversation.messages"