mistralai 1.0.0rc1__py3-none-any.whl → 1.0.1__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (114)
  1. mistralai/agents.py +434 -0
  2. mistralai/basesdk.py +43 -6
  3. mistralai/chat.py +29 -34
  4. mistralai/client.py +1 -1
  5. mistralai/embeddings.py +4 -4
  6. mistralai/files.py +10 -10
  7. mistralai/fim.py +17 -18
  8. mistralai/fine_tuning.py +10 -849
  9. mistralai/jobs.py +844 -0
  10. mistralai/models/__init__.py +6 -4
  11. mistralai/models/agentscompletionrequest.py +96 -0
  12. mistralai/models/agentscompletionstreamrequest.py +92 -0
  13. mistralai/models/assistantmessage.py +4 -9
  14. mistralai/models/chatcompletionchoice.py +4 -15
  15. mistralai/models/chatcompletionrequest.py +25 -30
  16. mistralai/models/chatcompletionstreamrequest.py +25 -30
  17. mistralai/models/completionresponsestreamchoice.py +4 -9
  18. mistralai/models/delete_model_v1_models_model_id_deleteop.py +2 -0
  19. mistralai/models/deltamessage.py +7 -12
  20. mistralai/models/detailedjobout.py +4 -9
  21. mistralai/models/embeddingrequest.py +12 -17
  22. mistralai/models/eventout.py +4 -9
  23. mistralai/models/fileschema.py +4 -9
  24. mistralai/models/fimcompletionrequest.py +19 -24
  25. mistralai/models/fimcompletionstreamrequest.py +19 -24
  26. mistralai/models/ftmodelout.py +4 -9
  27. mistralai/models/functioncall.py +9 -3
  28. mistralai/models/githubrepositoryin.py +4 -9
  29. mistralai/models/githubrepositoryout.py +4 -9
  30. mistralai/models/httpvalidationerror.py +1 -1
  31. mistralai/models/jobin.py +4 -9
  32. mistralai/models/jobmetadataout.py +4 -9
  33. mistralai/models/jobout.py +4 -9
  34. mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py +2 -0
  35. mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py +1 -59
  36. mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py +4 -9
  37. mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py +2 -0
  38. mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py +2 -0
  39. mistralai/models/legacyjobmetadataout.py +4 -9
  40. mistralai/models/metricout.py +4 -9
  41. mistralai/models/modelcard.py +4 -9
  42. mistralai/models/retrieve_model_v1_models_model_id_getop.py +2 -0
  43. mistralai/models/retrievefileout.py +4 -9
  44. mistralai/models/security.py +4 -4
  45. mistralai/models/systemmessage.py +6 -6
  46. mistralai/models/toolmessage.py +4 -9
  47. mistralai/models/trainingparameters.py +4 -9
  48. mistralai/models/trainingparametersin.py +4 -9
  49. mistralai/models/updateftmodelin.py +4 -9
  50. mistralai/models/uploadfileout.py +4 -9
  51. mistralai/models/usermessage.py +6 -6
  52. mistralai/models/validationerror.py +6 -6
  53. mistralai/models/wandbintegration.py +4 -9
  54. mistralai/models/wandbintegrationout.py +4 -9
  55. mistralai/models_.py +24 -24
  56. mistralai/sdk.py +14 -6
  57. mistralai/sdkconfiguration.py +5 -4
  58. mistralai/types/basemodel.py +10 -6
  59. mistralai/utils/__init__.py +4 -0
  60. mistralai/utils/eventstreaming.py +8 -9
  61. mistralai/utils/logger.py +16 -0
  62. mistralai/utils/retries.py +2 -2
  63. mistralai/utils/security.py +5 -2
  64. {mistralai-1.0.0rc1.dist-info → mistralai-1.0.1.dist-info}/METADATA +153 -69
  65. {mistralai-1.0.0rc1.dist-info → mistralai-1.0.1.dist-info}/RECORD +114 -107
  66. mistralai_azure/basesdk.py +42 -4
  67. mistralai_azure/chat.py +15 -20
  68. mistralai_azure/models/__init__.py +3 -3
  69. mistralai_azure/models/assistantmessage.py +4 -9
  70. mistralai_azure/models/chatcompletionchoice.py +4 -15
  71. mistralai_azure/models/chatcompletionrequest.py +21 -26
  72. mistralai_azure/models/chatcompletionstreamrequest.py +21 -26
  73. mistralai_azure/models/completionresponsestreamchoice.py +4 -9
  74. mistralai_azure/models/deltamessage.py +7 -12
  75. mistralai_azure/models/functioncall.py +9 -3
  76. mistralai_azure/models/httpvalidationerror.py +1 -1
  77. mistralai_azure/models/systemmessage.py +6 -6
  78. mistralai_azure/models/toolmessage.py +4 -9
  79. mistralai_azure/models/usermessage.py +6 -6
  80. mistralai_azure/models/validationerror.py +6 -6
  81. mistralai_azure/sdk.py +7 -2
  82. mistralai_azure/sdkconfiguration.py +5 -4
  83. mistralai_azure/types/basemodel.py +10 -6
  84. mistralai_azure/utils/__init__.py +4 -0
  85. mistralai_azure/utils/eventstreaming.py +8 -9
  86. mistralai_azure/utils/logger.py +16 -0
  87. mistralai_azure/utils/retries.py +2 -2
  88. mistralai_gcp/basesdk.py +42 -4
  89. mistralai_gcp/chat.py +12 -17
  90. mistralai_gcp/fim.py +12 -13
  91. mistralai_gcp/models/__init__.py +3 -3
  92. mistralai_gcp/models/assistantmessage.py +4 -9
  93. mistralai_gcp/models/chatcompletionchoice.py +4 -15
  94. mistralai_gcp/models/chatcompletionrequest.py +23 -28
  95. mistralai_gcp/models/chatcompletionstreamrequest.py +23 -28
  96. mistralai_gcp/models/completionresponsestreamchoice.py +4 -9
  97. mistralai_gcp/models/deltamessage.py +7 -12
  98. mistralai_gcp/models/fimcompletionrequest.py +19 -24
  99. mistralai_gcp/models/fimcompletionstreamrequest.py +19 -24
  100. mistralai_gcp/models/functioncall.py +9 -3
  101. mistralai_gcp/models/httpvalidationerror.py +1 -1
  102. mistralai_gcp/models/systemmessage.py +6 -6
  103. mistralai_gcp/models/toolmessage.py +4 -9
  104. mistralai_gcp/models/usermessage.py +6 -6
  105. mistralai_gcp/models/validationerror.py +6 -6
  106. mistralai_gcp/sdk.py +9 -0
  107. mistralai_gcp/sdkconfiguration.py +5 -4
  108. mistralai_gcp/types/basemodel.py +10 -6
  109. mistralai_gcp/utils/__init__.py +4 -0
  110. mistralai_gcp/utils/eventstreaming.py +8 -9
  111. mistralai_gcp/utils/logger.py +16 -0
  112. mistralai_gcp/utils/retries.py +2 -2
  113. {mistralai-1.0.0rc1.dist-info → mistralai-1.0.1.dist-info}/LICENSE +0 -0
  114. {mistralai-1.0.0rc1.dist-info → mistralai-1.0.1.dist-info}/WHEEL +0 -0
@@ -14,13 +14,27 @@ from typing import List, Literal, Optional, TypedDict, Union
 from typing_extensions import Annotated, NotRequired
 
 
+ChatCompletionRequestStopTypedDict = Union[str, List[str]]
+r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
+
+
+ChatCompletionRequestStop = Union[str, List[str]]
+r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
+
+
+ChatCompletionRequestMessagesTypedDict = Union[SystemMessageTypedDict, UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict]
+
+
+ChatCompletionRequestMessages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))]
+
+
 ChatCompletionRequestToolChoice = Literal["auto", "none", "any"]
 
 class ChatCompletionRequestTypedDict(TypedDict):
+    model: Nullable[str]
+    r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
     messages: List[ChatCompletionRequestMessagesTypedDict]
     r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""
-    model: NotRequired[Nullable[str]]
-    r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
     temperature: NotRequired[float]
     r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both."""
     top_p: NotRequired[float]
@@ -41,10 +55,10 @@ class ChatCompletionRequestTypedDict(TypedDict):
 
 
 class ChatCompletionRequest(BaseModel):
+    model: Nullable[str]
+    r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
     messages: List[ChatCompletionRequestMessages]
     r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""
-    model: OptionalNullable[str] = UNSET
-    r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
     temperature: Optional[float] = 0.7
     r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both."""
     top_p: Optional[float] = 1
@@ -65,7 +79,7 @@ class ChatCompletionRequest(BaseModel):
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["model", "temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice"]
+        optional_fields = ["temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice"]
         nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "tools"]
         null_default_fields = []
 
@@ -77,34 +91,15 @@ class ChatCompletionRequest(BaseModel):
             k = f.alias or n
             val = serialized.get(k)
 
+            optional_nullable = k in optional_fields and k in nullable_fields
+            is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member
+
             if val is not None and val != UNSET_SENTINEL:
                 m[k] = val
             elif val != UNSET_SENTINEL and (
-                not k in optional_fields
-                or (
-                    k in optional_fields
-                    and k in nullable_fields
-                    and (
-                        self.__pydantic_fields_set__.intersection({n})
-                        or k in null_default_fields
-                    ) # pylint: disable=no-member
-                )
+                not k in optional_fields or (optional_nullable and is_set)
             ):
                 m[k] = val
 
         return m
 
-
-ChatCompletionRequestStopTypedDict = Union[str, List[str]]
-r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
-
-
-ChatCompletionRequestStop = Union[str, List[str]]
-r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
-
-
-ChatCompletionRequestMessagesTypedDict = Union[SystemMessageTypedDict, UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict]
-
-
-ChatCompletionRequestMessages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))]
-
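Note: in both request models above, `model` moves from an optional keyword (`OptionalNullable[str] = UNSET`) to a required field (`Nullable[str]`). A minimal sketch of what that means for callers, assuming `ChatCompletionRequest` and `UserMessage` are re-exported from `mistralai.models` as the file list suggests:

    from mistralai.models import ChatCompletionRequest, UserMessage

    # "model" must now be supplied explicitly; None is still accepted
    # because the field stays Nullable, but omitting the argument raises
    # a pydantic ValidationError at construction time.
    req = ChatCompletionRequest(
        model="mistral-small-latest",
        messages=[UserMessage(content="Hello!")],
    )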
@@ -14,13 +14,27 @@ from typing import List, Literal, Optional, TypedDict, Union
 from typing_extensions import Annotated, NotRequired
 
 
+StopTypedDict = Union[str, List[str]]
+r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
+
+
+Stop = Union[str, List[str]]
+r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
+
+
+MessagesTypedDict = Union[SystemMessageTypedDict, UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict]
+
+
+Messages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))]
+
+
 ToolChoice = Literal["auto", "none", "any"]
 
 class ChatCompletionStreamRequestTypedDict(TypedDict):
+    model: Nullable[str]
+    r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
     messages: List[MessagesTypedDict]
     r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""
-    model: NotRequired[Nullable[str]]
-    r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
     temperature: NotRequired[float]
     r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both."""
     top_p: NotRequired[float]
@@ -40,10 +54,10 @@ class ChatCompletionStreamRequestTypedDict(TypedDict):
 
 
 class ChatCompletionStreamRequest(BaseModel):
+    model: Nullable[str]
+    r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
     messages: List[Messages]
     r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""
-    model: OptionalNullable[str] = UNSET
-    r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
     temperature: Optional[float] = 0.7
     r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both."""
     top_p: Optional[float] = 1
@@ -63,7 +77,7 @@ class ChatCompletionStreamRequest(BaseModel):
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["model", "temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice"]
+        optional_fields = ["temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice"]
         nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "tools"]
         null_default_fields = []
 
@@ -75,34 +89,15 @@ class ChatCompletionStreamRequest(BaseModel):
             k = f.alias or n
             val = serialized.get(k)
 
+            optional_nullable = k in optional_fields and k in nullable_fields
+            is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member
+
             if val is not None and val != UNSET_SENTINEL:
                 m[k] = val
             elif val != UNSET_SENTINEL and (
-                not k in optional_fields
-                or (
-                    k in optional_fields
-                    and k in nullable_fields
-                    and (
-                        self.__pydantic_fields_set__.intersection({n})
-                        or k in null_default_fields
-                    ) # pylint: disable=no-member
-                )
+                not k in optional_fields or (optional_nullable and is_set)
             ):
                 m[k] = val
 
         return m
 
-
-StopTypedDict = Union[str, List[str]]
-r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
-
-
-Stop = Union[str, List[str]]
-r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
-
-
-MessagesTypedDict = Union[SystemMessageTypedDict, UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict]
-
-
-Messages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))]
-
@@ -34,18 +34,13 @@ class CompletionResponseStreamChoice(BaseModel):
             k = f.alias or n
             val = serialized.get(k)
 
+            optional_nullable = k in optional_fields and k in nullable_fields
+            is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member
+
             if val is not None and val != UNSET_SENTINEL:
                 m[k] = val
             elif val != UNSET_SENTINEL and (
-                not k in optional_fields
-                or (
-                    k in optional_fields
-                    and k in nullable_fields
-                    and (
-                        self.__pydantic_fields_set__.intersection({n})
-                        or k in null_default_fields
-                    ) # pylint: disable=no-member
-                )
+                not k in optional_fields or (optional_nullable and is_set)
             ):
                 m[k] = val
 
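The serializer hunks repeated throughout this release are a pure refactor: the nested membership test is hoisted into two locals (`optional_nullable`, `is_set`) with no behavioral change. A self-contained sketch (names are illustrative, not SDK code) that checks the two forms agree on every combination of flags:

    from itertools import product

    def old_form(in_opt, in_nullable, is_field_set, in_null_default):
        # the original nested condition
        return not in_opt or (
            in_opt and in_nullable and (is_field_set or in_null_default)
        )

    def new_form(in_opt, in_nullable, is_field_set, in_null_default):
        # the flattened 1.0.1 condition
        optional_nullable = in_opt and in_nullable
        is_set = is_field_set or in_null_default
        return not in_opt or (optional_nullable and is_set)

    assert all(
        old_form(*flags) == new_form(*flags)
        for flags in product([False, True], repeat=4)
    )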
@@ -4,20 +4,20 @@ from __future__ import annotations
 from .toolcall import ToolCall, ToolCallTypedDict
 from mistralai_gcp.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
 from pydantic import model_serializer
-from typing import Optional, TypedDict
+from typing import List, Optional, TypedDict
 from typing_extensions import NotRequired
 
 
 class DeltaMessageTypedDict(TypedDict):
     role: NotRequired[str]
     content: NotRequired[str]
-    tool_calls: NotRequired[Nullable[ToolCallTypedDict]]
+    tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]]
 
 
 class DeltaMessage(BaseModel):
     role: Optional[str] = None
     content: Optional[str] = None
-    tool_calls: OptionalNullable[ToolCall] = UNSET
+    tool_calls: OptionalNullable[List[ToolCall]] = UNSET
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
@@ -33,18 +33,13 @@ class DeltaMessage(BaseModel):
             k = f.alias or n
             val = serialized.get(k)
 
+            optional_nullable = k in optional_fields and k in nullable_fields
+            is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member
+
             if val is not None and val != UNSET_SENTINEL:
                 m[k] = val
             elif val != UNSET_SENTINEL and (
-                not k in optional_fields
-                or (
-                    k in optional_fields
-                    and k in nullable_fields
-                    and (
-                        self.__pydantic_fields_set__.intersection({n})
-                        or k in null_default_fields
-                    ) # pylint: disable=no-member
-                )
+                not k in optional_fields or (optional_nullable and is_set)
             ):
                 m[k] = val
 
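`DeltaMessage.tool_calls` widens from a single `ToolCall` to `List[ToolCall]`. A hedged sketch of the consumer-side adjustment, assuming `DeltaMessage` and `ToolCall` are re-exported from `mistralai_gcp.models`:

    from typing import List
    from mistralai_gcp.models import DeltaMessage, ToolCall

    def collect_tool_calls(deltas: List[DeltaMessage]) -> List[ToolCall]:
        # tool_calls is OptionalNullable[List[ToolCall]], so it may be
        # UNSET or None on any given delta; check for a real list first.
        calls: List[ToolCall] = []
        for delta in deltas:
            if isinstance(delta.tool_calls, list):
                calls.extend(delta.tool_calls)
        return calls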
@@ -7,14 +7,22 @@ from typing import List, Optional, TypedDict, Union
 from typing_extensions import NotRequired
 
 
+FIMCompletionRequestStopTypedDict = Union[str, List[str]]
+r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
+
+
+FIMCompletionRequestStop = Union[str, List[str]]
+r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
+
+
 class FIMCompletionRequestTypedDict(TypedDict):
-    prompt: str
-    r"""The text/code to complete."""
-    model: NotRequired[Nullable[str]]
+    model: Nullable[str]
     r"""ID of the model to use. Only compatible for now with:
     - `codestral-2405`
     - `codestral-latest`
     """
+    prompt: str
+    r"""The text/code to complete."""
     temperature: NotRequired[float]
     r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both."""
     top_p: NotRequired[float]
@@ -34,13 +42,13 @@ class FIMCompletionRequestTypedDict(TypedDict):
 
 
 class FIMCompletionRequest(BaseModel):
-    prompt: str
-    r"""The text/code to complete."""
-    model: OptionalNullable[str] = UNSET
+    model: Nullable[str]
     r"""ID of the model to use. Only compatible for now with:
     - `codestral-2405`
     - `codestral-latest`
     """
+    prompt: str
+    r"""The text/code to complete."""
     temperature: Optional[float] = 0.7
     r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both."""
     top_p: Optional[float] = 1
@@ -60,7 +68,7 @@ class FIMCompletionRequest(BaseModel):
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["model", "temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "suffix"]
+        optional_fields = ["temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "suffix"]
         nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "suffix"]
         null_default_fields = []
 
@@ -72,28 +80,15 @@ class FIMCompletionRequest(BaseModel):
             k = f.alias or n
             val = serialized.get(k)
 
+            optional_nullable = k in optional_fields and k in nullable_fields
+            is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member
+
             if val is not None and val != UNSET_SENTINEL:
                 m[k] = val
             elif val != UNSET_SENTINEL and (
-                not k in optional_fields
-                or (
-                    k in optional_fields
-                    and k in nullable_fields
-                    and (
-                        self.__pydantic_fields_set__.intersection({n})
-                        or k in null_default_fields
-                    ) # pylint: disable=no-member
-                )
+                not k in optional_fields or (optional_nullable and is_set)
             ):
                 m[k] = val
 
         return m
 
-
-FIMCompletionRequestStopTypedDict = Union[str, List[str]]
-r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
-
-
-FIMCompletionRequestStop = Union[str, List[str]]
-r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
-
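The same required-`model` change applies to FIM. A request sketch under the field layout above; the model IDs come from the docstring, and everything else keeps its declared default:

    from mistralai.models import FIMCompletionRequest

    req = FIMCompletionRequest(
        model="codestral-latest",    # required; only codestral models are compatible for now
        prompt="def fibonacci(n):",  # the text/code to complete
        # temperature, top_p, stop, suffix, ... retain their defaults
    )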
@@ -7,14 +7,22 @@ from typing import List, Optional, TypedDict, Union
 from typing_extensions import NotRequired
 
 
+FIMCompletionStreamRequestStopTypedDict = Union[str, List[str]]
+r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
+
+
+FIMCompletionStreamRequestStop = Union[str, List[str]]
+r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
+
+
 class FIMCompletionStreamRequestTypedDict(TypedDict):
-    prompt: str
-    r"""The text/code to complete."""
-    model: NotRequired[Nullable[str]]
+    model: Nullable[str]
     r"""ID of the model to use. Only compatible for now with:
     - `codestral-2405`
     - `codestral-latest`
     """
+    prompt: str
+    r"""The text/code to complete."""
     temperature: NotRequired[float]
     r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both."""
     top_p: NotRequired[float]
@@ -33,13 +41,13 @@ class FIMCompletionStreamRequestTypedDict(TypedDict):
 
 
 class FIMCompletionStreamRequest(BaseModel):
-    prompt: str
-    r"""The text/code to complete."""
-    model: OptionalNullable[str] = UNSET
+    model: Nullable[str]
     r"""ID of the model to use. Only compatible for now with:
     - `codestral-2405`
     - `codestral-latest`
     """
+    prompt: str
+    r"""The text/code to complete."""
     temperature: Optional[float] = 0.7
     r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both."""
     top_p: Optional[float] = 1
@@ -58,7 +66,7 @@ class FIMCompletionStreamRequest(BaseModel):
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["model", "temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "suffix"]
+        optional_fields = ["temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "suffix"]
         nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "suffix"]
         null_default_fields = []
 
@@ -70,28 +78,15 @@ class FIMCompletionStreamRequest(BaseModel):
             k = f.alias or n
             val = serialized.get(k)
 
+            optional_nullable = k in optional_fields and k in nullable_fields
+            is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member
+
             if val is not None and val != UNSET_SENTINEL:
                 m[k] = val
             elif val != UNSET_SENTINEL and (
-                not k in optional_fields
-                or (
-                    k in optional_fields
-                    and k in nullable_fields
-                    and (
-                        self.__pydantic_fields_set__.intersection({n})
-                        or k in null_default_fields
-                    ) # pylint: disable=no-member
-                )
+                not k in optional_fields or (optional_nullable and is_set)
            ):
                 m[k] = val
 
         return m
 
-
-FIMCompletionStreamRequestStopTypedDict = Union[str, List[str]]
-r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
-
-
-FIMCompletionStreamRequestStop = Union[str, List[str]]
-r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
-
@@ -2,15 +2,21 @@
 
 from __future__ import annotations
 from mistralai_gcp.types import BaseModel
-from typing import TypedDict
+from typing import Any, Dict, TypedDict, Union
+
+
+ArgumentsTypedDict = Union[Dict[str, Any], str]
+
+
+Arguments = Union[Dict[str, Any], str]
 
 
 class FunctionCallTypedDict(TypedDict):
     name: str
-    arguments: str
+    arguments: ArgumentsTypedDict
 
 
 class FunctionCall(BaseModel):
     name: str
-    arguments: str
+    arguments: Arguments
 
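`FunctionCall.arguments` widens from `str` to `Union[Dict[str, Any], str]`, so code that always called `json.loads` on it needs a type check first. A small normalization helper, assuming only the `FunctionCall` model shown above:

    import json
    from typing import Any, Dict
    from mistralai_gcp.models import FunctionCall

    def arguments_as_dict(call: FunctionCall) -> Dict[str, Any]:
        # arguments may now arrive pre-parsed (dict) or raw (JSON string)
        if isinstance(call.arguments, str):
            return json.loads(call.arguments)
        return call.arguments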
@@ -2,8 +2,8 @@
 
 from __future__ import annotations
 from .validationerror import ValidationError
+from mistralai_gcp import utils
 from mistralai_gcp.types import BaseModel
-import mistralai_gcp.utils as utils
 from typing import List, Optional
 
 class HTTPValidationErrorData(BaseModel):
@@ -7,6 +7,12 @@ from typing import List, Literal, Optional, TypedDict, Union
 from typing_extensions import NotRequired
 
 
+ContentTypedDict = Union[str, List[ContentChunkTypedDict]]
+
+
+Content = Union[str, List[ContentChunk]]
+
+
 Role = Literal["system"]
 
 class SystemMessageTypedDict(TypedDict):
@@ -18,9 +24,3 @@ class SystemMessage(BaseModel):
     content: Content
     role: Optional[Role] = "system"
 
-
-ContentTypedDict = Union[str, List[ContentChunkTypedDict]]
-
-
-Content = Union[str, List[ContentChunk]]
-
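The `Content` aliases themselves are unchanged; they merely move above the class so the plain-name annotations resolve without forward references. Usage is unaffected, e.g. (a sketch assuming the re-export from `mistralai_gcp.models`):

    from mistralai_gcp.models import SystemMessage

    # Content accepts either str or a list of ContentChunk; string form shown:
    msg = SystemMessage(content="You are a terse assistant.")
    assert msg.role == "system"  # role defaults to "system"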
@@ -36,18 +36,13 @@ class ToolMessage(BaseModel):
             k = f.alias or n
             val = serialized.get(k)
 
+            optional_nullable = k in optional_fields and k in nullable_fields
+            is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member
+
             if val is not None and val != UNSET_SENTINEL:
                 m[k] = val
             elif val != UNSET_SENTINEL and (
-                not k in optional_fields
-                or (
-                    k in optional_fields
-                    and k in nullable_fields
-                    and (
-                        self.__pydantic_fields_set__.intersection({n})
-                        or k in null_default_fields
-                    ) # pylint: disable=no-member
-                )
+                not k in optional_fields or (optional_nullable and is_set)
             ):
                 m[k] = val
 
@@ -7,6 +7,12 @@ from typing import List, Literal, Optional, TypedDict, Union
 from typing_extensions import NotRequired
 
 
+UserMessageContentTypedDict = Union[str, List[TextChunkTypedDict]]
+
+
+UserMessageContent = Union[str, List[TextChunk]]
+
+
 UserMessageRole = Literal["user"]
 
 class UserMessageTypedDict(TypedDict):
@@ -18,9 +24,3 @@ class UserMessage(BaseModel):
     content: UserMessageContent
     role: Optional[UserMessageRole] = "user"
 
-
-UserMessageContentTypedDict = Union[str, List[TextChunkTypedDict]]
-
-
-UserMessageContent = Union[str, List[TextChunk]]
-
@@ -5,6 +5,12 @@ from mistralai_gcp.types import BaseModel
 from typing import List, TypedDict, Union
 
 
+LocTypedDict = Union[str, int]
+
+
+Loc = Union[str, int]
+
+
 class ValidationErrorTypedDict(TypedDict):
     loc: List[LocTypedDict]
     msg: str
@@ -16,9 +22,3 @@ class ValidationError(BaseModel):
     msg: str
     type: str
 
-
-LocTypedDict = Union[str, int]
-
-
-Loc = Union[str, int]
-
mistralai_gcp/sdk.py CHANGED
@@ -11,11 +11,13 @@ import httpx
 from mistralai_gcp import models
 from mistralai_gcp._hooks import BeforeRequestHook, SDKHooks
 from mistralai_gcp.chat import Chat
+from mistralai_gcp.fim import Fim
 from mistralai_gcp.types import Nullable
 
 from .basesdk import BaseSDK
 from .httpclient import AsyncHttpClient, HttpClient
 from .sdkconfiguration import SDKConfiguration
+from .utils.logger import Logger, NoOpLogger
 from .utils.retries import RetryConfig
 
 
@@ -23,6 +25,7 @@ class MistralGoogleCloud(BaseSDK):
     r"""Mistral AI API: Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it."""
 
     chat: Chat
+    fim: Fim
     r"""Chat Completion API"""
 
     def __init__(
@@ -32,6 +35,7 @@ class MistralGoogleCloud(BaseSDK):
         client: Optional[HttpClient] = None,
         async_client: Optional[AsyncHttpClient] = None,
         retry_config: Optional[Nullable[RetryConfig]] = None,
+        debug_logger: Optional[Logger] = None,
     ) -> None:
         r"""Instantiates the SDK configuring it with the provided parameters.
 
@@ -73,6 +77,9 @@ class MistralGoogleCloud(BaseSDK):
         if async_client is None:
             async_client = httpx.AsyncClient()
 
+        if debug_logger is None:
+            debug_logger = NoOpLogger()
+
         assert issubclass(
             type(async_client), AsyncHttpClient
         ), "The provided async_client must implement the AsyncHttpClient protocol."
@@ -94,6 +101,7 @@ class MistralGoogleCloud(BaseSDK):
                 server_url=f"https://{region}-aiplatform.googleapis.com",
                 server=None,
                 retry_config=retry_config,
+                debug_logger=debug_logger,
             ),
         )
 
@@ -116,6 +124,7 @@ class MistralGoogleCloud(BaseSDK):
 
     def _init_sdks(self):
         self.chat = Chat(self.sdk_configuration)
+        self.fim = Fim(self.sdk_configuration)
 
 
 class GoogleCloudBeforeRequestHook(BeforeRequestHook):
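Taken together, these sdk.py hunks wire a new `fim` namespace and an optional `debug_logger` through `MistralGoogleCloud`. A hedged construction sketch (the `region` parameter is inferred from the `server_url` template above, and a standard `logging.Logger` is assumed to satisfy the `Logger` protocol):

    import logging
    from mistralai_gcp import MistralGoogleCloud

    logger = logging.getLogger("mistralai_gcp")
    logger.setLevel(logging.DEBUG)

    sdk = MistralGoogleCloud(
        region="europe-west4",  # inserted into https://{region}-aiplatform.googleapis.com
        debug_logger=logger,    # falls back to NoOpLogger() when omitted
    )
    # _init_sdks() now provisions sdk.fim alongside sdk.chat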
@@ -3,7 +3,7 @@
 
 from ._hooks import SDKHooks
 from .httpclient import AsyncHttpClient, HttpClient
-from .utils import RetryConfig, remove_suffix
+from .utils import Logger, RetryConfig, remove_suffix
 from dataclasses import dataclass
 from mistralai_gcp import models
 from mistralai_gcp.types import OptionalNullable, UNSET
@@ -23,14 +23,15 @@ SERVERS = {
 class SDKConfiguration:
     client: HttpClient
     async_client: AsyncHttpClient
+    debug_logger: Logger
     security: Optional[Union[models.Security,Callable[[], models.Security]]] = None
     server_url: Optional[str] = ""
     server: Optional[str] = ""
     language: str = "python"
     openapi_doc_version: str = "0.0.2"
-    sdk_version: str = "1.0.0rc1"
-    gen_version: str = "2.382.2"
-    user_agent: str = "speakeasy-sdk/python 1.0.0rc1 2.382.2 0.0.2 mistralai-gcp"
+    sdk_version: str = "1.0.0-rc.4"
+    gen_version: str = "2.390.6"
+    user_agent: str = "speakeasy-sdk/python 1.0.0-rc.4 2.390.6 0.0.2 mistralai-gcp"
     retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET)
     timeout_ms: Optional[int] = None
 
@@ -2,8 +2,8 @@
 
 from pydantic import ConfigDict, model_serializer
 from pydantic import BaseModel as PydanticBaseModel
-from typing import Literal, Optional, TypeVar, Union, NewType
-from typing_extensions import TypeAliasType
+from typing import TYPE_CHECKING, Literal, Optional, TypeVar, Union, NewType
+from typing_extensions import TypeAliasType, TypeAlias
 
 
 class BaseModel(PydanticBaseModel):
@@ -26,10 +26,14 @@ UNSET_SENTINEL = "~?~unset~?~sentinel~?~"
 
 
 T = TypeVar("T")
-Nullable = TypeAliasType("Nullable", Union[T, None], type_params=(T,))
-OptionalNullable = TypeAliasType(
-    "OptionalNullable", Union[Optional[Nullable[T]], Unset], type_params=(T,)
-)
+if TYPE_CHECKING:
+    Nullable: TypeAlias = Union[T, None]
+    OptionalNullable: TypeAlias = Union[Optional[Nullable[T]], Unset]
+else:
+    Nullable = TypeAliasType("Nullable", Union[T, None], type_params=(T,))
+    OptionalNullable = TypeAliasType(
+        "OptionalNullable", Union[Optional[Nullable[T]], Unset], type_params=(T,)
+    )
 
 UnrecognizedInt = NewType("UnrecognizedInt", int)
 UnrecognizedStr = NewType("UnrecognizedStr", str)
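The `TYPE_CHECKING` split keeps the runtime `TypeAliasType` objects while giving static checkers plain `TypeAlias` unions they can narrow directly. A short sketch using the exports confirmed by imports elsewhere in this diff (`from mistralai_gcp.types import ... Nullable, OptionalNullable, UNSET ...`):

    from mistralai_gcp.types import Nullable, OptionalNullable, UNSET

    def pick_model(model: Nullable[str]) -> str:
        # to a type checker, Nullable[str] is simply Union[str, None]
        return model or "mistral-small-latest"

    maybe: OptionalNullable[str] = UNSET  # statically Union[Optional[str], Unset]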