mistralai 1.3.1__py3-none-any.whl → 1.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
mistralai/mistral_jobs.py CHANGED
@@ -24,7 +24,7 @@ class MistralJobs(BaseSDK):
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
         http_headers: Optional[Mapping[str, str]] = None,
-    ) -> Optional[models.BatchJobsOut]:
+    ) -> models.BatchJobsOut:
         r"""Get Batch Jobs
 
         Get a list of batch jobs for your organization and user.
@@ -97,8 +97,13 @@ class MistralJobs(BaseSDK):
         )
 
         if utils.match_response(http_res, "200", "application/json"):
-            return utils.unmarshal_json(http_res.text, Optional[models.BatchJobsOut])
-        if utils.match_response(http_res, ["4XX", "5XX"], "*"):
+            return utils.unmarshal_json(http_res.text, models.BatchJobsOut)
+        if utils.match_response(http_res, "4XX", "*"):
+            http_res_text = utils.stream_to_text(http_res)
+            raise models.SDKError(
+                "API error occurred", http_res.status_code, http_res_text, http_res
+            )
+        if utils.match_response(http_res, "5XX", "*"):
             http_res_text = utils.stream_to_text(http_res)
             raise models.SDKError(
                 "API error occurred", http_res.status_code, http_res_text, http_res
@@ -127,7 +132,7 @@ class MistralJobs(BaseSDK):
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
         http_headers: Optional[Mapping[str, str]] = None,
-    ) -> Optional[models.BatchJobsOut]:
+    ) -> models.BatchJobsOut:
         r"""Get Batch Jobs
 
         Get a list of batch jobs for your organization and user.
@@ -200,8 +205,13 @@ class MistralJobs(BaseSDK):
         )
 
         if utils.match_response(http_res, "200", "application/json"):
-            return utils.unmarshal_json(http_res.text, Optional[models.BatchJobsOut])
-        if utils.match_response(http_res, ["4XX", "5XX"], "*"):
+            return utils.unmarshal_json(http_res.text, models.BatchJobsOut)
+        if utils.match_response(http_res, "4XX", "*"):
+            http_res_text = await utils.stream_to_text_async(http_res)
+            raise models.SDKError(
+                "API error occurred", http_res.status_code, http_res_text, http_res
+            )
+        if utils.match_response(http_res, "5XX", "*"):
             http_res_text = await utils.stream_to_text_async(http_res)
             raise models.SDKError(
                 "API error occurred", http_res.status_code, http_res_text, http_res
@@ -228,7 +238,7 @@ class MistralJobs(BaseSDK):
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
         http_headers: Optional[Mapping[str, str]] = None,
-    ) -> Optional[models.BatchJobOut]:
+    ) -> models.BatchJobOut:
         r"""Create Batch Job
 
         Create a new batch job, it will be queued for processing.
@@ -300,8 +310,13 @@ class MistralJobs(BaseSDK):
         )
 
         if utils.match_response(http_res, "200", "application/json"):
-            return utils.unmarshal_json(http_res.text, Optional[models.BatchJobOut])
-        if utils.match_response(http_res, ["4XX", "5XX"], "*"):
+            return utils.unmarshal_json(http_res.text, models.BatchJobOut)
+        if utils.match_response(http_res, "4XX", "*"):
+            http_res_text = utils.stream_to_text(http_res)
+            raise models.SDKError(
+                "API error occurred", http_res.status_code, http_res_text, http_res
+            )
+        if utils.match_response(http_res, "5XX", "*"):
             http_res_text = utils.stream_to_text(http_res)
             raise models.SDKError(
                 "API error occurred", http_res.status_code, http_res_text, http_res
@@ -328,7 +343,7 @@ class MistralJobs(BaseSDK):
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
         http_headers: Optional[Mapping[str, str]] = None,
-    ) -> Optional[models.BatchJobOut]:
+    ) -> models.BatchJobOut:
         r"""Create Batch Job
 
         Create a new batch job, it will be queued for processing.
@@ -400,8 +415,13 @@ class MistralJobs(BaseSDK):
         )
 
        if utils.match_response(http_res, "200", "application/json"):
-            return utils.unmarshal_json(http_res.text, Optional[models.BatchJobOut])
-        if utils.match_response(http_res, ["4XX", "5XX"], "*"):
+            return utils.unmarshal_json(http_res.text, models.BatchJobOut)
+        if utils.match_response(http_res, "4XX", "*"):
+            http_res_text = await utils.stream_to_text_async(http_res)
+            raise models.SDKError(
+                "API error occurred", http_res.status_code, http_res_text, http_res
+            )
+        if utils.match_response(http_res, "5XX", "*"):
             http_res_text = await utils.stream_to_text_async(http_res)
             raise models.SDKError(
                 "API error occurred", http_res.status_code, http_res_text, http_res
@@ -424,7 +444,7 @@ class MistralJobs(BaseSDK):
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
         http_headers: Optional[Mapping[str, str]] = None,
-    ) -> Optional[models.BatchJobOut]:
+    ) -> models.BatchJobOut:
         r"""Get Batch Job
 
         Get a batch job details by its UUID.
@@ -485,8 +505,13 @@ class MistralJobs(BaseSDK):
         )
 
         if utils.match_response(http_res, "200", "application/json"):
-            return utils.unmarshal_json(http_res.text, Optional[models.BatchJobOut])
-        if utils.match_response(http_res, ["4XX", "5XX"], "*"):
+            return utils.unmarshal_json(http_res.text, models.BatchJobOut)
+        if utils.match_response(http_res, "4XX", "*"):
+            http_res_text = utils.stream_to_text(http_res)
+            raise models.SDKError(
+                "API error occurred", http_res.status_code, http_res_text, http_res
+            )
+        if utils.match_response(http_res, "5XX", "*"):
             http_res_text = utils.stream_to_text(http_res)
             raise models.SDKError(
                 "API error occurred", http_res.status_code, http_res_text, http_res
@@ -509,7 +534,7 @@ class MistralJobs(BaseSDK):
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
         http_headers: Optional[Mapping[str, str]] = None,
-    ) -> Optional[models.BatchJobOut]:
+    ) -> models.BatchJobOut:
         r"""Get Batch Job
 
         Get a batch job details by its UUID.
@@ -570,8 +595,13 @@ class MistralJobs(BaseSDK):
         )
 
         if utils.match_response(http_res, "200", "application/json"):
-            return utils.unmarshal_json(http_res.text, Optional[models.BatchJobOut])
-        if utils.match_response(http_res, ["4XX", "5XX"], "*"):
+            return utils.unmarshal_json(http_res.text, models.BatchJobOut)
+        if utils.match_response(http_res, "4XX", "*"):
+            http_res_text = await utils.stream_to_text_async(http_res)
+            raise models.SDKError(
+                "API error occurred", http_res.status_code, http_res_text, http_res
+            )
+        if utils.match_response(http_res, "5XX", "*"):
             http_res_text = await utils.stream_to_text_async(http_res)
             raise models.SDKError(
                 "API error occurred", http_res.status_code, http_res_text, http_res
@@ -594,7 +624,7 @@ class MistralJobs(BaseSDK):
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
         http_headers: Optional[Mapping[str, str]] = None,
-    ) -> Optional[models.BatchJobOut]:
+    ) -> models.BatchJobOut:
         r"""Cancel Batch Job
 
         Request the cancellation of a batch job.
@@ -655,8 +685,13 @@ class MistralJobs(BaseSDK):
         )
 
         if utils.match_response(http_res, "200", "application/json"):
-            return utils.unmarshal_json(http_res.text, Optional[models.BatchJobOut])
-        if utils.match_response(http_res, ["4XX", "5XX"], "*"):
+            return utils.unmarshal_json(http_res.text, models.BatchJobOut)
+        if utils.match_response(http_res, "4XX", "*"):
+            http_res_text = utils.stream_to_text(http_res)
+            raise models.SDKError(
+                "API error occurred", http_res.status_code, http_res_text, http_res
+            )
+        if utils.match_response(http_res, "5XX", "*"):
             http_res_text = utils.stream_to_text(http_res)
             raise models.SDKError(
                 "API error occurred", http_res.status_code, http_res_text, http_res
@@ -679,7 +714,7 @@ class MistralJobs(BaseSDK):
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
         http_headers: Optional[Mapping[str, str]] = None,
-    ) -> Optional[models.BatchJobOut]:
+    ) -> models.BatchJobOut:
         r"""Cancel Batch Job
 
         Request the cancellation of a batch job.
@@ -740,8 +775,13 @@ class MistralJobs(BaseSDK):
         )
 
         if utils.match_response(http_res, "200", "application/json"):
-            return utils.unmarshal_json(http_res.text, Optional[models.BatchJobOut])
-        if utils.match_response(http_res, ["4XX", "5XX"], "*"):
+            return utils.unmarshal_json(http_res.text, models.BatchJobOut)
+        if utils.match_response(http_res, "4XX", "*"):
+            http_res_text = await utils.stream_to_text_async(http_res)
+            raise models.SDKError(
+                "API error occurred", http_res.status_code, http_res_text, http_res
+            )
+        if utils.match_response(http_res, "5XX", "*"):
             http_res_text = await utils.stream_to_text_async(http_res)
             raise models.SDKError(
                 "API error occurred", http_res.status_code, http_res_text, http_res
mistralai/models/__init__.py CHANGED
@@ -273,6 +273,7 @@ from .listfilesout import ListFilesOut, ListFilesOutTypedDict
 from .metricout import MetricOut, MetricOutTypedDict
 from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict
 from .modellist import Data, DataTypedDict, ModelList, ModelListTypedDict
+from .prediction import Prediction, PredictionType, PredictionTypedDict
 from .referencechunk import ReferenceChunk, ReferenceChunkType, ReferenceChunkTypedDict
 from .responseformat import ResponseFormat, ResponseFormatTypedDict
 from .responseformats import ResponseFormats
@@ -342,6 +343,7 @@ from .wandbintegrationout import (
     WandbIntegrationOutTypedDict,
 )
 
+
 __all__ = [
     "APIEndpoint",
     "AgentsCompletionRequest",
@@ -566,6 +568,9 @@ __all__ = [
     "Object",
     "One",
     "OneTypedDict",
+    "Prediction",
+    "PredictionType",
+    "PredictionTypedDict",
     "QueryParamStatus",
     "ReferenceChunk",
     "ReferenceChunkType",
mistralai/models/agentscompletionrequest.py CHANGED
@@ -2,6 +2,7 @@
 
 from __future__ import annotations
 from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
+from .prediction import Prediction, PredictionTypedDict
 from .responseformat import ResponseFormat, ResponseFormatTypedDict
 from .systemmessage import SystemMessage, SystemMessageTypedDict
 from .tool import Tool, ToolTypedDict
@@ -83,6 +84,7 @@ class AgentsCompletionRequestTypedDict(TypedDict):
     r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""
     n: NotRequired[Nullable[int]]
     r"""Number of completions to return for each request, input tokens are only billed once."""
+    prediction: NotRequired[PredictionTypedDict]
 
 
 class AgentsCompletionRequest(BaseModel):
@@ -119,6 +121,8 @@ class AgentsCompletionRequest(BaseModel):
     n: OptionalNullable[int] = UNSET
     r"""Number of completions to return for each request, input tokens are only billed once."""
 
+    prediction: Optional[Prediction] = None
+
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
         optional_fields = [
@@ -132,6 +136,7 @@ class AgentsCompletionRequest(BaseModel):
             "presence_penalty",
             "frequency_penalty",
             "n",
+            "prediction",
         ]
         nullable_fields = ["max_tokens", "random_seed", "tools", "n"]
         null_default_fields = []
mistralai/models/agentscompletionstreamrequest.py CHANGED
@@ -2,6 +2,7 @@
 
 from __future__ import annotations
 from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
+from .prediction import Prediction, PredictionTypedDict
 from .responseformat import ResponseFormat, ResponseFormatTypedDict
 from .systemmessage import SystemMessage, SystemMessageTypedDict
 from .tool import Tool, ToolTypedDict
@@ -82,6 +83,7 @@ class AgentsCompletionStreamRequestTypedDict(TypedDict):
     r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""
     n: NotRequired[Nullable[int]]
     r"""Number of completions to return for each request, input tokens are only billed once."""
+    prediction: NotRequired[PredictionTypedDict]
 
 
 class AgentsCompletionStreamRequest(BaseModel):
@@ -117,6 +119,8 @@ class AgentsCompletionStreamRequest(BaseModel):
     n: OptionalNullable[int] = UNSET
     r"""Number of completions to return for each request, input tokens are only billed once."""
 
+    prediction: Optional[Prediction] = None
+
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
         optional_fields = [
@@ -130,6 +134,7 @@ class AgentsCompletionStreamRequest(BaseModel):
             "presence_penalty",
             "frequency_penalty",
             "n",
+            "prediction",
         ]
         nullable_fields = ["max_tokens", "random_seed", "tools", "n"]
         null_default_fields = []
mistralai/models/chatcompletionrequest.py CHANGED
@@ -2,6 +2,7 @@
 
 from __future__ import annotations
 from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
+from .prediction import Prediction, PredictionTypedDict
 from .responseformat import ResponseFormat, ResponseFormatTypedDict
 from .systemmessage import SystemMessage, SystemMessageTypedDict
 from .tool import Tool, ToolTypedDict
@@ -83,6 +84,7 @@ class ChatCompletionRequestTypedDict(TypedDict):
     r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""
     n: NotRequired[Nullable[int]]
     r"""Number of completions to return for each request, input tokens are only billed once."""
+    prediction: NotRequired[PredictionTypedDict]
     safe_prompt: NotRequired[bool]
     r"""Whether to inject a safety prompt before all conversations."""
 
@@ -127,6 +129,8 @@ class ChatCompletionRequest(BaseModel):
     n: OptionalNullable[int] = UNSET
     r"""Number of completions to return for each request, input tokens are only billed once."""
 
+    prediction: Optional[Prediction] = None
+
     safe_prompt: Optional[bool] = None
     r"""Whether to inject a safety prompt before all conversations."""
 
@@ -145,6 +149,7 @@ class ChatCompletionRequest(BaseModel):
             "presence_penalty",
             "frequency_penalty",
             "n",
+            "prediction",
             "safe_prompt",
         ]
         nullable_fields = [
mistralai/models/chatcompletionstreamrequest.py CHANGED
@@ -2,6 +2,7 @@
 
 from __future__ import annotations
 from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
+from .prediction import Prediction, PredictionTypedDict
 from .responseformat import ResponseFormat, ResponseFormatTypedDict
 from .systemmessage import SystemMessage, SystemMessageTypedDict
 from .tool import Tool, ToolTypedDict
@@ -86,6 +87,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict):
     r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""
     n: NotRequired[Nullable[int]]
     r"""Number of completions to return for each request, input tokens are only billed once."""
+    prediction: NotRequired[PredictionTypedDict]
     safe_prompt: NotRequired[bool]
     r"""Whether to inject a safety prompt before all conversations."""
 
@@ -129,6 +131,8 @@ class ChatCompletionStreamRequest(BaseModel):
     n: OptionalNullable[int] = UNSET
     r"""Number of completions to return for each request, input tokens are only billed once."""
 
+    prediction: Optional[Prediction] = None
+
     safe_prompt: Optional[bool] = None
     r"""Whether to inject a safety prompt before all conversations."""
 
@@ -147,6 +151,7 @@ class ChatCompletionStreamRequest(BaseModel):
             "presence_penalty",
             "frequency_penalty",
             "n",
+            "prediction",
             "safe_prompt",
         ]
         nullable_fields = [
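
All four completion request models above (AgentsCompletionRequest, AgentsCompletionStreamRequest, ChatCompletionRequest, ChatCompletionStreamRequest) gain the same optional prediction field, wired into optional_fields so it is only serialized when set. A hedged usage sketch (assuming chat.complete forwards the new field as modeled; the draft text is illustrative):

# Hedged sketch of predicted outputs: pass a dict matching PredictionTypedDict.
# The "type" key is pinned to the literal "content" by the new model further down.
import os
from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

draft = "def add(a, b):\n    return a + b\n"  # text the model is expected to mostly reuse
res = client.chat.complete(
    model="mistral-small-latest",
    messages=[{"role": "user", "content": "Rename this function to sum_two:\n" + draft}],
    prediction={"type": "content", "content": draft},
)
print(res.choices[0].message.content)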
mistralai/models/fileschema.py CHANGED
@@ -6,6 +6,7 @@ from .sampletype import SampleType
 from .source import Source
 from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
 from mistralai.utils import validate_open_enum
+import pydantic
 from pydantic import model_serializer
 from pydantic.functional_validators import PlainValidator
 from typing_extensions import Annotated, NotRequired, TypedDict
@@ -16,7 +17,7 @@ class FileSchemaTypedDict(TypedDict):
     r"""The unique identifier of the file."""
     object: str
     r"""The object type, which is always \"file\"."""
-    bytes: int
+    size_bytes: int
     r"""The size of the file, in bytes."""
     created_at: int
     r"""The UNIX timestamp (in seconds) of the event."""
@@ -35,7 +36,7 @@ class FileSchema(BaseModel):
     object: str
     r"""The object type, which is always \"file\"."""
 
-    bytes: int
+    size_bytes: Annotated[int, pydantic.Field(alias="bytes")]
     r"""The size of the file, in bytes."""
 
     created_at: int
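
Note the rename is attribute-level only: pydantic.Field(alias="bytes") keeps the wire key "bytes", so JSON payloads are unchanged while Python code stops shadowing the builtin bytes type. The same rename appears in RetrieveFileOut and UploadFileOut further down. A self-contained pydantic v2 sketch of the alias pattern (illustrative stand-in, not the SDK class itself):

# Minimal stand-in showing how the alias keeps the JSON key while renaming
# the Python attribute (assumes pydantic v2, as the SDK models do).
import pydantic
from typing_extensions import Annotated

class FileInfo(pydantic.BaseModel):
    size_bytes: Annotated[int, pydantic.Field(alias="bytes")]

f = FileInfo.model_validate({"bytes": 1024})           # incoming JSON still uses "bytes"
assert f.size_bytes == 1024                            # attribute access uses the new name
assert f.model_dump(by_alias=True) == {"bytes": 1024}  # serialization round-trips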
mistralai/models/function.py CHANGED
@@ -10,6 +10,7 @@ class FunctionTypedDict(TypedDict):
     name: str
     parameters: Dict[str, Any]
     description: NotRequired[str]
+    strict: NotRequired[bool]
 
 
 class Function(BaseModel):
@@ -18,3 +19,5 @@ class Function(BaseModel):
     parameters: Dict[str, Any]
 
     description: Optional[str] = ""
+
+    strict: Optional[bool] = False
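
Function gains a strict flag, defaulting to False. A hedged sketch of declaring a function tool with it (the weather schema is illustrative; the flag presumably requests schema-exact tool arguments, in line with comparable chat APIs):

# Hedged sketch: a tool definition using the new strict flag (hypothetical schema).
from mistralai.models import Function, Tool

weather_fn = Function(
    name="get_weather",
    description="Look up the current weather for a city.",
    parameters={
        "type": "object",
        "properties": {"city": {"type": "string"}},
        "required": ["city"],
    },
    strict=True,  # new in 1.4.0; defaults to False
)
tool = Tool(function=weather_fn)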
mistralai/models/prediction.py ADDED
@@ -0,0 +1,26 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from mistralai.types import BaseModel
+from mistralai.utils import validate_const
+import pydantic
+from pydantic.functional_validators import AfterValidator
+from typing import Literal, Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+PredictionType = Literal["content"]
+
+
+class PredictionTypedDict(TypedDict):
+    type: PredictionType
+    content: NotRequired[str]
+
+
+class Prediction(BaseModel):
+    TYPE: Annotated[
+        Annotated[Optional[PredictionType], AfterValidator(validate_const("content"))],
+        pydantic.Field(alias="type"),
+    ] = "content"
+
+    content: Optional[str] = ""
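
The generated model pins type to the literal "content" (via validate_const) and aliases the Python attribute TYPE to the wire key "type". A short sketch of what that yields, based on the generated code above:

# Sketch: the only valid type is "content", and it is supplied by default.
from mistralai.models import Prediction

p = Prediction(content="expected completion text")
print(p.TYPE)                       # "content"
print(p.model_dump(by_alias=True))  # {'type': 'content', 'content': 'expected completion text'}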
mistralai/models/retrievefileout.py CHANGED
@@ -6,6 +6,7 @@ from .sampletype import SampleType
 from .source import Source
 from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
 from mistralai.utils import validate_open_enum
+import pydantic
 from pydantic import model_serializer
 from pydantic.functional_validators import PlainValidator
 from typing_extensions import Annotated, NotRequired, TypedDict
@@ -16,7 +17,7 @@ class RetrieveFileOutTypedDict(TypedDict):
     r"""The unique identifier of the file."""
     object: str
     r"""The object type, which is always \"file\"."""
-    bytes: int
+    size_bytes: int
     r"""The size of the file, in bytes."""
     created_at: int
     r"""The UNIX timestamp (in seconds) of the event."""
@@ -36,7 +37,7 @@ class RetrieveFileOut(BaseModel):
     object: str
     r"""The object type, which is always \"file\"."""
 
-    bytes: int
+    size_bytes: Annotated[int, pydantic.Field(alias="bytes")]
     r"""The size of the file, in bytes."""
 
     created_at: int
mistralai/models/toolcall.py CHANGED
@@ -14,6 +14,7 @@ class ToolCallTypedDict(TypedDict):
     function: FunctionCallTypedDict
     id: NotRequired[str]
     type: NotRequired[ToolTypes]
+    index: NotRequired[int]
 
 
 class ToolCall(BaseModel):
@@ -24,3 +25,5 @@ class ToolCall(BaseModel):
     type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = (
         None
    )
+
+    index: Optional[int] = 0
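
ToolCall picks up an index field (default 0). A plausible reading, hedged: when tool calls stream back as deltas, or when several calls are emitted in parallel, index identifies which call each fragment belongs to, mirroring other chat APIs. A hypothetical reassembly helper under that assumption (not SDK code):

# Hypothetical helper: merge streamed tool-call fragments by their index,
# assuming each fragment carries a string chunk in function.arguments.
from collections import defaultdict

def merge_tool_calls(fragments):
    merged = defaultdict(str)
    for frag in fragments:
        chunk = frag.function.arguments
        if isinstance(chunk, str):  # arguments may also arrive fully parsed as a dict
            merged[frag.index] += chunk
    return dict(merged)  # index -> concatenated JSON argument text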
mistralai/models/uploadfileout.py CHANGED
@@ -6,6 +6,7 @@ from .sampletype import SampleType
 from .source import Source
 from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
 from mistralai.utils import validate_open_enum
+import pydantic
 from pydantic import model_serializer
 from pydantic.functional_validators import PlainValidator
 from typing_extensions import Annotated, NotRequired, TypedDict
@@ -16,7 +17,7 @@ class UploadFileOutTypedDict(TypedDict):
     r"""The unique identifier of the file."""
     object: str
     r"""The object type, which is always \"file\"."""
-    bytes: int
+    size_bytes: int
     r"""The size of the file, in bytes."""
     created_at: int
     r"""The UNIX timestamp (in seconds) of the event."""
@@ -35,7 +36,7 @@ class UploadFileOut(BaseModel):
     object: str
     r"""The object type, which is always \"file\"."""
 
-    bytes: int
+    size_bytes: Annotated[int, pydantic.Field(alias="bytes")]
     r"""The size of the file, in bytes."""
 
     created_at: int
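
Migration note for the three renamed file models: any 1.3.x code reading .bytes on an upload, retrieve, or listing result must switch to .size_bytes. A hedged sketch (assuming the 1.x files.upload signature):

# Hedged sketch: the attribute rename as seen by SDK callers.
import os
from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
with open("batch_input.jsonl", "rb") as fh:
    uploaded = client.files.upload(
        file={"file_name": "batch_input.jsonl", "content": fh}
    )
print(uploaded.size_bytes)  # was uploaded.bytes in 1.3.x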