nebu 0.1.45__py3-none-any.whl → 0.1.48__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- nebu/__init__.py +0 -1
- nebu/builders/builder.py +0 -0
- nebu/data.py +24 -3
- nebu/processors/consumer.py +581 -389
- nebu/processors/decorate.py +441 -411
- {nebu-0.1.45.dist-info → nebu-0.1.48.dist-info}/METADATA +1 -1
- {nebu-0.1.45.dist-info → nebu-0.1.48.dist-info}/RECORD +10 -12
- {nebu-0.1.45.dist-info → nebu-0.1.48.dist-info}/WHEEL +1 -1
- nebu/adapter.py +0 -20
- nebu/chatx/convert.py +0 -362
- nebu/chatx/openai.py +0 -976
- {nebu-0.1.45.dist-info → nebu-0.1.48.dist-info}/licenses/LICENSE +0 -0
- {nebu-0.1.45.dist-info → nebu-0.1.48.dist-info}/top_level.txt +0 -0
nebu/chatx/openai.py
DELETED
@@ -1,976 +0,0 @@
-# generated by datamodel-codegen:
-#   filename: openai.yaml (subset for Chat Completions)
-#   timestamp: 2025-04-10T17:23:37+00:00
-
-from __future__ import annotations
-
-from typing import Dict, List, Literal, Optional, Union
-
-from pydantic import BaseModel, ConfigDict, Field, RootModel
-
-
-class FunctionDefinition(BaseModel):
-    name: str = Field(..., description="The name of the function to call.")
-
-
-class AssistantsNamedToolChoice(BaseModel):
-    type: str = Field(
-        ...,
-        description="The type of the tool. If type is `function`, the function name must be set. Possible values: ['function', 'code_interpreter', 'file_search']",
-    )
-    function: FunctionDefinition | None = None
-
-
-class ChatCompletionFunctionCallOption(FunctionDefinition):
-    pass
-
-
-class ChatCompletionDeleted(BaseModel):
-    object: str = Field(
-        ...,
-        description="The type of object being deleted. Possible values: ['list', 'assistant', 'chat.completion.deleted', 'chat.completion', 'chat.completion.chunk']",
-    )
-    id: str = Field(..., description="The ID of the chat completion that was deleted.")
-    deleted: bool = Field(..., description="Whether the chat completion was deleted.")
-
-
-class FunctionCall(BaseModel):
-    name: str = Field(..., description="The name of the function to call.")
-    arguments: str = Field(
-        ...,
-        description="The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.",
-    )
-
-
-class ChatCompletionMessageToolCall(BaseModel):
-    id: str = Field(..., description="The ID of the tool call.")
-    type: Literal["function"] = Field(
-        ...,
-        description="The type of the tool. Currently, only `function` is supported.",
-    )
-    function: FunctionCall = Field(
-        ..., description="The function that the model called."
-    )
-
-
-class FunctionCallChunk(BaseModel):
-    name: str | None = Field(None, description="The name of the function to call.")
-    arguments: str | None = Field(
-        None,
-        description="The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.",
-    )
-
-
-class ChatCompletionMessageToolCallChunk(BaseModel):
-    index: int
-    id: str | None = Field(None, description="The ID of the tool call.")
-    type: Literal["function"] | None = Field(
-        None,
-        description="The type of the tool. Currently, only `function` is supported.",
-    )
-    function: FunctionCallChunk | None = None
-
-
-class ChatCompletionMessageToolCalls(RootModel[List[ChatCompletionMessageToolCall]]):
-    root: List[ChatCompletionMessageToolCall] = Field(
-        ...,
-        description="The tool calls generated by the model, such as function calls.",
-    )
-
-
-class ChatCompletionModalities(RootModel[List[str] | None]):
-    root: List[str] | None = Field(
-        None,
-        description='Output types that you would like the model to generate for this request.\nMost models are capable of generating text, which is the default:\n\n`["text"]`\n\nThe `gpt-4o-audio-preview` model can also be used to [generate audio](/docs/guides/audio). To\nrequest that this model generate both text and audio responses, you can\nuse:\n\n`["text", "audio"]`\n\'. Possible values: [\'text\', \'audio\']',
-    )
-
-
-class ChatCompletionNamedToolChoice(BaseModel):
-    type: Literal["function"] = Field(
-        ...,
-        description="The type of the tool. Currently, only `function` is supported.",
-    )
-    function: FunctionDefinition
-
-
-class AudioInputData(BaseModel):
-    id: str = Field(
-        ...,
-        description="Unique identifier for a previous audio response from the model.\n",
-    )
-
-
-class ResponseFunctionCall(BaseModel):
-    arguments: str = Field(
-        ...,
-        description="The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.",
-    )
-    name: str = Field(..., description="The name of the function to call.")
-
-
-class ChatCompletionRequestFunctionMessage(BaseModel):
-    role: Literal["function"] = Field(
-        ..., description="The role of the messages author, in this case `function`."
-    )
-    content: str = Field(..., description="The contents of the function message.")
-    name: str = Field(..., description="The name of the function to call.")
-
-
-class InputAudio(BaseModel):
-    data: str = Field(..., description="Base64 encoded audio data.")
-    format: str = Field(
-        ...,
-        description="The format of the encoded audio data. Currently supports \"wav\" and \"mp3\".\n'. Possible values: ['wav', 'mp3']",
-    )
-
-
-class ChatCompletionRequestMessageContentPartAudio(BaseModel):
-    type: str = Field(
-        ..., description="The type of the content part. Always `input_audio`."
-    )
-    input_audio: InputAudio
-
-
-class FileData(BaseModel):
-    filename: str | None = Field(
-        None,
-        description="The name of the file, used when passing the file to the model as a \nstring.\n",
-    )
-    file_data: str | None = Field(
-        None,
-        description="The base64 encoded file data, used when passing the file to the model \nas a string.\n",
-    )
-    file_id: str | None = Field(
-        None, description="The ID of an uploaded file to use as input.\n"
-    )
-
-
-class ChatCompletionRequestMessageContentPartFile(BaseModel):
-    type: str = Field(..., description="The type of the content part. Always `file`.")
-    file: FileData
-
-
-class ImageUrl(BaseModel):
-    url: str = Field(
-        ..., description="Either a URL of the image or the base64 encoded image data."
-    )
-    detail: str | None = Field(
-        "auto",
-        description="Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision#low-or-high-fidelity-image-understanding). Possible values: ['auto', 'low', 'high']",
-    )
-
-
-class ChatCompletionRequestMessageContentPartImage(BaseModel):
-    type: str = Field(
-        ..., description="The type of the content part. Always `image_url`."
-    )
-    image_url: ImageUrl
-
-
-class ChatCompletionRequestMessageContentPartRefusal(BaseModel):
-    type: str = Field(
-        ..., description="The type of the content part. Always `refusal`."
-    )
-    refusal: str = Field(..., description="The refusal message generated by the model.")
-
-
-class ChatCompletionRequestMessageContentPartText(BaseModel):
-    type: str = Field(..., description="The type of the content part. Always `text`.")
-    text: str = Field(..., description="The text content.")
-
-
-class ChatCompletionRequestSystemMessageContentPart(
-    RootModel[ChatCompletionRequestMessageContentPartText]
-):
-    root: ChatCompletionRequestMessageContentPartText
-
-
-class ChatCompletionRequestToolMessageContentPart(
-    ChatCompletionRequestSystemMessageContentPart
-):
-    pass
-
-
-class ChatCompletionRequestUserMessageContentPart(
-    RootModel[
-        Union[
-            ChatCompletionRequestMessageContentPartText,
-            ChatCompletionRequestMessageContentPartImage,
-            ChatCompletionRequestMessageContentPartAudio,
-            ChatCompletionRequestMessageContentPartFile,
-        ]
-    ]
-):
-    root: Union[
-        ChatCompletionRequestMessageContentPartText,
-        ChatCompletionRequestMessageContentPartImage,
-        ChatCompletionRequestMessageContentPartAudio,
-        ChatCompletionRequestMessageContentPartFile,
-    ]
-
-
-class ChatCompletionRequestAssistantMessageContentPart(
-    RootModel[
-        Union[
-            ChatCompletionRequestMessageContentPartText,
-            ChatCompletionRequestMessageContentPartRefusal,
-        ]
-    ]
-):
-    root: Union[
-        ChatCompletionRequestMessageContentPartText,
-        ChatCompletionRequestMessageContentPartRefusal,
-    ]
-
-
-class ChatCompletionRequestDeveloperMessageContent(
-    RootModel[List[ChatCompletionRequestMessageContentPartText]]
-):
-    root: List[ChatCompletionRequestMessageContentPartText] = Field(
-        ...,
-        description="An array of content parts with a defined type. For developer messages, only type `text` is supported.",
-        min_length=1,
-        title="Array of content parts",
-    )
-
-
-class ChatCompletionRequestDeveloperMessage(BaseModel):
-    content: Union[str, ChatCompletionRequestDeveloperMessageContent] = Field(
-        ..., description="The contents of the developer message."
-    )
-    role: Literal["developer"] = Field(
-        ..., description="The role of the messages author, in this case `developer`."
-    )
-    name: str | None = Field(
-        None,
-        description="An optional name for the participant. Provides the model information to differentiate between participants of the same role.",
-    )
-
-
-class ChatCompletionRequestSystemMessageContent(
-    RootModel[List[ChatCompletionRequestSystemMessageContentPart]]
-):
-    root: List[ChatCompletionRequestSystemMessageContentPart] = Field(
-        ...,
-        description="An array of content parts with a defined type. For system messages, only type `text` is supported.",
-        min_length=1,
-        title="Array of content parts",
-    )
-
-
-class ChatCompletionRequestSystemMessage(BaseModel):
-    content: Union[str, ChatCompletionRequestSystemMessageContent] = Field(
-        ..., description="The contents of the system message."
-    )
-    role: Literal["system"] = Field(
-        ..., description="The role of the messages author, in this case `system`."
-    )
-    name: str | None = Field(
-        None,
-        description="An optional name for the participant. Provides the model information to differentiate between participants of the same role.",
-    )
-
-
-class ChatCompletionRequestToolMessageContent(
-    RootModel[List[ChatCompletionRequestToolMessageContentPart]]
-):
-    root: List[ChatCompletionRequestToolMessageContentPart] = Field(
-        ...,
-        description="An array of content parts with a defined type. For tool messages, only type `text` is supported.",
-        min_length=1,
-        title="Array of content parts",
-    )
-
-
-class ChatCompletionRequestToolMessage(BaseModel):
-    role: Literal["tool"] = Field(
-        ..., description="The role of the messages author, in this case `tool`."
-    )
-    content: Union[str, ChatCompletionRequestToolMessageContent] = Field(
-        ..., description="The contents of the tool message."
-    )
-    tool_call_id: str = Field(
-        ..., description="Tool call that this message is responding to."
-    )
-
-
-class ChatCompletionRequestUserMessageContent(
-    RootModel[List[ChatCompletionRequestUserMessageContentPart]]
-):
-    root: List[ChatCompletionRequestUserMessageContentPart] = Field(
-        ...,
-        description="An array of content parts with a defined type. Supported options differ based on the [model](/docs/models) being used to generate the response. Can contain text, image, or audio inputs.",
-        min_length=1,
-        title="Array of content parts",
-    )
-
-
-class ChatCompletionRequestUserMessage(BaseModel):
-    content: Union[str, ChatCompletionRequestUserMessageContent] = Field(
-        ..., description="The contents of the user message.\n"
-    )
-    role: Literal["user"] = Field(
-        ..., description="The role of the messages author, in this case `user`."
-    )
-    name: str | None = Field(
-        None,
-        description="An optional name for the participant. Provides the model information to differentiate between participants of the same role.",
-    )
-
-
-class ChatCompletionRequestAssistantMessageContent(
-    RootModel[List[ChatCompletionRequestAssistantMessageContentPart] | None]
-):
-    root: List[ChatCompletionRequestAssistantMessageContentPart] | None = Field(
-        None,
-        description="An array of content parts with a defined type. Can be one or more of type `text`, or exactly one of type `refusal`.",
-        min_length=1,
-        title="Array of content parts",
-    )
-
-
-class ChatCompletionRequestAssistantMessage(BaseModel):
-    content: Union[str, ChatCompletionRequestAssistantMessageContent, None] = Field(
-        None,
-        description="The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified.\n",
-    )
-    refusal: str | None = Field(
-        None, description="The refusal message by the assistant."
-    )
-    role: Literal["assistant"] = Field(
-        ..., description="The role of the messages author, in this case `assistant`."
-    )
-    name: str | None = Field(
-        None,
-        description="An optional name for the participant. Provides the model information to differentiate between participants of the same role.",
-    )
-    audio: AudioInputData | None = Field(
-        None,
-        description="Data about a previous audio response from the model. \n[Learn more](/docs/guides/audio).\n",
-    )
-    tool_calls: ChatCompletionMessageToolCalls | None = None
-    function_call: ResponseFunctionCall | None = Field(  # Changed from FunctionCall
-        None,
-        description="Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model.",
-    )
-
-
-class ChatCompletionRequestMessage(
-    RootModel[
-        Union[
-            ChatCompletionRequestDeveloperMessage,
-            ChatCompletionRequestSystemMessage,
-            ChatCompletionRequestUserMessage,
-            ChatCompletionRequestAssistantMessage,
-            ChatCompletionRequestToolMessage,
-            ChatCompletionRequestFunctionMessage,
-        ]
-    ]
-):
-    root: Union[
-        ChatCompletionRequestDeveloperMessage,
-        ChatCompletionRequestSystemMessage,
-        ChatCompletionRequestUserMessage,
-        ChatCompletionRequestAssistantMessage,
-        ChatCompletionRequestToolMessage,
-        ChatCompletionRequestFunctionMessage,
-    ]
-
-
-class FunctionParameters(BaseModel):
-    pass
-    model_config = ConfigDict(
-        extra="allow",
-    )
-
-
-class FunctionObject(BaseModel):
-    description: str | None = Field(
-        None,
-        description="A description of what the function does, used by the model to choose when and how to call the function.",
-    )
-    name: str = Field(
-        ...,
-        description="The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.",
-    )
-    parameters: FunctionParameters | None = None
-    strict: bool | None = Field(
-        False,
-        description="Whether to enable strict schema adherence when generating the function call. If set to true, the model will follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn more about Structured Outputs in the [function calling guide](docs/guides/function-calling).",
-    )
-
-
-class ChatCompletionTool(BaseModel):
-    type: Literal["function"] = Field(
-        ...,
-        description="The type of the tool. Currently, only `function` is supported.",
-    )
-    function: FunctionObject
-
-
-class ChatCompletionFunctions(BaseModel):
-    description: str | None = Field(
-        None,
-        description="A description of what the function does, used by the model to choose when and how to call the function.",
-    )
-    name: str = Field(
-        ...,
-        description="The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.",
-    )
-    parameters: FunctionParameters | None = None
-
-
-class PredictionContentText(
-    RootModel[List[ChatCompletionRequestMessageContentPartText]]
-):
-    root: List[ChatCompletionRequestMessageContentPartText] = Field(
-        ...,
-        description="An array of content parts with a defined type. Supported options differ based on the [model](/docs/models) being used to generate the response. Can contain text inputs.",
-        min_length=1,
-        title="Array of content parts",
-    )
-
-
-class PredictionContent(BaseModel):
-    type: str = Field(
-        ...,
-        description="The type of the predicted content you want to provide. This type is\ncurrently always `content`.\n. Possible values: ['content']",
-    )
-    content: Union[str, PredictionContentText] = Field(
-        ...,
-        description="The content that should be matched when generating a model response.\nIf generated tokens would match this content, the entire model response\ncan be returned much more quickly.\n",
-    )
-
-
-class ResponseFormatJsonObject(BaseModel):
-    type: Literal["json_object"] = Field(
-        ...,
-        description="The type of response format being defined. Always `json_object`.",
-    )
-
-
-class ResponseFormatJsonSchemaSchema(FunctionParameters):
-    pass
-
-
-class ResponseFormatText(BaseModel):
-    type: Literal["text"] = Field(
-        ..., description="The type of response format being defined. Always `text`."
-    )
-
-
-class JsonSchemaDefinition(BaseModel):
-    description: str | None = Field(
-        None,
-        description="A description of what the response format is for, used by the model to\ndetermine how to respond in the format.\n",
-    )
-    name: str | None = Field(  # Make optional as per TextResponseFormatJsonSchema
-        None,
-        description="The name of the response format. Must be a-z, A-Z, 0-9, or contain\nunderscores and dashes, with a maximum length of 64.\n",
-    )
-    schema_: ResponseFormatJsonSchemaSchema = Field(..., alias="schema")
-    strict: bool | None = Field(
-        False,
-        description="Whether to enable strict schema adherence when generating the output.\nIf set to true, the model will always follow the exact schema defined\nin the `schema` field. Only a subset of JSON Schema is supported when\n`strict` is `true`. To learn more, read the [Structured Outputs\nguide](/docs/guides/structured-outputs).\n",
-    )
-
-
-class ResponseFormatJsonSchema(BaseModel):
-    type: Literal["json_schema"] = Field(
-        ...,
-        description="The type of response format being defined. Always `json_schema`.",
-    )
-    json_schema: JsonSchemaDefinition = Field(
-        ...,
-        description="Structured Outputs configuration options, including a JSON Schema.\n",
-        title="JSON schema",
-    )
-
-
-class StopConfigurationItem(RootModel[List[str] | None]):
-    root: List[str] | None = Field(
-        None,
-        description="Up to 4 sequences where the API will stop generating further tokens. The\nreturned text will not contain the stop sequence.\n",
-        max_length=4,
-        min_length=1,
-    )
-
-
-class StopConfiguration(RootModel[Union[str, StopConfigurationItem, None]]):
-    root: Union[str, StopConfigurationItem, None] = Field(
-        None,
-        description="Up to 4 sequences where the API will stop generating further tokens. The\nreturned text will not contain the stop sequence.\n",
-    )
-
-
-class ModelIdsShared(RootModel[str]):
-    root: str = Field(..., examples=["gpt-4o"])
-
-
-class WebSearchLocation(BaseModel):
-    country: str | None = Field(
-        None,
-        description="The two-letter \n[ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of the user,\ne.g. `US`.\n",
-    )
-    region: str | None = Field(
-        None,
-        description="Free text input for the region of the user, e.g. `California`.\n",
-    )
-    city: str | None = Field(
-        None,
-        description="Free text input for the city of the user, e.g. `San Francisco`.\n",
-    )
-    timezone: str | None = Field(
-        None,
-        description="The [IANA timezone](https://timeapi.io/documentation/iana-timezones) \nof the user, e.g. `America/Los_Angeles`.\n",
-    )
-
-
-class UserLocation(BaseModel):
-    type: str = Field(
-        ...,
-        description="The type of location approximation. Always `approximate`.\n"
-        " Possible values: ['approximate']",
-    )
-    approximate: WebSearchLocation
-
-
-class WebSearchOptions(BaseModel):
-    user_location: UserLocation | None = Field(
-        None, description="Approximate location parameters for the search.\n"
-    )
-    search_context_size: str | None = (
-        "medium"  # Possible values: ['low', 'medium', 'high']
-    )
-
-
-class VoiceIdsShared(RootModel[str]):
-    root: str = Field(..., examples=["ash"])
-
-
-class AudioOutputSettings(BaseModel):
-    voice: VoiceIdsShared = Field(
-        ...,
-        description="The voice the model uses to respond. Supported voices are \n`alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, and `shimmer`.\n",
-    )
-    format: str = Field(
-        ...,
-        description="Specifies the output audio format. Must be one of `wav`, `mp3`, `flac`,\n`opus`, or `pcm16`.\n. Possible values: ['wav', 'mp3', 'flac', 'opus', 'pcm16']",
-    )
-
-
-class ParallelToolCalls(RootModel[bool]):
-    root: bool = Field(
-        ...,
-        description="Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use.",
-    )
-
-
-class ModelResponseProperties(BaseModel):
-    metadata: Dict[str, str] | None = (
-        None  # Added based on CreateModelResponseProperties
-    )
-    temperature: float | None = Field(
-        1.0,
-        description="What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\nWe generally recommend altering this or `top_p` but not both.\n",
-        examples=[1],
-        ge=0.0,
-        le=2.0,
-    )
-    top_p: float | None = Field(
-        1.0,
-        description="An alternative to sampling with temperature, called nucleus sampling,\nwhere the model considers the results of the tokens with top_p probability\nmass. So 0.1 means only the tokens comprising the top 10% probability mass\nare considered.\n\nWe generally recommend altering this or `temperature` but not both.\n",
-        examples=[1],
-        ge=0.0,
-        le=1.0,
-    )
-    user: str | None = Field(
-        None,
-        description="A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).\n",
-        examples=["user-1234"],
-    )
-
-
-class ChatCompletionRequest(ModelResponseProperties):
-    messages: List[ChatCompletionRequestMessage] = Field(
-        ...,
-        description="A list of messages comprising the conversation so far. Depending on the\n[model](/docs/models) you use, different message types (modalities) are\nsupported, like [text](/docs/guides/text-generation),\n[images](/docs/guides/vision), and [audio](/docs/guides/audio).\n",
-        min_length=1,
-    )
-    model: str = Field(
-        ...,
-        description="Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI\noffers a wide range of models with different capabilities, performance\ncharacteristics, and price points. Refer to the [model guide](/docs/models)\nto browse and compare available models.\n",
-    )
-    modalities: ChatCompletionModalities | None = None
-    reasoning_effort: str | None = (
-        "medium"  # Possible values: ['low', 'medium', 'high']
-    )
-    max_completion_tokens: int | None = Field(
-        None,
-        description="An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and [reasoning tokens](/docs/guides/reasoning).\n",
-    )
-    frequency_penalty: float | None = Field(
-        0.0,
-        description="Number between -2.0 and 2.0. Positive values penalize new tokens based on\ntheir existing frequency in the text so far, decreasing the model's\nlikelihood to repeat the same line verbatim.\n",
-        ge=-2.0,
-        le=2.0,
-    )
-    presence_penalty: float | None = Field(
-        0.0,
-        description="Number between -2.0 and 2.0. Positive values penalize new tokens based on\nwhether they appear in the text so far, increasing the model's likelihood\nto talk about new topics.\n",
-        ge=-2.0,
-        le=2.0,
-    )
-    web_search_options: WebSearchOptions | None = Field(
-        None,
-        description="This tool searches the web for relevant results to use in a response.\nLearn more about the [web search tool](/docs/guides/tools-web-search?api-mode=chat).\n",
-        title="Web search",
-    )
-    top_logprobs: int | None = Field(
-        None,
-        description="An integer between 0 and 20 specifying the number of most likely tokens to\nreturn at each token position, each with an associated log probability.\n`logprobs` must be set to `true` if this parameter is used.\n",
-        ge=0,
-        le=20,
-    )
-    response_format: Union[
-        ResponseFormatText, ResponseFormatJsonSchema, ResponseFormatJsonObject, None
-    ] = Field(
-        None,
-        description='An object specifying the format that the model must output.\n\nSetting to `{ "type": "json_schema", "json_schema": {...} }` enables\nStructured Outputs which ensures the model will match your supplied JSON\nschema. Learn more in the [Structured Outputs\nguide](/docs/guides/structured-outputs).\n\nSetting to `{ "type": "json_object" }` enables the older JSON mode, which\nensures the message the model generates is valid JSON. Using `json_schema`\nis preferred for models that support it.\n',
-    )
-    service_tier: str | None = Field(
-        "auto",
-        description="Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service:\n - If set to 'auto', and the Project is Scale tier enabled, the system\n will utilize scale tier credits until they are exhausted.\n - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee.\n - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee.\n - When not set, the default behavior is 'auto'.\n\n When this parameter is set, the response body will include the `service_tier` utilized.\n"
-        + " Possible values: ['scale', 'default', 'auto']",
-    )
-    audio: AudioOutputSettings | None = Field(
-        None,
-        description='Parameters for audio output. Required when audio output is requested with\n`modalities: ["audio"]`. [Learn more](/docs/guides/audio).\n',
-    )
-    store: bool | None = Field(
-        False,
-        description="Whether or not to store the output of this chat completion request for \nuse in our [model distillation](/docs/guides/distillation) or\n[evals](/docs/guides/evals) products.\n",
-    )
-    stream: bool | None = Field(
-        False,
-        description="If set to true, the model response data will be streamed to the client\nas it is generated using [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).\nSee the [Streaming section below](/docs/api-reference/chat/streaming)\nfor more information, along with the [streaming responses](/docs/guides/streaming-responses)\nguide for more information on how to handle the streaming events.\n",
-    )
-    stop: StopConfiguration | None = None
-    logit_bias: Dict[str, int] | None = Field(
-        None,
-        description="Modify the likelihood of specified tokens appearing in the completion.\n\nAccepts a JSON object that maps tokens (specified by their token ID in the\ntokenizer) to an associated bias value from -100 to 100. Mathematically,\nthe bias is added to the logits generated by the model prior to sampling.\nThe exact effect will vary per model, but values between -1 and 1 should\ndecrease or increase likelihood of selection; values like -100 or 100\nshould result in a ban or exclusive selection of the relevant token.\n",
-    )
-    logprobs: bool | None = Field(
-        False,
-        description="Whether to return log probabilities of the output tokens or not. If true,\nreturns the log probabilities of each output token returned in the\n`content` of `message`.\n",
-    )
-    max_tokens: int | None = Field(
-        None,
-        description="The maximum number of [tokens](/tokenizer) that can be generated in the\nchat completion. This value can be used to control\n[costs](https://openai.com/api/pricing/) for text generated via API.\n\nThis value is now deprecated in favor of `max_completion_tokens`, and is\nnot compatible with [o1 series models](/docs/guides/reasoning).\n",
-    )
-    n: int | None = Field(
-        1,
-        description="How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs.",
-        examples=[1],
-        ge=1,
-        le=128,
-    )
-    prediction: PredictionContent | None = Field(
-        None,
-        description="Configuration for a [Predicted Output](/docs/guides/predicted-outputs),\nwhich can greatly improve response times when large parts of the model\nresponse are known ahead of time. This is most common when you are\nregenerating a file with only minor changes to most of the content.\n",
-    )
-    seed: int | None = Field(
-        None,
-        description="This feature is in Beta.\nIf specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result.\nDeterminism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend.\n",
-    )
-    stream_options: ChatCompletionStreamOptions | None = None
-    tools: List[ChatCompletionTool] | None = Field(
-        None,
-        description="A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported.\n",
-    )
-    tool_choice: ChatCompletionToolChoiceOption | None = None
-    parallel_tool_calls: ParallelToolCalls | None = Field(
-        default_factory=lambda: ParallelToolCalls.model_validate(True)
-    )
-    function_call: Union[str, ChatCompletionFunctionCallOption, None] = Field(
-        None,
-        description="Deprecated in favor of `tool_choice`.\n\nControls which (if any) function is called by the model.\n\n`none` means the model will not call a function and instead generates a\nmessage.\n\n`auto` means the model can pick between generating a message or calling a\nfunction.\n\nSpecifying a particular function via `{\"name\": \"my_function\"}` forces the\nmodel to call that function.\n\n`none` is the default when no functions are present. `auto` is the default\nif functions are present.\n'. Possible values for string: ['none', 'auto']",
-    )
-    functions: List[ChatCompletionFunctions] | None = Field(
-        None,
-        description="Deprecated in favor of `tools`.\n\nA list of functions the model may generate JSON inputs for.\n",
-        max_length=128,
-        min_length=1,
-    )
-
-
-# Insert missing class definition
-class ChatCompletionStreamOptions(BaseModel):
-    include_usage: bool | None = Field(
-        None,
-        description="If set, an additional chunk will be streamed before the `data: [DONE]`\nmessage. The `usage` field on this chunk shows the token usage statistics\nfor the entire request, and the `choices` field will always be an empty\narray. \n\nAll other chunks will also include a `usage` field, but with a null\nvalue. **NOTE:** If the stream is interrupted, you may not receive the\nfinal usage chunk which contains the total token usage for the request.\n",
-    )
-
-
-class ChatCompletionToolChoiceOption(
-    RootModel[Union[str, ChatCompletionNamedToolChoice]]
-):
-    root: Union[str, ChatCompletionNamedToolChoice] = Field(
-        ...,
-        description=(
-            'Controls which (if any) tool is called by the model.\n`none` means the model will not call a function and instead generates a message.\n`auto` means the model can pick between generating a message or calling one or more tools.\n`required` means the model must call one or more tools.\nSpecifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool.\n\n`none` is the default when no tools are present. `auto` is the default if tools are present.\n'
-            " Possible values for string: ['none', 'auto', 'required']"
-        ),
-    )
-
-
-class CompletionUsage(BaseModel):
-    completion_tokens: int = Field(
-        ..., description="Number of tokens in the generated completion."
-    )
-
-
-# Re-insert ChatCompletionChoice definition
-class ChatCompletionChoice(BaseModel):
-    finish_reason: str | None = Field(  # Made optional as per stream response
-        None,
-        description="The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence,\n`length` if the maximum number of tokens specified in the request was reached,\n`content_filter` if content was omitted due to a flag from our content filters,\n`tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function.\n"
-        + " Possible values: ['stop', 'length', 'tool_calls', 'content_filter', 'function_call']",
-    )
-    index: int = Field(
-        ..., description="The index of the choice in the list of choices."
-    )
-    message: "ChatCompletionResponseMessage"
-    logprobs: Union["Logprobs", None] = (
-        Field(  # Made optional as per CreateChatCompletionFunctionResponse
-            None, description="Log probability information for the choice."
-        )
-    )
-
-
-# Re-insert CreateChatCompletionResponse definition
-class ChatCompletionResponse(BaseModel):
-    id: str = Field(..., description="A unique identifier for the chat completion.")
-    choices: List[ChatCompletionChoice] = Field(
-        ...,
-        description="A list of chat completion choices. Can be more than one if `n` is greater than 1.",
-    )
-    created: int = Field(
-        ...,
-        description="The Unix timestamp (in seconds) of when the chat completion was created.",
-    )
-    model: str = Field(..., description="The model used for the chat completion.")
-    service_tier: str | None = Field(
-        None,
-        description="The service tier used for processing the request. Possible values: ['scale', 'default', 'auto']",
-        examples=["scale"],
-    )
-    system_fingerprint: str | None = Field(
-        None,
-        description="This fingerprint represents the backend configuration that the model runs with.\n\nCan be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.\n",
-    )
-    object: Literal["chat.completion"] = Field(
-        ..., description="The object type, which is always `chat.completion`."
-    )
-    usage: CompletionUsage | None = None
-
-
-# Re-insert ChatCompletionStreamChoice definition
-class ChatCompletionStreamChoice(BaseModel):
-    delta: "ChatCompletionStreamResponseDelta"
-    logprobs: Union["Logprobs", None] = Field(
-        None, description="Log probability information for the choice."
-    )
-    finish_reason: str | None = Field(  # Made optional
-        None,
-        description="The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence,\n`length` if the maximum number of tokens specified in the request was reached,\n`content_filter` if content was omitted due to a flag from our content filters,\n`tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function.\n"
-        + " Possible values: ['stop', 'length', 'tool_calls', 'content_filter', 'function_call']",
-    )
-    index: int = Field(
-        ..., description="The index of the choice in the list of choices."
-    )
-
-
-# Re-insert CreateChatCompletionStreamResponse definition
-class CreateChatCompletionStreamResponse(BaseModel):
-    id: str = Field(
-        ...,
-        description="A unique identifier for the chat completion. Each chunk has the same ID.",
-    )
-    choices: List[ChatCompletionStreamChoice] = Field(
-        ...,
-        description=(
-            "A list of chat completion choices. Can contain more than one elements if `n` is greater than 1. Can also be empty for the"
-            '\nlast chunk if you set `stream_options: {"include_usage": true}` in your request.\n'
-        ),
-    )
-    created: int = Field(
-        ...,
-        description="The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp.",
-    )
-    model: str = Field(..., description="The model to generate the completion.")
-    service_tier: str | None = Field(
-        None,
-        description="The service tier used for processing the request. Possible values: ['scale', 'default', 'auto']",
-        examples=["scale"],
-    )
-    system_fingerprint: str | None = Field(
-        None,
-        description="This fingerprint represents the backend configuration that the model runs with.\nCan be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.\n",
-    )
-    object: Literal["chat.completion.chunk"] = Field(
-        ..., description="The object type, which is always `chat.completion.chunk`."
-    )
-    usage: CompletionUsage | None = Field(
-        None,
-        description=(
-            "An optional field that will only be present when you set"
-            '\n`stream_options: {"include_usage": true}` in your request. When present, it'
-            "\ncontains a null value **except for the last chunk** which contains the"
-            "\ntoken usage statistics for the entire request.\n\n**NOTE:** If the stream is interrupted or cancelled, you may not"
-            "\nreceive the final usage chunk which contains the total token usage for"
-            "\nthe request.\n"
-        ),
-    )
-
-
-class ChatCompletionList(BaseModel):
-    object: Literal["list"] = Field(
-        ...,
-        description="The type of object being deleted. Possible values: ['list']",
-    )
-
-
-# Add remaining missing definitions
-class UrlCitationAnnotationData(BaseModel):
-    end_index: int = Field(
-        ...,
-        description="The index of the last character of the URL citation in the message.",
-    )
-    start_index: int = Field(
-        ...,
-        description="The index of the first character of the URL citation in the message.",
-    )
-    url: str = Field(..., description="The URL of the web resource.")
-    title: str = Field(..., description="The title of the web resource.")
-
-
-class UrlCitationAnnotation(BaseModel):
-    type: str = Field(
-        ..., description="The type of the URL citation. Always `url_citation`."
-    )
-    url_citation: UrlCitationAnnotationData = Field(
-        ..., description="A URL citation when using web search."
-    )
-
-
-class AudioResponseData(BaseModel):
-    id: str = Field(..., description="Unique identifier for this audio response.")
-    expires_at: int = Field(
-        ...,
-        description="The Unix timestamp (in seconds) for when this audio response will\nno longer be accessible on the server for use in multi-turn\nconversations.\n",
-    )
-    data: str = Field(
-        ...,
-        description="Base64 encoded audio bytes generated by the model, in the format\nspecified in the request.\n",
-    )
-    transcript: str = Field(
-        ..., description="Transcript of the audio generated by the model."
-    )
-
-
-class TopLogprob(BaseModel):
-    token: str = Field(..., description="The token.")
-    logprob: float = Field(
-        ...,
-        description="The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely.",
-    )
-    bytes: List[int] | None = Field(
-        ...,
-        description="A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token.",
-    )
-
-
-class PromptTokensDetails(BaseModel):
-    audio_tokens: int | None = Field(
-        0, description="Audio input tokens present in the prompt."
-    )
-    cached_tokens: int | None = Field(
-        0, description="Cached tokens present in the prompt."
-    )
-
-
-class ChatCompletionStreamResponseDelta(BaseModel):
-    content: str | None = Field(None, description="The contents of the chunk message.")
-    function_call: FunctionCallChunk | None = Field(
-        None,
-        description="Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model.",
-    )
-    tool_calls: List[ChatCompletionMessageToolCallChunk] | None = None
-    role: str | None = Field(
-        None,
-        description="The role of the author of this message."
-        " Possible values: ['developer', 'function', 'system', 'tool', 'user', 'assistant']",
-    )
-    refusal: str | None = Field(
-        None, description="The refusal message generated by the model."
-    )
-
-
-class CompletionTokensDetails(BaseModel):
-    accepted_prediction_tokens: int | None = Field(
-        0,
-        description="The number of tokens accepted from the prediction. This is the number of tokens that were generated by the model and accepted by the system as part of the prediction process.",
-    )
-
-    rejected_prediction_tokens: int | None = Field(
-        0,
-        description="The number of tokens rejected from the prediction. This is the number of tokens that were generated by the model but rejected by the system as part of the prediction process. This can happen due to various reasons such as content filters, token limits, or other constraints.",
-    )
-
-
-class Logprobs(BaseModel):
-    content: List["ChatCompletionTokenLogprob"] | None = Field(  # Made optional
-        None, description="Log probability information for the choice."
-    )
-
-
-# Insert missing ChatCompletionResponseMessage definition
-class ChatCompletionResponseMessage(BaseModel):
-    content: Optional[str] = Field(None, description="The contents of the message.")
-    refusal: Optional[str] = Field(
-        None, description="The refusal message generated by the model."
-    )
-    tool_calls: Optional[ChatCompletionMessageToolCalls] = None
-    annotations: Optional[List[UrlCitationAnnotation]] = Field(
-        None,
-        description="Annotations for the message, when applicable, as when using the\n[web search tool](/docs/guides/tools-web-search?api-mode=chat).\n",
-    )
-    role: Literal["assistant"] = Field(
-        ..., description="The role of the author of this message."
-    )
-    function_call: Optional[ResponseFunctionCall] = Field(
-        None,
-        description="Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model.",
-    )
-    audio: Optional[AudioResponseData] = Field(
-        None,
-        description="If the audio output modality is requested, this object contains data\nabout the audio response from the model. [Learn more](/docs/guides/audio).\n",
-    )
-
-
-# Insert missing ChatCompletionTokenLogprob definition
-class ChatCompletionTokenLogprob(BaseModel):
-    token: str = Field(..., description="The token.")
-    logprob: float = Field(
-        ...,
-        description="The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely.",
-    )
-    bytes: List[int] | None = Field(
-        ...,
-        description="A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token.",
-    )
-    top_logprobs: List["TopLogprob"] = Field(
-        ...,
-        description="List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested `top_logprobs` returned.",
-    )
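For context, the deleted module was a self-contained set of pydantic models mirroring the OpenAI Chat Completions schema. A minimal sketch (not part of the diff; assumes pydantic v2 semantics, which these models target) of how a request could be validated against them on 0.1.45, before the removal:

# Hypothetical usage sketch against nebu 0.1.45; this import no longer
# resolves on 0.1.48, where nebu/chatx/openai.py has been deleted.
from nebu.chatx.openai import ChatCompletionRequest

# The `messages` root-model union dispatches each dict to the matching
# role-specific message class (here ChatCompletionRequestUserMessage,
# selected by its role: Literal["user"] discriminating field).
request = ChatCompletionRequest.model_validate(
    {
        "model": "gpt-4o",
        "messages": [{"role": "user", "content": "Hello!"}],
    }
)

# Declared defaults are applied: temperature=1.0, top_p=1.0, n=1, stream=False.
print(request.model_dump(exclude_none=True))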