objectiveai 1.1.12 → 1.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -21
- package/dist/index.cjs +3060 -663
- package/dist/index.d.ts +37572 -1200
- package/dist/index.js +3056 -662
- package/package.json +64 -61
package/dist/index.js
CHANGED
|
@@ -1,117 +1,1313 @@
|
|
|
1
|
+
import z from "zod";
|
|
2
|
+
// Expressions
|
|
3
|
+
export const ExpressionSchema = z
|
|
4
|
+
.object({
|
|
5
|
+
$jmespath: z.string().describe("A JMESPath expression."),
|
|
6
|
+
})
|
|
7
|
+
.describe("An expression which evaluates to a value.");
|
|
8
|
+
export const JsonValueSchema = z
|
|
9
|
+
.lazy(() => z.union([
|
|
10
|
+
z.null(),
|
|
11
|
+
z.boolean(),
|
|
12
|
+
z.number(),
|
|
13
|
+
z.string(),
|
|
14
|
+
z.array(JsonValueSchema),
|
|
15
|
+
z.record(z.string(), JsonValueSchema),
|
|
16
|
+
]))
|
|
17
|
+
.describe("A JSON value.");
|
|
18
|
+
export const JsonValueExpressionSchema = z
|
|
19
|
+
.lazy(() => z.union([
|
|
20
|
+
z.null(),
|
|
21
|
+
z.boolean(),
|
|
22
|
+
z.number(),
|
|
23
|
+
z.string(),
|
|
24
|
+
z.array(JsonValueExpressionSchema),
|
|
25
|
+
z.record(z.string(), JsonValueExpressionSchema),
|
|
26
|
+
ExpressionSchema.describe("An expression which evaluates to a JSON value."),
|
|
27
|
+
]))
|
|
28
|
+
.describe(JsonValueSchema.description);
|
|
29
|
+
// Errors
|
|
30
|
+
export const ObjectiveAIErrorSchema = z
|
|
31
|
+
.object({
|
|
32
|
+
code: z.uint32().describe("The status code of the error."),
|
|
33
|
+
message: z.any().describe("The message or details of the error."),
|
|
34
|
+
})
|
|
35
|
+
.describe("An error returned by the ObjectiveAI API.");
|
|
36
|
+
// Messages
|
|
37
|
+
export const MessageSchema = z
|
|
38
|
+
.discriminatedUnion("role", [
|
|
39
|
+
Message.DeveloperSchema,
|
|
40
|
+
Message.SystemSchema,
|
|
41
|
+
Message.UserSchema,
|
|
42
|
+
Message.ToolSchema,
|
|
43
|
+
Message.AssistantSchema,
|
|
44
|
+
])
|
|
45
|
+
.describe("A message exchanged in a chat conversation.");
|
|
46
|
+
export const MessageExpressionSchema = z
|
|
47
|
+
.union([
|
|
48
|
+
z
|
|
49
|
+
.discriminatedUnion("role", [
|
|
50
|
+
Message.DeveloperExpressionSchema,
|
|
51
|
+
Message.SystemExpressionSchema,
|
|
52
|
+
Message.UserExpressionSchema,
|
|
53
|
+
Message.ToolExpressionSchema,
|
|
54
|
+
Message.AssistantExpressionSchema,
|
|
55
|
+
])
|
|
56
|
+
.describe(MessageSchema.description),
|
|
57
|
+
ExpressionSchema.describe("An expression which evaluates to a message."),
|
|
58
|
+
])
|
|
59
|
+
.describe(MessageSchema.description);
|
|
60
|
+
export var Message;
|
|
61
|
+
(function (Message) {
|
|
62
|
+
Message.SimpleContentPartSchema = z
|
|
63
|
+
.object({
|
|
64
|
+
type: z.literal("text"),
|
|
65
|
+
text: z.string().describe("The text content."),
|
|
66
|
+
})
|
|
67
|
+
.describe("A simple text content part.");
|
|
68
|
+
Message.SimpleContentSchema = z
|
|
69
|
+
.union([SimpleContent.TextSchema, SimpleContent.PartsSchema])
|
|
70
|
+
.describe("Simple content.");
|
|
71
|
+
Message.SimpleContentExpressionSchema = z
|
|
72
|
+
.union([
|
|
73
|
+
SimpleContent.TextSchema,
|
|
74
|
+
SimpleContent.PartsExpressionSchema,
|
|
75
|
+
ExpressionSchema.describe("An expression which evaluates to simple content."),
|
|
76
|
+
])
|
|
77
|
+
.describe(Message.SimpleContentSchema.description);
|
|
78
|
+
let SimpleContent;
|
|
79
|
+
(function (SimpleContent) {
|
|
80
|
+
SimpleContent.TextSchema = z.string().describe("Plain text content.");
|
|
81
|
+
SimpleContent.PartSchema = z
|
|
82
|
+
.object({
|
|
83
|
+
type: z.literal("text"),
|
|
84
|
+
text: z.string().describe("The text content."),
|
|
85
|
+
})
|
|
86
|
+
.describe("A simple content part.");
|
|
87
|
+
SimpleContent.PartExpressionSchema = z
|
|
88
|
+
.union([
|
|
89
|
+
SimpleContent.PartSchema,
|
|
90
|
+
ExpressionSchema.describe("An expression which evaluates to a simple content part."),
|
|
91
|
+
])
|
|
92
|
+
.describe(SimpleContent.PartSchema.description);
|
|
93
|
+
SimpleContent.PartsSchema = z
|
|
94
|
+
.array(SimpleContent.PartSchema)
|
|
95
|
+
.describe("An array of simple content parts.");
|
|
96
|
+
SimpleContent.PartsExpressionSchema = z
|
|
97
|
+
.array(SimpleContent.PartExpressionSchema)
|
|
98
|
+
.describe(SimpleContent.PartsSchema.description);
|
|
99
|
+
})(SimpleContent = Message.SimpleContent || (Message.SimpleContent = {}));
|
|
100
|
+
Message.RichContentSchema = z
|
|
101
|
+
.union([RichContent.TextSchema, RichContent.PartsSchema])
|
|
102
|
+
.describe("Rich content.");
|
|
103
|
+
Message.RichContentExpressionSchema = z
|
|
104
|
+
.union([
|
|
105
|
+
RichContent.TextSchema,
|
|
106
|
+
RichContent.PartsExpressionSchema,
|
|
107
|
+
ExpressionSchema.describe("An expression which evaluates to rich content."),
|
|
108
|
+
])
|
|
109
|
+
.describe(Message.RichContentSchema.description);
|
|
110
|
+
let RichContent;
|
|
111
|
+
(function (RichContent) {
|
|
112
|
+
RichContent.TextSchema = z.string().describe("Plain text content.");
|
|
113
|
+
RichContent.PartSchema = z
|
|
114
|
+
.discriminatedUnion("type", [
|
|
115
|
+
Part.TextSchema,
|
|
116
|
+
Part.ImageUrlSchema,
|
|
117
|
+
Part.InputAudioSchema,
|
|
118
|
+
Part.VideoUrlSchema,
|
|
119
|
+
Part.FileSchema,
|
|
120
|
+
])
|
|
121
|
+
.describe("A rich content part.");
|
|
122
|
+
RichContent.PartExpressionSchema = z
|
|
123
|
+
.union([
|
|
124
|
+
RichContent.PartSchema,
|
|
125
|
+
ExpressionSchema.describe("An expression which evaluates to a rich content part."),
|
|
126
|
+
])
|
|
127
|
+
.describe(RichContent.PartSchema.description);
|
|
128
|
+
let Part;
|
|
129
|
+
(function (Part) {
|
|
130
|
+
Part.TextSchema = z
|
|
131
|
+
.object({
|
|
132
|
+
type: z.literal("text"),
|
|
133
|
+
text: Text.TextSchema,
|
|
134
|
+
})
|
|
135
|
+
.describe("A text rich content part.");
|
|
136
|
+
let Text;
|
|
137
|
+
(function (Text) {
|
|
138
|
+
Text.TextSchema = z.string().describe("The text content.");
|
|
139
|
+
})(Text = Part.Text || (Part.Text = {}));
|
|
140
|
+
Part.ImageUrlSchema = z
|
|
141
|
+
.object({
|
|
142
|
+
type: z.literal("image_url"),
|
|
143
|
+
image_url: ImageUrl.DefinitionSchema,
|
|
144
|
+
})
|
|
145
|
+
.describe("An image rich content part.");
|
|
146
|
+
let ImageUrl;
|
|
147
|
+
(function (ImageUrl) {
|
|
148
|
+
ImageUrl.DetailSchema = z
|
|
149
|
+
.enum(["auto", "low", "high"])
|
|
150
|
+
.describe("Specifies the detail level of the image.");
|
|
151
|
+
ImageUrl.UrlSchema = z
|
|
152
|
+
.string()
|
|
153
|
+
.describe("Either a URL of the image or the base64 encoded image data.");
|
|
154
|
+
ImageUrl.DefinitionSchema = z
|
|
155
|
+
.object({
|
|
156
|
+
url: ImageUrl.UrlSchema,
|
|
157
|
+
detail: ImageUrl.DetailSchema.optional().nullable(),
|
|
158
|
+
})
|
|
159
|
+
.describe("The URL of the image and its optional detail level.");
|
|
160
|
+
})(ImageUrl = Part.ImageUrl || (Part.ImageUrl = {}));
|
|
161
|
+
Part.InputAudioSchema = z
|
|
162
|
+
.object({
|
|
163
|
+
type: z.literal("input_audio"),
|
|
164
|
+
input_audio: InputAudio.DefinitionSchema,
|
|
165
|
+
})
|
|
166
|
+
.describe("An audio rich content part.");
|
|
167
|
+
let InputAudio;
|
|
168
|
+
(function (InputAudio) {
|
|
169
|
+
InputAudio.FormatSchema = z
|
|
170
|
+
.enum(["wav", "mp3"])
|
|
171
|
+
.describe("The format of the encoded audio data.");
|
|
172
|
+
InputAudio.DataSchema = z
|
|
173
|
+
.string()
|
|
174
|
+
.describe("Base64 encoded audio data.");
|
|
175
|
+
InputAudio.DefinitionSchema = z
|
|
176
|
+
.object({
|
|
177
|
+
data: InputAudio.DataSchema,
|
|
178
|
+
format: InputAudio.FormatSchema,
|
|
179
|
+
})
|
|
180
|
+
.describe("The audio data and its format.");
|
|
181
|
+
})(InputAudio = Part.InputAudio || (Part.InputAudio = {}));
|
|
182
|
+
Part.VideoUrlSchema = z
|
|
183
|
+
.object({
|
|
184
|
+
type: z.enum(["video_url", "input_video"]),
|
|
185
|
+
video_url: VideoUrl.DefinitionSchema,
|
|
186
|
+
})
|
|
187
|
+
.describe("A video rich content part.");
|
|
188
|
+
let VideoUrl;
|
|
189
|
+
(function (VideoUrl) {
|
|
190
|
+
VideoUrl.UrlSchema = z.string().describe("URL of the video.");
|
|
191
|
+
VideoUrl.DefinitionSchema = z.object({
|
|
192
|
+
url: VideoUrl.UrlSchema,
|
|
193
|
+
});
|
|
194
|
+
})(VideoUrl = Part.VideoUrl || (Part.VideoUrl = {}));
|
|
195
|
+
Part.FileSchema = z
|
|
196
|
+
.object({
|
|
197
|
+
type: z.literal("file"),
|
|
198
|
+
file: File.DefinitionSchema,
|
|
199
|
+
})
|
|
200
|
+
.describe("A file rich content part.");
|
|
201
|
+
let File;
|
|
202
|
+
(function (File) {
|
|
203
|
+
File.FileDataSchema = z
|
|
204
|
+
.string()
|
|
205
|
+
.describe("The base64 encoded file data, used when passing the file to the model as a string.");
|
|
206
|
+
File.FileIdSchema = z
|
|
207
|
+
.string()
|
|
208
|
+
.describe("The ID of an uploaded file to use as input.");
|
|
209
|
+
File.FilenameSchema = z
|
|
210
|
+
.string()
|
|
211
|
+
.describe("The name of the file, used when passing the file to the model as a string.");
|
|
212
|
+
File.FileUrlSchema = z
|
|
213
|
+
.string()
|
|
214
|
+
.describe("The URL of the file, used when passing the file to the model as a URL.");
|
|
215
|
+
File.DefinitionSchema = z
|
|
216
|
+
.object({
|
|
217
|
+
file_data: File.FileDataSchema.optional().nullable(),
|
|
218
|
+
file_id: File.FileIdSchema.optional().nullable(),
|
|
219
|
+
filename: File.FilenameSchema.optional().nullable(),
|
|
220
|
+
file_url: File.FileUrlSchema.optional().nullable(),
|
|
221
|
+
})
|
|
222
|
+
.describe("The file to be used as input, either as base64 data, an uploaded file ID, or a URL.");
|
|
223
|
+
})(File = Part.File || (Part.File = {}));
|
|
224
|
+
})(Part = RichContent.Part || (RichContent.Part = {}));
|
|
225
|
+
RichContent.PartsSchema = z
|
|
226
|
+
.array(RichContent.PartSchema)
|
|
227
|
+
.describe("An array of rich content parts.");
|
|
228
|
+
RichContent.PartsExpressionSchema = z
|
|
229
|
+
.array(RichContent.PartExpressionSchema)
|
|
230
|
+
.describe(RichContent.PartsSchema.description);
|
|
231
|
+
})(RichContent = Message.RichContent || (Message.RichContent = {}));
|
|
232
|
+
Message.NameSchema = z
|
|
233
|
+
.string()
|
|
234
|
+
.describe("An optional name for the participant. Provides the model information to differentiate between participants of the same role.");
|
|
235
|
+
Message.NameExpressionSchema = z
|
|
236
|
+
.union([
|
|
237
|
+
Message.NameSchema,
|
|
238
|
+
ExpressionSchema.describe("An expression which evaluates to a string."),
|
|
239
|
+
])
|
|
240
|
+
.describe(Message.NameSchema.description);
|
|
241
|
+
Message.DeveloperSchema = z
|
|
242
|
+
.object({
|
|
243
|
+
role: z.literal("developer"),
|
|
244
|
+
content: Message.SimpleContentSchema,
|
|
245
|
+
name: Message.NameSchema.optional().nullable(),
|
|
246
|
+
})
|
|
247
|
+
.describe("Developer-provided instructions that the model should follow, regardless of messages sent by the user.");
|
|
248
|
+
Message.DeveloperExpressionSchema = z
|
|
249
|
+
.object({
|
|
250
|
+
role: z.literal("developer"),
|
|
251
|
+
content: Message.SimpleContentExpressionSchema,
|
|
252
|
+
name: Message.NameExpressionSchema.optional().nullable(),
|
|
253
|
+
})
|
|
254
|
+
.describe(Message.DeveloperSchema.description);
|
|
255
|
+
Message.SystemSchema = z
|
|
256
|
+
.object({
|
|
257
|
+
role: z.literal("system"),
|
|
258
|
+
content: Message.SimpleContentSchema,
|
|
259
|
+
name: Message.NameSchema.optional().nullable(),
|
|
260
|
+
})
|
|
261
|
+
.describe("Developer-provided instructions that the model should follow, regardless of messages sent by the user.");
|
|
262
|
+
Message.SystemExpressionSchema = z
|
|
263
|
+
.object({
|
|
264
|
+
role: z.literal("system"),
|
|
265
|
+
content: Message.SimpleContentExpressionSchema,
|
|
266
|
+
name: Message.NameExpressionSchema.optional().nullable(),
|
|
267
|
+
})
|
|
268
|
+
.describe(Message.SystemSchema.description);
|
|
269
|
+
Message.UserSchema = z
|
|
270
|
+
.object({
|
|
271
|
+
role: z.literal("user"),
|
|
272
|
+
content: Message.RichContentSchema,
|
|
273
|
+
name: Message.NameSchema.optional().nullable(),
|
|
274
|
+
})
|
|
275
|
+
.describe("Messages sent by an end user, containing prompts or additional context information.");
|
|
276
|
+
Message.UserExpressionSchema = z
|
|
277
|
+
.object({
|
|
278
|
+
role: z.literal("user"),
|
|
279
|
+
content: Message.RichContentExpressionSchema,
|
|
280
|
+
name: Message.NameExpressionSchema.optional().nullable(),
|
|
281
|
+
})
|
|
282
|
+
.describe(Message.UserSchema.description);
|
|
283
|
+
Message.ToolSchema = z
|
|
284
|
+
.object({
|
|
285
|
+
role: z.literal("tool"),
|
|
286
|
+
content: Message.RichContentSchema,
|
|
287
|
+
tool_call_id: Tool.ToolCallIdSchema,
|
|
288
|
+
})
|
|
289
|
+
.describe("Messages sent by tools in response to tool calls made by the assistant.");
|
|
290
|
+
Message.ToolExpressionSchema = z
|
|
291
|
+
.object({
|
|
292
|
+
role: z.literal("tool"),
|
|
293
|
+
content: Message.RichContentExpressionSchema,
|
|
294
|
+
tool_call_id: Tool.ToolCallIdExpressionSchema,
|
|
295
|
+
})
|
|
296
|
+
.describe(Message.ToolSchema.description);
|
|
297
|
+
let Tool;
|
|
298
|
+
(function (Tool) {
|
|
299
|
+
Tool.ToolCallIdSchema = z
|
|
300
|
+
.string()
|
|
301
|
+
.describe("The ID of the tool call that this message is responding to.");
|
|
302
|
+
Tool.ToolCallIdExpressionSchema = z
|
|
303
|
+
.union([
|
|
304
|
+
Tool.ToolCallIdSchema,
|
|
305
|
+
ExpressionSchema.describe("An expression which evaluates to a string."),
|
|
306
|
+
])
|
|
307
|
+
.describe(Tool.ToolCallIdSchema.description);
|
|
308
|
+
})(Tool = Message.Tool || (Message.Tool = {}));
|
|
309
|
+
Message.AssistantSchema = z
|
|
310
|
+
.object({
|
|
311
|
+
role: z.literal("assistant"),
|
|
312
|
+
content: Message.RichContentSchema.optional().nullable(),
|
|
313
|
+
name: Message.NameSchema.optional().nullable(),
|
|
314
|
+
refusal: Assistant.RefusalSchema.optional().nullable(),
|
|
315
|
+
tool_calls: Assistant.ToolCallsSchema.optional().nullable(),
|
|
316
|
+
reasoning: Assistant.ReasoningSchema.optional().nullable(),
|
|
317
|
+
})
|
|
318
|
+
.describe("Messages sent by the model in response to user messages.");
|
|
319
|
+
Message.AssistantExpressionSchema = z
|
|
320
|
+
.object({
|
|
321
|
+
role: z.literal("assistant"),
|
|
322
|
+
content: Message.RichContentExpressionSchema.optional().nullable(),
|
|
323
|
+
name: Message.NameExpressionSchema.optional().nullable(),
|
|
324
|
+
refusal: Assistant.RefusalExpressionSchema.optional().nullable(),
|
|
325
|
+
tool_calls: Assistant.ToolCallsExpressionSchema.optional().nullable(),
|
|
326
|
+
reasoning: Assistant.ReasoningExpressionSchema.optional().nullable(),
|
|
327
|
+
})
|
|
328
|
+
.describe(Message.AssistantSchema.description);
|
|
329
|
+
let Assistant;
|
|
330
|
+
(function (Assistant) {
|
|
331
|
+
Assistant.RefusalSchema = z
|
|
332
|
+
.string()
|
|
333
|
+
.describe("The refusal message by the assistant.");
|
|
334
|
+
Assistant.RefusalExpressionSchema = z
|
|
335
|
+
.union([
|
|
336
|
+
Assistant.RefusalSchema,
|
|
337
|
+
ExpressionSchema.describe("An expression which evaluates to a string."),
|
|
338
|
+
])
|
|
339
|
+
.describe(Assistant.RefusalSchema.description);
|
|
340
|
+
Assistant.ReasoningSchema = z
|
|
341
|
+
.string()
|
|
342
|
+
.describe("The reasoning provided by the assistant.");
|
|
343
|
+
Assistant.ReasoningExpressionSchema = z
|
|
344
|
+
.union([
|
|
345
|
+
Assistant.ReasoningSchema,
|
|
346
|
+
ExpressionSchema.describe("An expression which evaluates to a string."),
|
|
347
|
+
])
|
|
348
|
+
.describe(Assistant.ReasoningSchema.description);
|
|
349
|
+
Assistant.ToolCallSchema = z
|
|
350
|
+
.union([ToolCall.FunctionSchema])
|
|
351
|
+
.describe("A tool call made by the assistant.");
|
|
352
|
+
Assistant.ToolCallExpressionSchema = z
|
|
353
|
+
.union([
|
|
354
|
+
ToolCall.FunctionExpressionSchema,
|
|
355
|
+
ExpressionSchema.describe("An expression which evaluates to a tool call."),
|
|
356
|
+
])
|
|
357
|
+
.describe(Assistant.ToolCallSchema.description);
|
|
358
|
+
let ToolCall;
|
|
359
|
+
(function (ToolCall) {
|
|
360
|
+
ToolCall.IdSchema = z
|
|
361
|
+
.string()
|
|
362
|
+
.describe("The unique identifier for the tool call.");
|
|
363
|
+
ToolCall.IdExpressionSchema = z
|
|
364
|
+
.union([
|
|
365
|
+
ToolCall.IdSchema,
|
|
366
|
+
ExpressionSchema.describe("An expression which evaluates to a string."),
|
|
367
|
+
])
|
|
368
|
+
.describe(ToolCall.IdSchema.description);
|
|
369
|
+
ToolCall.FunctionSchema = z
|
|
370
|
+
.object({
|
|
371
|
+
type: z.literal("function"),
|
|
372
|
+
id: ToolCall.IdSchema,
|
|
373
|
+
function: Function.DefinitionSchema,
|
|
374
|
+
})
|
|
375
|
+
.describe("A function tool call made by the assistant.");
|
|
376
|
+
ToolCall.FunctionExpressionSchema = z
|
|
377
|
+
.object({
|
|
378
|
+
type: z.literal("function"),
|
|
379
|
+
id: ToolCall.IdExpressionSchema,
|
|
380
|
+
function: Function.DefinitionExpressionSchema,
|
|
381
|
+
})
|
|
382
|
+
.describe(ToolCall.FunctionSchema.description);
|
|
383
|
+
let Function;
|
|
384
|
+
(function (Function) {
|
|
385
|
+
Function.NameSchema = z
|
|
386
|
+
.string()
|
|
387
|
+
.describe("The name of the function called.");
|
|
388
|
+
Function.NameExpressionSchema = z
|
|
389
|
+
.union([
|
|
390
|
+
Function.NameSchema,
|
|
391
|
+
ExpressionSchema.describe("An expression which evaluates to a string."),
|
|
392
|
+
])
|
|
393
|
+
.describe(Function.NameSchema.description);
|
|
394
|
+
Function.ArgumentsSchema = z
|
|
395
|
+
.string()
|
|
396
|
+
.describe("The arguments passed to the function.");
|
|
397
|
+
Function.ArgumentsExpressionSchema = z
|
|
398
|
+
.union([
|
|
399
|
+
Function.ArgumentsSchema,
|
|
400
|
+
ExpressionSchema.describe("An expression which evaluates to a string."),
|
|
401
|
+
])
|
|
402
|
+
.describe(Function.ArgumentsSchema.description);
|
|
403
|
+
Function.DefinitionSchema = z
|
|
404
|
+
.object({
|
|
405
|
+
name: Function.NameSchema,
|
|
406
|
+
arguments: Function.ArgumentsSchema,
|
|
407
|
+
})
|
|
408
|
+
.describe("The name and arguments of the function called.");
|
|
409
|
+
Function.DefinitionExpressionSchema = z
|
|
410
|
+
.object({
|
|
411
|
+
name: Function.NameExpressionSchema,
|
|
412
|
+
arguments: Function.ArgumentsExpressionSchema,
|
|
413
|
+
})
|
|
414
|
+
.describe(Function.DefinitionSchema.description);
|
|
415
|
+
})(Function = ToolCall.Function || (ToolCall.Function = {}));
|
|
416
|
+
})(ToolCall = Assistant.ToolCall || (Assistant.ToolCall = {}));
|
|
417
|
+
Assistant.ToolCallsSchema = z
|
|
418
|
+
.array(Assistant.ToolCallSchema)
|
|
419
|
+
.describe("Tool calls made by the assistant.");
|
|
420
|
+
Assistant.ToolCallsExpressionSchema = z
|
|
421
|
+
.union([
|
|
422
|
+
z
|
|
423
|
+
.array(Assistant.ToolCallExpressionSchema)
|
|
424
|
+
.describe(Assistant.ToolCallsSchema.description),
|
|
425
|
+
ExpressionSchema.describe("An expression which evaluates to an array of tool calls."),
|
|
426
|
+
])
|
|
427
|
+
.describe(Assistant.ToolCallsSchema.description);
|
|
428
|
+
})(Assistant = Message.Assistant || (Message.Assistant = {}));
|
|
429
|
+
})(Message || (Message = {}));
|
|
430
|
+
export const MessagesSchema = z
|
|
431
|
+
.array(MessageSchema)
|
|
432
|
+
.describe("A list of messages exchanged in a chat conversation.");
|
|
433
|
+
export const MessagesExpressionSchema = z
|
|
434
|
+
.union([
|
|
435
|
+
z.array(MessageExpressionSchema).describe(MessagesSchema.description),
|
|
436
|
+
ExpressionSchema.describe("An expression which evaluates to an array of messages."),
|
|
437
|
+
])
|
|
438
|
+
.describe(MessagesSchema.description);
|
|
439
|
+
// Tools
|
|
440
|
+
export const ToolSchema = z
|
|
441
|
+
.union([Tool.FunctionSchema])
|
|
442
|
+
.describe("A tool that the assistant can call.");
|
|
443
|
+
export const ToolExpressionSchema = z
|
|
444
|
+
.union([
|
|
445
|
+
Tool.FunctionExpressionSchema,
|
|
446
|
+
ExpressionSchema.describe("An expression which evaluates to a tool."),
|
|
447
|
+
])
|
|
448
|
+
.describe(ToolSchema.description);
|
|
449
|
+
export var Tool;
|
|
450
|
+
(function (Tool) {
|
|
451
|
+
Tool.FunctionSchema = z
|
|
452
|
+
.object({
|
|
453
|
+
type: z.literal("function"),
|
|
454
|
+
function: Function.DefinitionSchema,
|
|
455
|
+
})
|
|
456
|
+
.describe("A function tool that the assistant can call.");
|
|
457
|
+
Tool.FunctionExpressionSchema = z
|
|
458
|
+
.object({
|
|
459
|
+
type: z.literal("function"),
|
|
460
|
+
function: Function.DefinitionExpressionSchema,
|
|
461
|
+
})
|
|
462
|
+
.describe(Tool.FunctionSchema.description);
|
|
463
|
+
let Function;
|
|
464
|
+
(function (Function) {
|
|
465
|
+
Function.NameSchema = z.string().describe("The name of the function.");
|
|
466
|
+
Function.NameExpressionSchema = z
|
|
467
|
+
.union([
|
|
468
|
+
Function.NameSchema,
|
|
469
|
+
ExpressionSchema.describe("An expression which evaluates to a string."),
|
|
470
|
+
])
|
|
471
|
+
.describe(Function.NameSchema.description);
|
|
472
|
+
Function.DescriptionSchema = z
|
|
473
|
+
.string()
|
|
474
|
+
.describe("The description of the function.");
|
|
475
|
+
Function.DescriptionExpressionSchema = z
|
|
476
|
+
.union([
|
|
477
|
+
Function.DescriptionSchema,
|
|
478
|
+
ExpressionSchema.describe("An expression which evaluates to a string."),
|
|
479
|
+
])
|
|
480
|
+
.describe(Function.DescriptionSchema.description);
|
|
481
|
+
Function.ParametersSchema = z
|
|
482
|
+
.record(z.string(), JsonValueSchema)
|
|
483
|
+
.describe("The JSON schema defining the parameters of the function.");
|
|
484
|
+
Function.ParametersExpressionSchema = z
|
|
485
|
+
.union([
|
|
486
|
+
z.record(z.string(), JsonValueExpressionSchema),
|
|
487
|
+
ExpressionSchema.describe("An expression which evaluates to a JSON schema object."),
|
|
488
|
+
])
|
|
489
|
+
.describe(Function.ParametersSchema.description);
|
|
490
|
+
Function.StrictSchema = z
|
|
491
|
+
.boolean()
|
|
492
|
+
.describe("Whether to enforce strict adherence to the parameter schema.");
|
|
493
|
+
Function.StrictExpressionSchema = z
|
|
494
|
+
.union([
|
|
495
|
+
Function.StrictSchema,
|
|
496
|
+
ExpressionSchema.describe("An expression which evaluates to a boolean."),
|
|
497
|
+
])
|
|
498
|
+
.describe(Function.StrictSchema.description);
|
|
499
|
+
Function.DefinitionSchema = z
|
|
500
|
+
.object({
|
|
501
|
+
name: Function.NameSchema,
|
|
502
|
+
description: Function.DescriptionSchema.optional().nullable(),
|
|
503
|
+
parameters: Function.ParametersSchema.optional().nullable(),
|
|
504
|
+
strict: Function.StrictSchema.optional().nullable(),
|
|
505
|
+
})
|
|
506
|
+
.describe("The definition of a function tool.");
|
|
507
|
+
Function.DefinitionExpressionSchema = z
|
|
508
|
+
.object({
|
|
509
|
+
name: Function.NameExpressionSchema,
|
|
510
|
+
description: Function.DescriptionExpressionSchema.optional().nullable(),
|
|
511
|
+
parameters: Function.ParametersExpressionSchema.optional().nullable(),
|
|
512
|
+
strict: Function.StrictExpressionSchema.optional().nullable(),
|
|
513
|
+
})
|
|
514
|
+
.describe(Function.DefinitionSchema.description);
|
|
515
|
+
})(Function = Tool.Function || (Tool.Function = {}));
|
|
516
|
+
})(Tool || (Tool = {}));
|
|
517
|
+
export const ToolsSchema = z
|
|
518
|
+
.array(ToolSchema)
|
|
519
|
+
.describe("A list of tools that the assistant can call.");
|
|
520
|
+
export const ToolsExpressionSchema = z
|
|
521
|
+
.union([
|
|
522
|
+
z.array(ToolExpressionSchema).describe(ToolsSchema.description),
|
|
523
|
+
ExpressionSchema.describe("An expression which evaluates to an array of tools."),
|
|
524
|
+
])
|
|
525
|
+
.describe(ToolsSchema.description);
|
|
526
|
+
// Vector Responses
|
|
527
|
+
export const VectorResponseSchema = Message.RichContentSchema.describe("A possible assistant response. The LLMs in the Ensemble may vote for this option.");
|
|
528
|
+
export const VectorResponseExpressionSchema = z
|
|
529
|
+
.union([
|
|
530
|
+
VectorResponseSchema,
|
|
531
|
+
ExpressionSchema.describe("An expression which evaluates to a possible assistant response."),
|
|
532
|
+
])
|
|
533
|
+
.describe(VectorResponseSchema.description);
|
|
534
|
+
export const VectorResponsesSchema = z
|
|
535
|
+
.array(VectorResponseSchema)
|
|
536
|
+
.describe("A list of possible assistant responses which the LLMs in the Ensemble will vote on. The output scores will be of the same length, each corresponding to one response. The winner is the response with the highest score.");
|
|
537
|
+
export const VectorResponsesExpressionSchema = z
|
|
538
|
+
.union([
|
|
539
|
+
z
|
|
540
|
+
.array(VectorResponseExpressionSchema)
|
|
541
|
+
.describe(VectorResponsesSchema.description),
|
|
542
|
+
ExpressionSchema.describe("An expression which evaluates to an array of possible assistant responses."),
|
|
543
|
+
])
|
|
544
|
+
.describe(VectorResponsesSchema.description);
|
|
545
|
+
// Ensemble LLM
|
|
546
|
+
export const EnsembleLlmBaseSchema = z
|
|
547
|
+
.object({
|
|
548
|
+
model: z.string().describe("The full ID of the LLM to use."),
|
|
549
|
+
output_mode: EnsembleLlm.OutputModeSchema,
|
|
550
|
+
synthetic_reasoning: z
|
|
551
|
+
.boolean()
|
|
552
|
+
.optional()
|
|
553
|
+
.nullable()
|
|
554
|
+
.describe("For Vector Completions only, whether to use synthetic reasoning prior to voting. Works for any LLM, even those that do not have native reasoning capabilities."),
|
|
555
|
+
top_logprobs: z
|
|
556
|
+
.int()
|
|
557
|
+
.min(0)
|
|
558
|
+
.max(20)
|
|
559
|
+
.optional()
|
|
560
|
+
.nullable()
|
|
561
|
+
.describe("For Vector Completions only, whether to use logprobs to make the vote probabilistic. This means that the LLM can vote for multiple keys based on their logprobabilities. Allows LLMs to express native uncertainty when voting."),
|
|
562
|
+
prefix_messages: MessagesSchema.optional()
|
|
563
|
+
.nullable()
|
|
564
|
+
.describe(`${MessagesSchema.description} These will be prepended to every prompt sent to this LLM. Useful for setting context or influencing behavior.`),
|
|
565
|
+
suffix_messages: MessagesSchema.optional()
|
|
566
|
+
.nullable()
|
|
567
|
+
.describe(`${MessagesSchema.description} These will be appended to every prompt sent to this LLM. Useful for setting context or influencing behavior.`),
|
|
568
|
+
frequency_penalty: z
|
|
569
|
+
.number()
|
|
570
|
+
.min(-2.0)
|
|
571
|
+
.max(2.0)
|
|
572
|
+
.optional()
|
|
573
|
+
.nullable()
|
|
574
|
+
.describe("This setting aims to control the repetition of tokens based on how often they appear in the input. It tries to use less frequently those tokens that appear more in the input, proportional to how frequently they occur. Token penalty scales with the number of occurrences. Negative values will encourage token reuse."),
|
|
575
|
+
logit_bias: z
|
|
576
|
+
.record(z.string(), z.int().min(-100).max(100))
|
|
577
|
+
.optional()
|
|
578
|
+
.nullable()
|
|
579
|
+
.describe("Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token."),
|
|
580
|
+
max_completion_tokens: z
|
|
581
|
+
.int()
|
|
582
|
+
.min(0)
|
|
583
|
+
.max(2147483647)
|
|
584
|
+
.optional()
|
|
585
|
+
.nullable()
|
|
586
|
+
.describe("An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens."),
|
|
587
|
+
presence_penalty: z
|
|
588
|
+
.number()
|
|
589
|
+
.min(-2.0)
|
|
590
|
+
.max(2.0)
|
|
591
|
+
.optional()
|
|
592
|
+
.nullable()
|
|
593
|
+
.describe("This setting aims to control the presence of tokens in the output. It tries to encourage the model to use tokens that are less present in the input, proportional to their presence in the input. Token presence scales with the number of occurrences. Negative values will encourage more diverse token usage."),
|
|
594
|
+
stop: EnsembleLlm.StopSchema.optional().nullable(),
|
|
595
|
+
temperature: z
|
|
596
|
+
.number()
|
|
597
|
+
.min(0.0)
|
|
598
|
+
.max(2.0)
|
|
599
|
+
.optional()
|
|
600
|
+
.nullable()
|
|
601
|
+
.describe("This setting influences the variety in the model’s responses. Lower values lead to more predictable and typical responses, while higher values encourage more diverse and less common responses. At 0, the model always gives the same response for a given input."),
|
|
602
|
+
top_p: z
|
|
603
|
+
.number()
|
|
604
|
+
.min(0.0)
|
|
605
|
+
.max(1.0)
|
|
606
|
+
.optional()
|
|
607
|
+
.nullable()
|
|
608
|
+
.describe("This setting limits the model’s choices to a percentage of likely tokens: only the top tokens whose probabilities add up to P. A lower value makes the model’s responses more predictable, while the default setting allows for a full range of token choices. Think of it like a dynamic Top-K."),
|
|
609
|
+
max_tokens: z
|
|
610
|
+
.int()
|
|
611
|
+
.min(0)
|
|
612
|
+
.max(2147483647)
|
|
613
|
+
.optional()
|
|
614
|
+
.nullable()
|
|
615
|
+
.describe("This sets the upper limit for the number of tokens the model can generate in response. It won’t produce more than this limit. The maximum value is the context length minus the prompt length."),
|
|
616
|
+
min_p: z
|
|
617
|
+
.number()
|
|
618
|
+
.min(0.0)
|
|
619
|
+
.max(1.0)
|
|
620
|
+
.optional()
|
|
621
|
+
.nullable()
|
|
622
|
+
.describe("Represents the minimum probability for a token to be considered, relative to the probability of the most likely token. (The value changes depending on the confidence level of the most probable token.) If your Min-P is set to 0.1, that means it will only allow for tokens that are at least 1/10th as probable as the best possible option."),
|
|
623
|
+
provider: EnsembleLlm.ProviderSchema.optional().nullable(),
|
|
624
|
+
reasoning: EnsembleLlm.ReasoningSchema.optional().nullable(),
|
|
625
|
+
repetition_penalty: z
|
|
626
|
+
.number()
|
|
627
|
+
.min(0.0)
|
|
628
|
+
.max(2.0)
|
|
629
|
+
.optional()
|
|
630
|
+
.nullable()
|
|
631
|
+
.describe("Helps to reduce the repetition of tokens from the input. A higher value makes the model less likely to repeat tokens, but too high a value can make the output less coherent (often with run-on sentences that lack small words). Token penalty scales based on original token’s probability."),
|
|
632
|
+
top_a: z
|
|
633
|
+
.number()
|
|
634
|
+
.min(0.0)
|
|
635
|
+
.max(1.0)
|
|
636
|
+
.optional()
|
|
637
|
+
.nullable()
|
|
638
|
+
.describe("Consider only the top tokens with “sufficiently high” probabilities based on the probability of the most likely token. Think of it like a dynamic Top-P. A lower Top-A value focuses the choices based on the highest probability token but with a narrower scope. A higher Top-A value does not necessarily affect the creativity of the output, but rather refines the filtering process based on the maximum probability."),
|
|
639
|
+
top_k: z
|
|
640
|
+
.int()
|
|
641
|
+
.min(0)
|
|
642
|
+
.max(2147483647)
|
|
643
|
+
.optional()
|
|
644
|
+
.nullable()
|
|
645
|
+
.describe("This limits the model’s choice of tokens at each step, making it choose from a smaller set. A value of 1 means the model will always pick the most likely next token, leading to predictable results. By default this setting is disabled, making the model to consider all choices."),
|
|
646
|
+
verbosity: EnsembleLlm.VerbositySchema.optional().nullable(),
|
|
647
|
+
})
|
|
648
|
+
.describe("An LLM to be used within an Ensemble or standalone with Chat Completions.");
|
|
649
|
+
// Replication count for one ensemble member; at least 1 when present.
const ensembleLlmCountField = z
    .uint32()
    .min(1)
    .optional()
    .nullable()
    .describe("A count greater than one effectively means that there are multiple instances of this LLM in an ensemble.");
// Ordered list of substitute LLM definitions tried when the primary fails.
const ensembleLlmBaseFallbacksField = z
    .array(EnsembleLlmBaseSchema)
    .optional()
    .nullable()
    .describe("A list of fallback LLMs to use if the primary LLM fails.");
/**
 * Inline (id-less) ensemble member: the base LLM definition extended with
 * optional `count` and `fallbacks` fields.
 */
export const EnsembleLlmBaseWithFallbacksAndCountSchema = EnsembleLlmBaseSchema
    .extend({
        count: ensembleLlmCountField,
        fallbacks: ensembleLlmBaseFallbacksField,
    })
    .describe("An LLM to be used within an Ensemble, including optional fallbacks and count.");
|
|
662
|
+
// Server-assigned identifier for a persisted Ensemble LLM.
const ensembleLlmIdField = z.string().describe("The unique identifier for the Ensemble LLM.");
/**
 * A persisted Ensemble LLM: the base definition plus its unique `id`.
 */
export const EnsembleLlmSchema = EnsembleLlmBaseSchema
    .extend({ id: ensembleLlmIdField })
    .describe("An LLM to be used within an Ensemble or standalone with Chat Completions, including its unique identifier.");
|
|
665
|
+
// Fallbacks for the identified variant are themselves identified LLMs;
// the description text is shared with the base (id-less) schema.
const ensembleLlmFallbacksField = z
    .array(EnsembleLlmSchema)
    .optional()
    .nullable()
    .describe(EnsembleLlmBaseWithFallbacksAndCountSchema.shape.fallbacks.description);
/**
 * A persisted ensemble member: identified Ensemble LLM plus optional
 * `count` (reused from the base-with-count schema) and `fallbacks`.
 */
export const EnsembleLlmWithFallbacksAndCountSchema = EnsembleLlmSchema
    .extend({
        count: EnsembleLlmBaseWithFallbacksAndCountSchema.shape.count,
        fallbacks: ensembleLlmFallbacksField,
    })
    .describe("An LLM to be used within an Ensemble, including its unique identifier, optional fallbacks, and count.");
|
|
673
|
+
// Namespace of auxiliary schemas and API helpers for Ensemble LLMs.
// NOTE(review): several schemas below reference `Provider` / `Reasoning`
// before their `let` declarations appear later in this emitted file —
// confirm the published module's actual evaluation order. — TODO confirm
export var EnsembleLlm;
(function (EnsembleLlm) {
    // How an LLM casts its vote in Vector Completions.
    EnsembleLlm.OutputModeSchema = z
        .enum(["instruction", "json_schema", "tool_call"])
        .describe('For Vector Completions only, specifies the LLM\'s voting output mode. For "instruction", the assistant is instructed to output a key. For "json_schema", the assistant is constrained to output a valid key using a JSON schema. For "tool_call", the assistant is instructed to output a tool call to select the key.');
    // Stop sequences: either a single string or a list of strings.
    EnsembleLlm.StopSchema = z
        .union([
            z.string().describe("Generation will stop when this string is generated."),
            z.array(z.string()).describe("Generation will stop when any of these strings are generated."),
        ])
        .describe("The assistant will stop when any of the provided strings are generated.");
    // Per-model upstream provider routing preferences.
    EnsembleLlm.ProviderSchema = z
        .object({
            allow_fallbacks: z
                .boolean()
                .optional()
                .nullable()
                .describe("Whether to allow fallback providers if the preferred provider is unavailable."),
            require_parameters: z
                .boolean()
                .optional()
                .nullable()
                .describe("Whether to require that the provider supports all specified parameters."),
            order: z
                .array(z.string())
                .optional()
                .nullable()
                .describe("An ordered list of provider names to use when selecting a provider for this model."),
            only: z
                .array(z.string())
                .optional()
                .nullable()
                .describe("A list of provider names to restrict selection to when selecting a provider for this model."),
            ignore: z
                .array(z.string())
                .optional()
                .nullable()
                .describe("A list of provider names to ignore when selecting a provider for this model."),
            quantizations: z
                .array(Provider.QuantizationSchema)
                .optional()
                .nullable()
                .describe("Specifies the quantizations to allow when selecting providers for this model."),
        })
        .describe("Options for selecting the upstream provider of this model.");
    let Provider;
    (function (Provider) {
        // Accepted weight-quantization labels for provider filtering.
        Provider.QuantizationSchema = z
            .enum([
                "int4",
                "int8",
                "fp4",
                "fp6",
                "fp8",
                "fp16",
                "bf16",
                "fp32",
                "unknown",
            ])
            .describe("An LLM quantization.");
    })(Provider = EnsembleLlm.Provider || (EnsembleLlm.Provider = {}));
    // Reasoning controls; note the whole object is itself optional/nullable.
    EnsembleLlm.ReasoningSchema = z
        .object({
            enabled: z
                .boolean()
                .optional()
                .nullable()
                .describe("Enables or disables reasoning for supported models."),
            max_tokens: z
                .int()
                .min(0)
                .max(2147483647) // capped at INT32_MAX
                .optional()
                .nullable()
                .describe("The maximum number of tokens to use for reasoning in a response."),
            effort: Reasoning.EffortSchema.optional().nullable(),
            summary_verbosity: Reasoning.SummaryVerbositySchema.optional().nullable(),
        })
        .optional()
        .nullable()
        .describe("Options for controlling reasoning behavior of the model.");
    let Reasoning;
    (function (Reasoning) {
        // Discrete reasoning-effort levels, lowest to highest.
        Reasoning.EffortSchema = z
            .enum(["none", "minimal", "low", "medium", "high", "xhigh"])
            .describe("Constrains effort on reasoning for supported reasoning models. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.");
        Reasoning.SummaryVerbositySchema = z
            .enum(["auto", "concise", "detailed"])
            .describe("Controls the verbosity of the reasoning summary for supported reasoning models.");
    })(Reasoning = EnsembleLlm.Reasoning || (EnsembleLlm.Reasoning = {}));
    EnsembleLlm.VerbositySchema = z
        .enum(["low", "medium", "high"])
        .describe("Controls the verbosity and length of the model response. Lower values produce more concise responses, while higher values produce more detailed and comprehensive responses.");
    // Minimal list entry: only the id is returned by the list endpoint.
    EnsembleLlm.ListItemSchema = z.object({
        id: z.string().describe("The unique identifier for the Ensemble LLM."),
    });
    /**
     * GET /ensemble_llms — list Ensemble LLMs.
     * @param openai client exposing a `get(path, options)` method
     * @param options request options forwarded to the client
     * @returns the raw response from the client
     */
    async function list(openai, options) {
        const response = await openai.get("/ensemble_llms", options);
        return response;
    }
    EnsembleLlm.list = list;
    // Retrieve payload: the full LLM plus its creation timestamp.
    EnsembleLlm.RetrieveItemSchema = EnsembleLlmSchema.extend({
        created: z
            .uint32()
            .describe("The Unix timestamp (in seconds) when the Ensemble LLM was created."),
    });
    /**
     * GET /ensemble_llms/{id} — retrieve one Ensemble LLM.
     * @param openai client exposing a `get(path, options)` method
     * @param id Ensemble LLM identifier, interpolated into the path
     * @param options request options forwarded to the client
     * @returns the raw response from the client
     */
    async function retrieve(openai, id, options) {
        const response = await openai.get(`/ensemble_llms/${id}`, options);
        return response;
    }
    EnsembleLlm.retrieve = retrieve;
    // Aggregate usage counters for a single Ensemble LLM.
    EnsembleLlm.HistoricalUsageSchema = z.object({
        requests: z
            .uint32()
            .describe("The total number of requests made to this Ensemble LLM."),
        completion_tokens: z
            .uint32()
            .describe("The total number of completion tokens generated by this Ensemble LLM."),
        prompt_tokens: z
            .uint32()
            .describe("The total number of prompt tokens sent to this Ensemble LLM."),
        total_cost: z
            .number()
            .describe("The total cost incurred by using this Ensemble LLM."),
    });
    /**
     * GET /ensemble_llms/{id}/usage — retrieve historical usage.
     * @param openai client exposing a `get(path, options)` method
     * @param id Ensemble LLM identifier, interpolated into the path
     * @param options request options forwarded to the client
     * @returns the raw response from the client
     */
    async function retrieveUsage(openai, id, options) {
        const response = await openai.get(`/ensemble_llms/${id}/usage`, options);
        return response;
    }
    EnsembleLlm.retrieveUsage = retrieveUsage;
})(EnsembleLlm || (EnsembleLlm = {}));
|
|
808
|
+
// Ensemble
|
|
809
|
+
// Shape of an inline (id-less) ensemble: just the member LLM list.
const ensembleBaseShape = {
    llms: z
        .array(EnsembleLlmBaseWithFallbacksAndCountSchema)
        .describe("The list of LLMs that make up the ensemble."),
};
/** An ensemble definition supplied inline, without a server-assigned id. */
export const EnsembleBaseSchema = z
    .object(ensembleBaseShape)
    .describe("An ensemble of LLMs.");
|
|
816
|
+
// Shape of a persisted ensemble: id plus identified member LLMs.
// The llms description is shared with the inline (base) schema.
const ensembleShape = {
    id: z.string().describe("The unique identifier for the Ensemble."),
    llms: z
        .array(EnsembleLlmWithFallbacksAndCountSchema)
        .describe(EnsembleBaseSchema.shape.llms.description),
};
/** A persisted ensemble, identified by its unique id. */
export const EnsembleSchema = z
    .object(ensembleShape)
    .describe("An ensemble of LLMs with a unique identifier.");
|
|
824
|
+
// Namespace of list/retrieve/usage schemas and API helpers for Ensembles.
export var Ensemble;
(function (Ensemble) {
    // Minimal list entry: only the id is returned by the list endpoint.
    Ensemble.ListItemSchema = z.object({
        id: z.string().describe("The unique identifier for the Ensemble."),
    });
    /**
     * GET /ensembles — list Ensembles.
     * @param openai client exposing a `get(path, options)` method
     * @param options request options forwarded to the client
     * @returns the raw response from the client
     */
    async function list(openai, options) {
        const response = await openai.get("/ensembles", options);
        return response;
    }
    Ensemble.list = list;
    // Retrieve payload: the full ensemble plus its creation timestamp.
    Ensemble.RetrieveItemSchema = EnsembleSchema.extend({
        created: z
            .uint32()
            .describe("The Unix timestamp (in seconds) when the Ensemble was created."),
    });
    /**
     * GET /ensembles/{id} — retrieve one Ensemble.
     * @param openai client exposing a `get(path, options)` method
     * @param id Ensemble identifier, interpolated into the path
     * @param options request options forwarded to the client
     * @returns the raw response from the client
     */
    async function retrieve(openai, id, options) {
        const response = await openai.get(`/ensembles/${id}`, options);
        return response;
    }
    Ensemble.retrieve = retrieve;
    // Aggregate usage counters for a single Ensemble.
    Ensemble.HistoricalUsageSchema = z.object({
        requests: z
            .uint32()
            .describe("The total number of requests made to this Ensemble."),
        completion_tokens: z
            .uint32()
            .describe("The total number of completion tokens generated by this Ensemble."),
        prompt_tokens: z
            .uint32()
            .describe("The total number of prompt tokens sent to this Ensemble."),
        total_cost: z
            .number()
            .describe("The total cost incurred by using this Ensemble."),
    });
    /**
     * GET /ensembles/{id}/usage — retrieve historical usage.
     * @param openai client exposing a `get(path, options)` method
     * @param id Ensemble identifier, interpolated into the path
     * @param options request options forwarded to the client
     * @returns the raw response from the client
     */
    async function retrieveUsage(openai, id, options) {
        const response = await openai.get(`/ensembles/${id}/usage`, options);
        return response;
    }
    Ensemble.retrieveUsage = retrieveUsage;
})(Ensemble || (Ensemble = {}));
|
|
864
|
+
// Chat Completions
|
|
1
865
|
export var Chat;
|
|
2
866
|
(function (Chat) {
|
|
3
867
|
let Completions;
|
|
4
868
|
(function (Completions) {
|
|
869
|
+
// Request-side schemas for Chat Completions: provider routing, model
// selection, response formats, tool choice, prediction, and the create
// params (streaming / non-streaming) union.
// NOTE(review): several schemas reference `Provider` / `ResponseFormat` /
// `ToolChoice` / `Prediction` before their `let` declarations later in this
// emitted file — confirm the module's actual evaluation order. — TODO confirm
let Request;
(function (Request) {
    // Per-request upstream provider routing preferences.
    Request.ProviderSchema = z
        .object({
            data_collection: Provider.DataCollectionSchema.optional().nullable(),
            zdr: z
                .boolean()
                .optional()
                .nullable()
                .describe("Whether to enforce Zero Data Retention (ZDR) policies when selecting providers."),
            sort: Provider.SortSchema.optional().nullable(),
            max_price: Provider.MaxPriceSchema.optional().nullable(),
            preferred_min_throughput: z
                .number()
                .optional()
                .nullable()
                .describe("Preferred minimum throughput for the provider."),
            preferred_max_latency: z
                .number()
                .optional()
                .nullable()
                .describe("Preferred maximum latency for the provider."),
            min_throughput: z
                .number()
                .optional()
                .nullable()
                .describe("Minimum throughput for the provider."),
            max_latency: z
                .number()
                .optional()
                .nullable()
                .describe("Maximum latency for the provider."),
        })
        .describe("Options for selecting the upstream provider of this completion.");
    let Provider;
    (function (Provider) {
        Provider.DataCollectionSchema = z
            .enum(["allow", "deny"])
            .describe("Specifies whether to allow providers which collect data.");
        Provider.SortSchema = z
            .enum(["price", "throughput", "latency"])
            .describe("Specifies the sorting strategy for provider selection.");
        // Per-category price ceilings; all fields optional.
        Provider.MaxPriceSchema = z.object({
            prompt: z
                .number()
                .optional()
                .nullable()
                .describe("Maximum price for prompt tokens."),
            completion: z
                .number()
                .optional()
                .nullable()
                .describe("Maximum price for completion tokens."),
            image: z
                .number()
                .optional()
                .nullable()
                .describe("Maximum price for image generation."),
            audio: z
                .number()
                .optional()
                .nullable()
                .describe("Maximum price for audio generation."),
            request: z
                .number()
                .optional()
                .nullable()
                .describe("Maximum price per request."),
        });
    })(Provider = Request.Provider || (Request.Provider = {}));
    // Model selector: either an Ensemble LLM id or an inline definition.
    Request.ModelSchema = z
        .union([z.string(), EnsembleLlmBaseSchema])
        .describe("The Ensemble LLM to use for this completion. May be a unique ID or an inline definition.");
    // Union of all supported response-format variants.
    Request.ResponseFormatSchema = z
        .union([
            ResponseFormat.TextSchema,
            ResponseFormat.JsonObjectSchema,
            ResponseFormat.JsonSchemaSchema,
            ResponseFormat.GrammarSchema,
            ResponseFormat.PythonSchema,
        ])
        .describe("The desired format of the model's response.");
    let ResponseFormat;
    (function (ResponseFormat) {
        ResponseFormat.TextSchema = z
            .object({
                type: z.literal("text"),
            })
            .describe("The response will be arbitrary text.");
        ResponseFormat.JsonObjectSchema = z
            .object({
                type: z.literal("json_object"),
            })
            .describe("The response will be a JSON object.");
        ResponseFormat.JsonSchemaSchema = z
            .object({
                type: z.literal("json_schema"),
                json_schema: JsonSchema.JsonSchemaSchema,
            })
            .describe("The response will conform to the provided JSON schema.");
        let JsonSchema;
        (function (JsonSchema) {
            // Envelope for a user-supplied JSON schema; `schema` is untyped.
            JsonSchema.JsonSchemaSchema = z
                .object({
                    name: z.string().describe("The name of the JSON schema."),
                    description: z
                        .string()
                        .optional()
                        .nullable()
                        .describe("The description of the JSON schema."),
                    schema: z
                        .any()
                        .optional()
                        .describe("The JSON schema definition."),
                    strict: z
                        .boolean()
                        .optional()
                        .nullable()
                        .describe("Whether to enforce strict adherence to the JSON schema."),
                })
                .describe("A JSON schema definition for constraining model output.");
        })(JsonSchema = ResponseFormat.JsonSchema || (ResponseFormat.JsonSchema = {}));
        ResponseFormat.GrammarSchema = z
            .object({
                type: z.literal("grammar"),
                grammar: z
                    .string()
                    .describe("The grammar definition to constrain the response."),
            })
            .describe("The response will conform to the provided grammar definition.");
        ResponseFormat.PythonSchema = z
            .object({
                type: z.literal("python"),
            })
            .describe("The response will be Python code.");
    })(ResponseFormat = Request.ResponseFormat || (Request.ResponseFormat = {}));
    // "none" | "auto" | "required" | a specific function to call.
    Request.ToolChoiceSchema = z
        .union([
            z.literal("none"),
            z.literal("auto"),
            z.literal("required"),
            ToolChoice.FunctionSchema,
        ])
        .describe("Specifies tool call behavior for the assistant.");
    let ToolChoice;
    (function (ToolChoice) {
        ToolChoice.FunctionSchema = z
            .object({
                type: z.literal("function"),
                function: Function.FunctionSchema,
            })
            .describe("Specify a function for the assistant to call.");
        let Function;
        (function (Function) {
            Function.FunctionSchema = z.object({
                name: z
                    .string()
                    .describe("The name of the function the assistant will call."),
            });
        })(Function = ToolChoice.Function || (ToolChoice.Function = {}));
    })(ToolChoice = Request.ToolChoice || (Request.ToolChoice = {}));
    // Predicted Output: pre-supply expected response content.
    Request.PredictionSchema = z
        .object({
            type: z.literal("content"),
            content: Prediction.ContentSchema,
        })
        .describe("Configuration for a Predicted Output, which can greatly improve response times when large parts of the model response are known ahead of time. This is most common when you are regenerating a file with only minor changes to most of the content.");
    let Prediction;
    (function (Prediction) {
        // Either a plain string or a list of text parts.
        Prediction.ContentSchema = z.union([
            z.string(),
            z.array(Content.PartSchema),
        ]);
        let Content;
        (function (Content) {
            Content.PartSchema = z
                .object({
                    type: z.literal("text"),
                    text: z.string(),
                })
                .describe("A part of the predicted content.");
        })(Content = Prediction.Content || (Prediction.Content = {}));
    })(Prediction = Request.Prediction || (Request.Prediction = {}));
    // Sampling seed; bigint to allow 64-bit values.
    Request.SeedSchema = z
        .bigint()
        .describe("If specified, upstream systems will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result.");
    Request.BackoffMaxElapsedTimeSchema = z
        .uint32()
        .describe("The maximum total time in milliseconds to spend on retries when a transient error occurs.");
    Request.FirstChunkTimeoutSchema = z
        .uint32()
        .describe("The maximum time in milliseconds to wait for the first chunk of a streaming response.");
    Request.OtherChunkTimeoutSchema = z
        .uint32()
        .describe("The maximum time in milliseconds to wait between subsequent chunks of a streaming response.");
    // Shared create params; `stream` is added by the variants below.
    Request.ChatCompletionCreateParamsBaseSchema = z
        .object({
            messages: MessagesSchema,
            provider: Request.ProviderSchema.optional().nullable(),
            model: Request.ModelSchema,
            models: z
                .array(Request.ModelSchema)
                .optional()
                .nullable()
                .describe("Fallback Ensemble LLMs to use if the primary Ensemble LLM fails."),
            top_logprobs: z
                .int()
                .min(0)
                .max(20)
                .optional()
                .nullable()
                .describe("An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability."),
            response_format: Request.ResponseFormatSchema.optional().nullable(),
            seed: Request.SeedSchema.optional().nullable(),
            tool_choice: Request.ToolChoiceSchema.optional().nullable(),
            tools: ToolsSchema,
            parallel_tool_calls: z
                .boolean()
                .optional()
                .nullable()
                .describe("Whether to allow the model to make multiple tool calls in parallel."),
            prediction: Request.PredictionSchema.optional().nullable(),
            backoff_max_elapsed_time: Request.BackoffMaxElapsedTimeSchema.optional().nullable(),
            first_chunk_timeout: Request.FirstChunkTimeoutSchema.optional().nullable(),
            other_chunk_timeout: Request.OtherChunkTimeoutSchema.optional().nullable(),
        })
        .describe("Base parameters for creating a chat completion.");
    Request.StreamTrueSchema = z
        .literal(true)
        .describe("Whether to stream the response as a series of chunks.");
    // stream: true — chunked response.
    Request.ChatCompletionCreateParamsStreamingSchema = Request.ChatCompletionCreateParamsBaseSchema.extend({
        stream: Request.StreamTrueSchema,
    }).describe("Parameters for creating a streaming chat completion.");
    Request.StreamFalseSchema = z
        .literal(false)
        .describe("Whether to stream the response as a series of chunks.");
    // stream absent/false — single (unary) response.
    Request.ChatCompletionCreateParamsNonStreamingSchema = Request.ChatCompletionCreateParamsBaseSchema.extend({
        stream: Request.StreamFalseSchema.optional().nullable(),
    }).describe("Parameters for creating a unary chat completion.");
    Request.ChatCompletionCreateParamsSchema = z
        .union([
            Request.ChatCompletionCreateParamsStreamingSchema,
            Request.ChatCompletionCreateParamsNonStreamingSchema,
        ])
        .describe("Parameters for creating a chat completion.");
})(Request = Completions.Request || (Completions.Request = {}));
|
|
5
1115
|
let Response;
|
|
6
1116
|
(function (Response) {
|
|
7
|
-
|
|
8
|
-
|
|
9
|
-
|
|
10
|
-
|
|
11
|
-
|
|
12
|
-
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
|
|
18
|
-
|
|
19
|
-
|
|
20
|
-
|
|
21
|
-
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
|
|
27
|
-
|
|
28
|
-
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
1117
|
+
// Terminal states a completion choice can report.
const finishReasonValues = ["stop", "length", "tool_calls", "content_filter", "error"];
Response.FinishReasonSchema = z
    .enum(finishReasonValues)
    .describe("The reason why the assistant ceased to generate further tokens.");
|
|
1120
|
+
// Token counts plus cost accounting for one completion.
// NOTE(review): references `Usage.*` before the `let Usage` declaration
// below it in this emitted file — confirm evaluation order. — TODO confirm
Response.UsageSchema = z
    .object({
        completion_tokens: z
            .uint32()
            .describe("The number of tokens generated in the completion."),
        prompt_tokens: z
            .uint32()
            .describe("The number of tokens in the prompt."),
        total_tokens: z
            .uint32()
            .describe("The total number of tokens used in the prompt or generated in the completion."),
        completion_tokens_details: Usage.CompletionTokensDetailsSchema.optional(),
        prompt_tokens_details: Usage.PromptTokensDetailsSchema.optional(),
        cost: z
            .number()
            .describe("The cost in credits incurred for this completion."),
        cost_details: Usage.CostDetailsSchema.optional(),
        total_cost: z
            .number()
            .describe("The total cost in credits incurred including upstream costs."),
        cost_multiplier: z
            .number()
            .describe("The cost multiplier applied to upstream costs for computing ObjectiveAI costs."),
        is_byok: z
            .boolean()
            .describe("Whether the completion used a BYOK (Bring Your Own Key) API Key."),
    })
    .describe("Token and cost usage statistics for the completion.");
|
|
1148
|
+
// Detail sub-schemas for the usage object: token breakdowns and costs.
let Usage;
(function (Usage) {
    // Breakdown of generated (completion-side) tokens; all fields optional.
    Usage.CompletionTokensDetailsSchema = z
        .object({
            accepted_prediction_tokens: z
                .uint32()
                .optional()
                .describe("The number of accepted prediction tokens in the completion."),
            audio_tokens: z
                .uint32()
                .optional()
                .describe("The number of generated audio tokens in the completion."),
            reasoning_tokens: z
                .uint32()
                .optional()
                .describe("The number of generated reasoning tokens in the completion."),
            rejected_prediction_tokens: z
                .uint32()
                .optional()
                .describe("The number of rejected prediction tokens in the completion."),
        })
        .describe("Detailed breakdown of generated completion tokens.");
    // Breakdown of prompt-side tokens, including cache activity.
    Usage.PromptTokensDetailsSchema = z
        .object({
            audio_tokens: z
                .uint32()
                .optional()
                .describe("The number of audio tokens in the prompt."),
            cached_tokens: z
                .uint32()
                .optional()
                .describe("The number of cached tokens in the prompt."),
            cache_write_tokens: z
                .uint32()
                .optional()
                .describe("The number of prompt tokens written to cache."),
            video_tokens: z
                .uint32()
                .optional()
                .describe("The number of video tokens in the prompt."),
        })
        .describe("Detailed breakdown of prompt tokens.");
    // Upstream (and upstream-of-upstream) inference costs.
    Usage.CostDetailsSchema = z
        .object({
            upstream_inference_cost: z
                .number()
                .optional()
                .describe("The cost incurred upstream."),
            upstream_upstream_inference_cost: z
                .number()
                .optional()
                .describe("The cost incurred by upstream's upstream."),
        })
        .describe("Detailed breakdown of upstream costs incurred.");
})(Usage = Response.Usage || (Response.Usage = {}));
|
|
1203
|
+
// Token-level log probabilities for content and refusal streams.
// NOTE(review): references `Logprobs.LogprobSchema` before the `let Logprobs`
// declaration below it in this emitted file — confirm evaluation order. — TODO confirm
Response.LogprobsSchema = z
    .object({
        content: z
            .array(Logprobs.LogprobSchema)
            .optional()
            .nullable()
            .describe("The log probabilities of the tokens in the content."),
        refusal: z
            .array(Logprobs.LogprobSchema)
            .optional()
            .nullable()
            .describe("The log probabilities of the tokens in the refusal."),
    })
    .describe("The log probabilities of the tokens generated by the model.");
|
|
1217
|
+
// Logprobs helpers: streaming-merge functions plus per-token schemas.
let Logprobs;
(function (Logprobs) {
    /**
     * Merge two logprobs objects from successive streaming chunks.
     * Returns a [value, changed] pair; when neither list changed, the
     * original `a` is returned unchanged (referential identity preserved).
     * Relies on the file-level `merge` helper — presumably it applies the
     * combiner only when both sides are present; verify against its
     * definition elsewhere in this file. — TODO confirm
     */
    function merged(a, b) {
        const [content, contentChanged] = merge(a.content, b.content, Logprob.mergedList);
        const [refusal, refusalChanged] = merge(a.refusal, b.refusal, Logprob.mergedList);
        if (contentChanged || refusalChanged) {
            return [{ content, refusal }, true];
        }
        else {
            return [a, false];
        }
    }
    Logprobs.merged = merged;
    // One sampled token with its top alternatives.
    Logprobs.LogprobSchema = z
        .object({
            token: z
                .string()
                .describe("The token string which was selected by the sampler."),
            bytes: z
                .array(z.uint32())
                .optional()
                .nullable()
                .describe("The byte representation of the token which was selected by the sampler."),
            logprob: z
                .number()
                .describe("The log probability of the token which was selected by the sampler."),
            top_logprobs: z
                .array(Logprob.TopLogprobSchema)
                .describe("The log probabilities of the top tokens for this position."),
        })
        .describe("The token which was selected by the sampler for this position as well as the logprobabilities of the top options.");
    let Logprob;
    (function (Logprob) {
        /**
         * Concatenate two logprob lists from successive streaming chunks.
         * Returns [value, changed]; reuses an existing array when the other
         * side is empty so unchanged state keeps referential identity.
         */
        function mergedList(a, b) {
            if (b.length === 0) {
                return [a, false];
            }
            else if (a.length === 0) {
                return [b, true];
            }
            else {
                return [[...a, ...b], true];
            }
        }
        Logprob.mergedList = mergedList;
        // One alternative token at a position.
        Logprob.TopLogprobSchema = z
            .object({
                token: z.string().describe("The token string."),
                bytes: z
                    .array(z.uint32())
                    .optional()
                    .nullable()
                    .describe("The byte representation of the token."),
                logprob: z
                    .number()
                    .optional()
                    .nullable()
                    .describe("The log probability of the token."),
            })
            .describe("The log probability of a token in the list of top tokens.");
    })(Logprob = Logprobs.Logprob || (Logprobs.Logprob = {}));
})(Logprobs = Response.Logprobs || (Response.Logprobs = {}));
|
|
1279
|
+
// Response messages are always authored by the assistant.
const responseRoleValues = ["assistant"];
Response.RoleSchema = z
    .enum(responseRoleValues)
    .describe("The role of the message author.");
|
|
1282
|
+
// Single-variant union: only image_url images exist today; the union form
// leaves room for future variants.
// NOTE(review): references `Image.ImageUrlSchema` before the `let Image`
// declaration below it in this emitted file — confirm evaluation order. — TODO confirm
Response.ImageSchema = z
    .union([Image.ImageUrlSchema])
    .describe("An image generated by the model.");
|
|
1285
|
+
// Image helpers: streaming list merge and the image_url schema.
let Image;
(function (Image) {
    /**
     * Concatenate two streamed image lists, reporting whether the result
     * differs from `existing`. Returns a [value, changed] pair; reuses an
     * input array when the other side is empty so unchanged state keeps
     * referential identity.
     */
    function mergedList(existing, incoming) {
        if (incoming.length === 0)
            return [existing, false];
        if (existing.length === 0)
            return [incoming, true];
        return [[...existing, ...incoming], true];
    }
    Image.mergedList = mergedList;
    // A generated image delivered as a Base64 data URL.
    Image.ImageUrlSchema = z.object({
        type: z.literal("image_url"),
        image_url: z.object({
            url: z.string().describe("The Base64 URL of the generated image."),
        }),
    });
})(Image = Response.Image || (Response.Image = {}));
|
|
1306
|
+
let Streaming;
|
|
1307
|
+
(function (Streaming) {
|
|
1308
|
+
Streaming.ToolCallSchema = z
|
|
1309
|
+
.union([ToolCall.FunctionSchema])
|
|
1310
|
+
.describe("A tool call made by the assistant.");
|
|
115
1311
|
let ToolCall;
|
|
116
1312
|
(function (ToolCall) {
|
|
117
1313
|
function merged(a, b) {
|
|
@@ -143,13 +1339,26 @@ export var Chat;
|
|
|
143
1339
|
return merged ? [merged, true] : [a, false];
|
|
144
1340
|
}
|
|
145
1341
|
ToolCall.mergedList = mergedList;
|
|
1342
|
+
ToolCall.FunctionSchema = z
|
|
1343
|
+
.object({
|
|
1344
|
+
index: z
|
|
1345
|
+
.uint32()
|
|
1346
|
+
.describe("The index of the tool call in the sequence of tool calls."),
|
|
1347
|
+
type: z.literal("function").optional(),
|
|
1348
|
+
id: z
|
|
1349
|
+
.string()
|
|
1350
|
+
.optional()
|
|
1351
|
+
.describe("The unique identifier of the function tool."),
|
|
1352
|
+
function: Function.DefinitionSchema.optional(),
|
|
1353
|
+
})
|
|
1354
|
+
.describe("A function tool call made by the assistant.");
|
|
146
1355
|
let Function;
|
|
147
1356
|
(function (Function) {
|
|
148
1357
|
function merged(a, b) {
|
|
149
1358
|
const index = a.index;
|
|
1359
|
+
const [type, typeChanged] = merge(a.type, b.type);
|
|
150
1360
|
const [id, idChanged] = merge(a.id, b.id);
|
|
151
1361
|
const [function_, functionChanged] = merge(a.function, b.function, Definition.merged);
|
|
152
|
-
const [type, typeChanged] = merge(a.type, b.type);
|
|
153
1362
|
if (idChanged || functionChanged || typeChanged) {
|
|
154
1363
|
return [
|
|
155
1364
|
Object.assign(Object.assign(Object.assign({ index }, (id !== undefined ? { id } : {})), (function_ !== undefined ? { function: function_ } : {})), (type !== undefined ? { type } : {})),
|
|
@@ -161,6 +1370,13 @@ export var Chat;
|
|
|
161
1370
|
}
|
|
162
1371
|
}
|
|
163
1372
|
Function.merged = merged;
|
|
1373
|
+
Function.DefinitionSchema = z.object({
|
|
1374
|
+
name: z.string().optional().describe("The name of the function."),
|
|
1375
|
+
arguments: z
|
|
1376
|
+
.string()
|
|
1377
|
+
.optional()
|
|
1378
|
+
.describe("The arguments passed to the function."),
|
|
1379
|
+
});
|
|
164
1380
|
let Definition;
|
|
165
1381
|
(function (Definition) {
|
|
166
1382
|
function merged(a, b) {
|
|
@@ -182,59 +1398,48 @@ export var Chat;
|
|
|
182
1398
|
})(Definition = Function.Definition || (Function.Definition = {}));
|
|
183
1399
|
})(Function = ToolCall.Function || (ToolCall.Function = {}));
|
|
184
1400
|
})(ToolCall = Streaming.ToolCall || (Streaming.ToolCall = {}));
|
|
185
|
-
|
|
186
|
-
|
|
187
|
-
|
|
188
|
-
|
|
189
|
-
|
|
190
|
-
|
|
191
|
-
|
|
192
|
-
|
|
193
|
-
|
|
194
|
-
|
|
195
|
-
|
|
196
|
-
|
|
197
|
-
|
|
198
|
-
|
|
199
|
-
|
|
200
|
-
|
|
201
|
-
|
|
202
|
-
|
|
203
|
-
|
|
204
|
-
|
|
205
|
-
|
|
206
|
-
|
|
207
|
-
|
|
208
|
-
|
|
209
|
-
|
|
210
|
-
|
|
211
|
-
|
|
212
|
-
];
|
|
213
|
-
}
|
|
214
|
-
else {
|
|
215
|
-
return [a, false];
|
|
216
|
-
}
|
|
217
|
-
}
|
|
218
|
-
Usage.merged = merged;
|
|
219
|
-
let CompletionTokensDetails;
|
|
220
|
-
(function (CompletionTokensDetails) {
|
|
1401
|
+
Streaming.DeltaSchema = z
|
|
1402
|
+
.object({
|
|
1403
|
+
content: z
|
|
1404
|
+
.string()
|
|
1405
|
+
.optional()
|
|
1406
|
+
.describe("The content added in this delta."),
|
|
1407
|
+
refusal: z
|
|
1408
|
+
.string()
|
|
1409
|
+
.optional()
|
|
1410
|
+
.describe("The refusal message added in this delta."),
|
|
1411
|
+
role: Response.RoleSchema.optional(),
|
|
1412
|
+
tool_calls: z
|
|
1413
|
+
.array(Streaming.ToolCallSchema)
|
|
1414
|
+
.optional()
|
|
1415
|
+
.describe("Tool calls made in this delta."),
|
|
1416
|
+
reasoning: z
|
|
1417
|
+
.string()
|
|
1418
|
+
.optional()
|
|
1419
|
+
.describe("The reasoning added in this delta."),
|
|
1420
|
+
images: z
|
|
1421
|
+
.array(Response.ImageSchema)
|
|
1422
|
+
.optional()
|
|
1423
|
+
.describe("Images added in this delta."),
|
|
1424
|
+
})
|
|
1425
|
+
.describe("A delta in a streaming chat completion response.");
|
|
1426
|
+
let Delta;
|
|
1427
|
+
(function (Delta) {
|
|
221
1428
|
function merged(a, b) {
|
|
222
|
-
const [
|
|
223
|
-
const [
|
|
224
|
-
const [
|
|
225
|
-
const [
|
|
226
|
-
|
|
227
|
-
|
|
228
|
-
|
|
229
|
-
|
|
1429
|
+
const [content, contentChanged] = merge(a.content, b.content, mergedString);
|
|
1430
|
+
const [refusal, refusalChanged] = merge(a.refusal, b.refusal, mergedString);
|
|
1431
|
+
const [role, roleChanged] = merge(a.role, b.role);
|
|
1432
|
+
const [tool_calls, tool_callsChanged] = merge(a.tool_calls, b.tool_calls, ToolCall.mergedList);
|
|
1433
|
+
const [reasoning, reasoningChanged] = merge(a.reasoning, b.reasoning, mergedString);
|
|
1434
|
+
const [images, imagesChanged] = merge(a.images, b.images, Image.mergedList);
|
|
1435
|
+
if (contentChanged ||
|
|
1436
|
+
reasoningChanged ||
|
|
1437
|
+
refusalChanged ||
|
|
1438
|
+
roleChanged ||
|
|
1439
|
+
tool_callsChanged ||
|
|
1440
|
+
imagesChanged) {
|
|
230
1441
|
return [
|
|
231
|
-
Object.assign(Object.assign(Object.assign(Object.assign({}, (
|
|
232
|
-
? { accepted_prediction_tokens }
|
|
233
|
-
: {})), (audio_tokens !== undefined ? { audio_tokens } : {})), (reasoning_tokens !== undefined
|
|
234
|
-
? { reasoning_tokens }
|
|
235
|
-
: {})), (rejected_prediction_tokens !== undefined
|
|
236
|
-
? { rejected_prediction_tokens }
|
|
237
|
-
: {})),
|
|
1442
|
+
Object.assign(Object.assign(Object.assign(Object.assign(Object.assign(Object.assign({}, (content !== undefined ? { content } : {})), (reasoning !== undefined ? { reasoning } : {})), (refusal !== undefined ? { refusal } : {})), (role !== undefined ? { role } : {})), (tool_calls !== undefined ? { tool_calls } : {})), (images !== undefined ? { images } : {})),
|
|
238
1443
|
true,
|
|
239
1444
|
];
|
|
240
1445
|
}
|
|
@@ -242,16 +1447,30 @@ export var Chat;
|
|
|
242
1447
|
return [a, false];
|
|
243
1448
|
}
|
|
244
1449
|
}
|
|
245
|
-
|
|
246
|
-
})(
|
|
247
|
-
|
|
248
|
-
|
|
1450
|
+
Delta.merged = merged;
|
|
1451
|
+
})(Delta = Streaming.Delta || (Streaming.Delta = {}));
|
|
1452
|
+
Streaming.ChoiceSchema = z
|
|
1453
|
+
.object({
|
|
1454
|
+
delta: Streaming.DeltaSchema,
|
|
1455
|
+
finish_reason: Response.FinishReasonSchema.optional(),
|
|
1456
|
+
index: z
|
|
1457
|
+
.uint32()
|
|
1458
|
+
.describe("The index of the choice in the list of choices."),
|
|
1459
|
+
logprobs: Response.LogprobsSchema.optional(),
|
|
1460
|
+
})
|
|
1461
|
+
.describe("A choice in a streaming chat completion response.");
|
|
1462
|
+
let Choice;
|
|
1463
|
+
(function (Choice) {
|
|
249
1464
|
function merged(a, b) {
|
|
250
|
-
const [
|
|
251
|
-
const [
|
|
252
|
-
|
|
1465
|
+
const [delta, deltaChanged] = merge(a.delta, b.delta, Delta.merged);
|
|
1466
|
+
const [finish_reason, finish_reasonChanged] = merge(a.finish_reason, b.finish_reason);
|
|
1467
|
+
const index = a.index;
|
|
1468
|
+
const [logprobs, logprobsChanged] = merge(a.logprobs, b.logprobs, Logprobs.merged);
|
|
1469
|
+
if (deltaChanged || finish_reasonChanged || logprobsChanged) {
|
|
253
1470
|
return [
|
|
254
|
-
Object.assign(
|
|
1471
|
+
Object.assign({ delta,
|
|
1472
|
+
finish_reason,
|
|
1473
|
+
index }, (logprobs !== undefined ? { logprobs } : {})),
|
|
255
1474
|
true,
|
|
256
1475
|
];
|
|
257
1476
|
}
|
|
@@ -259,21 +1478,92 @@ export var Chat;
|
|
|
259
1478
|
return [a, false];
|
|
260
1479
|
}
|
|
261
1480
|
}
|
|
262
|
-
|
|
263
|
-
|
|
264
|
-
|
|
265
|
-
|
|
1481
|
+
Choice.merged = merged;
|
|
1482
|
+
function mergedList(a, b) {
|
|
1483
|
+
let merged = undefined;
|
|
1484
|
+
for (const choice of b) {
|
|
1485
|
+
const existingIndex = a.findIndex(({ index }) => index === choice.index);
|
|
1486
|
+
if (existingIndex === -1) {
|
|
1487
|
+
if (merged === undefined) {
|
|
1488
|
+
merged = [...a, choice];
|
|
1489
|
+
}
|
|
1490
|
+
else {
|
|
1491
|
+
merged.push(choice);
|
|
1492
|
+
}
|
|
1493
|
+
}
|
|
1494
|
+
else {
|
|
1495
|
+
const [mergedChoice, choiceChanged] = Choice.merged(a[existingIndex], choice);
|
|
1496
|
+
if (choiceChanged) {
|
|
1497
|
+
if (merged === undefined) {
|
|
1498
|
+
merged = [...a];
|
|
1499
|
+
}
|
|
1500
|
+
merged[existingIndex] = mergedChoice;
|
|
1501
|
+
}
|
|
1502
|
+
}
|
|
1503
|
+
}
|
|
1504
|
+
return merged ? [merged, true] : [a, false];
|
|
1505
|
+
}
|
|
1506
|
+
Choice.mergedList = mergedList;
|
|
1507
|
+
})(Choice = Streaming.Choice || (Streaming.Choice = {}));
|
|
1508
|
+
Streaming.ChatCompletionChunkSchema = z
|
|
1509
|
+
.object({
|
|
1510
|
+
id: z
|
|
1511
|
+
.string()
|
|
1512
|
+
.describe("The unique identifier of the chat completion."),
|
|
1513
|
+
upstream_id: z
|
|
1514
|
+
.string()
|
|
1515
|
+
.describe("The unique identifier of the upstream chat completion."),
|
|
1516
|
+
choices: z
|
|
1517
|
+
.array(Streaming.ChoiceSchema)
|
|
1518
|
+
.describe("The list of choices in this chunk."),
|
|
1519
|
+
created: z
|
|
1520
|
+
.uint32()
|
|
1521
|
+
.describe("The Unix timestamp (in seconds) when the chat completion was created."),
|
|
1522
|
+
model: z
|
|
1523
|
+
.string()
|
|
1524
|
+
.describe("The unique identifier of the Ensemble LLM used for this chat completion."),
|
|
1525
|
+
upstream_model: z
|
|
1526
|
+
.string()
|
|
1527
|
+
.describe("The upstream model used for this chat completion."),
|
|
1528
|
+
object: z.literal("chat.completion.chunk"),
|
|
1529
|
+
service_tier: z.string().optional(),
|
|
1530
|
+
system_fingerprint: z.string().optional(),
|
|
1531
|
+
usage: Response.UsageSchema.optional(),
|
|
1532
|
+
provider: z
|
|
1533
|
+
.string()
|
|
1534
|
+
.optional()
|
|
1535
|
+
.describe("The provider used for this chat completion."),
|
|
1536
|
+
})
|
|
1537
|
+
.describe("A chunk in a streaming chat completion response.");
|
|
1538
|
+
let ChatCompletionChunk;
|
|
1539
|
+
(function (ChatCompletionChunk) {
|
|
266
1540
|
function merged(a, b) {
|
|
267
|
-
const
|
|
268
|
-
const
|
|
269
|
-
|
|
270
|
-
|
|
1541
|
+
const id = a.id;
|
|
1542
|
+
const upstream_id = a.upstream_id;
|
|
1543
|
+
const [choices, choicesChanged] = Choice.mergedList(a.choices, b.choices);
|
|
1544
|
+
const created = a.created;
|
|
1545
|
+
const model = a.model;
|
|
1546
|
+
const upstream_model = a.upstream_model;
|
|
1547
|
+
const object = a.object;
|
|
1548
|
+
const [service_tier, service_tierChanged] = merge(a.service_tier, b.service_tier);
|
|
1549
|
+
const [system_fingerprint, system_fingerprintChanged] = merge(a.system_fingerprint, b.system_fingerprint);
|
|
1550
|
+
const [usage, usageChanged] = merge(a.usage, b.usage);
|
|
1551
|
+
const [provider, providerChanged] = merge(a.provider, b.provider);
|
|
1552
|
+
if (choicesChanged ||
|
|
1553
|
+
service_tierChanged ||
|
|
1554
|
+
system_fingerprintChanged ||
|
|
1555
|
+
usageChanged ||
|
|
1556
|
+
providerChanged) {
|
|
271
1557
|
return [
|
|
272
|
-
Object.assign(Object.assign({
|
|
273
|
-
|
|
274
|
-
|
|
275
|
-
|
|
276
|
-
|
|
1558
|
+
Object.assign(Object.assign(Object.assign(Object.assign({ id,
|
|
1559
|
+
upstream_id,
|
|
1560
|
+
choices,
|
|
1561
|
+
created,
|
|
1562
|
+
model,
|
|
1563
|
+
upstream_model,
|
|
1564
|
+
object }, (service_tier !== undefined ? { service_tier } : {})), (system_fingerprint !== undefined
|
|
1565
|
+
? { system_fingerprint }
|
|
1566
|
+
: {})), (usage !== undefined ? { usage } : {})), (provider !== undefined ? { provider } : {})),
|
|
277
1567
|
true,
|
|
278
1568
|
];
|
|
279
1569
|
}
|
|
@@ -281,68 +1571,106 @@ export var Chat;
|
|
|
281
1571
|
return [a, false];
|
|
282
1572
|
}
|
|
283
1573
|
}
|
|
284
|
-
|
|
285
|
-
})(
|
|
286
|
-
})(
|
|
287
|
-
let
|
|
288
|
-
(function (
|
|
289
|
-
|
|
290
|
-
|
|
291
|
-
|
|
292
|
-
|
|
293
|
-
|
|
294
|
-
|
|
295
|
-
|
|
296
|
-
|
|
297
|
-
|
|
298
|
-
|
|
299
|
-
|
|
300
|
-
|
|
301
|
-
|
|
302
|
-
|
|
303
|
-
|
|
304
|
-
|
|
305
|
-
|
|
306
|
-
|
|
307
|
-
|
|
308
|
-
|
|
309
|
-
|
|
310
|
-
|
|
311
|
-
|
|
312
|
-
|
|
313
|
-
|
|
314
|
-
|
|
315
|
-
|
|
316
|
-
|
|
317
|
-
|
|
318
|
-
|
|
319
|
-
|
|
320
|
-
|
|
321
|
-
|
|
322
|
-
|
|
323
|
-
|
|
324
|
-
|
|
325
|
-
|
|
326
|
-
|
|
327
|
-
|
|
328
|
-
|
|
329
|
-
|
|
330
|
-
|
|
1574
|
+
ChatCompletionChunk.merged = merged;
|
|
1575
|
+
})(ChatCompletionChunk = Streaming.ChatCompletionChunk || (Streaming.ChatCompletionChunk = {}));
|
|
1576
|
+
})(Streaming = Response.Streaming || (Response.Streaming = {}));
|
|
1577
|
+
let Unary;
|
|
1578
|
+
(function (Unary) {
|
|
1579
|
+
Unary.ToolCallSchema = z
|
|
1580
|
+
.union([ToolCall.FunctionSchema])
|
|
1581
|
+
.describe(Streaming.ToolCallSchema.description);
|
|
1582
|
+
let ToolCall;
|
|
1583
|
+
(function (ToolCall) {
|
|
1584
|
+
ToolCall.FunctionSchema = z
|
|
1585
|
+
.object({
|
|
1586
|
+
type: z.literal("function"),
|
|
1587
|
+
id: z
|
|
1588
|
+
.string()
|
|
1589
|
+
.describe(Streaming.ToolCall.FunctionSchema.shape.id.description),
|
|
1590
|
+
function: Function.DefinitionSchema,
|
|
1591
|
+
})
|
|
1592
|
+
.describe(Streaming.ToolCall.FunctionSchema.description);
|
|
1593
|
+
let Function;
|
|
1594
|
+
(function (Function) {
|
|
1595
|
+
Function.DefinitionSchema = z.object({
|
|
1596
|
+
name: z
|
|
1597
|
+
.string()
|
|
1598
|
+
.describe(Streaming.ToolCall.Function.DefinitionSchema.shape.name
|
|
1599
|
+
.description),
|
|
1600
|
+
arguments: z
|
|
1601
|
+
.string()
|
|
1602
|
+
.describe(Streaming.ToolCall.Function.DefinitionSchema.shape.arguments
|
|
1603
|
+
.description),
|
|
1604
|
+
});
|
|
1605
|
+
})(Function = ToolCall.Function || (ToolCall.Function = {}));
|
|
1606
|
+
})(ToolCall = Unary.ToolCall || (Unary.ToolCall = {}));
|
|
1607
|
+
Unary.MessageSchema = z
|
|
1608
|
+
.object({
|
|
1609
|
+
content: z
|
|
1610
|
+
.string()
|
|
1611
|
+
.nullable()
|
|
1612
|
+
.describe("The content of the message."),
|
|
1613
|
+
refusal: z
|
|
1614
|
+
.string()
|
|
1615
|
+
.nullable()
|
|
1616
|
+
.describe("The refusal message, if any."),
|
|
1617
|
+
role: Response.RoleSchema,
|
|
1618
|
+
tool_calls: z
|
|
1619
|
+
.array(Unary.ToolCallSchema)
|
|
1620
|
+
.nullable()
|
|
1621
|
+
.describe("The tool calls made by the assistant, if any."),
|
|
1622
|
+
reasoning: z
|
|
1623
|
+
.string()
|
|
1624
|
+
.optional()
|
|
1625
|
+
.describe("The reasoning provided by the assistant, if any."),
|
|
1626
|
+
images: z
|
|
1627
|
+
.array(Response.ImageSchema)
|
|
1628
|
+
.optional()
|
|
1629
|
+
.describe("The images generated by the assistant, if any."),
|
|
1630
|
+
})
|
|
1631
|
+
.describe("A message generated by the assistant.");
|
|
1632
|
+
Unary.ChoiceSchema = z
|
|
1633
|
+
.object({
|
|
1634
|
+
message: Unary.MessageSchema,
|
|
1635
|
+
finish_reason: Response.FinishReasonSchema,
|
|
1636
|
+
index: z
|
|
1637
|
+
.uint32()
|
|
1638
|
+
.describe(Streaming.ChoiceSchema.shape.index.description),
|
|
1639
|
+
logprobs: Response.LogprobsSchema.nullable(),
|
|
1640
|
+
})
|
|
1641
|
+
.describe("A choice in a unary chat completion response.");
|
|
1642
|
+
Unary.ChatCompletionSchema = z
|
|
1643
|
+
.object({
|
|
1644
|
+
id: z
|
|
1645
|
+
.string()
|
|
1646
|
+
.describe("The unique identifier of the chat completion."),
|
|
1647
|
+
upstream_id: z
|
|
1648
|
+
.string()
|
|
1649
|
+
.describe("The unique identifier of the upstream chat completion."),
|
|
1650
|
+
choices: z
|
|
1651
|
+
.array(Unary.ChoiceSchema)
|
|
1652
|
+
.describe("The list of choices in this chat completion."),
|
|
1653
|
+
created: z
|
|
1654
|
+
.uint32()
|
|
1655
|
+
.describe("The Unix timestamp (in seconds) when the chat completion was created."),
|
|
1656
|
+
model: z
|
|
1657
|
+
.string()
|
|
1658
|
+
.describe("The unique identifier of the Ensemble LLM used for this chat completion."),
|
|
1659
|
+
upstream_model: z
|
|
1660
|
+
.string()
|
|
1661
|
+
.describe("The upstream model used for this chat completion."),
|
|
1662
|
+
object: z.literal("chat.completion"),
|
|
1663
|
+
service_tier: z.string().optional(),
|
|
1664
|
+
system_fingerprint: z.string().optional(),
|
|
1665
|
+
usage: Response.UsageSchema,
|
|
1666
|
+
provider: z
|
|
1667
|
+
.string()
|
|
1668
|
+
.optional()
|
|
1669
|
+
.describe("The provider used for this chat completion."),
|
|
1670
|
+
})
|
|
1671
|
+
.describe("A unary chat completion response.");
|
|
1672
|
+
})(Unary = Response.Unary || (Response.Unary = {}));
|
|
331
1673
|
})(Response = Completions.Response || (Completions.Response = {}));
|
|
332
|
-
async function list(openai, listOptions, options) {
|
|
333
|
-
const response = await openai.chat.completions.list(Object.assign({ query: listOptions }, options));
|
|
334
|
-
return response;
|
|
335
|
-
}
|
|
336
|
-
Completions.list = list;
|
|
337
|
-
async function publish(openai, id, options) {
|
|
338
|
-
await openai.post(`/chat/completions/${id}/publish`, options);
|
|
339
|
-
}
|
|
340
|
-
Completions.publish = publish;
|
|
341
|
-
async function retrieve(openai, id, options) {
|
|
342
|
-
const response = await openai.chat.completions.retrieve(id, options);
|
|
343
|
-
return response;
|
|
344
|
-
}
|
|
345
|
-
Completions.retrieve = retrieve;
|
|
346
1674
|
async function create(openai, body, options) {
|
|
347
1675
|
var _a;
|
|
348
1676
|
const response = await openai.post("/chat/completions", Object.assign({ body, stream: (_a = body.stream) !== null && _a !== void 0 ? _a : false }, options));
|
|
@@ -351,68 +1679,146 @@ export var Chat;
|
|
|
351
1679
|
Completions.create = create;
|
|
352
1680
|
})(Completions = Chat.Completions || (Chat.Completions = {}));
|
|
353
1681
|
})(Chat || (Chat = {}));
|
|
354
|
-
|
|
355
|
-
|
|
1682
|
+
// Vector Completions
|
|
1683
|
+
export var Vector;
|
|
1684
|
+
(function (Vector) {
|
|
356
1685
|
let Completions;
|
|
357
1686
|
(function (Completions) {
|
|
1687
|
+
let Request;
|
|
1688
|
+
(function (Request) {
|
|
1689
|
+
Request.EnsembleSchema = z
|
|
1690
|
+
.union([z.string(), EnsembleBaseSchema])
|
|
1691
|
+
.describe("The Ensemble to use for this completion. May be a unique ID or an inline definition.");
|
|
1692
|
+
Request.ProfileSchema = z
|
|
1693
|
+
.array(z.number())
|
|
1694
|
+
.describe('The profile to use for the completion. Must be of the same length as the Ensemble\'s "LLMs" field, ignoring count.');
|
|
1695
|
+
Request.VectorCompletionCreateParamsBaseSchema = z
|
|
1696
|
+
.object({
|
|
1697
|
+
retry: z
|
|
1698
|
+
.string()
|
|
1699
|
+
.optional()
|
|
1700
|
+
.nullable()
|
|
1701
|
+
.describe("The unique ID of a previous incomplete or failed completion."),
|
|
1702
|
+
messages: MessagesSchema,
|
|
1703
|
+
provider: Chat.Completions.Request.ProviderSchema.optional().nullable(),
|
|
1704
|
+
ensemble: Request.EnsembleSchema,
|
|
1705
|
+
profile: Request.ProfileSchema,
|
|
1706
|
+
seed: Chat.Completions.Request.SeedSchema.optional().nullable(),
|
|
1707
|
+
tools: ToolsSchema.optional()
|
|
1708
|
+
.nullable()
|
|
1709
|
+
.describe(`${ToolsSchema.description} These are readonly and will only be useful for explaining prior tool calls or otherwise influencing behavior.`),
|
|
1710
|
+
responses: VectorResponsesSchema,
|
|
1711
|
+
backoff_max_elapsed_time: Chat.Completions.Request.BackoffMaxElapsedTimeSchema.optional().nullable(),
|
|
1712
|
+
first_chunk_timeout: Chat.Completions.Request.FirstChunkTimeoutSchema.optional().nullable(),
|
|
1713
|
+
other_chunk_timeout: Chat.Completions.Request.OtherChunkTimeoutSchema.optional().nullable(),
|
|
1714
|
+
})
|
|
1715
|
+
.describe("Base parameters for creating a vector completion.");
|
|
1716
|
+
Request.VectorCompletionCreateParamsStreamingSchema = Request.VectorCompletionCreateParamsBaseSchema.extend({
|
|
1717
|
+
stream: Chat.Completions.Request.StreamTrueSchema,
|
|
1718
|
+
}).describe("Parameters for creating a streaming vector completion.");
|
|
1719
|
+
Request.VectorCompletionCreateParamsNonStreamingSchema = Request.VectorCompletionCreateParamsBaseSchema.extend({
|
|
1720
|
+
stream: Chat.Completions.Request.StreamFalseSchema.optional().nullable(),
|
|
1721
|
+
}).describe("Parameters for creating a unary vector completion.");
|
|
1722
|
+
Request.VectorCompletionCreateParamsSchema = z
|
|
1723
|
+
.union([
|
|
1724
|
+
Request.VectorCompletionCreateParamsStreamingSchema,
|
|
1725
|
+
Request.VectorCompletionCreateParamsNonStreamingSchema,
|
|
1726
|
+
])
|
|
1727
|
+
.describe("Parameters for creating a vector completion.");
|
|
1728
|
+
})(Request = Completions.Request || (Completions.Request = {}));
|
|
358
1729
|
let Response;
|
|
359
1730
|
(function (Response) {
|
|
1731
|
+
Response.VoteSchema = z
|
|
1732
|
+
.object({
|
|
1733
|
+
model: z
|
|
1734
|
+
.string()
|
|
1735
|
+
.describe("The unique identifier of the Ensemble LLM which generated this vote."),
|
|
1736
|
+
ensemble_index: z
|
|
1737
|
+
.uint32()
|
|
1738
|
+
.describe("The index of the Ensemble LLM in the Ensemble."),
|
|
1739
|
+
flat_ensemble_index: z
|
|
1740
|
+
.uint32()
|
|
1741
|
+
.describe("The flat index of the Ensemble LLM in the expanded Ensemble, accounting for counts."),
|
|
1742
|
+
vote: z
|
|
1743
|
+
.array(z.number())
|
|
1744
|
+
.describe("The vote generated by this Ensemble LLM. It is of the same length of the number of responses provided in the request. If the Ensemble LLM used logprobs, may be a probability distribution; otherwise, one of the responses will have a value of 1 and the rest 0."),
|
|
1745
|
+
weight: z.number().describe("The weight assigned to this vote."),
|
|
1746
|
+
retry: z
|
|
1747
|
+
.boolean()
|
|
1748
|
+
.optional()
|
|
1749
|
+
.describe("Whether this vote came from a previous Vector Completion which was retried."),
|
|
1750
|
+
})
|
|
1751
|
+
.describe("A vote from an Ensemble LLM within a Vector Completion.");
|
|
1752
|
+
let Vote;
|
|
1753
|
+
(function (Vote) {
|
|
1754
|
+
function mergedList(a, b) {
|
|
1755
|
+
let merged = undefined;
|
|
1756
|
+
for (const vote of b) {
|
|
1757
|
+
const existingIndex = a.findIndex(({ flat_ensemble_index }) => flat_ensemble_index === vote.flat_ensemble_index);
|
|
1758
|
+
if (existingIndex === -1) {
|
|
1759
|
+
if (merged === undefined) {
|
|
1760
|
+
merged = [...a, vote];
|
|
1761
|
+
}
|
|
1762
|
+
else {
|
|
1763
|
+
merged.push(vote);
|
|
1764
|
+
}
|
|
1765
|
+
}
|
|
1766
|
+
}
|
|
1767
|
+
return merged ? [merged, true] : [a, false];
|
|
1768
|
+
}
|
|
1769
|
+
Vote.mergedList = mergedList;
|
|
1770
|
+
})(Vote = Response.Vote || (Response.Vote = {}));
|
|
1771
|
+
Response.UsageSchema = z
|
|
1772
|
+
.object({
|
|
1773
|
+
completion_tokens: z
|
|
1774
|
+
.uint32()
|
|
1775
|
+
.describe("The number of tokens generated in the completion."),
|
|
1776
|
+
prompt_tokens: z
|
|
1777
|
+
.uint32()
|
|
1778
|
+
.describe("The number of tokens in the prompt."),
|
|
1779
|
+
total_tokens: z
|
|
1780
|
+
.uint32()
|
|
1781
|
+
.describe("The total number of tokens used in the prompt or generated in the completion."),
|
|
1782
|
+
completion_tokens_details: Chat.Completions.Response.Usage.CompletionTokensDetailsSchema.optional(),
|
|
1783
|
+
prompt_tokens_details: Chat.Completions.Response.Usage.PromptTokensDetailsSchema.optional(),
|
|
1784
|
+
cost: z
|
|
1785
|
+
.number()
|
|
1786
|
+
.describe("The cost in credits incurred for this completion."),
|
|
1787
|
+
cost_details: Chat.Completions.Response.Usage.CostDetailsSchema.optional(),
|
|
1788
|
+
total_cost: z
|
|
1789
|
+
.number()
|
|
1790
|
+
.describe("The total cost in credits incurred including upstream costs."),
|
|
1791
|
+
})
|
|
1792
|
+
.describe("Token and cost usage statistics for the completion.");
|
|
1793
|
+
Response.VotesSchema = z
|
|
1794
|
+
.array(Response.VoteSchema)
|
|
1795
|
+
.describe("The list of votes for responses in the request from the Ensemble LLMs within the provided Ensemble.");
|
|
1796
|
+
Response.ScoresSchema = z
|
|
1797
|
+
.array(z.number())
|
|
1798
|
+
.describe("The scores for each response in the request, aggregated from the votes of the Ensemble LLMs.");
|
|
1799
|
+
Response.WeightsSchema = z
|
|
1800
|
+
.array(z.number())
|
|
1801
|
+
.describe("The weights assigned to each response in the request, aggregated from the votes of the Ensemble LLMs.");
|
|
1802
|
+
Response.EnsembleSchema = z
|
|
1803
|
+
.string()
|
|
1804
|
+
.describe("The unique identifier of the Ensemble used for this vector completion.");
|
|
360
1805
|
let Streaming;
|
|
361
1806
|
(function (Streaming) {
|
|
1807
|
+
Streaming.ChatCompletionChunkSchema = Chat.Completions.Response.Streaming.ChatCompletionChunkSchema.extend({
|
|
1808
|
+
index: z
|
|
1809
|
+
.uint32()
|
|
1810
|
+
.describe("The index of the completion amongst all chat completions."),
|
|
1811
|
+
error: ObjectiveAIErrorSchema.optional().describe("An error encountered during the generation of this chat completion."),
|
|
1812
|
+
}).describe("A chat completion chunk generated in the pursuit of a vector completion.");
|
|
362
1813
|
let ChatCompletionChunk;
|
|
363
1814
|
(function (ChatCompletionChunk) {
|
|
364
1815
|
function merged(a, b) {
|
|
365
|
-
const id = a.id;
|
|
366
|
-
const [choices, choicesChanged] = Choice.mergedList(a.choices, b.choices);
|
|
367
|
-
const created = a.created;
|
|
368
|
-
const model = a.model;
|
|
369
|
-
const object = a.object;
|
|
370
|
-
const [usage, usageChanged] = merge(a.usage, b.usage, Chat.Completions.Response.Usage.merged);
|
|
371
|
-
const [weight_data, weight_dataChanged] = merge(a.weight_data, b.weight_data);
|
|
372
|
-
if (choicesChanged || usageChanged || weight_dataChanged) {
|
|
373
|
-
return [
|
|
374
|
-
Object.assign(Object.assign({ id,
|
|
375
|
-
choices,
|
|
376
|
-
created,
|
|
377
|
-
model,
|
|
378
|
-
object }, (usage !== undefined ? { usage } : {})), (weight_data !== undefined ? { weight_data } : {})),
|
|
379
|
-
true,
|
|
380
|
-
];
|
|
381
|
-
}
|
|
382
|
-
else {
|
|
383
|
-
return [a, false];
|
|
384
|
-
}
|
|
385
|
-
}
|
|
386
|
-
ChatCompletionChunk.merged = merged;
|
|
387
|
-
})(ChatCompletionChunk = Streaming.ChatCompletionChunk || (Streaming.ChatCompletionChunk = {}));
|
|
388
|
-
let Choice;
|
|
389
|
-
(function (Choice) {
|
|
390
|
-
function merged(a, b) {
|
|
391
|
-
const [delta, deltaChanged] = merge(a.delta, b.delta, Delta.merged);
|
|
392
|
-
const [finish_reason, finish_reasonChanged] = merge(a.finish_reason, b.finish_reason);
|
|
393
1816
|
const index = a.index;
|
|
394
|
-
const [
|
|
395
|
-
const [weight, weightChanged] = merge(a.weight, b.weight);
|
|
396
|
-
const [confidence, confidenceChanged] = merge(a.confidence, b.confidence);
|
|
1817
|
+
const [base, baseChanged] = Chat.Completions.Response.Streaming.ChatCompletionChunk.merged(a, b);
|
|
397
1818
|
const [error, errorChanged] = merge(a.error, b.error);
|
|
398
|
-
|
|
399
|
-
const [model_index, model_indexChanged] = merge(a.model_index, b.model_index);
|
|
400
|
-
const [completion_metadata, completion_metadataChanged] = merge(a.completion_metadata, b.completion_metadata, CompletionMetadata.merged);
|
|
401
|
-
if (deltaChanged ||
|
|
402
|
-
finish_reasonChanged ||
|
|
403
|
-
logprobsChanged ||
|
|
404
|
-
weightChanged ||
|
|
405
|
-
confidenceChanged ||
|
|
406
|
-
errorChanged ||
|
|
407
|
-
modelChanged ||
|
|
408
|
-
model_indexChanged ||
|
|
409
|
-
completion_metadataChanged) {
|
|
1819
|
+
if (baseChanged || errorChanged) {
|
|
410
1820
|
return [
|
|
411
|
-
Object.assign(Object.assign(
|
|
412
|
-
finish_reason,
|
|
413
|
-
index }, (logprobs !== undefined ? { logprobs } : {})), (weight !== undefined ? { weight } : {})), (confidence !== undefined ? { confidence } : {})), (error !== undefined ? { error } : {})), (model !== undefined ? { model } : {})), (model_index !== undefined ? { model_index } : {})), (completion_metadata !== undefined
|
|
414
|
-
? { completion_metadata }
|
|
415
|
-
: {})),
|
|
1821
|
+
Object.assign(Object.assign({ index }, base), (error !== undefined ? { error } : {})),
|
|
416
1822
|
true,
|
|
417
1823
|
];
|
|
418
1824
|
}
|
|
@@ -420,52 +1826,78 @@ export var Score;
|
|
|
420
1826
|
return [a, false];
|
|
421
1827
|
}
|
|
422
1828
|
}
|
|
423
|
-
|
|
1829
|
+
ChatCompletionChunk.merged = merged;
|
|
424
1830
|
function mergedList(a, b) {
|
|
425
1831
|
let merged = undefined;
|
|
426
|
-
for (const
|
|
427
|
-
const existingIndex = a.findIndex(({ index }) => index ===
|
|
1832
|
+
for (const chunk of b) {
|
|
1833
|
+
const existingIndex = a.findIndex(({ index }) => index === chunk.index);
|
|
428
1834
|
if (existingIndex === -1) {
|
|
429
1835
|
if (merged === undefined) {
|
|
430
|
-
merged = [...a,
|
|
1836
|
+
merged = [...a, chunk];
|
|
431
1837
|
}
|
|
432
1838
|
else {
|
|
433
|
-
merged.push(
|
|
1839
|
+
merged.push(chunk);
|
|
434
1840
|
}
|
|
435
1841
|
}
|
|
436
1842
|
else {
|
|
437
|
-
const [
|
|
438
|
-
if (
|
|
1843
|
+
const [mergedChunk, chunkChanged] = ChatCompletionChunk.merged(a[existingIndex], chunk);
|
|
1844
|
+
if (chunkChanged) {
|
|
439
1845
|
if (merged === undefined) {
|
|
440
1846
|
merged = [...a];
|
|
441
1847
|
}
|
|
442
|
-
merged[existingIndex] =
|
|
1848
|
+
merged[existingIndex] = mergedChunk;
|
|
443
1849
|
}
|
|
444
1850
|
}
|
|
445
1851
|
}
|
|
446
1852
|
return merged ? [merged, true] : [a, false];
|
|
447
1853
|
}
|
|
448
|
-
|
|
449
|
-
})(
|
|
450
|
-
|
|
451
|
-
|
|
1854
|
+
ChatCompletionChunk.mergedList = mergedList;
|
|
1855
|
+
})(ChatCompletionChunk = Streaming.ChatCompletionChunk || (Streaming.ChatCompletionChunk = {}));
|
|
1856
|
+
Streaming.VectorCompletionChunkSchema = z
|
|
1857
|
+
.object({
|
|
1858
|
+
id: z
|
|
1859
|
+
.string()
|
|
1860
|
+
.describe("The unique identifier of the vector completion."),
|
|
1861
|
+
completions: z
|
|
1862
|
+
.array(Streaming.ChatCompletionChunkSchema)
|
|
1863
|
+
.describe("The list of chat completion chunks created for this vector completion."),
|
|
1864
|
+
votes: Response.VotesSchema,
|
|
1865
|
+
scores: Response.ScoresSchema,
|
|
1866
|
+
weights: Response.WeightsSchema,
|
|
1867
|
+
created: z
|
|
1868
|
+
.uint32()
|
|
1869
|
+
.describe("The Unix timestamp (in seconds) when the vector completion was created."),
|
|
1870
|
+
ensemble: Response.EnsembleSchema,
|
|
1871
|
+
object: z.literal("vector.completion.chunk"),
|
|
1872
|
+
usage: Response.UsageSchema.optional(),
|
|
1873
|
+
})
|
|
1874
|
+
.describe("A chunk in a streaming vector completion response.");
|
|
1875
|
+
let VectorCompletionChunk;
|
|
1876
|
+
(function (VectorCompletionChunk) {
|
|
452
1877
|
function merged(a, b) {
|
|
453
|
-
const
|
|
454
|
-
const [
|
|
455
|
-
const [
|
|
456
|
-
const [
|
|
457
|
-
const [
|
|
458
|
-
const
|
|
459
|
-
const
|
|
460
|
-
|
|
461
|
-
|
|
462
|
-
|
|
463
|
-
|
|
464
|
-
|
|
465
|
-
|
|
466
|
-
|
|
1878
|
+
const id = a.id;
|
|
1879
|
+
const [completions, completionsChanged] = ChatCompletionChunk.mergedList(a.completions, b.completions);
|
|
1880
|
+
const [votes, votesChanged] = Vote.mergedList(a.votes, b.votes);
|
|
1881
|
+
const [scores, scoresChanged] = Scores.merged(a.scores, b.scores);
|
|
1882
|
+
const [weights, weightsChanged] = Weights.merged(a.weights, b.weights);
|
|
1883
|
+
const created = a.created;
|
|
1884
|
+
const ensemble = a.ensemble;
|
|
1885
|
+
const object = a.object;
|
|
1886
|
+
const [usage, usageChanged] = merge(a.usage, b.usage);
|
|
1887
|
+
if (completionsChanged ||
|
|
1888
|
+
votesChanged ||
|
|
1889
|
+
scoresChanged ||
|
|
1890
|
+
weightsChanged ||
|
|
1891
|
+
usageChanged) {
|
|
467
1892
|
return [
|
|
468
|
-
Object.assign(
|
|
1893
|
+
Object.assign({ id,
|
|
1894
|
+
completions,
|
|
1895
|
+
votes,
|
|
1896
|
+
scores,
|
|
1897
|
+
weights,
|
|
1898
|
+
created,
|
|
1899
|
+
ensemble,
|
|
1900
|
+
object }, (usage !== undefined ? { usage } : {})),
|
|
469
1901
|
true,
|
|
470
1902
|
];
|
|
471
1903
|
}
|
|
@@ -473,91 +1905,840 @@ export var Score;
|
|
|
473
1905
|
return [a, false];
|
|
474
1906
|
}
|
|
475
1907
|
}
|
|
476
|
-
|
|
477
|
-
})(
|
|
478
|
-
|
|
479
|
-
|
|
480
|
-
|
|
481
|
-
|
|
482
|
-
|
|
483
|
-
|
|
484
|
-
|
|
485
|
-
|
|
486
|
-
|
|
487
|
-
|
|
488
|
-
|
|
489
|
-
|
|
490
|
-
|
|
491
|
-
|
|
492
|
-
Object.assign(Object.assign(Object.assign({ id,
|
|
493
|
-
created,
|
|
494
|
-
model }, (service_tier !== undefined ? { service_tier } : {})), (system_fingerprint !== undefined
|
|
495
|
-
? { system_fingerprint }
|
|
496
|
-
: {})), (usage !== undefined ? { usage } : {})),
|
|
497
|
-
true,
|
|
498
|
-
];
|
|
1908
|
+
VectorCompletionChunk.merged = merged;
|
|
1909
|
+
})(VectorCompletionChunk = Streaming.VectorCompletionChunk || (Streaming.VectorCompletionChunk = {}));
|
|
1910
|
+
let Scores;
|
|
1911
|
+
(function (Scores) {
|
|
1912
|
+
function merged(a, b) {
|
|
1913
|
+
if (a.length === b.length) {
|
|
1914
|
+
for (let i = 0; i < a.length; i++) {
|
|
1915
|
+
if (a[i] !== b[i]) {
|
|
1916
|
+
return [b, true];
|
|
1917
|
+
}
|
|
1918
|
+
}
|
|
1919
|
+
return [a, false];
|
|
1920
|
+
}
|
|
1921
|
+
else {
|
|
1922
|
+
return [b, true];
|
|
1923
|
+
}
|
|
499
1924
|
}
|
|
500
|
-
|
|
501
|
-
|
|
1925
|
+
Scores.merged = merged;
|
|
1926
|
+
})(Scores = Streaming.Scores || (Streaming.Scores = {}));
|
|
1927
|
+
let Weights;
|
|
1928
|
+
(function (Weights) {
|
|
1929
|
+
function merged(a, b) {
|
|
1930
|
+
return Scores.merged(a, b);
|
|
502
1931
|
}
|
|
503
|
-
|
|
504
|
-
|
|
505
|
-
})(
|
|
1932
|
+
Weights.merged = merged;
|
|
1933
|
+
})(Weights = Streaming.Weights || (Streaming.Weights = {}));
|
|
1934
|
+
})(Streaming = Response.Streaming || (Response.Streaming = {}));
|
|
1935
|
+
let Unary;
|
|
1936
|
+
(function (Unary) {
|
|
1937
|
+
Unary.ChatCompletionSchema = Chat.Completions.Response.Unary.ChatCompletionSchema.extend({
|
|
1938
|
+
index: z
|
|
1939
|
+
.uint32()
|
|
1940
|
+
.describe("The index of the completion amongst all chat completions."),
|
|
1941
|
+
error: ObjectiveAIErrorSchema.optional().describe("An error encountered during the generation of this chat completion."),
|
|
1942
|
+
}).describe("A chat completion generated in the pursuit of a vector completion.");
|
|
1943
|
+
Unary.VectorCompletionSchema = z
|
|
1944
|
+
.object({
|
|
1945
|
+
id: z
|
|
1946
|
+
.string()
|
|
1947
|
+
.describe("The unique identifier of the vector completion."),
|
|
1948
|
+
completions: z
|
|
1949
|
+
.array(Unary.ChatCompletionSchema)
|
|
1950
|
+
.describe("The list of chat completions created for this vector completion."),
|
|
1951
|
+
votes: Response.VotesSchema,
|
|
1952
|
+
scores: Response.ScoresSchema,
|
|
1953
|
+
weights: Response.WeightsSchema,
|
|
1954
|
+
created: z
|
|
1955
|
+
.uint32()
|
|
1956
|
+
.describe("The Unix timestamp (in seconds) when the vector completion was created."),
|
|
1957
|
+
ensemble: Response.EnsembleSchema,
|
|
1958
|
+
object: z.literal("vector.completion"),
|
|
1959
|
+
usage: Response.UsageSchema,
|
|
1960
|
+
})
|
|
1961
|
+
.describe("A unary vector completion response.");
|
|
1962
|
+
})(Unary = Response.Unary || (Response.Unary = {}));
|
|
506
1963
|
})(Response = Completions.Response || (Completions.Response = {}));
|
|
507
|
-
async function list(openai, listOptions, options) {
|
|
508
|
-
const response = await openai.get("/score/completions", Object.assign({ query: listOptions }, options));
|
|
509
|
-
return response;
|
|
510
|
-
}
|
|
511
|
-
Completions.list = list;
|
|
512
|
-
async function publish(openai, id, options) {
|
|
513
|
-
await openai.post(`/score/completions/${id}/publish`, options);
|
|
514
|
-
}
|
|
515
|
-
Completions.publish = publish;
|
|
516
|
-
async function retrieve(openai, id, options) {
|
|
517
|
-
const response = await openai.get(`/score/completions/${id}`, options);
|
|
518
|
-
return response;
|
|
519
|
-
}
|
|
520
|
-
Completions.retrieve = retrieve;
|
|
521
|
-
async function trainingTableAdd(openai, id, correctVote, options) {
|
|
522
|
-
await openai.post(`/score/completions/${id}/training_table`, Object.assign({ body: { correct_vote: correctVote } }, options));
|
|
523
|
-
}
|
|
524
|
-
Completions.trainingTableAdd = trainingTableAdd;
|
|
525
|
-
async function trainingTableDelete(openai, id, options) {
|
|
526
|
-
await openai.delete(`/score/completions/${id}/training_table`, options);
|
|
527
|
-
}
|
|
528
|
-
Completions.trainingTableDelete = trainingTableDelete;
|
|
529
1964
|
async function create(openai, body, options) {
|
|
530
1965
|
var _a;
|
|
531
|
-
const response = await openai.post("/
|
|
1966
|
+
const response = await openai.post("/vector/completions", Object.assign({ body, stream: (_a = body.stream) !== null && _a !== void 0 ? _a : false }, options));
|
|
532
1967
|
return response;
|
|
533
1968
|
}
|
|
534
1969
|
Completions.create = create;
|
|
535
|
-
})(Completions =
|
|
536
|
-
})(
|
|
537
|
-
|
|
538
|
-
|
|
539
|
-
|
|
540
|
-
(function
|
|
1970
|
+
})(Completions = Vector.Completions || (Vector.Completions = {}));
|
|
1971
|
+
})(Vector || (Vector = {}));
|
|
1972
|
+
// Function
|
|
1973
|
+
export const FunctionSchema = z
|
|
1974
|
+
.discriminatedUnion("type", [Function.ScalarSchema, Function.VectorSchema])
|
|
1975
|
+
.describe("A function.");
|
|
1976
|
+
export var Function;
|
|
1977
|
+
(function (Function_1) {
|
|
1978
|
+
Function_1.VectorCompletionProfileSchema = z
|
|
1979
|
+
.object({
|
|
1980
|
+
ensemble: Vector.Completions.Request.EnsembleSchema,
|
|
1981
|
+
profile: Vector.Completions.Request.ProfileSchema,
|
|
1982
|
+
})
|
|
1983
|
+
.describe("A vector completion profile containing an Ensemble and array of weights.");
|
|
1984
|
+
Function_1.FunctionProfileVersionRequiredSchema = z
|
|
1985
|
+
.union([
|
|
1986
|
+
z.object({
|
|
1987
|
+
function_author: z
|
|
1988
|
+
.string()
|
|
1989
|
+
.describe("The author of the function the profile was published to."),
|
|
1990
|
+
function_id: z
|
|
1991
|
+
.string()
|
|
1992
|
+
.describe("The unique identifier of the function the profile was published to."),
|
|
1993
|
+
author: z.string().describe("The author of the profile."),
|
|
1994
|
+
id: z.string().describe("The unique identifier of the profile."),
|
|
1995
|
+
version: z.uint32().describe("The version of the profile."),
|
|
1996
|
+
}),
|
|
1997
|
+
z.lazy(() => z.array(Function_1.ProfileVersionRequiredSchema)),
|
|
1998
|
+
])
|
|
1999
|
+
.describe("A function profile where remote profiles must specify a version.");
|
|
2000
|
+
Function_1.FunctionProfileVersionOptionalSchema = z
|
|
2001
|
+
.union([
|
|
2002
|
+
z.object({
|
|
2003
|
+
function_author: z
|
|
2004
|
+
.string()
|
|
2005
|
+
.describe("The author of the function the profile was published to."),
|
|
2006
|
+
function_id: z
|
|
2007
|
+
.string()
|
|
2008
|
+
.describe("The unique identifier of the function the profile was published to."),
|
|
2009
|
+
author: z.string().describe("The author of the profile."),
|
|
2010
|
+
id: z.string().describe("The unique identifier of the profile."),
|
|
2011
|
+
version: z
|
|
2012
|
+
.uint32()
|
|
2013
|
+
.optional()
|
|
2014
|
+
.nullable()
|
|
2015
|
+
.describe("The version of the profile."),
|
|
2016
|
+
}),
|
|
2017
|
+
z.lazy(() => z.array(Function_1.ProfileVersionOptionalSchema)),
|
|
2018
|
+
])
|
|
2019
|
+
.describe("A function profile where remote profiles may omit a version.");
|
|
2020
|
+
Function_1.ProfileVersionRequiredSchema = z
|
|
2021
|
+
.union([
|
|
2022
|
+
Function_1.FunctionProfileVersionRequiredSchema,
|
|
2023
|
+
Function_1.VectorCompletionProfileSchema,
|
|
2024
|
+
])
|
|
2025
|
+
.describe("A profile where remote function profiles must specify a version.");
|
|
2026
|
+
Function_1.ProfileVersionOptionalSchema = z
|
|
2027
|
+
.union([
|
|
2028
|
+
Function_1.FunctionProfileVersionOptionalSchema,
|
|
2029
|
+
Function_1.VectorCompletionProfileSchema,
|
|
2030
|
+
])
|
|
2031
|
+
.describe("A profile where remote function profiles may omit a version.");
|
|
2032
|
+
Function_1.InputSchemaSchema = z.lazy(() => z
|
|
2033
|
+
.union([
|
|
2034
|
+
InputSchema.ObjectSchema,
|
|
2035
|
+
InputSchema.ArraySchema,
|
|
2036
|
+
InputSchema.StringSchema,
|
|
2037
|
+
InputSchema.NumberSchema,
|
|
2038
|
+
InputSchema.IntegerSchema,
|
|
2039
|
+
InputSchema.BooleanSchema,
|
|
2040
|
+
InputSchema.ImageSchema,
|
|
2041
|
+
InputSchema.AudioSchema,
|
|
2042
|
+
InputSchema.VideoSchema,
|
|
2043
|
+
InputSchema.FileSchema,
|
|
2044
|
+
])
|
|
2045
|
+
.describe("An input schema defining the structure of function inputs."));
|
|
2046
|
+
let InputSchema;
|
|
2047
|
+
(function (InputSchema) {
|
|
2048
|
+
InputSchema.ObjectSchema = z
|
|
2049
|
+
.object({
|
|
2050
|
+
type: z.literal("object"),
|
|
2051
|
+
description: z
|
|
2052
|
+
.string()
|
|
2053
|
+
.optional()
|
|
2054
|
+
.nullable()
|
|
2055
|
+
.describe("The description of the object input."),
|
|
2056
|
+
properties: z
|
|
2057
|
+
.record(z.string(), Function_1.InputSchemaSchema)
|
|
2058
|
+
.describe("The properties of the object input."),
|
|
2059
|
+
required: z
|
|
2060
|
+
.array(z.string())
|
|
2061
|
+
.optional()
|
|
2062
|
+
.nullable()
|
|
2063
|
+
.describe("The required properties of the object input."),
|
|
2064
|
+
})
|
|
2065
|
+
.describe("An object input schema.");
|
|
2066
|
+
InputSchema.ArraySchema = z
|
|
2067
|
+
.object({
|
|
2068
|
+
type: z.literal("array"),
|
|
2069
|
+
description: z
|
|
2070
|
+
.string()
|
|
2071
|
+
.optional()
|
|
2072
|
+
.nullable()
|
|
2073
|
+
.describe("The description of the array input."),
|
|
2074
|
+
minItems: z
|
|
2075
|
+
.uint32()
|
|
2076
|
+
.optional()
|
|
2077
|
+
.nullable()
|
|
2078
|
+
.describe("The minimum number of items in the array input."),
|
|
2079
|
+
maxItems: z
|
|
2080
|
+
.uint32()
|
|
2081
|
+
.optional()
|
|
2082
|
+
.nullable()
|
|
2083
|
+
.describe("The maximum number of items in the array input."),
|
|
2084
|
+
items: Function_1.InputSchemaSchema.describe("The schema of the items in the array input."),
|
|
2085
|
+
})
|
|
2086
|
+
.describe("An array input schema.");
|
|
2087
|
+
InputSchema.StringSchema = z
|
|
2088
|
+
.object({
|
|
2089
|
+
type: z.literal("string"),
|
|
2090
|
+
description: z
|
|
2091
|
+
.string()
|
|
2092
|
+
.optional()
|
|
2093
|
+
.nullable()
|
|
2094
|
+
.describe("The description of the string input."),
|
|
2095
|
+
enum: z
|
|
2096
|
+
.array(z.string())
|
|
2097
|
+
.optional()
|
|
2098
|
+
.nullable()
|
|
2099
|
+
.describe("The enumeration of allowed string values."),
|
|
2100
|
+
})
|
|
2101
|
+
.describe("A string input schema.");
|
|
2102
|
+
InputSchema.NumberSchema = z
|
|
2103
|
+
.object({
|
|
2104
|
+
type: z.literal("number"),
|
|
2105
|
+
description: z
|
|
2106
|
+
.string()
|
|
2107
|
+
.optional()
|
|
2108
|
+
.nullable()
|
|
2109
|
+
.describe("The description of the number input."),
|
|
2110
|
+
minimum: z
|
|
2111
|
+
.number()
|
|
2112
|
+
.optional()
|
|
2113
|
+
.nullable()
|
|
2114
|
+
.describe("The minimum allowed value for the number input."),
|
|
2115
|
+
maximum: z
|
|
2116
|
+
.number()
|
|
2117
|
+
.optional()
|
|
2118
|
+
.nullable()
|
|
2119
|
+
.describe("The maximum allowed value for the number input."),
|
|
2120
|
+
})
|
|
2121
|
+
.describe("A number input schema.");
|
|
2122
|
+
InputSchema.IntegerSchema = z
|
|
2123
|
+
.object({
|
|
2124
|
+
type: z.literal("integer"),
|
|
2125
|
+
description: z
|
|
2126
|
+
.string()
|
|
2127
|
+
.optional()
|
|
2128
|
+
.nullable()
|
|
2129
|
+
.describe("The description of the integer input."),
|
|
2130
|
+
minimum: z
|
|
2131
|
+
.uint32()
|
|
2132
|
+
.optional()
|
|
2133
|
+
.nullable()
|
|
2134
|
+
.describe("The minimum allowed value for the integer input."),
|
|
2135
|
+
maximum: z
|
|
2136
|
+
.uint32()
|
|
2137
|
+
.optional()
|
|
2138
|
+
.nullable()
|
|
2139
|
+
.describe("The maximum allowed value for the integer input."),
|
|
2140
|
+
})
|
|
2141
|
+
.describe("An integer input schema.");
|
|
2142
|
+
InputSchema.BooleanSchema = z
|
|
2143
|
+
.object({
|
|
2144
|
+
type: z.literal("boolean"),
|
|
2145
|
+
description: z
|
|
2146
|
+
.string()
|
|
2147
|
+
.optional()
|
|
2148
|
+
.nullable()
|
|
2149
|
+
.describe("The description of the boolean input."),
|
|
2150
|
+
})
|
|
2151
|
+
.describe("A boolean input schema.");
|
|
2152
|
+
InputSchema.ImageSchema = z
|
|
2153
|
+
.object({
|
|
2154
|
+
type: z.literal("image"),
|
|
2155
|
+
description: z
|
|
2156
|
+
.string()
|
|
2157
|
+
.optional()
|
|
2158
|
+
.nullable()
|
|
2159
|
+
.describe("The description of the image input."),
|
|
2160
|
+
})
|
|
2161
|
+
.describe("An image input schema.");
|
|
2162
|
+
InputSchema.AudioSchema = z
|
|
2163
|
+
.object({
|
|
2164
|
+
type: z.literal("audio"),
|
|
2165
|
+
description: z
|
|
2166
|
+
.string()
|
|
2167
|
+
.optional()
|
|
2168
|
+
.nullable()
|
|
2169
|
+
.describe("The description of the audio input."),
|
|
2170
|
+
})
|
|
2171
|
+
.describe("An audio input schema.");
|
|
2172
|
+
InputSchema.VideoSchema = z
|
|
2173
|
+
.object({
|
|
2174
|
+
type: z.literal("video"),
|
|
2175
|
+
description: z
|
|
2176
|
+
.string()
|
|
2177
|
+
.optional()
|
|
2178
|
+
.nullable()
|
|
2179
|
+
.describe("The description of the video input."),
|
|
2180
|
+
})
|
|
2181
|
+
.describe("A video input schema.");
|
|
2182
|
+
InputSchema.FileSchema = z
|
|
2183
|
+
.object({
|
|
2184
|
+
type: z.literal("file"),
|
|
2185
|
+
description: z
|
|
2186
|
+
.string()
|
|
2187
|
+
.optional()
|
|
2188
|
+
.nullable()
|
|
2189
|
+
.describe("The description of the file input."),
|
|
2190
|
+
})
|
|
2191
|
+
.describe("A file input schema.");
|
|
2192
|
+
})(InputSchema = Function_1.InputSchema || (Function_1.InputSchema = {}));
|
|
2193
|
+
Function_1.InputSchema_ = z
|
|
2194
|
+
.lazy(() => z.union([
|
|
2195
|
+
Message.RichContent.PartSchema,
|
|
2196
|
+
z.record(z.string(), Function_1.InputSchema_),
|
|
2197
|
+
z.array(Function_1.InputSchema_),
|
|
2198
|
+
z.string(),
|
|
2199
|
+
z.number(),
|
|
2200
|
+
z.boolean(),
|
|
2201
|
+
]))
|
|
2202
|
+
.describe("The input provided to the function.");
|
|
2203
|
+
Function_1.InputExpressionSchema = z.lazy(() => z
|
|
2204
|
+
.union([
|
|
2205
|
+
Message.RichContent.PartSchema,
|
|
2206
|
+
z.record(z.string(), Function_1.InputExpressionSchema),
|
|
2207
|
+
z.array(Function_1.InputExpressionSchema),
|
|
2208
|
+
z.string(),
|
|
2209
|
+
z.number(),
|
|
2210
|
+
z.boolean(),
|
|
2211
|
+
ExpressionSchema.describe("An expression which evaluates to an input."),
|
|
2212
|
+
])
|
|
2213
|
+
.describe(Function_1.InputSchema_.description));
|
|
2214
|
+
Function_1.InputMapsExpressionSchema = z
|
|
2215
|
+
.union([
|
|
2216
|
+
ExpressionSchema.describe("An expression which evaluates to a 2D array of Inputs."),
|
|
2217
|
+
z
|
|
2218
|
+
.array(ExpressionSchema.describe("An expression which evaluates to a 1D array of Inputs."))
|
|
2219
|
+
.describe("A list of expressions which each evaluate to a 1D array of Inputs."),
|
|
2220
|
+
])
|
|
2221
|
+
.describe("An expression or list of expressions which evaluate to a 2D array of Inputs. Each sub-array will be fed into Tasks which specify an index of this input map.");
|
|
2222
|
+
Function_1.TaskExpressionSchema = z
|
|
2223
|
+
.discriminatedUnion("type", [
|
|
2224
|
+
TaskExpression.ScalarFunctionSchema,
|
|
2225
|
+
TaskExpression.VectorFunctionSchema,
|
|
2226
|
+
TaskExpression.VectorCompletionSchema,
|
|
2227
|
+
])
|
|
2228
|
+
.describe("A task to be executed as part of the function. Will first be compiled using the parent function's input. May be skipped or mapped.");
|
|
2229
|
+
let TaskExpression;
|
|
2230
|
+
(function (TaskExpression) {
|
|
2231
|
+
TaskExpression.SkipSchema = ExpressionSchema.describe("An expression which evaluates to a boolean indicating whether to skip this task.");
|
|
2232
|
+
TaskExpression.MapSchema = z
|
|
2233
|
+
.uint32()
|
|
2234
|
+
.describe("If present, indicates that this task should be ran once for each entry in the specified input map (input map is a 2D array indexed by this value).");
|
|
2235
|
+
TaskExpression.ScalarFunctionSchema = z
|
|
2236
|
+
.object({
|
|
2237
|
+
type: z.literal("scalar.function"),
|
|
2238
|
+
author: z
|
|
2239
|
+
.string()
|
|
2240
|
+
.describe("The author of the remote published scalar function."),
|
|
2241
|
+
id: z
|
|
2242
|
+
.string()
|
|
2243
|
+
.describe("The unique identifier of the remote published scalar function."),
|
|
2244
|
+
version: z
|
|
2245
|
+
.uint32()
|
|
2246
|
+
.describe("The version of the remote published scalar function."),
|
|
2247
|
+
skip: TaskExpression.SkipSchema.optional().nullable(),
|
|
2248
|
+
map: TaskExpression.MapSchema.optional().nullable(),
|
|
2249
|
+
input: Function_1.InputExpressionSchema,
|
|
2250
|
+
})
|
|
2251
|
+
.describe("A remote published scalar function task.");
|
|
2252
|
+
TaskExpression.VectorFunctionSchema = z
|
|
2253
|
+
.object({
|
|
2254
|
+
type: z.literal("vector.function"),
|
|
2255
|
+
author: z
|
|
2256
|
+
.string()
|
|
2257
|
+
.describe("The author of the remote published vector function."),
|
|
2258
|
+
id: z
|
|
2259
|
+
.string()
|
|
2260
|
+
.describe("The unique identifier of the remote published vector function."),
|
|
2261
|
+
version: z
|
|
2262
|
+
.uint32()
|
|
2263
|
+
.describe("The version of the remote published vector function."),
|
|
2264
|
+
skip: TaskExpression.SkipSchema.optional().nullable(),
|
|
2265
|
+
map: TaskExpression.MapSchema.optional().nullable(),
|
|
2266
|
+
input: Function_1.InputExpressionSchema,
|
|
2267
|
+
})
|
|
2268
|
+
.describe("A remote published vector function task.");
|
|
2269
|
+
TaskExpression.VectorCompletionSchema = z
|
|
2270
|
+
.object({
|
|
2271
|
+
type: z.literal("vector.completion"),
|
|
2272
|
+
skip: TaskExpression.SkipSchema.optional().nullable(),
|
|
2273
|
+
map: TaskExpression.MapSchema.optional().nullable(),
|
|
2274
|
+
messages: MessagesExpressionSchema,
|
|
2275
|
+
tools: ToolsExpressionSchema.optional()
|
|
2276
|
+
.nullable()
|
|
2277
|
+
.describe(`${ToolsExpressionSchema.description} These are readonly and will only be useful for explaining prior tool calls or otherwise influencing behavior.`),
|
|
2278
|
+
responses: VectorResponsesExpressionSchema,
|
|
2279
|
+
})
|
|
2280
|
+
.describe("A vector completion task.");
|
|
2281
|
+
})(TaskExpression = Function_1.TaskExpression || (Function_1.TaskExpression = {}));
|
|
2282
|
+
Function_1.TaskExpressionsSchema = z
|
|
2283
|
+
.array(Function_1.TaskExpressionSchema)
|
|
2284
|
+
.describe("The list of tasks to be executed as part of the function.");
|
|
2285
|
+
Function_1.ScalarSchema = z
|
|
2286
|
+
.object({
|
|
2287
|
+
type: z.literal("scalar.function"),
|
|
2288
|
+
author: z.string().describe("The author of the scalar function."),
|
|
2289
|
+
id: z.string().describe("The unique identifier of the scalar function."),
|
|
2290
|
+
version: z.uint32().describe("The version of the scalar function."),
|
|
2291
|
+
description: z
|
|
2292
|
+
.string()
|
|
2293
|
+
.describe("The description of the scalar function."),
|
|
2294
|
+
changelog: z
|
|
2295
|
+
.string()
|
|
2296
|
+
.optional()
|
|
2297
|
+
.nullable()
|
|
2298
|
+
.describe("When present, describes changes from the previous version or versions."),
|
|
2299
|
+
input_schema: Function_1.InputSchemaSchema,
|
|
2300
|
+
input_maps: Function_1.InputMapsExpressionSchema.optional().nullable(),
|
|
2301
|
+
tasks: Function_1.TaskExpressionsSchema,
|
|
2302
|
+
output: ExpressionSchema.describe("An expression which evaluates to a single number. This is the output of the scalar function. Will be provided with the outputs of all tasks."),
|
|
2303
|
+
})
|
|
2304
|
+
.describe("A scalar function.");
|
|
2305
|
+
Function_1.VectorSchema = z
|
|
2306
|
+
.object({
|
|
2307
|
+
type: z.literal("vector.function"),
|
|
2308
|
+
author: z.string().describe("The author of the vector function."),
|
|
2309
|
+
id: z.string().describe("The unique identifier of the vector function."),
|
|
2310
|
+
version: z.uint32().describe("The version of the vector function."),
|
|
2311
|
+
description: z
|
|
2312
|
+
.string()
|
|
2313
|
+
.describe("The description of the vector function."),
|
|
2314
|
+
changelog: z
|
|
2315
|
+
.string()
|
|
2316
|
+
.optional()
|
|
2317
|
+
.nullable()
|
|
2318
|
+
.describe("When present, describes changes from the previous version or versions."),
|
|
2319
|
+
input_schema: Function_1.InputSchemaSchema,
|
|
2320
|
+
input_maps: Function_1.InputMapsExpressionSchema.optional().nullable(),
|
|
2321
|
+
tasks: Function_1.TaskExpressionsSchema,
|
|
2322
|
+
output: ExpressionSchema.describe("An expressions which evaluates to an array of numbers. This is the output of the vector function. Will be provided with the outputs of all tasks."),
|
|
2323
|
+
output_length: z
|
|
2324
|
+
.union([
|
|
2325
|
+
z.uint32().describe("The fixed length of the output vector."),
|
|
2326
|
+
ExpressionSchema.describe("An expression which evaluates to the length of the output vector. Will only be provided with the function input. The output length must be determinable from the input alone."),
|
|
2327
|
+
])
|
|
2328
|
+
.describe("The length of the output vector."),
|
|
2329
|
+
})
|
|
2330
|
+
.describe("A vector function.");
|
|
2331
|
+
let Executions;
|
|
2332
|
+
(function (Executions) {
|
|
2333
|
+
let Request;
|
|
2334
|
+
(function (Request) {
|
|
2335
|
+
Request.FunctionExecutionParamsBaseSchema = z
|
|
2336
|
+
.object({
|
|
2337
|
+
retry_token: z
|
|
2338
|
+
.string()
|
|
2339
|
+
.optional()
|
|
2340
|
+
.nullable()
|
|
2341
|
+
.describe("The retry token provided by a previous incomplete or failed function execution."),
|
|
2342
|
+
input: Function_1.InputSchema_,
|
|
2343
|
+
provider: Chat.Completions.Request.ProviderSchema.optional().nullable(),
|
|
2344
|
+
seed: Chat.Completions.Request.SeedSchema.optional().nullable(),
|
|
2345
|
+
backoff_max_elapsed_time: Chat.Completions.Request.BackoffMaxElapsedTimeSchema.optional().nullable(),
|
|
2346
|
+
first_chunk_timeout: Chat.Completions.Request.FirstChunkTimeoutSchema.optional().nullable(),
|
|
2347
|
+
other_chunk_timeout: Chat.Completions.Request.OtherChunkTimeoutSchema.optional().nullable(),
|
|
2348
|
+
})
|
|
2349
|
+
.describe("Base parameters for executing a function.");
|
|
2350
|
+
// Execute Inline Function
|
|
2351
|
+
Request.FunctionExecutionParamsExecuteInlineBaseSchema = Request.FunctionExecutionParamsBaseSchema.extend({
|
|
2352
|
+
function: FunctionSchema,
|
|
2353
|
+
profile: Function_1.FunctionProfileVersionOptionalSchema,
|
|
2354
|
+
}).describe("Base parameters for executing an inline function.");
|
|
2355
|
+
Request.FunctionExecutionParamsExecuteInlineStreamingSchema = Request.FunctionExecutionParamsExecuteInlineBaseSchema.extend({
|
|
2356
|
+
stream: Chat.Completions.Request.StreamTrueSchema,
|
|
2357
|
+
}).describe("Parameters for executing an inline function and streaming the response.");
|
|
2358
|
+
Request.FunctionExecutionParamsExecuteInlineNonStreamingSchema = Request.FunctionExecutionParamsExecuteInlineBaseSchema.extend({
|
|
2359
|
+
stream: Chat.Completions.Request.StreamFalseSchema.optional().nullable(),
|
|
2360
|
+
}).describe("Parameters for executing an inline function with a unary response.");
|
|
2361
|
+
Request.FunctionExecutionParamsExecuteInlineSchema = z
|
|
2362
|
+
.union([
|
|
2363
|
+
Request.FunctionExecutionParamsExecuteInlineStreamingSchema,
|
|
2364
|
+
Request.FunctionExecutionParamsExecuteInlineNonStreamingSchema,
|
|
2365
|
+
])
|
|
2366
|
+
.describe("Parameters for executing an inline function.");
|
|
2367
|
+
// Execute Published Function
|
|
2368
|
+
Request.FunctionExecutionParamsExecuteBaseSchema = Request.FunctionExecutionParamsBaseSchema.extend({
|
|
2369
|
+
profile: Function_1.FunctionProfileVersionOptionalSchema.optional().nullable(),
|
|
2370
|
+
}).describe("Base parameters for executing a remote published function.");
|
|
2371
|
+
Request.FunctionExecutionParamsExecuteStreamingSchema = Request.FunctionExecutionParamsExecuteBaseSchema.extend({
|
|
2372
|
+
stream: Chat.Completions.Request.StreamTrueSchema,
|
|
2373
|
+
}).describe("Parameters for executing a remote published function and streaming the response.");
|
|
2374
|
+
Request.FunctionExecutionParamsExecuteNonStreamingSchema = Request.FunctionExecutionParamsExecuteBaseSchema.extend({
|
|
2375
|
+
stream: Chat.Completions.Request.StreamFalseSchema.optional().nullable(),
|
|
2376
|
+
}).describe("Parameters for executing a remote published function with a unary response.");
|
|
2377
|
+
Request.FunctionExecutionParamsExecuteSchema = z
|
|
2378
|
+
.union([
|
|
2379
|
+
Request.FunctionExecutionParamsExecuteStreamingSchema,
|
|
2380
|
+
Request.FunctionExecutionParamsExecuteNonStreamingSchema,
|
|
2381
|
+
])
|
|
2382
|
+
.describe("Parameters for executing a remote published function.");
|
|
2383
|
+
// Publish Scalar Function
|
|
2384
|
+
Request.FunctionExecutionParamsPublishScalarFunctionBaseSchema = Request.FunctionExecutionParamsBaseSchema.extend({
|
|
2385
|
+
function: Function_1.ScalarSchema,
|
|
2386
|
+
publish_function: z
|
|
2387
|
+
.object({
|
|
2388
|
+
description: z
|
|
2389
|
+
.string()
|
|
2390
|
+
.describe("The description of the published scalar function."),
|
|
2391
|
+
changelog: z
|
|
2392
|
+
.string()
|
|
2393
|
+
.optional()
|
|
2394
|
+
.nullable()
|
|
2395
|
+
.describe("When present, describes changes from the previous version or versions."),
|
|
2396
|
+
input_schema: Function_1.InputSchemaSchema,
|
|
2397
|
+
})
|
|
2398
|
+
.describe("Details about the scalar function to be published."),
|
|
2399
|
+
profile: Function_1.FunctionProfileVersionRequiredSchema,
|
|
2400
|
+
publish_profile: z
|
|
2401
|
+
.object({
|
|
2402
|
+
id: z
|
|
2403
|
+
.literal("default")
|
|
2404
|
+
.describe('The identifier of the profile to publish. Must be "default" when publishing a function.'),
|
|
2405
|
+
version: z
|
|
2406
|
+
.uint32()
|
|
2407
|
+
.describe("The version of the profile to publish. Must match the function's version."),
|
|
2408
|
+
description: z
|
|
2409
|
+
.string()
|
|
2410
|
+
.describe("The description of the published profile."),
|
|
2411
|
+
changelog: z
|
|
2412
|
+
.string()
|
|
2413
|
+
.optional()
|
|
2414
|
+
.nullable()
|
|
2415
|
+
.describe("When present, describes changes from the previous version or versions."),
|
|
2416
|
+
})
|
|
2417
|
+
.describe("Details about the profile to be published."),
|
|
2418
|
+
}).describe("Base parameters for executing and publishing an inline scalar function.");
|
|
2419
|
+
Request.FunctionExecutionParamsPublishScalarFunctionStreamingSchema = Request.FunctionExecutionParamsPublishScalarFunctionBaseSchema.extend({
|
|
2420
|
+
stream: Chat.Completions.Request.StreamTrueSchema,
|
|
2421
|
+
}).describe("Parameters for executing and publishing an inline scalar function and streaming the response.");
|
|
2422
|
+
Request.FunctionExecutionParamsPublishScalarFunctionNonStreamingSchema = Request.FunctionExecutionParamsPublishScalarFunctionBaseSchema.extend({
|
|
2423
|
+
stream: Chat.Completions.Request.StreamFalseSchema.optional().nullable(),
|
|
2424
|
+
}).describe("Parameters for executing and publishing an inline scalar function with a unary response.");
|
|
2425
|
+
Request.FunctionExecutionParamsPublishScalarFunctionSchema = z
|
|
2426
|
+
.union([
|
|
2427
|
+
Request.FunctionExecutionParamsPublishScalarFunctionStreamingSchema,
|
|
2428
|
+
Request.FunctionExecutionParamsPublishScalarFunctionNonStreamingSchema,
|
|
2429
|
+
])
|
|
2430
|
+
.describe("Parameters for executing and publishing an inline scalar function.");
|
|
2431
|
+
// Publish Vector Function
|
|
2432
|
+
Request.FunctionExecutionParamsPublishVectorFunctionBaseSchema = Request.FunctionExecutionParamsBaseSchema.extend({
|
|
2433
|
+
function: Function_1.VectorSchema,
|
|
2434
|
+
publish_function: z
|
|
2435
|
+
.object({
|
|
2436
|
+
description: z
|
|
2437
|
+
.string()
|
|
2438
|
+
.describe("The description of the published vector function."),
|
|
2439
|
+
changelog: z
|
|
2440
|
+
.string()
|
|
2441
|
+
.optional()
|
|
2442
|
+
.nullable()
|
|
2443
|
+
.describe("When present, describes changes from the previous version or versions."),
|
|
2444
|
+
input_schema: Function_1.InputSchemaSchema,
|
|
2445
|
+
output_length: z
|
|
2446
|
+
.union([
|
|
2447
|
+
z.uint32().describe("The fixed length of the output vector."),
|
|
2448
|
+
ExpressionSchema.describe("An expression which evaluates to the length of the output vector. Will only be provided with the function input. The output length must be determinable from the input alone."),
|
|
2449
|
+
])
|
|
2450
|
+
.describe("The length of the output vector."),
|
|
2451
|
+
})
|
|
2452
|
+
.describe("Details about the vector function to be published."),
|
|
2453
|
+
profile: Function_1.FunctionProfileVersionRequiredSchema,
|
|
2454
|
+
publish_profile: z
|
|
2455
|
+
.object({
|
|
2456
|
+
id: z
|
|
2457
|
+
.literal("default")
|
|
2458
|
+
.describe('The identifier of the profile to publish. Must be "default" when publishing a function.'),
|
|
2459
|
+
version: z
|
|
2460
|
+
.uint32()
|
|
2461
|
+
.describe("The version of the profile to publish. Must match the function's version."),
|
|
2462
|
+
description: z
|
|
2463
|
+
.string()
|
|
2464
|
+
.describe("The description of the published profile."),
|
|
2465
|
+
changelog: z
|
|
2466
|
+
.string()
|
|
2467
|
+
.optional()
|
|
2468
|
+
.nullable()
|
|
2469
|
+
.describe("When present, describes changes from the previous version or versions."),
|
|
2470
|
+
})
|
|
2471
|
+
.describe("Details about the profile to be published."),
|
|
2472
|
+
}).describe("Base parameters for executing and publishing an inline vector function.");
|
|
2473
|
+
Request.FunctionExecutionParamsPublishVectorFunctionStreamingSchema = Request.FunctionExecutionParamsPublishVectorFunctionBaseSchema.extend({
|
|
2474
|
+
stream: Chat.Completions.Request.StreamTrueSchema,
|
|
2475
|
+
}).describe("Parameters for executing and publishing an inline vector function and streaming the response.");
|
|
2476
|
+
Request.FunctionExecutionParamsPublishVectorFunctionNonStreamingSchema = Request.FunctionExecutionParamsPublishVectorFunctionBaseSchema.extend({
|
|
2477
|
+
stream: Chat.Completions.Request.StreamFalseSchema.optional().nullable(),
|
|
2478
|
+
}).describe("Parameters for executing and publishing an inline vector function with a unary response.");
|
|
2479
|
+
Request.FunctionExecutionParamsPublishVectorFunctionSchema = z
|
|
2480
|
+
.union([
|
|
2481
|
+
Request.FunctionExecutionParamsPublishVectorFunctionStreamingSchema,
|
|
2482
|
+
Request.FunctionExecutionParamsPublishVectorFunctionNonStreamingSchema,
|
|
2483
|
+
])
|
|
2484
|
+
.describe("Parameters for executing and publishing an inline vector function.");
|
|
2485
|
+
// Publish Function
|
|
2486
|
+
Request.FunctionExecutionParamsPublishFunctionStreamingSchema = z
|
|
2487
|
+
.union([
|
|
2488
|
+
Request.FunctionExecutionParamsPublishScalarFunctionStreamingSchema,
|
|
2489
|
+
Request.FunctionExecutionParamsPublishVectorFunctionStreamingSchema,
|
|
2490
|
+
])
|
|
2491
|
+
.describe("Parameters for executing and publishing an inline function and streaming the response.");
|
|
2492
|
+
Request.FunctionExecutionParamsPublishFunctionNonStreamingSchema = z
|
|
2493
|
+
.union([
|
|
2494
|
+
Request.FunctionExecutionParamsPublishScalarFunctionNonStreamingSchema,
|
|
2495
|
+
Request.FunctionExecutionParamsPublishVectorFunctionNonStreamingSchema,
|
|
2496
|
+
])
|
|
2497
|
+
.describe("Parameters for executing and publishing an inline function with a unary response.");
|
|
2498
|
+
Request.FunctionExecutionParamsPublishFunctionSchema = z
|
|
2499
|
+
.union([
|
|
2500
|
+
Request.FunctionExecutionParamsPublishScalarFunctionSchema,
|
|
2501
|
+
Request.FunctionExecutionParamsPublishVectorFunctionSchema,
|
|
2502
|
+
])
|
|
2503
|
+
.describe("Parameters for executing and publishing an inline function.");
|
|
2504
|
+
// Publish Profile
|
|
2505
|
+
Request.FunctionExecutionParamsPublishProfileBaseSchema = Request.FunctionExecutionParamsBaseSchema.extend({
|
|
2506
|
+
profile: z
|
|
2507
|
+
.array(Function_1.ProfileVersionRequiredSchema)
|
|
2508
|
+
.describe("The profile to publish."),
|
|
2509
|
+
publish_profile: z
|
|
2510
|
+
.object({
|
|
2511
|
+
id: z
|
|
2512
|
+
.string()
|
|
2513
|
+
.describe("The unique identifier of the profile to publish."),
|
|
2514
|
+
version: z
|
|
2515
|
+
.uint32()
|
|
2516
|
+
.describe("The version of the profile to publish."),
|
|
2517
|
+
description: z
|
|
2518
|
+
.string()
|
|
2519
|
+
.describe("The description of the published profile."),
|
|
2520
|
+
changelog: z
|
|
2521
|
+
.string()
|
|
2522
|
+
.optional()
|
|
2523
|
+
.nullable()
|
|
2524
|
+
.describe("When present, describes changes from the previous version or versions."),
|
|
2525
|
+
})
|
|
2526
|
+
.describe("Details about the profile to be published."),
|
|
2527
|
+
}).describe("Base parameters for executing a remote published function and publishing a profile.");
|
|
2528
|
+
Request.FunctionExecutionParamsPublishProfileStreamingSchema = Request.FunctionExecutionParamsPublishProfileBaseSchema.extend({
|
|
2529
|
+
stream: Chat.Completions.Request.StreamTrueSchema,
|
|
2530
|
+
}).describe("Parameters for executing a remote published function, publishing a profile, and streaming the response.");
|
|
2531
|
+
Request.FunctionExecutionParamsPublishProfileNonStreamingSchema = Request.FunctionExecutionParamsPublishProfileBaseSchema.extend({
|
|
2532
|
+
stream: Chat.Completions.Request.StreamFalseSchema.optional().nullable(),
|
|
2533
|
+
}).describe("Parameters for executing a remote published function and publishing a profile with a unary response.");
|
|
2534
|
+
Request.FunctionExecutionParamsPublishProfileSchema = z
|
|
2535
|
+
.union([
|
|
2536
|
+
Request.FunctionExecutionParamsPublishProfileStreamingSchema,
|
|
2537
|
+
Request.FunctionExecutionParamsPublishProfileNonStreamingSchema,
|
|
2538
|
+
])
|
|
2539
|
+
.describe("Parameters for executing a remote published function and publishing a profile.");
|
|
2540
|
+
})(Request = Executions.Request || (Executions.Request = {}));
|
|
541
2541
|
let Response;
|
|
542
2542
|
(function (Response) {
|
|
2543
|
+
let Task;
|
|
2544
|
+
(function (Task) {
|
|
2545
|
+
Task.IndexSchema = z
|
|
2546
|
+
.uint32()
|
|
2547
|
+
.describe("The index of the task in the sequence of tasks.");
|
|
2548
|
+
Task.TaskIndexSchema = z
|
|
2549
|
+
.uint32()
|
|
2550
|
+
.describe("The index of the task amongst all mapped and non-skipped compiled tasks. Used internally.");
|
|
2551
|
+
Task.TaskPathSchema = z
|
|
2552
|
+
.array(z.uint32())
|
|
2553
|
+
.describe("The path of this task which may be used to navigate which nested task this is amongst the root functions tasks and sub-tasks.");
|
|
2554
|
+
})(Task = Response.Task || (Response.Task = {}));
|
|
543
2555
|
let Streaming;
|
|
544
2556
|
(function (Streaming) {
|
|
545
|
-
|
|
546
|
-
|
|
2557
|
+
Streaming.TaskChunkSchema = z
|
|
2558
|
+
.union([TaskChunk.FunctionSchema, TaskChunk.VectorCompletionSchema])
|
|
2559
|
+
.describe("A chunk of a task execution.");
|
|
2560
|
+
let TaskChunk;
|
|
2561
|
+
(function (TaskChunk) {
|
|
2562
|
+
function merged(a, b) {
|
|
2563
|
+
if ("scores" in a) {
|
|
2564
|
+
return VectorCompletion.merged(a, b);
|
|
2565
|
+
}
|
|
2566
|
+
else {
|
|
2567
|
+
return Function.merged(a, b);
|
|
2568
|
+
}
|
|
2569
|
+
}
|
|
2570
|
+
TaskChunk.merged = merged;
|
|
2571
|
+
function mergedList(a, b) {
|
|
2572
|
+
let merged = undefined;
|
|
2573
|
+
for (const chunk of b) {
|
|
2574
|
+
const existingIndex = a.findIndex(({ index }) => index === chunk.index);
|
|
2575
|
+
if (existingIndex === -1) {
|
|
2576
|
+
if (merged === undefined) {
|
|
2577
|
+
merged = [...a, chunk];
|
|
2578
|
+
}
|
|
2579
|
+
else {
|
|
2580
|
+
merged.push(chunk);
|
|
2581
|
+
}
|
|
2582
|
+
}
|
|
2583
|
+
else {
|
|
2584
|
+
const [mergedChunk, chunkChanged] = TaskChunk.merged(a[existingIndex], chunk);
|
|
2585
|
+
if (chunkChanged) {
|
|
2586
|
+
if (merged === undefined) {
|
|
2587
|
+
merged = [...a];
|
|
2588
|
+
}
|
|
2589
|
+
merged[existingIndex] = mergedChunk;
|
|
2590
|
+
}
|
|
2591
|
+
}
|
|
2592
|
+
}
|
|
2593
|
+
return merged ? [merged, true] : [a, false];
|
|
2594
|
+
}
|
|
2595
|
+
TaskChunk.mergedList = mergedList;
|
|
2596
|
+
TaskChunk.FunctionSchema = z
|
|
2597
|
+
.lazy(() => Streaming.FunctionExecutionChunkSchema.extend({
|
|
2598
|
+
index: Task.IndexSchema,
|
|
2599
|
+
task_index: Task.TaskIndexSchema,
|
|
2600
|
+
task_path: Task.TaskPathSchema,
|
|
2601
|
+
}))
|
|
2602
|
+
.describe("A chunk of a function execution task.");
|
|
2603
|
+
let Function;
|
|
2604
|
+
(function (Function) {
|
|
2605
|
+
function merged(a, b) {
|
|
2606
|
+
const index = a.index;
|
|
2607
|
+
const task_index = a.task_index;
|
|
2608
|
+
const task_path = a.task_path;
|
|
2609
|
+
const [base, baseChanged] = FunctionExecutionChunk.merged(a, b);
|
|
2610
|
+
if (baseChanged) {
|
|
2611
|
+
return [
|
|
2612
|
+
Object.assign({ index,
|
|
2613
|
+
task_index,
|
|
2614
|
+
task_path }, base),
|
|
2615
|
+
true,
|
|
2616
|
+
];
|
|
2617
|
+
}
|
|
2618
|
+
else {
|
|
2619
|
+
return [a, false];
|
|
2620
|
+
}
|
|
2621
|
+
}
|
|
2622
|
+
Function.merged = merged;
|
|
2623
|
+
})(Function = TaskChunk.Function || (TaskChunk.Function = {}));
|
|
2624
|
+
TaskChunk.VectorCompletionSchema = Vector.Completions.Response.Streaming.VectorCompletionChunkSchema.extend({
|
|
2625
|
+
index: Task.IndexSchema,
|
|
2626
|
+
task_index: Task.TaskIndexSchema,
|
|
2627
|
+
task_path: Task.TaskPathSchema,
|
|
2628
|
+
error: ObjectiveAIErrorSchema.optional().describe("When present, indicates that an error occurred during the vector completion task."),
|
|
2629
|
+
}).describe("A chunk of a vector completion task.");
|
|
2630
|
+
let VectorCompletion;
|
|
2631
|
+
(function (VectorCompletion) {
|
|
2632
|
+
function merged(a, b) {
|
|
2633
|
+
const index = a.index;
|
|
2634
|
+
const task_index = a.task_index;
|
|
2635
|
+
const task_path = a.task_path;
|
|
2636
|
+
const [base, baseChanged] = Vector.Completions.Response.Streaming.VectorCompletionChunk.merged(a, b);
|
|
2637
|
+
const [error, errorChanged] = merge(a.error, b.error);
|
|
2638
|
+
if (baseChanged || errorChanged) {
|
|
2639
|
+
return [
|
|
2640
|
+
Object.assign(Object.assign({ index,
|
|
2641
|
+
task_index,
|
|
2642
|
+
task_path }, base), (error !== undefined ? { error } : {})),
|
|
2643
|
+
true,
|
|
2644
|
+
];
|
|
2645
|
+
}
|
|
2646
|
+
else {
|
|
2647
|
+
return [a, false];
|
|
2648
|
+
}
|
|
2649
|
+
}
|
|
2650
|
+
VectorCompletion.merged = merged;
|
|
2651
|
+
})(VectorCompletion = TaskChunk.VectorCompletion || (TaskChunk.VectorCompletion = {}));
|
|
2652
|
+
})(TaskChunk = Streaming.TaskChunk || (Streaming.TaskChunk = {}));
|
|
2653
|
+
Streaming.FunctionExecutionChunkSchema = z
|
|
2654
|
+
.object({
|
|
2655
|
+
id: z
|
|
2656
|
+
.string()
|
|
2657
|
+
.describe("The unique identifier of the function execution."),
|
|
2658
|
+
tasks: z
|
|
2659
|
+
.array(Streaming.TaskChunkSchema)
|
|
2660
|
+
.describe("The tasks executed as part of the function execution."),
|
|
2661
|
+
tasks_errors: z
|
|
2662
|
+
.boolean()
|
|
2663
|
+
.optional()
|
|
2664
|
+
.describe("When true, indicates that one or more tasks encountered errors during execution."),
|
|
2665
|
+
output: z
|
|
2666
|
+
.union([
|
|
2667
|
+
z
|
|
2668
|
+
.number()
|
|
2669
|
+
.describe("The scalar output of the function execution."),
|
|
2670
|
+
z
|
|
2671
|
+
.array(z.number())
|
|
2672
|
+
.describe("The vector output of the function execution."),
|
|
2673
|
+
JsonValueSchema.describe("The erroneous output of the function execution."),
|
|
2674
|
+
])
|
|
2675
|
+
.optional()
|
|
2676
|
+
.describe("The output of the function execution."),
|
|
2677
|
+
error: ObjectiveAIErrorSchema.optional().describe("When present, indicates that an error occurred during the function execution."),
|
|
2678
|
+
retry_token: z
|
|
2679
|
+
.string()
|
|
2680
|
+
.optional()
|
|
2681
|
+
.describe("A token which may be used to retry the function execution."),
|
|
2682
|
+
function_published: z
|
|
2683
|
+
.boolean()
|
|
2684
|
+
.optional()
|
|
2685
|
+
.describe("When true, indicates that a function was published as part of this execution."),
|
|
2686
|
+
profile_published: z
|
|
2687
|
+
.boolean()
|
|
2688
|
+
.optional()
|
|
2689
|
+
.describe("When true, indicates that a profile was published as part of this execution."),
|
|
2690
|
+
created: z
|
|
2691
|
+
.uint32()
|
|
2692
|
+
.describe("The UNIX timestamp (in seconds) when the function execution chunk was created."),
|
|
2693
|
+
function: z
|
|
2694
|
+
.string()
|
|
2695
|
+
.nullable()
|
|
2696
|
+
.describe("The unique identifier of the function being executed."),
|
|
2697
|
+
profile: z
|
|
2698
|
+
.string()
|
|
2699
|
+
.nullable()
|
|
2700
|
+
.describe("The unique identifier of the profile being used."),
|
|
2701
|
+
object: z
|
|
2702
|
+
.enum([
|
|
2703
|
+
"scalar.function.execution.chunk",
|
|
2704
|
+
"vector.function.execution.chunk",
|
|
2705
|
+
])
|
|
2706
|
+
.describe("The object type."),
|
|
2707
|
+
usage: Vector.Completions.Response.UsageSchema.optional(),
|
|
2708
|
+
})
|
|
2709
|
+
.describe("A chunk of a function execution.");
|
|
2710
|
+
let FunctionExecutionChunk;
|
|
2711
|
+
(function (FunctionExecutionChunk) {
|
|
547
2712
|
function merged(a, b) {
|
|
548
2713
|
const id = a.id;
|
|
549
|
-
const [
|
|
2714
|
+
const [tasks, tasksChanged] = TaskChunk.mergedList(a.tasks, b.tasks);
|
|
2715
|
+
const [tasks_errors, tasks_errorsChanged] = merge(a.tasks_errors, b.tasks_errors);
|
|
2716
|
+
const [output, outputChanged] = merge(a.output, b.output);
|
|
2717
|
+
const [error, errorChanged] = merge(a.error, b.error);
|
|
2718
|
+
const [retry_token, retry_tokenChanged] = merge(a.retry_token, b.retry_token);
|
|
2719
|
+
const [function_published, function_publishedChanged] = merge(a.function_published, b.function_published);
|
|
2720
|
+
const [profile_published, profile_publishedChanged] = merge(a.profile_published, b.profile_published);
|
|
550
2721
|
const created = a.created;
|
|
551
|
-
const
|
|
2722
|
+
const function_ = a.function;
|
|
2723
|
+
const profile = a.profile;
|
|
552
2724
|
const object = a.object;
|
|
553
|
-
const [usage, usageChanged] = merge(a.usage, b.usage
|
|
554
|
-
if (
|
|
2725
|
+
const [usage, usageChanged] = merge(a.usage, b.usage);
|
|
2726
|
+
if (tasksChanged ||
|
|
2727
|
+
tasks_errorsChanged ||
|
|
2728
|
+
outputChanged ||
|
|
2729
|
+
errorChanged ||
|
|
2730
|
+
retry_tokenChanged ||
|
|
2731
|
+
function_publishedChanged ||
|
|
2732
|
+
profile_publishedChanged ||
|
|
2733
|
+
usageChanged) {
|
|
555
2734
|
return [
|
|
556
|
-
Object.assign({ id,
|
|
557
|
-
|
|
558
|
-
|
|
559
|
-
|
|
560
|
-
|
|
2735
|
+
Object.assign(Object.assign(Object.assign(Object.assign(Object.assign(Object.assign(Object.assign(Object.assign({ id,
|
|
2736
|
+
tasks }, (tasks_errors !== undefined ? { tasks_errors } : {})), (output !== undefined ? { output } : {})), (error !== undefined ? { error } : {})), (retry_token !== undefined ? { retry_token } : {})), (function_published !== undefined
|
|
2737
|
+
? { function_published }
|
|
2738
|
+
: {})), (profile_published !== undefined
|
|
2739
|
+
? { profile_published }
|
|
2740
|
+
: {})), { created, function: function_, profile,
|
|
2741
|
+
object }), (usage !== undefined ? { usage } : {})),
|
|
561
2742
|
true,
|
|
562
2743
|
];
|
|
563
2744
|
}
|
|
@@ -565,32 +2746,216 @@ export var Multichat;
|
|
|
565
2746
|
return [a, false];
|
|
566
2747
|
}
|
|
567
2748
|
}
|
|
568
|
-
|
|
569
|
-
})(
|
|
570
|
-
|
|
571
|
-
|
|
2749
|
+
FunctionExecutionChunk.merged = merged;
|
|
2750
|
+
})(FunctionExecutionChunk = Streaming.FunctionExecutionChunk || (Streaming.FunctionExecutionChunk = {}));
|
|
2751
|
+
})(Streaming = Response.Streaming || (Response.Streaming = {}));
|
|
2752
|
+
let Unary;
|
|
2753
|
+
(function (Unary) {
|
|
2754
|
+
Unary.TaskSchema = z
|
|
2755
|
+
.union([Task.FunctionSchema, Task.VectorCompletionSchema])
|
|
2756
|
+
.describe("A task execution.");
|
|
2757
|
+
let Task;
|
|
2758
|
+
(function (Task) {
|
|
2759
|
+
Task.FunctionSchema = z
|
|
2760
|
+
.lazy(() => Unary.FunctionExecutionSchema.extend({
|
|
2761
|
+
index: Response.Task.IndexSchema,
|
|
2762
|
+
task_index: Response.Task.TaskIndexSchema,
|
|
2763
|
+
task_path: Response.Task.TaskPathSchema,
|
|
2764
|
+
}))
|
|
2765
|
+
.describe("A chunk of a function execution task.");
|
|
2766
|
+
Task.VectorCompletionSchema = Vector.Completions.Response.Unary.VectorCompletionSchema.extend({
|
|
2767
|
+
index: Response.Task.IndexSchema,
|
|
2768
|
+
task_index: Response.Task.TaskIndexSchema,
|
|
2769
|
+
task_path: Response.Task.TaskPathSchema,
|
|
2770
|
+
error: ObjectiveAIErrorSchema.nullable().describe("When non-null, indicates that an error occurred during the vector completion task."),
|
|
2771
|
+
}).describe("A vector completion task.");
|
|
2772
|
+
})(Task = Unary.Task || (Unary.Task = {}));
|
|
2773
|
+
Unary.FunctionExecutionSchema = z
|
|
2774
|
+
.object({
|
|
2775
|
+
id: z
|
|
2776
|
+
.string()
|
|
2777
|
+
.describe("The unique identifier of the function execution."),
|
|
2778
|
+
tasks: z
|
|
2779
|
+
.array(Unary.TaskSchema)
|
|
2780
|
+
.describe("The tasks executed as part of the function execution."),
|
|
2781
|
+
tasks_errors: z
|
|
2782
|
+
.boolean()
|
|
2783
|
+
.describe("When true, indicates that one or more tasks encountered errors during execution."),
|
|
2784
|
+
output: z
|
|
2785
|
+
.union([
|
|
2786
|
+
z
|
|
2787
|
+
.number()
|
|
2788
|
+
.describe("The scalar output of the function execution."),
|
|
2789
|
+
z
|
|
2790
|
+
.array(z.number())
|
|
2791
|
+
.describe("The vector output of the function execution."),
|
|
2792
|
+
JsonValueSchema.describe("The erroneous output of the function execution."),
|
|
2793
|
+
])
|
|
2794
|
+
.describe("The output of the function execution."),
|
|
2795
|
+
error: ObjectiveAIErrorSchema.nullable().describe("When non-null, indicates that an error occurred during the function execution."),
|
|
2796
|
+
retry_token: z
|
|
2797
|
+
.string()
|
|
2798
|
+
.nullable()
|
|
2799
|
+
.describe("A token which may be used to retry the function execution."),
|
|
2800
|
+
function_published: z
|
|
2801
|
+
.boolean()
|
|
2802
|
+
.optional()
|
|
2803
|
+
.describe("When true, indicates that a function was published as part of this execution."),
|
|
2804
|
+
profile_published: z
|
|
2805
|
+
.boolean()
|
|
2806
|
+
.optional()
|
|
2807
|
+
.describe("When true, indicates that a profile was published as part of this execution."),
|
|
2808
|
+
created: z
|
|
2809
|
+
.uint32()
|
|
2810
|
+
.describe("The UNIX timestamp (in seconds) when the function execution chunk was created."),
|
|
2811
|
+
function: z
|
|
2812
|
+
.string()
|
|
2813
|
+
.nullable()
|
|
2814
|
+
.describe("The unique identifier of the function being executed."),
|
|
2815
|
+
profile: z
|
|
2816
|
+
.string()
|
|
2817
|
+
.nullable()
|
|
2818
|
+
.describe("The unique identifier of the profile being used."),
|
|
2819
|
+
object: z
|
|
2820
|
+
.enum(["scalar.function.execution", "vector.function.execution"])
|
|
2821
|
+
.describe("The object type."),
|
|
2822
|
+
usage: Vector.Completions.Response.UsageSchema,
|
|
2823
|
+
})
|
|
2824
|
+
.describe("A function execution.");
|
|
2825
|
+
})(Unary = Response.Unary || (Response.Unary = {}));
|
|
2826
|
+
})(Response = Executions.Response || (Executions.Response = {}));
|
|
2827
|
+
})(Executions = Function_1.Executions || (Function_1.Executions = {}));
|
|
2828
|
+
let ComputeProfile;
|
|
2829
|
+
(function (ComputeProfile) {
|
|
2830
|
+
let Request;
|
|
2831
|
+
(function (Request) {
|
|
2832
|
+
Request.DatasetItemSchema = z
|
|
2833
|
+
.object({
|
|
2834
|
+
input: Function_1.InputSchema_,
|
|
2835
|
+
target: DatasetItem.TargetSchema,
|
|
2836
|
+
})
|
|
2837
|
+
.describe("A Function input and its corresponding target output.");
|
|
2838
|
+
let DatasetItem;
|
|
2839
|
+
(function (DatasetItem) {
|
|
2840
|
+
DatasetItem.TargetSchema = z
|
|
2841
|
+
.union([
|
|
2842
|
+
Target.ScalarSchema,
|
|
2843
|
+
Target.VectorSchema,
|
|
2844
|
+
Target.VectorWinnerSchema,
|
|
2845
|
+
])
|
|
2846
|
+
.describe("The target output for a given function input.");
|
|
2847
|
+
let Target;
|
|
2848
|
+
(function (Target) {
|
|
2849
|
+
Target.ScalarSchema = z
|
|
2850
|
+
.object({
|
|
2851
|
+
type: z.literal("scalar"),
|
|
2852
|
+
value: z.number(),
|
|
2853
|
+
})
|
|
2854
|
+
.describe("A scalar target output. The desired output is this exact scalar.");
|
|
2855
|
+
Target.VectorSchema = z
|
|
2856
|
+
.object({
|
|
2857
|
+
type: z.literal("vector"),
|
|
2858
|
+
value: z.array(z.number()),
|
|
2859
|
+
})
|
|
2860
|
+
.describe("A vector target output. The desired output is this exact vector.");
|
|
2861
|
+
Target.VectorWinnerSchema = z
|
|
2862
|
+
.object({
|
|
2863
|
+
type: z.literal("vector_winner"),
|
|
2864
|
+
value: z.uint32(),
|
|
2865
|
+
})
|
|
2866
|
+
.describe("A vector winner target output. The desired output is a vector where the highest value is at the specified index.");
|
|
2867
|
+
})(Target = DatasetItem.Target || (DatasetItem.Target = {}));
|
|
2868
|
+
})(DatasetItem = Request.DatasetItem || (Request.DatasetItem = {}));
|
|
2869
|
+
Request.FunctionComputeProfileParamsBaseSchema = z
|
|
2870
|
+
.object({
|
|
2871
|
+
retry_token: z
|
|
2872
|
+
.string()
|
|
2873
|
+
.optional()
|
|
2874
|
+
.nullable()
|
|
2875
|
+
.describe("The retry token provided by a previous incomplete or failed profile computation."),
|
|
2876
|
+
max_retries: z
|
|
2877
|
+
.uint32()
|
|
2878
|
+
.optional()
|
|
2879
|
+
.nullable()
|
|
2880
|
+
.describe("The maximum number of retries to attempt when a function execution fails during profile computation."),
|
|
2881
|
+
n: z
|
|
2882
|
+
.uint32()
|
|
2883
|
+
.describe("The number of function executions to perform per dataset item. Generally speaking, higher N values increase the quality of the computed profile."),
|
|
2884
|
+
dataset: z
|
|
2885
|
+
.array(Request.DatasetItemSchema)
|
|
2886
|
+
.describe("The dataset of input and target output pairs to use for computing the profile."),
|
|
2887
|
+
ensemble: Vector.Completions.Request.EnsembleSchema,
|
|
2888
|
+
provider: Chat.Completions.Request.ProviderSchema.optional().nullable(),
|
|
2889
|
+
seed: Chat.Completions.Request.SeedSchema.optional().nullable(),
|
|
2890
|
+
backoff_max_elapsed_time: Chat.Completions.Request.BackoffMaxElapsedTimeSchema.optional().nullable(),
|
|
2891
|
+
first_chunk_timeout: Chat.Completions.Request.FirstChunkTimeoutSchema.optional().nullable(),
|
|
2892
|
+
other_chunk_timeout: Chat.Completions.Request.OtherChunkTimeoutSchema.optional().nullable(),
|
|
2893
|
+
})
|
|
2894
|
+
.describe("Base parameters for computing a function profile.");
|
|
2895
|
+
Request.FunctionComputeProfileParamsStreamingSchema = Request.FunctionComputeProfileParamsBaseSchema.extend({
|
|
2896
|
+
stream: Chat.Completions.Request.StreamTrueSchema,
|
|
2897
|
+
}).describe("Parameters for computing a function profile and streaming the response.");
|
|
2898
|
+
Request.FunctionComputeProfileParamsNonStreamingSchema = Request.FunctionComputeProfileParamsBaseSchema.extend({
|
|
2899
|
+
stream: Chat.Completions.Request.StreamFalseSchema.optional().nullable(),
|
|
2900
|
+
}).describe("Parameters for computing a function profile with a unary response.");
|
|
2901
|
+
Request.FunctionComputeProfileParamsSchema = z
|
|
2902
|
+
.union([
|
|
2903
|
+
Request.FunctionComputeProfileParamsStreamingSchema,
|
|
2904
|
+
Request.FunctionComputeProfileParamsNonStreamingSchema,
|
|
2905
|
+
])
|
|
2906
|
+
.describe("Parameters for computing a function profile.");
|
|
2907
|
+
})(Request = ComputeProfile.Request || (ComputeProfile.Request = {}));
|
|
2908
|
+
let Response;
|
|
2909
|
+
(function (Response) {
|
|
2910
|
+
Response.FittingStatsSchema = z
|
|
2911
|
+
.object({
|
|
2912
|
+
loss: z
|
|
2913
|
+
.number()
|
|
2914
|
+
.describe("The final sum loss achieved during weights fitting."),
|
|
2915
|
+
executions: z
|
|
2916
|
+
.uint32()
|
|
2917
|
+
.describe("The total number of function executions used during weights fitting."),
|
|
2918
|
+
starts: z
|
|
2919
|
+
.uint32()
|
|
2920
|
+
.describe("The number of fitting starts attempted. Each start begins with a randomized weight vector."),
|
|
2921
|
+
rounds: z
|
|
2922
|
+
.uint32()
|
|
2923
|
+
.describe("The number of fitting rounds performed across all starts."),
|
|
2924
|
+
errors: z
|
|
2925
|
+
.uint32()
|
|
2926
|
+
.describe("The number of errors which occured while computing outputs during fitting."),
|
|
2927
|
+
})
|
|
2928
|
+
.describe("Statistics about the fitting process used to compute the weights for the profile.");
|
|
2929
|
+
let Streaming;
|
|
2930
|
+
(function (Streaming) {
|
|
2931
|
+
Streaming.FunctionExecutionChunkSchema = Executions.Response.Streaming.FunctionExecutionChunkSchema.extend({
|
|
2932
|
+
index: z
|
|
2933
|
+
.uint32()
|
|
2934
|
+
.describe("The index of the function execution chunk in the list of executions."),
|
|
2935
|
+
dataset: z
|
|
2936
|
+
.uint32()
|
|
2937
|
+
.describe("The index of the dataset item this function execution chunk corresponds to."),
|
|
2938
|
+
n: z
|
|
2939
|
+
.uint32()
|
|
2940
|
+
.describe("The N index for this function execution chunk. There will be N function executions, and N comes from the request parameters."),
|
|
2941
|
+
retry: z
|
|
2942
|
+
.uint32()
|
|
2943
|
+
.describe("The retry index for this function execution chunk. There may be multiple retries for a given dataset item and N index."),
|
|
2944
|
+
}).describe("A chunk of a function execution ran during profile computation.");
|
|
2945
|
+
let FunctionExecutionChunk;
|
|
2946
|
+
(function (FunctionExecutionChunk) {
|
|
572
2947
|
function merged(a, b) {
|
|
573
|
-
const [delta, deltaChanged] = merge(a.delta, b.delta, Chat.Completions.Response.Streaming.Delta.merged);
|
|
574
|
-
const [finish_reason, finish_reasonChanged] = merge(a.finish_reason, b.finish_reason);
|
|
575
2948
|
const index = a.index;
|
|
576
|
-
const
|
|
577
|
-
const
|
|
578
|
-
const
|
|
579
|
-
const [
|
|
580
|
-
|
|
581
|
-
if (deltaChanged ||
|
|
582
|
-
finish_reasonChanged ||
|
|
583
|
-
logprobsChanged ||
|
|
584
|
-
errorChanged ||
|
|
585
|
-
modelChanged ||
|
|
586
|
-
model_indexChanged ||
|
|
587
|
-
completion_metadataChanged) {
|
|
2949
|
+
const dataset = a.dataset;
|
|
2950
|
+
const n = a.n;
|
|
2951
|
+
const retry = a.retry;
|
|
2952
|
+
const [base, baseChanged] = Executions.Response.Streaming.FunctionExecutionChunk.merged(a, b);
|
|
2953
|
+
if (baseChanged) {
|
|
588
2954
|
return [
|
|
589
|
-
Object.assign(
|
|
590
|
-
|
|
591
|
-
|
|
592
|
-
|
|
593
|
-
: {})),
|
|
2955
|
+
Object.assign({ index,
|
|
2956
|
+
dataset,
|
|
2957
|
+
n,
|
|
2958
|
+
retry }, base),
|
|
594
2959
|
true,
|
|
595
2960
|
];
|
|
596
2961
|
}
|
|
@@ -598,205 +2963,322 @@ export var Multichat;
|
|
|
598
2963
|
return [a, false];
|
|
599
2964
|
}
|
|
600
2965
|
}
|
|
601
|
-
|
|
2966
|
+
FunctionExecutionChunk.merged = merged;
|
|
602
2967
|
function mergedList(a, b) {
|
|
603
2968
|
let merged = undefined;
|
|
604
|
-
for (const
|
|
605
|
-
const existingIndex = a.findIndex(({ index }) => index ===
|
|
2969
|
+
for (const chunk of b) {
|
|
2970
|
+
const existingIndex = a.findIndex(({ index }) => index === chunk.index);
|
|
606
2971
|
if (existingIndex === -1) {
|
|
607
2972
|
if (merged === undefined) {
|
|
608
|
-
merged = [...a,
|
|
2973
|
+
merged = [...a, chunk];
|
|
609
2974
|
}
|
|
610
2975
|
else {
|
|
611
|
-
merged.push(
|
|
2976
|
+
merged.push(chunk);
|
|
612
2977
|
}
|
|
613
2978
|
}
|
|
614
2979
|
else {
|
|
615
|
-
const [
|
|
616
|
-
if (
|
|
2980
|
+
const [mergedChunk, chunkChanged] = FunctionExecutionChunk.merged(a[existingIndex], chunk);
|
|
2981
|
+
if (chunkChanged) {
|
|
617
2982
|
if (merged === undefined) {
|
|
618
2983
|
merged = [...a];
|
|
619
2984
|
}
|
|
620
|
-
merged[existingIndex] =
|
|
2985
|
+
merged[existingIndex] = mergedChunk;
|
|
621
2986
|
}
|
|
622
2987
|
}
|
|
623
2988
|
}
|
|
624
2989
|
return merged ? [merged, true] : [a, false];
|
|
625
2990
|
}
|
|
626
|
-
|
|
627
|
-
})(
|
|
2991
|
+
FunctionExecutionChunk.mergedList = mergedList;
|
|
2992
|
+
})(FunctionExecutionChunk = Streaming.FunctionExecutionChunk || (Streaming.FunctionExecutionChunk = {}));
|
|
2993
|
+
Streaming.FunctionComputeProfileChunkSchema = z
|
|
2994
|
+
.object({
|
|
2995
|
+
id: z
|
|
2996
|
+
.string()
|
|
2997
|
+
.describe("The unique identifier of the function profile computation chunk."),
|
|
2998
|
+
executions: z
|
|
2999
|
+
.array(Streaming.FunctionExecutionChunkSchema)
|
|
3000
|
+
.describe("The function executions performed as part of computing the profile."),
|
|
3001
|
+
executions_errors: z
|
|
3002
|
+
.boolean()
|
|
3003
|
+
.optional()
|
|
3004
|
+
.describe("When true, indicates that one or more function executions encountered errors during profile computation."),
|
|
3005
|
+
profile: z
|
|
3006
|
+
.array(Function_1.ProfileVersionRequiredSchema)
|
|
3007
|
+
.optional()
|
|
3008
|
+
.describe("The computed function profile."),
|
|
3009
|
+
fitting_stats: Response.FittingStatsSchema.optional(),
|
|
3010
|
+
created: z
|
|
3011
|
+
.uint32()
|
|
3012
|
+
.describe("The UNIX timestamp (in seconds) when the function profile computation was created."),
|
|
3013
|
+
function: z
|
|
3014
|
+
.string()
|
|
3015
|
+
.describe("The unique identifier of the function for which the profile is being computed."),
|
|
3016
|
+
object: z.literal("function.compute.profile.chunk"),
|
|
3017
|
+
usage: Vector.Completions.Response.UsageSchema.optional(),
|
|
3018
|
+
})
|
|
3019
|
+
.describe("A chunk of a function profile computation.");
|
|
3020
|
+
let FunctionComputeProfileChunk;
|
|
3021
|
+
(function (FunctionComputeProfileChunk) {
|
|
3022
|
+
function merged(a, b) {
|
|
3023
|
+
const id = a.id;
|
|
3024
|
+
const [executions, executionsChanged] = FunctionExecutionChunk.mergedList(a.executions, b.executions);
|
|
3025
|
+
const [executions_errors, executions_errorsChanged] = merge(a.executions_errors, b.executions_errors);
|
|
3026
|
+
const [profile, profileChanged] = merge(a.profile, b.profile);
|
|
3027
|
+
const [fitting_stats, fitting_statsChanged] = merge(a.fitting_stats, b.fitting_stats);
|
|
3028
|
+
const created = a.created;
|
|
3029
|
+
const function_ = a.function;
|
|
3030
|
+
const object = a.object;
|
|
3031
|
+
const [usage, usageChanged] = merge(a.usage, b.usage);
|
|
3032
|
+
if (executionsChanged ||
|
|
3033
|
+
executions_errorsChanged ||
|
|
3034
|
+
profileChanged ||
|
|
3035
|
+
fitting_statsChanged ||
|
|
3036
|
+
usageChanged) {
|
|
3037
|
+
return [
|
|
3038
|
+
Object.assign(Object.assign(Object.assign(Object.assign(Object.assign({ id,
|
|
3039
|
+
executions }, (executions_errors !== undefined
|
|
3040
|
+
? { executions_errors }
|
|
3041
|
+
: {})), (profile !== undefined ? { profile } : {})), (fitting_stats !== undefined ? { fitting_stats } : {})), { created, function: function_, object }), (usage !== undefined ? { usage } : {})),
|
|
3042
|
+
true,
|
|
3043
|
+
];
|
|
3044
|
+
}
|
|
3045
|
+
else {
|
|
3046
|
+
return [a, false];
|
|
3047
|
+
}
|
|
3048
|
+
}
|
|
3049
|
+
FunctionComputeProfileChunk.merged = merged;
|
|
3050
|
+
})(FunctionComputeProfileChunk = Streaming.FunctionComputeProfileChunk || (Streaming.FunctionComputeProfileChunk = {}));
|
|
628
3051
|
})(Streaming = Response.Streaming || (Response.Streaming = {}));
|
|
629
|
-
|
|
630
|
-
|
|
631
|
-
|
|
632
|
-
|
|
633
|
-
|
|
634
|
-
|
|
635
|
-
|
|
636
|
-
|
|
637
|
-
|
|
638
|
-
|
|
639
|
-
|
|
640
|
-
|
|
3052
|
+
let Unary;
|
|
3053
|
+
(function (Unary) {
|
|
3054
|
+
Unary.FunctionExecutionSchema = Executions.Response.Unary.FunctionExecutionSchema.extend({
|
|
3055
|
+
index: z
|
|
3056
|
+
.uint32()
|
|
3057
|
+
.describe("The index of the function execution in the list of executions."),
|
|
3058
|
+
dataset: z
|
|
3059
|
+
.uint32()
|
|
3060
|
+
.describe("The index of the dataset item this function execution corresponds to."),
|
|
3061
|
+
n: z
|
|
3062
|
+
.uint32()
|
|
3063
|
+
.describe("The N index for this function execution. There will be N function executions, and N comes from the request parameters."),
|
|
3064
|
+
retry: z
|
|
3065
|
+
.uint32()
|
|
3066
|
+
.describe("The retry index for this function execution. There may be multiple retries for a given dataset item and N index."),
|
|
3067
|
+
}).describe("A function execution ran during profile computation.");
|
|
3068
|
+
Unary.FunctionComputeProfileSchema = z
|
|
3069
|
+
.object({
|
|
3070
|
+
id: z
|
|
3071
|
+
.string()
|
|
3072
|
+
.describe("The unique identifier of the function profile computation."),
|
|
3073
|
+
executions: z
|
|
3074
|
+
.array(Unary.FunctionExecutionSchema)
|
|
3075
|
+
.describe("The function executions performed as part of computing the profile."),
|
|
3076
|
+
executions_errors: z
|
|
3077
|
+
.boolean()
|
|
3078
|
+
.describe("When true, indicates that one or more function executions encountered errors during profile computation."),
|
|
3079
|
+
profile: z
|
|
3080
|
+
.array(Function_1.ProfileVersionRequiredSchema)
|
|
3081
|
+
.describe("The computed function profile."),
|
|
3082
|
+
fitting_stats: Response.FittingStatsSchema,
|
|
3083
|
+
created: z
|
|
3084
|
+
.uint32()
|
|
3085
|
+
.describe("The UNIX timestamp (in seconds) when the function profile computation was created."),
|
|
3086
|
+
function: z
|
|
3087
|
+
.string()
|
|
3088
|
+
.describe("The unique identifier of the function for which the profile is being computed."),
|
|
3089
|
+
object: z.literal("function.compute.profile"),
|
|
3090
|
+
usage: Vector.Completions.Response.UsageSchema,
|
|
3091
|
+
})
|
|
3092
|
+
.describe("A function profile computation.");
|
|
3093
|
+
})(Unary = Response.Unary || (Response.Unary = {}));
|
|
3094
|
+
})(Response = ComputeProfile.Response || (ComputeProfile.Response = {}));
|
|
3095
|
+
})(ComputeProfile = Function_1.ComputeProfile || (Function_1.ComputeProfile = {}));
|
|
3096
|
+
let Profile;
|
|
3097
|
+
(function (Profile) {
|
|
3098
|
+
Profile.ListItemSchema = z.object({
|
|
3099
|
+
function_author: z
|
|
3100
|
+
.string()
|
|
3101
|
+
.describe("The author of the function the profile was published to."),
|
|
3102
|
+
function_id: z
|
|
3103
|
+
.string()
|
|
3104
|
+
.describe("The unique identifier of the function the profile was published to."),
|
|
3105
|
+
author: z.string().describe("The author of the profile."),
|
|
3106
|
+
id: z.string().describe("The unique identifier of the profile."),
|
|
3107
|
+
version: z.uint32().describe("The version of the profile."),
|
|
3108
|
+
});
|
|
3109
|
+
async function list(openai, options) {
|
|
3110
|
+
const response = await openai.get("/functions/profiles", options);
|
|
641
3111
|
return response;
|
|
642
3112
|
}
|
|
643
|
-
|
|
644
|
-
|
|
645
|
-
|
|
646
|
-
|
|
3113
|
+
Profile.list = list;
|
|
3114
|
+
Profile.RetrieveItemSchema = z.object({
|
|
3115
|
+
created: z
|
|
3116
|
+
.uint32()
|
|
3117
|
+
.describe("The UNIX timestamp (in seconds) when the profile was created."),
|
|
3118
|
+
shape: z
|
|
3119
|
+
.string()
|
|
3120
|
+
.describe("The shape of the profile. Unless Task Skip expressions work out favorably, profiles only work for functions with the same shape."),
|
|
3121
|
+
function_author: z
|
|
3122
|
+
.string()
|
|
3123
|
+
.describe("The author of the function the profile was published to."),
|
|
3124
|
+
function_id: z
|
|
3125
|
+
.string()
|
|
3126
|
+
.describe("The unique identifier of the function the profile was published to."),
|
|
3127
|
+
author: z.string().describe("The author of the profile."),
|
|
3128
|
+
id: z.string().describe("The unique identifier of the profile."),
|
|
3129
|
+
version: z.uint32().describe("The version of the profile."),
|
|
3130
|
+
profile: z
|
|
3131
|
+
.array(Function.ProfileVersionRequiredSchema)
|
|
3132
|
+
.describe("The function profile."),
|
|
3133
|
+
});
|
|
3134
|
+
async function retrieve(openai, function_author, function_id, author, id, version, options) {
|
|
3135
|
+
const response = await openai.get(version !== null && version !== undefined
|
|
3136
|
+
? `/functions/${function_author}/${function_id}/profiles/${author}/${id}/${version}`
|
|
3137
|
+
: `/functions/${function_author}/${function_id}/profiles/${author}/${id}`, options);
|
|
647
3138
|
return response;
|
|
648
3139
|
}
|
|
649
|
-
|
|
650
|
-
|
|
651
|
-
|
|
652
|
-
|
|
653
|
-
(
|
|
654
|
-
|
|
655
|
-
|
|
656
|
-
|
|
657
|
-
|
|
3140
|
+
Profile.retrieve = retrieve;
|
|
3141
|
+
Profile.HistoricalUsageSchema = z.object({
|
|
3142
|
+
requests: z
|
|
3143
|
+
.uint32()
|
|
3144
|
+
.describe("The total number of requests made to Functions while using this Profile."),
|
|
3145
|
+
completion_tokens: z
|
|
3146
|
+
.uint32()
|
|
3147
|
+
.describe("The total number of completion tokens generated by Functions while using this Profile."),
|
|
3148
|
+
prompt_tokens: z
|
|
3149
|
+
.uint32()
|
|
3150
|
+
.describe("The total number of prompt tokens sent to Functions while using this Profile."),
|
|
3151
|
+
total_cost: z
|
|
3152
|
+
.number()
|
|
3153
|
+
.describe("The total cost incurred by using this Profile."),
|
|
3154
|
+
});
|
|
3155
|
+
async function retrieveUsage(openai, function_author, function_id, author, id, version, options) {
|
|
3156
|
+
const response = await openai.get(version !== null && version !== undefined
|
|
3157
|
+
? `/functions/${function_author}/${function_id}/profiles/${author}/${id}/${version}/usage`
|
|
3158
|
+
: `/functions/${function_author}/${function_id}/profiles/${author}/${id}/usage`, options);
|
|
658
3159
|
return response;
|
|
659
3160
|
}
|
|
660
|
-
|
|
661
|
-
})(
|
|
662
|
-
|
|
663
|
-
|
|
664
|
-
(
|
|
665
|
-
let Response;
|
|
666
|
-
(function (Response) {
|
|
667
|
-
let Streaming;
|
|
668
|
-
(function (Streaming) {
|
|
669
|
-
let CompletionChunk;
|
|
670
|
-
(function (CompletionChunk) {
|
|
671
|
-
let ScoreCompletionChunk;
|
|
672
|
-
(function (ScoreCompletionChunk) {
|
|
673
|
-
function merged(a, b) {
|
|
674
|
-
const [base, baseChanged] = Score.Completions.Response.Streaming.ChatCompletionChunk.merged(a, b);
|
|
675
|
-
return baseChanged
|
|
676
|
-
? [Object.assign(Object.assign({ type: a.type }, base), { index: a.index }), true]
|
|
677
|
-
: [a, false];
|
|
678
|
-
}
|
|
679
|
-
ScoreCompletionChunk.merged = merged;
|
|
680
|
-
})(ScoreCompletionChunk = CompletionChunk.ScoreCompletionChunk || (CompletionChunk.ScoreCompletionChunk = {}));
|
|
681
|
-
let MultichatCompletionChunk;
|
|
682
|
-
(function (MultichatCompletionChunk) {
|
|
683
|
-
function merged(a, b) {
|
|
684
|
-
const [base, baseChanged] = Multichat.Completions.Response.Streaming.ChatCompletionChunk.merged(a, b);
|
|
685
|
-
return baseChanged
|
|
686
|
-
? [Object.assign(Object.assign({ type: a.type }, base), { index: a.index }), true]
|
|
687
|
-
: [a, false];
|
|
688
|
-
}
|
|
689
|
-
MultichatCompletionChunk.merged = merged;
|
|
690
|
-
})(MultichatCompletionChunk = CompletionChunk.MultichatCompletionChunk || (CompletionChunk.MultichatCompletionChunk = {}));
|
|
691
|
-
function mergedList(a, b) {
|
|
692
|
-
let merged = undefined;
|
|
693
|
-
for (const chunk of b) {
|
|
694
|
-
const existingIndex = a.findIndex((c) => c.index === chunk.index && c.type === chunk.type);
|
|
695
|
-
if (existingIndex === -1) {
|
|
696
|
-
if (merged === undefined) {
|
|
697
|
-
merged = [...a, chunk];
|
|
698
|
-
}
|
|
699
|
-
else {
|
|
700
|
-
merged.push(chunk);
|
|
701
|
-
}
|
|
702
|
-
}
|
|
703
|
-
else if (chunk.type === "score") {
|
|
704
|
-
const [mergedChunk, chunkChanged] = ScoreCompletionChunk.merged(a[existingIndex], chunk);
|
|
705
|
-
if (chunkChanged) {
|
|
706
|
-
if (merged === undefined) {
|
|
707
|
-
merged = [...a];
|
|
708
|
-
}
|
|
709
|
-
merged[existingIndex] = mergedChunk;
|
|
710
|
-
}
|
|
711
|
-
}
|
|
712
|
-
else if (chunk.type === "multichat") {
|
|
713
|
-
const [mergedChunk, chunkChanged] = MultichatCompletionChunk.merged(a[existingIndex], chunk);
|
|
714
|
-
if (chunkChanged) {
|
|
715
|
-
if (merged === undefined) {
|
|
716
|
-
merged = [...a];
|
|
717
|
-
}
|
|
718
|
-
merged[existingIndex] = mergedChunk;
|
|
719
|
-
}
|
|
720
|
-
}
|
|
721
|
-
}
|
|
722
|
-
return merged ? [merged, true] : [a, false];
|
|
723
|
-
}
|
|
724
|
-
CompletionChunk.mergedList = mergedList;
|
|
725
|
-
})(CompletionChunk = Streaming.CompletionChunk || (Streaming.CompletionChunk = {}));
|
|
726
|
-
function merged(a, b) {
|
|
727
|
-
const [completions, completionsChanged] = CompletionChunk.mergedList(a.completions, b.completions);
|
|
728
|
-
const [output, outputChanged] = merge(a.output, b.output);
|
|
729
|
-
const [retry_token, retry_tokenChanged] = merge(a.retry_token, b.retry_token);
|
|
730
|
-
const [error, errorChanged] = merge(a.error, b.error);
|
|
731
|
-
const [function_published, function_publishedChanged] = merge(a.function_published, b.function_published);
|
|
732
|
-
if (completionsChanged ||
|
|
733
|
-
outputChanged ||
|
|
734
|
-
retry_tokenChanged ||
|
|
735
|
-
errorChanged ||
|
|
736
|
-
function_publishedChanged) {
|
|
737
|
-
return [
|
|
738
|
-
Object.assign(Object.assign(Object.assign(Object.assign({ completions }, (output !== undefined ? { output } : {})), (retry_token !== undefined ? { retry_token } : {})), (error !== undefined ? { error } : {})), (function_published !== undefined
|
|
739
|
-
? { function_published }
|
|
740
|
-
: {})),
|
|
741
|
-
true,
|
|
742
|
-
];
|
|
743
|
-
}
|
|
744
|
-
else {
|
|
745
|
-
return [a, false];
|
|
746
|
-
}
|
|
747
|
-
}
|
|
748
|
-
Streaming.merged = merged;
|
|
749
|
-
})(Streaming = Response.Streaming || (Response.Streaming = {}));
|
|
750
|
-
})(Response = Functions.Response || (Functions.Response = {}));
|
|
751
|
-
async function list(openai, listOptions, options) {
|
|
752
|
-
const response = await openai.get("/functions", Object.assign({ query: listOptions }, options));
|
|
3161
|
+
Profile.retrieveUsage = retrieveUsage;
|
|
3162
|
+
})(Profile = Function_1.Profile || (Function_1.Profile = {}));
|
|
3163
|
+
async function executeInline(openai, body, options) {
|
|
3164
|
+
var _a;
|
|
3165
|
+
const response = await openai.post("/functions", Object.assign({ body, stream: (_a = body.stream) !== null && _a !== void 0 ? _a : false }, options));
|
|
753
3166
|
return response;
|
|
754
3167
|
}
|
|
755
|
-
|
|
756
|
-
async function
|
|
757
|
-
|
|
3168
|
+
Function_1.executeInline = executeInline;
|
|
3169
|
+
async function execute(openai, author, id, version, body, options) {
|
|
3170
|
+
var _a;
|
|
3171
|
+
const response = await openai.post(version !== null && version !== undefined
|
|
3172
|
+
? `/functions/${author}/${id}/${version}`
|
|
3173
|
+
: `/functions/${author}/${id}`, Object.assign({ body, stream: (_a = body.stream) !== null && _a !== void 0 ? _a : false }, options));
|
|
758
3174
|
return response;
|
|
759
3175
|
}
|
|
760
|
-
|
|
761
|
-
async function
|
|
762
|
-
|
|
763
|
-
|
|
764
|
-
: `/functions/${author}/${id}`;
|
|
765
|
-
const response = await openai.get(url, Object.assign({ query: retrieveOptions }, options));
|
|
3176
|
+
Function_1.execute = execute;
|
|
3177
|
+
async function publishFunction(openai, author, id, version, body, options) {
|
|
3178
|
+
var _a;
|
|
3179
|
+
const response = await openai.post(`/functions/${author}/${id}/${version}/publish`, Object.assign({ body, stream: (_a = body.stream) !== null && _a !== void 0 ? _a : false }, options));
|
|
766
3180
|
return response;
|
|
767
3181
|
}
|
|
768
|
-
|
|
769
|
-
async function
|
|
3182
|
+
Function_1.publishFunction = publishFunction;
|
|
3183
|
+
async function publishProfile(openai, function_author, function_id, body, options) {
|
|
770
3184
|
var _a;
|
|
771
|
-
const
|
|
772
|
-
? `/functions/${author}/${id}/${version}`
|
|
773
|
-
: `/functions/${author}/${id}`;
|
|
774
|
-
const response = await openai.post(url, Object.assign({ body, stream: (_a = body.stream) !== null && _a !== void 0 ? _a : false }, options));
|
|
3185
|
+
const response = await openai.post(`/functions/${function_author}/${function_id}/profiles/publish`, Object.assign({ body, stream: (_a = body.stream) !== null && _a !== void 0 ? _a : false }, options));
|
|
775
3186
|
return response;
|
|
776
3187
|
}
|
|
777
|
-
|
|
778
|
-
async function
|
|
3188
|
+
Function_1.publishProfile = publishProfile;
|
|
3189
|
+
async function computeProfile(openai, author, id, version, body, options) {
|
|
779
3190
|
var _a;
|
|
780
|
-
const response = await openai.post(
|
|
3191
|
+
const response = await openai.post(version !== null && version !== undefined
|
|
3192
|
+
? `/functions/${author}/${id}/${version}/profiles/compute`
|
|
3193
|
+
: `/functions/${author}/${id}/profiles/compute`, Object.assign({ body, stream: (_a = body.stream) !== null && _a !== void 0 ? _a : false }, options));
|
|
3194
|
+
return response;
|
|
3195
|
+
}
|
|
3196
|
+
Function_1.computeProfile = computeProfile;
|
|
3197
|
+
Function_1.ListItemSchema = z.object({
|
|
3198
|
+
author: z.string().describe("The author of the function."),
|
|
3199
|
+
id: z.string().describe("The unique identifier of the function."),
|
|
3200
|
+
version: z.uint32().describe("The version of the function."),
|
|
3201
|
+
});
|
|
3202
|
+
async function list(openai, options) {
|
|
3203
|
+
const response = await openai.get("/functions", options);
|
|
781
3204
|
return response;
|
|
782
3205
|
}
|
|
783
|
-
|
|
784
|
-
|
|
785
|
-
|
|
786
|
-
(
|
|
787
|
-
|
|
788
|
-
|
|
3206
|
+
Function_1.list = list;
|
|
3207
|
+
Function_1.ScalarRetrieveItemSchema = Function_1.ScalarSchema.extend({
|
|
3208
|
+
created: z
|
|
3209
|
+
.uint32()
|
|
3210
|
+
.describe("The UNIX timestamp (in seconds) when the function was created."),
|
|
3211
|
+
shape: z
|
|
3212
|
+
.string()
|
|
3213
|
+
.describe("The shape of the function. Unless Task Skip expressions work out favorably, functions only work with profiles that have the same shape."),
|
|
3214
|
+
});
|
|
3215
|
+
Function_1.VectorRetrieveItemSchema = Function_1.VectorSchema.extend({
|
|
3216
|
+
created: z
|
|
3217
|
+
.uint32()
|
|
3218
|
+
.describe("The UNIX timestamp (in seconds) when the function was created."),
|
|
3219
|
+
shape: z
|
|
3220
|
+
.string()
|
|
3221
|
+
.describe("The shape of the function. Unless Task Skip expressions work out favorably, functions only work with profiles that have the same shape."),
|
|
3222
|
+
});
|
|
3223
|
+
Function_1.RetrieveItemSchema = z.discriminatedUnion("type", [
|
|
3224
|
+
Function_1.ScalarRetrieveItemSchema,
|
|
3225
|
+
Function_1.VectorRetrieveItemSchema,
|
|
3226
|
+
]);
|
|
3227
|
+
async function retrieve(openai, author, id, version, options) {
|
|
3228
|
+
const response = await openai.get(version !== null && version !== undefined
|
|
3229
|
+
? `/functions/${author}/${id}/${version}`
|
|
3230
|
+
: `/functions/${author}/${id}`, options);
|
|
789
3231
|
return response;
|
|
790
3232
|
}
|
|
791
|
-
|
|
792
|
-
|
|
793
|
-
|
|
3233
|
+
Function_1.retrieve = retrieve;
|
|
3234
|
+
Function_1.HistoricalUsageSchema = z.object({
|
|
3235
|
+
requests: z
|
|
3236
|
+
.uint32()
|
|
3237
|
+
.describe("The total number of requests made to this Function."),
|
|
3238
|
+
completion_tokens: z
|
|
3239
|
+
.uint32()
|
|
3240
|
+
.describe("The total number of completion tokens generated by this Function."),
|
|
3241
|
+
prompt_tokens: z
|
|
3242
|
+
.uint32()
|
|
3243
|
+
.describe("The total number of prompt tokens sent to this Function."),
|
|
3244
|
+
total_cost: z
|
|
3245
|
+
.number()
|
|
3246
|
+
.describe("The total cost incurred by using this Function."),
|
|
3247
|
+
});
|
|
3248
|
+
async function retrieveUsage(openai, author, id, version, options) {
|
|
3249
|
+
const response = await openai.get(version !== null && version !== undefined
|
|
3250
|
+
? `/functions/${author}/${id}/${version}/usage`
|
|
3251
|
+
: `/functions/${author}/${id}/usage`, options);
|
|
794
3252
|
return response;
|
|
795
3253
|
}
|
|
796
|
-
|
|
797
|
-
})(
|
|
3254
|
+
Function_1.retrieveUsage = retrieveUsage;
|
|
3255
|
+
})(Function || (Function = {}));
|
|
798
3256
|
export var Auth;
|
|
799
3257
|
(function (Auth) {
|
|
3258
|
+
Auth.ApiKeySchema = z.object({
|
|
3259
|
+
api_key: z.string().describe("The API key."),
|
|
3260
|
+
created: z
|
|
3261
|
+
.string()
|
|
3262
|
+
.describe("The RFC 3339 timestamp when the API key was created."),
|
|
3263
|
+
expires: z
|
|
3264
|
+
.string()
|
|
3265
|
+
.nullable()
|
|
3266
|
+
.describe("The RFC 3339 timestamp when the API key expires, or null if it does not expire."),
|
|
3267
|
+
disabled: z
|
|
3268
|
+
.string()
|
|
3269
|
+
.nullable()
|
|
3270
|
+
.describe("The RFC 3339 timestamp when the API key was disabled, or null if it is not disabled."),
|
|
3271
|
+
name: z.string().describe("The name of the API key."),
|
|
3272
|
+
description: z
|
|
3273
|
+
.string()
|
|
3274
|
+
.nullable()
|
|
3275
|
+
.describe("The description of the API key, or null if no description was provided."),
|
|
3276
|
+
});
|
|
3277
|
+
Auth.ApiKeyWithCostSchema = Auth.ApiKeySchema.extend({
|
|
3278
|
+
cost: z
|
|
3279
|
+
.number()
|
|
3280
|
+
.describe("The total cost incurred while using this API key."),
|
|
3281
|
+
});
|
|
800
3282
|
let ApiKey;
|
|
801
3283
|
(function (ApiKey) {
|
|
802
3284
|
async function list(openai, options) {
|
|
@@ -821,6 +3303,9 @@ export var Auth;
|
|
|
821
3303
|
}
|
|
822
3304
|
ApiKey.remove = remove;
|
|
823
3305
|
})(ApiKey = Auth.ApiKey || (Auth.ApiKey = {}));
|
|
3306
|
+
Auth.OpenRouterApiKeySchema = z.object({
|
|
3307
|
+
api_key: z.string().describe("The OpenRouter API key."),
|
|
3308
|
+
});
|
|
824
3309
|
let OpenRouterApiKey;
|
|
825
3310
|
(function (OpenRouterApiKey) {
|
|
826
3311
|
async function retrieve(openai, options) {
|
|
@@ -841,6 +3326,15 @@ export var Auth;
|
|
|
841
3326
|
}
|
|
842
3327
|
OpenRouterApiKey.remove = remove;
|
|
843
3328
|
})(OpenRouterApiKey = Auth.OpenRouterApiKey || (Auth.OpenRouterApiKey = {}));
|
|
3329
|
+
Auth.CreditsSchema = z.object({
|
|
3330
|
+
credits: z.number().describe("The current number of credits available."),
|
|
3331
|
+
total_credits_purchased: z
|
|
3332
|
+
.number()
|
|
3333
|
+
.describe("The total number of credits ever purchased."),
|
|
3334
|
+
total_credits_used: z
|
|
3335
|
+
.number()
|
|
3336
|
+
.describe("The total number of credits ever used."),
|
|
3337
|
+
});
|
|
844
3338
|
let Credits;
|
|
845
3339
|
(function (Credits) {
|
|
846
3340
|
async function retrieve(openai, options) {
|
|
@@ -863,106 +3357,6 @@ export var Auth;
|
|
|
863
3357
|
Username.set = set;
|
|
864
3358
|
})(Username = Auth.Username || (Auth.Username = {}));
|
|
865
3359
|
})(Auth || (Auth = {}));
|
|
866
|
-
export var Metadata;
|
|
867
|
-
(function (Metadata) {
|
|
868
|
-
async function get(openai, options) {
|
|
869
|
-
const response = await openai.get("/metadata", options);
|
|
870
|
-
return response;
|
|
871
|
-
}
|
|
872
|
-
Metadata.get = get;
|
|
873
|
-
})(Metadata || (Metadata = {}));
|
|
874
|
-
export var ScoreLlm;
|
|
875
|
-
(function (ScoreLlm) {
|
|
876
|
-
async function list(openai, listOptions, options) {
|
|
877
|
-
const response = await openai.get("/score/llms", Object.assign({ query: listOptions }, options));
|
|
878
|
-
return response;
|
|
879
|
-
}
|
|
880
|
-
ScoreLlm.list = list;
|
|
881
|
-
async function count(openai, options) {
|
|
882
|
-
const response = await openai.get("/score/llms/count", options);
|
|
883
|
-
return response;
|
|
884
|
-
}
|
|
885
|
-
ScoreLlm.count = count;
|
|
886
|
-
async function retrieve(openai, model, retrieveOptions, options) {
|
|
887
|
-
const response = await openai.get(`/score/llms/${model}`, Object.assign({ query: retrieveOptions }, options));
|
|
888
|
-
return response;
|
|
889
|
-
}
|
|
890
|
-
ScoreLlm.retrieve = retrieve;
|
|
891
|
-
async function retrieveValidate(openai, model, retrieveOptions, options) {
|
|
892
|
-
const response = await openai.post("/score/llms", Object.assign({ query: retrieveOptions, body: model }, options));
|
|
893
|
-
return response;
|
|
894
|
-
}
|
|
895
|
-
ScoreLlm.retrieveValidate = retrieveValidate;
|
|
896
|
-
})(ScoreLlm || (ScoreLlm = {}));
|
|
897
|
-
export var MultichatLlm;
|
|
898
|
-
(function (MultichatLlm) {
|
|
899
|
-
async function list(openai, listOptions, options) {
|
|
900
|
-
const response = await openai.get("/multichat/llms", Object.assign({ query: listOptions }, options));
|
|
901
|
-
return response;
|
|
902
|
-
}
|
|
903
|
-
MultichatLlm.list = list;
|
|
904
|
-
async function count(openai, options) {
|
|
905
|
-
const response = await openai.get("/multichat/llms/count", options);
|
|
906
|
-
return response;
|
|
907
|
-
}
|
|
908
|
-
MultichatLlm.count = count;
|
|
909
|
-
async function retrieve(openai, model, retrieveOptions, options) {
|
|
910
|
-
const response = await openai.get(`/multichat/llms/${model}`, Object.assign({ query: retrieveOptions }, options));
|
|
911
|
-
return response;
|
|
912
|
-
}
|
|
913
|
-
MultichatLlm.retrieve = retrieve;
|
|
914
|
-
async function retrieveValidate(openai, model, retrieveOptions, options) {
|
|
915
|
-
const response = await openai.post("/multichat/llms", Object.assign({ query: retrieveOptions, body: model }, options));
|
|
916
|
-
return response;
|
|
917
|
-
}
|
|
918
|
-
MultichatLlm.retrieveValidate = retrieveValidate;
|
|
919
|
-
})(MultichatLlm || (MultichatLlm = {}));
|
|
920
|
-
export var ScoreModel;
|
|
921
|
-
(function (ScoreModel) {
|
|
922
|
-
async function list(openai, listOptions, options) {
|
|
923
|
-
const response = await openai.get("/score/models", Object.assign({ query: listOptions }, options));
|
|
924
|
-
return response;
|
|
925
|
-
}
|
|
926
|
-
ScoreModel.list = list;
|
|
927
|
-
async function count(openai, options) {
|
|
928
|
-
const response = await openai.get("/score/models/count", options);
|
|
929
|
-
return response;
|
|
930
|
-
}
|
|
931
|
-
ScoreModel.count = count;
|
|
932
|
-
async function retrieve(openai, model, retrieveOptions, options) {
|
|
933
|
-
const response = await openai.get(`/score/models/${model}`, Object.assign({ query: retrieveOptions }, options));
|
|
934
|
-
return response;
|
|
935
|
-
}
|
|
936
|
-
ScoreModel.retrieve = retrieve;
|
|
937
|
-
async function retrieveValidate(openai, model, retrieveOptions, options) {
|
|
938
|
-
const response = await openai.post("/score/models", Object.assign({ query: retrieveOptions, body: model }, options));
|
|
939
|
-
return response;
|
|
940
|
-
}
|
|
941
|
-
ScoreModel.retrieveValidate = retrieveValidate;
|
|
942
|
-
})(ScoreModel || (ScoreModel = {}));
|
|
943
|
-
export var MultichatModel;
|
|
944
|
-
(function (MultichatModel) {
|
|
945
|
-
async function list(openai, listOptions, options) {
|
|
946
|
-
const response = await openai.get("/multichat/models", Object.assign({ query: listOptions }, options));
|
|
947
|
-
return response;
|
|
948
|
-
}
|
|
949
|
-
MultichatModel.list = list;
|
|
950
|
-
async function count(openai, options) {
|
|
951
|
-
const response = await openai.get("/multichat/models/count", options);
|
|
952
|
-
return response;
|
|
953
|
-
}
|
|
954
|
-
MultichatModel.count = count;
|
|
955
|
-
async function retrieve(openai, model, retrieveOptions, options) {
|
|
956
|
-
const response = await openai.get(`/multichat/models/${model}`, Object.assign({ query: retrieveOptions }, options));
|
|
957
|
-
return response;
|
|
958
|
-
}
|
|
959
|
-
MultichatModel.retrieve = retrieve;
|
|
960
|
-
async function retrieveValidate(openai, model, retrieveOptions, options) {
|
|
961
|
-
const response = await openai.post("/multichat/models", Object.assign({ query: retrieveOptions, body: model }, options));
|
|
962
|
-
return response;
|
|
963
|
-
}
|
|
964
|
-
MultichatModel.retrieveValidate = retrieveValidate;
|
|
965
|
-
})(MultichatModel || (MultichatModel = {}));
|
|
966
3360
|
function merge(a, b, combine) {
|
|
967
3361
|
if (a !== null && a !== undefined && b !== null && b !== undefined) {
|
|
968
3362
|
return combine ? combine(a, b) : [a, false];
|
|
@@ -983,6 +3377,6 @@ function merge(a, b, combine) {
|
|
|
983
3377
|
function mergedString(a, b) {
|
|
984
3378
|
return b === "" ? [a, false] : [a + b, true];
|
|
985
3379
|
}
|
|
986
|
-
function mergedNumber(a, b) {
|
|
987
|
-
|
|
988
|
-
}
|
|
3380
|
+
// function mergedNumber(a: number, b: number): [number, boolean] {
|
|
3381
|
+
// return b === 0 ? [a, false] : [a + b, true];
|
|
3382
|
+
// }
|