objectiveai 1.1.12 → 1.2.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (5)
  1. package/LICENSE +21 -21
  2. package/dist/index.cjs +3277 -664
  3. package/dist/index.d.ts +37576 -1201
  4. package/dist/index.js +3273 -663
  5. package/package.json +64 -61
package/dist/index.js CHANGED
@@ -1,117 +1,1434 @@
1
+ import z from "zod";
2
+ // Expressions
3
+ export const ExpressionSchema = z
4
+ .object({
5
+ $jmespath: z.string().describe("A JMESPath expression."),
6
+ })
7
+ .describe("An expression which evaluates to a value.")
8
+ .meta({ title: "Expression" });
9
+ export const JsonValueSchema = z
10
+ .lazy(() => z.union([
11
+ z.null(),
12
+ z.boolean(),
13
+ z.number(),
14
+ z.string(),
15
+ z.array(JsonValueSchema.meta({
16
+ title: "JsonValue",
17
+ recursive: true,
18
+ })),
19
+ z.record(z.string(), JsonValueSchema.meta({
20
+ title: "JsonValue",
21
+ recursive: true,
22
+ })),
23
+ ]))
24
+ .describe("A JSON value.")
25
+ .meta({ title: "JsonValue" });
26
+ export const JsonValueExpressionSchema = z
27
+ .lazy(() => z.union([
28
+ z.null(),
29
+ z.boolean(),
30
+ z.number(),
31
+ z.string(),
32
+ z.array(JsonValueExpressionSchema.meta({
33
+ title: "JsonValueExpression",
34
+ recursive: true,
35
+ })),
36
+ z.record(z.string(), JsonValueExpressionSchema.meta({
37
+ title: "JsonValueExpression",
38
+ recursive: true,
39
+ })),
40
+ ExpressionSchema.describe("An expression which evaluates to a JSON value."),
41
+ ]))
42
+ .describe(JsonValueSchema.description)
43
+ .meta({ title: "JsonValueExpression" });
44
+ // Errors
45
+ export const ObjectiveAIErrorSchema = z
46
+ .object({
47
+ code: z.uint32().describe("The status code of the error."),
48
+ message: z.any().describe("The message or details of the error."),
49
+ })
50
+ .describe("An error returned by the ObjectiveAI API.")
51
+ .meta({ title: "ObjectiveAIError" });
52
+ // Messages
53
+ export var Message;
54
+ (function (Message) {
55
+ let SimpleContent;
56
+ (function (SimpleContent) {
57
+ SimpleContent.TextSchema = z
58
+ .string()
59
+ .describe("Plain text content.")
60
+ .meta({ title: "SimpleContentText" });
61
+ SimpleContent.PartSchema = z
62
+ .object({
63
+ type: z.literal("text"),
64
+ text: z.string().describe("The text content."),
65
+ })
66
+ .describe("A simple content part.")
67
+ .meta({ title: "SimpleContentPart" });
68
+ SimpleContent.PartExpressionSchema = z
69
+ .union([
70
+ SimpleContent.PartSchema,
71
+ ExpressionSchema.describe("An expression which evaluates to a simple content part."),
72
+ ])
73
+ .describe(SimpleContent.PartSchema.description)
74
+ .meta({ title: "SimpleContentPartExpression" });
75
+ SimpleContent.PartsSchema = z
76
+ .array(SimpleContent.PartSchema)
77
+ .describe("An array of simple content parts.")
78
+ .meta({ title: "SimpleContentParts" });
79
+ SimpleContent.PartsExpressionSchema = z
80
+ .array(SimpleContent.PartExpressionSchema)
81
+ .describe(SimpleContent.PartsSchema.description)
82
+ .meta({ title: "SimpleContentPartExpressions" });
83
+ })(SimpleContent = Message.SimpleContent || (Message.SimpleContent = {}));
84
+ Message.SimpleContentSchema = z
85
+ .union([SimpleContent.TextSchema, SimpleContent.PartsSchema])
86
+ .describe("Simple content.")
87
+ .meta({ title: "SimpleContent" });
88
+ Message.SimpleContentExpressionSchema = z
89
+ .union([
90
+ SimpleContent.TextSchema,
91
+ SimpleContent.PartsExpressionSchema,
92
+ ExpressionSchema.describe("An expression which evaluates to simple content."),
93
+ ])
94
+ .describe(Message.SimpleContentSchema.description)
95
+ .meta({ title: "SimpleContentExpression" });
96
+ let RichContent;
97
+ (function (RichContent) {
98
+ RichContent.TextSchema = z
99
+ .string()
100
+ .describe("Plain text content.")
101
+ .meta({ title: "RichContentText" });
102
+ let Part;
103
+ (function (Part) {
104
+ let Text;
105
+ (function (Text) {
106
+ Text.TextSchema = z.string().describe("The text content.");
107
+ })(Text = Part.Text || (Part.Text = {}));
108
+ Part.TextSchema = z
109
+ .object({
110
+ type: z.literal("text"),
111
+ text: Text.TextSchema,
112
+ })
113
+ .describe("A text rich content part.")
114
+ .meta({ title: "TextRichContentPart" });
115
+ let ImageUrl;
116
+ (function (ImageUrl) {
117
+ ImageUrl.DetailSchema = z
118
+ .enum(["auto", "low", "high"])
119
+ .describe("Specifies the detail level of the image.");
120
+ ImageUrl.UrlSchema = z
121
+ .string()
122
+ .describe("Either a URL of the image or the base64 encoded image data.");
123
+ ImageUrl.DefinitionSchema = z
124
+ .object({
125
+ url: ImageUrl.UrlSchema,
126
+ detail: ImageUrl.DetailSchema.optional().nullable(),
127
+ })
128
+ .describe("The URL of the image and its optional detail level.");
129
+ })(ImageUrl = Part.ImageUrl || (Part.ImageUrl = {}));
130
+ Part.ImageUrlSchema = z
131
+ .object({
132
+ type: z.literal("image_url"),
133
+ image_url: ImageUrl.DefinitionSchema,
134
+ })
135
+ .describe("An image rich content part.")
136
+ .meta({ title: "ImageRichContentPart" });
137
+ let InputAudio;
138
+ (function (InputAudio) {
139
+ InputAudio.FormatSchema = z
140
+ .enum(["wav", "mp3"])
141
+ .describe("The format of the encoded audio data.");
142
+ InputAudio.DataSchema = z
143
+ .string()
144
+ .describe("Base64 encoded audio data.");
145
+ InputAudio.DefinitionSchema = z
146
+ .object({
147
+ data: InputAudio.DataSchema,
148
+ format: InputAudio.FormatSchema,
149
+ })
150
+ .describe("The audio data and its format.");
151
+ })(InputAudio = Part.InputAudio || (Part.InputAudio = {}));
152
+ Part.InputAudioSchema = z
153
+ .object({
154
+ type: z.literal("input_audio"),
155
+ input_audio: InputAudio.DefinitionSchema,
156
+ })
157
+ .describe("An audio rich content part.")
158
+ .meta({ title: "AudioRichContentPart" });
159
+ let VideoUrl;
160
+ (function (VideoUrl) {
161
+ VideoUrl.UrlSchema = z.string().describe("URL of the video.");
162
+ VideoUrl.DefinitionSchema = z.object({
163
+ url: VideoUrl.UrlSchema,
164
+ });
165
+ })(VideoUrl = Part.VideoUrl || (Part.VideoUrl = {}));
166
+ Part.VideoUrlSchema = z
167
+ .object({
168
+ type: z.enum(["video_url", "input_video"]),
169
+ video_url: VideoUrl.DefinitionSchema,
170
+ })
171
+ .describe("A video rich content part.")
172
+ .meta({ title: "VideoRichContentPart" });
173
+ let File;
174
+ (function (File) {
175
+ File.FileDataSchema = z
176
+ .string()
177
+ .describe("The base64 encoded file data, used when passing the file to the model as a string.");
178
+ File.FileIdSchema = z
179
+ .string()
180
+ .describe("The ID of an uploaded file to use as input.");
181
+ File.FilenameSchema = z
182
+ .string()
183
+ .describe("The name of the file, used when passing the file to the model as a string.");
184
+ File.FileUrlSchema = z
185
+ .string()
186
+ .describe("The URL of the file, used when passing the file to the model as a URL.");
187
+ File.DefinitionSchema = z
188
+ .object({
189
+ file_data: File.FileDataSchema.optional().nullable(),
190
+ file_id: File.FileIdSchema.optional().nullable(),
191
+ filename: File.FilenameSchema.optional().nullable(),
192
+ file_url: File.FileUrlSchema.optional().nullable(),
193
+ })
194
+ .describe("The file to be used as input, either as base64 data, an uploaded file ID, or a URL.");
195
+ })(File = Part.File || (Part.File = {}));
196
+ Part.FileSchema = z
197
+ .object({
198
+ type: z.literal("file"),
199
+ file: File.DefinitionSchema,
200
+ })
201
+ .describe("A file rich content part.")
202
+ .meta({ title: "FileRichContentPart" });
203
+ })(Part = RichContent.Part || (RichContent.Part = {}));
204
+ RichContent.PartSchema = z
205
+ .discriminatedUnion("type", [
206
+ Part.TextSchema,
207
+ Part.ImageUrlSchema,
208
+ Part.InputAudioSchema,
209
+ Part.VideoUrlSchema,
210
+ Part.FileSchema,
211
+ ])
212
+ .describe("A rich content part.")
213
+ .meta({ title: "RichContentPart" });
214
+ RichContent.PartExpressionSchema = z
215
+ .union([
216
+ RichContent.PartSchema,
217
+ ExpressionSchema.describe("An expression which evaluates to a rich content part."),
218
+ ])
219
+ .describe(RichContent.PartSchema.description)
220
+ .meta({ title: "RichContentPartExpression" });
221
+ RichContent.PartsSchema = z
222
+ .array(RichContent.PartSchema)
223
+ .describe("An array of rich content parts.")
224
+ .meta({ title: "RichContentParts" });
225
+ RichContent.PartsExpressionSchema = z
226
+ .array(RichContent.PartExpressionSchema)
227
+ .describe(RichContent.PartsSchema.description)
228
+ .meta({ title: "RichContentPartExpressions" });
229
+ })(RichContent = Message.RichContent || (Message.RichContent = {}));
230
+ Message.RichContentSchema = z
231
+ .union([RichContent.TextSchema, RichContent.PartsSchema])
232
+ .describe("Rich content.")
233
+ .meta({ title: "RichContent" });
234
+ Message.RichContentExpressionSchema = z
235
+ .union([
236
+ RichContent.TextSchema,
237
+ RichContent.PartsExpressionSchema,
238
+ ExpressionSchema.describe("An expression which evaluates to rich content."),
239
+ ])
240
+ .describe(Message.RichContentSchema.description)
241
+ .meta({ title: "RichContentExpression" });
242
+ Message.NameSchema = z
243
+ .string()
244
+ .describe("An optional name for the participant. Provides the model information to differentiate between participants of the same role.")
245
+ .meta({ title: "MessageName" });
246
+ Message.NameExpressionSchema = z
247
+ .union([
248
+ Message.NameSchema,
249
+ ExpressionSchema.describe("An expression which evaluates to a string."),
250
+ ])
251
+ .describe(Message.NameSchema.description)
252
+ .meta({ title: "MessageNameExpression" });
253
+ Message.DeveloperSchema = z
254
+ .object({
255
+ role: z.literal("developer"),
256
+ content: Message.SimpleContentSchema,
257
+ name: Message.NameSchema.optional().nullable(),
258
+ })
259
+ .describe("Developer-provided instructions that the model should follow, regardless of messages sent by the user.")
260
+ .meta({ title: "DeveloperMessage" });
261
+ Message.DeveloperExpressionSchema = z
262
+ .object({
263
+ role: z.literal("developer"),
264
+ content: Message.SimpleContentExpressionSchema,
265
+ name: Message.NameExpressionSchema.optional().nullable(),
266
+ })
267
+ .describe(Message.DeveloperSchema.description)
268
+ .meta({ title: "DeveloperMessageExpression" });
269
+ Message.SystemSchema = z
270
+ .object({
271
+ role: z.literal("system"),
272
+ content: Message.SimpleContentSchema,
273
+ name: Message.NameSchema.optional().nullable(),
274
+ })
275
+ .describe("Developer-provided instructions that the model should follow, regardless of messages sent by the user.")
276
+ .meta({ title: "SystemMessage" });
277
+ Message.SystemExpressionSchema = z
278
+ .object({
279
+ role: z.literal("system"),
280
+ content: Message.SimpleContentExpressionSchema,
281
+ name: Message.NameExpressionSchema.optional().nullable(),
282
+ })
283
+ .describe(Message.SystemSchema.description)
284
+ .meta({ title: "SystemMessageExpression" });
285
+ Message.UserSchema = z
286
+ .object({
287
+ role: z.literal("user"),
288
+ content: Message.RichContentSchema,
289
+ name: Message.NameSchema.optional().nullable(),
290
+ })
291
+ .describe("Messages sent by an end user, containing prompts or additional context information.")
292
+ .meta({ title: "UserMessage" });
293
+ Message.UserExpressionSchema = z
294
+ .object({
295
+ role: z.literal("user"),
296
+ content: Message.RichContentExpressionSchema,
297
+ name: Message.NameExpressionSchema.optional().nullable(),
298
+ })
299
+ .describe(Message.UserSchema.description)
300
+ .meta({ title: "UserMessageExpression" });
301
+ let Tool;
302
+ (function (Tool) {
303
+ Tool.ToolCallIdSchema = z
304
+ .string()
305
+ .describe("The ID of the tool call that this message is responding to.")
306
+ .meta({ title: "ToolMessageToolCallId" });
307
+ Tool.ToolCallIdExpressionSchema = z
308
+ .union([
309
+ Tool.ToolCallIdSchema,
310
+ ExpressionSchema.describe("An expression which evaluates to a string."),
311
+ ])
312
+ .describe(Tool.ToolCallIdSchema.description)
313
+ .meta({ title: "ToolMessageToolCallIdExpression" });
314
+ })(Tool = Message.Tool || (Message.Tool = {}));
315
+ Message.ToolSchema = z
316
+ .object({
317
+ role: z.literal("tool"),
318
+ content: Message.RichContentSchema,
319
+ tool_call_id: Tool.ToolCallIdSchema,
320
+ })
321
+ .describe("Messages sent by tools in response to tool calls made by the assistant.")
322
+ .meta({ title: "ToolMessage" });
323
+ Message.ToolExpressionSchema = z
324
+ .object({
325
+ role: z.literal("tool"),
326
+ content: Message.RichContentExpressionSchema,
327
+ tool_call_id: Tool.ToolCallIdExpressionSchema,
328
+ })
329
+ .describe(Message.ToolSchema.description)
330
+ .meta({ title: "ToolMessageExpression" });
331
+ let Assistant;
332
+ (function (Assistant) {
333
+ Assistant.RefusalSchema = z
334
+ .string()
335
+ .describe("The refusal message by the assistant.")
336
+ .meta({ title: "AssistantMessageRefusal" });
337
+ Assistant.RefusalExpressionSchema = z
338
+ .union([
339
+ Assistant.RefusalSchema,
340
+ ExpressionSchema.describe("An expression which evaluates to a string."),
341
+ ])
342
+ .describe(Assistant.RefusalSchema.description)
343
+ .meta({ title: "AssistantMessageRefusalExpression" });
344
+ Assistant.ReasoningSchema = z
345
+ .string()
346
+ .describe("The reasoning provided by the assistant.")
347
+ .meta({ title: "AssistantMessageReasoning" });
348
+ Assistant.ReasoningExpressionSchema = z
349
+ .union([
350
+ Assistant.ReasoningSchema,
351
+ ExpressionSchema.describe("An expression which evaluates to a string."),
352
+ ])
353
+ .describe(Assistant.ReasoningSchema.description)
354
+ .meta({ title: "AssistantMessageReasoningExpression" });
355
+ let ToolCall;
356
+ (function (ToolCall) {
357
+ ToolCall.IdSchema = z
358
+ .string()
359
+ .describe("The unique identifier for the tool call.")
360
+ .meta({ title: "AssistantMessageToolCallId" });
361
+ ToolCall.IdExpressionSchema = z
362
+ .union([
363
+ ToolCall.IdSchema,
364
+ ExpressionSchema.describe("An expression which evaluates to a string."),
365
+ ])
366
+ .describe(ToolCall.IdSchema.description)
367
+ .meta({ title: "AssistantMessageToolCallIdExpression" });
368
+ let Function;
369
+ (function (Function) {
370
+ Function.NameSchema = z
371
+ .string()
372
+ .describe("The name of the function called.")
373
+ .meta({ title: "AssistantMessageToolCallFunctionName" });
374
+ Function.NameExpressionSchema = z
375
+ .union([
376
+ Function.NameSchema,
377
+ ExpressionSchema.describe("An expression which evaluates to a string."),
378
+ ])
379
+ .describe(Function.NameSchema.description)
380
+ .meta({ title: "AssistantMessageToolCallFunctionNameExpression" });
381
+ Function.ArgumentsSchema = z
382
+ .string()
383
+ .describe("The arguments passed to the function.")
384
+ .meta({ title: "AssistantMessageToolCallFunctionArguments" });
385
+ Function.ArgumentsExpressionSchema = z
386
+ .union([
387
+ Function.ArgumentsSchema,
388
+ ExpressionSchema.describe("An expression which evaluates to a string."),
389
+ ])
390
+ .describe(Function.ArgumentsSchema.description)
391
+ .meta({
392
+ title: "AssistantMessageToolCallFunctionArgumentsExpression",
393
+ });
394
+ Function.DefinitionSchema = z
395
+ .object({
396
+ name: Function.NameSchema,
397
+ arguments: Function.ArgumentsSchema,
398
+ })
399
+ .describe("The name and arguments of the function called.")
400
+ .meta({ title: "AssistantMessageToolCallFunctionDefinition" });
401
+ Function.DefinitionExpressionSchema = z
402
+ .object({
403
+ name: Function.NameExpressionSchema,
404
+ arguments: Function.ArgumentsExpressionSchema,
405
+ })
406
+ .describe(Function.DefinitionSchema.description)
407
+ .meta({
408
+ title: "AssistantMessageToolCallFunctionDefinitionExpression",
409
+ });
410
+ })(Function = ToolCall.Function || (ToolCall.Function = {}));
411
+ ToolCall.FunctionSchema = z
412
+ .object({
413
+ type: z.literal("function"),
414
+ id: ToolCall.IdSchema,
415
+ function: Function.DefinitionSchema,
416
+ })
417
+ .describe("A function tool call made by the assistant.")
418
+ .meta({ title: "AssistantMessageToolCallFunction" });
419
+ ToolCall.FunctionExpressionSchema = z
420
+ .object({
421
+ type: z.literal("function"),
422
+ id: ToolCall.IdExpressionSchema,
423
+ function: Function.DefinitionExpressionSchema,
424
+ })
425
+ .describe(ToolCall.FunctionSchema.description)
426
+ .meta({ title: "AssistantMessageToolCallFunctionExpression" });
427
+ })(ToolCall = Assistant.ToolCall || (Assistant.ToolCall = {}));
428
+ Assistant.ToolCallSchema = z
429
+ .union([ToolCall.FunctionSchema])
430
+ .describe("A tool call made by the assistant.")
431
+ .meta({ title: "AssistantMessageToolCall" });
432
+ Assistant.ToolCallExpressionSchema = z
433
+ .union([
434
+ ToolCall.FunctionExpressionSchema,
435
+ ExpressionSchema.describe("An expression which evaluates to a tool call."),
436
+ ])
437
+ .describe(Assistant.ToolCallSchema.description)
438
+ .meta({ title: "AssistantMessageToolCallExpression" });
439
+ Assistant.ToolCallsSchema = z
440
+ .array(Assistant.ToolCallSchema)
441
+ .describe("Tool calls made by the assistant.")
442
+ .meta({ title: "AssistantMessageToolCalls" });
443
+ Assistant.ToolCallsExpressionSchema = z
444
+ .union([
445
+ z
446
+ .array(Assistant.ToolCallExpressionSchema)
447
+ .describe(Assistant.ToolCallsSchema.description),
448
+ ExpressionSchema.describe("An expression which evaluates to an array of tool calls."),
449
+ ])
450
+ .describe(Assistant.ToolCallsSchema.description)
451
+ .meta({ title: "AssistantMessageToolCallsExpression" });
452
+ })(Assistant = Message.Assistant || (Message.Assistant = {}));
453
+ Message.AssistantSchema = z
454
+ .object({
455
+ role: z.literal("assistant"),
456
+ content: Message.RichContentSchema.optional().nullable(),
457
+ name: Message.NameSchema.optional().nullable(),
458
+ refusal: Assistant.RefusalSchema.optional().nullable(),
459
+ tool_calls: Assistant.ToolCallsSchema.optional().nullable(),
460
+ reasoning: Assistant.ReasoningSchema.optional().nullable(),
461
+ })
462
+ .describe("Messages sent by the model in response to user messages.")
463
+ .meta({ title: "AssistantMessage" });
464
+ Message.AssistantExpressionSchema = z
465
+ .object({
466
+ role: z.literal("assistant"),
467
+ content: Message.RichContentExpressionSchema.optional().nullable(),
468
+ name: Message.NameExpressionSchema.optional().nullable(),
469
+ refusal: Assistant.RefusalExpressionSchema.optional().nullable(),
470
+ tool_calls: Assistant.ToolCallsExpressionSchema.optional().nullable(),
471
+ reasoning: Assistant.ReasoningExpressionSchema.optional().nullable(),
472
+ })
473
+ .describe(Message.AssistantSchema.description)
474
+ .meta({ title: "AssistantMessageExpression" });
475
+ })(Message || (Message = {}));
476
+ export const MessageSchema = z
477
+ .discriminatedUnion("role", [
478
+ Message.DeveloperSchema,
479
+ Message.SystemSchema,
480
+ Message.UserSchema,
481
+ Message.ToolSchema,
482
+ Message.AssistantSchema,
483
+ ])
484
+ .describe("A message exchanged in a chat conversation.")
485
+ .meta({ title: "Message" });
486
+ export const MessageExpressionSchema = z
487
+ .union([
488
+ z
489
+ .discriminatedUnion("role", [
490
+ Message.DeveloperExpressionSchema,
491
+ Message.SystemExpressionSchema,
492
+ Message.UserExpressionSchema,
493
+ Message.ToolExpressionSchema,
494
+ Message.AssistantExpressionSchema,
495
+ ])
496
+ .describe(MessageSchema.description),
497
+ ExpressionSchema.describe("An expression which evaluates to a message."),
498
+ ])
499
+ .describe(MessageSchema.description)
500
+ .meta({ title: "MessageExpression" });
501
+ export const MessagesSchema = z
502
+ .array(MessageSchema)
503
+ .describe("A list of messages exchanged in a chat conversation.")
504
+ .meta({ title: "Messages" });
505
+ export const MessagesExpressionSchema = z
506
+ .union([
507
+ z
508
+ .array(MessageExpressionSchema)
509
+ .describe(MessagesSchema.description)
510
+ .meta({ title: "MessageExpressions" }),
511
+ ExpressionSchema.describe("An expression which evaluates to an array of messages."),
512
+ ])
513
+ .describe(MessagesSchema.description)
514
+ .meta({ title: "MessagesExpression" });
515
+ // Tools
516
+ export var Tool;
517
+ (function (Tool) {
518
+ let Function;
519
+ (function (Function) {
520
+ Function.NameSchema = z
521
+ .string()
522
+ .describe("The name of the function.")
523
+ .meta({ title: "FunctionToolName" });
524
+ Function.NameExpressionSchema = z
525
+ .union([
526
+ Function.NameSchema,
527
+ ExpressionSchema.describe("An expression which evaluates to a string."),
528
+ ])
529
+ .describe(Function.NameSchema.description)
530
+ .meta({ title: "FunctionToolNameExpression" });
531
+ Function.DescriptionSchema = z
532
+ .string()
533
+ .describe("The description of the function.")
534
+ .meta({ title: "FunctionToolDescription" });
535
+ Function.DescriptionExpressionSchema = z
536
+ .union([
537
+ Function.DescriptionSchema,
538
+ ExpressionSchema.describe("An expression which evaluates to a string."),
539
+ ])
540
+ .describe(Function.DescriptionSchema.description)
541
+ .meta({ title: "FunctionToolDescriptionExpression" });
542
+ Function.ParametersSchema = z
543
+ .record(z.string(), JsonValueSchema)
544
+ .describe("The JSON schema defining the parameters of the function.")
545
+ .meta({ title: "FunctionToolParameters" });
546
+ Function.ParametersExpressionSchema = z
547
+ .union([
548
+ z.record(z.string(), JsonValueExpressionSchema),
549
+ ExpressionSchema.describe("An expression which evaluates to a JSON schema object."),
550
+ ])
551
+ .describe(Function.ParametersSchema.description)
552
+ .meta({ title: "FunctionToolParametersExpression" });
553
+ Function.StrictSchema = z
554
+ .boolean()
555
+ .describe("Whether to enforce strict adherence to the parameter schema.")
556
+ .meta({ title: "FunctionToolStrict" });
557
+ Function.StrictExpressionSchema = z
558
+ .union([
559
+ Function.StrictSchema,
560
+ ExpressionSchema.describe("An expression which evaluates to a boolean."),
561
+ ])
562
+ .describe(Function.StrictSchema.description)
563
+ .meta({ title: "FunctionToolStrictExpression" });
564
+ Function.DefinitionSchema = z
565
+ .object({
566
+ name: Function.NameSchema,
567
+ description: Function.DescriptionSchema.optional().nullable(),
568
+ parameters: Function.ParametersSchema.optional().nullable(),
569
+ strict: Function.StrictSchema.optional().nullable(),
570
+ })
571
+ .describe("The definition of a function tool.")
572
+ .meta({ title: "FunctionToolDefinition" });
573
+ Function.DefinitionExpressionSchema = z
574
+ .object({
575
+ name: Function.NameExpressionSchema,
576
+ description: Function.DescriptionExpressionSchema.optional().nullable(),
577
+ parameters: Function.ParametersExpressionSchema.optional().nullable(),
578
+ strict: Function.StrictExpressionSchema.optional().nullable(),
579
+ })
580
+ .describe(Function.DefinitionSchema.description)
581
+ .meta({ title: "FunctionToolDefinitionExpression" });
582
+ })(Function = Tool.Function || (Tool.Function = {}));
583
+ Tool.FunctionSchema = z
584
+ .object({
585
+ type: z.literal("function"),
586
+ function: Function.DefinitionSchema,
587
+ })
588
+ .describe("A function tool that the assistant can call.")
589
+ .meta({ title: "FunctionTool" });
590
+ Tool.FunctionExpressionSchema = z
591
+ .object({
592
+ type: z.literal("function"),
593
+ function: Function.DefinitionExpressionSchema,
594
+ })
595
+ .describe(Tool.FunctionSchema.description)
596
+ .meta({ title: "FunctionToolExpression" });
597
+ })(Tool || (Tool = {}));
598
+ export const ToolSchema = z
599
+ .union([Tool.FunctionSchema])
600
+ .describe("A tool that the assistant can call.")
601
+ .meta({ title: "Tool" });
602
+ export const ToolExpressionSchema = z
603
+ .union([
604
+ Tool.FunctionExpressionSchema,
605
+ ExpressionSchema.describe("An expression which evaluates to a tool."),
606
+ ])
607
+ .describe(ToolSchema.description)
608
+ .meta({ title: "ToolExpression" });
609
+ export const ToolsSchema = z
610
+ .array(ToolSchema)
611
+ .describe("A list of tools that the assistant can call.")
612
+ .meta({ title: "Tools" });
613
+ export const ToolsExpressionSchema = z
614
+ .union([
615
+ z
616
+ .array(ToolExpressionSchema)
617
+ .describe(ToolsSchema.description)
618
+ .meta({ title: "ToolExpressions" }),
619
+ ExpressionSchema.describe("An expression which evaluates to an array of tools."),
620
+ ])
621
+ .describe(ToolsSchema.description)
622
+ .meta({ title: "ToolsExpression" });
623
+ // Vector Responses
624
+ export const VectorResponseSchema = Message.RichContentSchema.describe("A possible assistant response. The LLMs in the Ensemble may vote for this option.").meta({ title: "VectorResponse" });
625
+ export const VectorResponseExpressionSchema = z
626
+ .union([
627
+ VectorResponseSchema,
628
+ ExpressionSchema.describe("An expression which evaluates to a possible assistant response."),
629
+ ])
630
+ .describe(VectorResponseSchema.description)
631
+ .meta({ title: "VectorResponseExpression" });
632
+ export const VectorResponsesSchema = z
633
+ .array(VectorResponseSchema)
634
+ .describe("A list of possible assistant responses which the LLMs in the Ensemble will vote on. The output scores will be of the same length, each corresponding to one response. The winner is the response with the highest score.")
635
+ .meta({ title: "VectorResponses" });
636
+ export const VectorResponsesExpressionSchema = z
637
+ .union([
638
+ z
639
+ .array(VectorResponseExpressionSchema)
640
+ .describe(VectorResponsesSchema.description)
641
+ .meta({ title: "VectorResponseExpressions" }),
642
+ ExpressionSchema.describe("An expression which evaluates to an array of possible assistant responses."),
643
+ ])
644
+ .describe(VectorResponsesSchema.description)
645
+ .meta({ title: "VectorResponsesExpression" });
646
+ // Ensemble LLM
647
+ export var EnsembleLlm;
648
+ (function (EnsembleLlm) {
649
+ EnsembleLlm.OutputModeSchema = z
650
+ .enum(["instruction", "json_schema", "tool_call"])
651
+ .describe('For Vector Completions only, specifies the LLM\'s voting output mode. For "instruction", the assistant is instructed to output a key. For "json_schema", the assistant is constrained to output a valid key using a JSON schema. For "tool_call", the assistant is instructed to output a tool call to select the key.');
652
+ EnsembleLlm.StopSchema = z
653
+ .union([
654
+ z
655
+ .string()
656
+ .describe("Generation will stop when this string is generated.")
657
+ .meta({ title: "StopString" }),
658
+ z
659
+ .array(z.string().meta({ title: "StopString" }))
660
+ .describe("Generation will stop when any of these strings are generated.")
661
+ .meta({ title: "StopStrings" }),
662
+ ])
663
+ .describe("The assistant will stop when any of the provided strings are generated.")
664
+ .meta({ title: "Stop" });
665
+ let Provider;
666
+ (function (Provider) {
667
+ Provider.QuantizationSchema = z
668
+ .enum([
669
+ "int4",
670
+ "int8",
671
+ "fp4",
672
+ "fp6",
673
+ "fp8",
674
+ "fp16",
675
+ "bf16",
676
+ "fp32",
677
+ "unknown",
678
+ ])
679
+ .describe("An LLM quantization.")
680
+ .meta({ title: "ProviderQuantization" });
681
+ })(Provider = EnsembleLlm.Provider || (EnsembleLlm.Provider = {}));
682
+ EnsembleLlm.ProviderSchema = z
683
+ .object({
684
+ allow_fallbacks: z
685
+ .boolean()
686
+ .optional()
687
+ .nullable()
688
+ .describe("Whether to allow fallback providers if the preferred provider is unavailable."),
689
+ require_parameters: z
690
+ .boolean()
691
+ .optional()
692
+ .nullable()
693
+ .describe("Whether to require that the provider supports all specified parameters."),
694
+ order: z
695
+ .array(z.string().meta({ title: "ProviderName" }))
696
+ .optional()
697
+ .nullable()
698
+ .describe("An ordered list of provider names to use when selecting a provider for this model."),
699
+ only: z
700
+ .array(z.string().meta({ title: "ProviderName" }))
701
+ .optional()
702
+ .nullable()
703
+ .describe("A list of provider names to restrict selection to when selecting a provider for this model."),
704
+ ignore: z
705
+ .array(z.string().meta({ title: "ProviderName" }))
706
+ .optional()
707
+ .nullable()
708
+ .describe("A list of provider names to ignore when selecting a provider for this model."),
709
+ quantizations: z
710
+ .array(Provider.QuantizationSchema)
711
+ .optional()
712
+ .nullable()
713
+ .describe("Specifies the quantizations to allow when selecting providers for this model."),
714
+ })
715
+ .describe("Options for selecting the upstream provider of this model.");
716
+ let Reasoning;
717
+ (function (Reasoning) {
718
+ Reasoning.EffortSchema = z
719
+ .enum(["none", "minimal", "low", "medium", "high", "xhigh"])
720
+ .describe("Constrains effort on reasoning for supported reasoning models. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.")
721
+ .meta({ title: "ReasoningEffort" });
722
+ Reasoning.SummaryVerbositySchema = z
723
+ .enum(["auto", "concise", "detailed"])
724
+ .describe("Controls the verbosity of the reasoning summary for supported reasoning models.")
725
+ .meta({ title: "ReasoningSummaryVerbosity" });
726
+ })(Reasoning = EnsembleLlm.Reasoning || (EnsembleLlm.Reasoning = {}));
727
+ EnsembleLlm.ReasoningSchema = z
728
+ .object({
729
+ enabled: z
730
+ .boolean()
731
+ .optional()
732
+ .nullable()
733
+ .describe("Enables or disables reasoning for supported models."),
734
+ max_tokens: z
735
+ .int()
736
+ .min(0)
737
+ .max(2147483647)
738
+ .optional()
739
+ .nullable()
740
+ .describe("The maximum number of tokens to use for reasoning in a response."),
741
+ effort: Reasoning.EffortSchema.optional().nullable(),
742
+ summary_verbosity: Reasoning.SummaryVerbositySchema.optional().nullable(),
743
+ })
744
+ .optional()
745
+ .nullable()
746
+ .describe("Options for controlling reasoning behavior of the model.");
747
+ EnsembleLlm.VerbositySchema = z
748
+ .enum(["low", "medium", "high"])
749
+ .describe("Controls the verbosity and length of the model response. Lower values produce more concise responses, while higher values produce more detailed and comprehensive responses.");
750
+ EnsembleLlm.ListItemSchema = z.object({
751
+ id: z.string().describe("The unique identifier for the Ensemble LLM."),
752
+ });
753
+ async function list(openai, options) {
754
+ const response = await openai.get("/ensemble_llms", options);
755
+ return response;
756
+ }
757
+ EnsembleLlm.list = list;
758
+ EnsembleLlm.RetrieveItemSchema = z.lazy(() => EnsembleLlmSchema.extend({
759
+ created: z
760
+ .uint32()
761
+ .describe("The Unix timestamp (in seconds) when the Ensemble LLM was created."),
762
+ }));
763
+ async function retrieve(openai, id, options) {
764
+ const response = await openai.get(`/ensemble_llms/${id}`, options);
765
+ return response;
766
+ }
767
+ EnsembleLlm.retrieve = retrieve;
768
+ EnsembleLlm.HistoricalUsageSchema = z.object({
769
+ requests: z
770
+ .uint32()
771
+ .describe("The total number of requests made to this Ensemble LLM."),
772
+ completion_tokens: z
773
+ .uint32()
774
+ .describe("The total number of completion tokens generated by this Ensemble LLM."),
775
+ prompt_tokens: z
776
+ .uint32()
777
+ .describe("The total number of prompt tokens sent to this Ensemble LLM."),
778
+ total_cost: z
779
+ .number()
780
+ .describe("The total cost incurred by using this Ensemble LLM."),
781
+ });
782
+ async function retrieveUsage(openai, id, options) {
783
+ const response = await openai.get(`/ensemble_llms/${id}/usage`, options);
784
+ return response;
785
+ }
786
+ EnsembleLlm.retrieveUsage = retrieveUsage;
787
+ })(EnsembleLlm || (EnsembleLlm = {}));
788
+ export const EnsembleLlmBaseSchema = z
789
+ .object({
790
+ model: z.string().describe("The full ID of the LLM to use."),
791
+ output_mode: EnsembleLlm.OutputModeSchema,
792
+ synthetic_reasoning: z
793
+ .boolean()
794
+ .optional()
795
+ .nullable()
796
+ .describe("For Vector Completions only, whether to use synthetic reasoning prior to voting. Works for any LLM, even those that do not have native reasoning capabilities."),
797
+ top_logprobs: z
798
+ .int()
799
+ .min(0)
800
+ .max(20)
801
+ .optional()
802
+ .nullable()
803
+ .describe("For Vector Completions only, whether to use logprobs to make the vote probabilistic. This means that the LLM can vote for multiple keys based on their logprobabilities. Allows LLMs to express native uncertainty when voting."),
804
+ prefix_messages: MessagesSchema.optional()
805
+ .nullable()
806
+ .describe(`${MessagesSchema.description} These will be prepended to every prompt sent to this LLM. Useful for setting context or influencing behavior.`),
807
+ suffix_messages: MessagesSchema.optional()
808
+ .nullable()
809
+ .describe(`${MessagesSchema.description} These will be appended to every prompt sent to this LLM. Useful for setting context or influencing behavior.`),
810
+ frequency_penalty: z
811
+ .number()
812
+ .min(-2.0)
813
+ .max(2.0)
814
+ .optional()
815
+ .nullable()
816
+ .describe("This setting aims to control the repetition of tokens based on how often they appear in the input. It tries to use less frequently those tokens that appear more in the input, proportional to how frequently they occur. Token penalty scales with the number of occurrences. Negative values will encourage token reuse."),
817
+ logit_bias: z
818
+ .record(z.string(), z.int().min(-100).max(100))
819
+ .optional()
820
+ .nullable()
821
+ .describe("Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token."),
822
+ max_completion_tokens: z
823
+ .int()
824
+ .min(0)
825
+ .max(2147483647)
826
+ .optional()
827
+ .nullable()
828
+ .describe("An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens."),
829
+ presence_penalty: z
830
+ .number()
831
+ .min(-2.0)
832
+ .max(2.0)
833
+ .optional()
834
+ .nullable()
835
+ .describe("This setting aims to control the presence of tokens in the output. It tries to encourage the model to use tokens that are less present in the input, proportional to their presence in the input. Token presence scales with the number of occurrences. Negative values will encourage more diverse token usage."),
836
+ stop: EnsembleLlm.StopSchema.optional().nullable(),
837
+ temperature: z
838
+ .number()
839
+ .min(0.0)
840
+ .max(2.0)
841
+ .optional()
842
+ .nullable()
843
+ .describe("This setting influences the variety in the model’s responses. Lower values lead to more predictable and typical responses, while higher values encourage more diverse and less common responses. At 0, the model always gives the same response for a given input."),
844
+ top_p: z
845
+ .number()
846
+ .min(0.0)
847
+ .max(1.0)
848
+ .optional()
849
+ .nullable()
850
+ .describe("This setting limits the model’s choices to a percentage of likely tokens: only the top tokens whose probabilities add up to P. A lower value makes the model’s responses more predictable, while the default setting allows for a full range of token choices. Think of it like a dynamic Top-K."),
851
+ max_tokens: z
852
+ .int()
853
+ .min(0)
854
+ .max(2147483647)
855
+ .optional()
856
+ .nullable()
857
+ .describe("This sets the upper limit for the number of tokens the model can generate in response. It won’t produce more than this limit. The maximum value is the context length minus the prompt length."),
858
+ min_p: z
859
+ .number()
860
+ .min(0.0)
861
+ .max(1.0)
862
+ .optional()
863
+ .nullable()
864
+ .describe("Represents the minimum probability for a token to be considered, relative to the probability of the most likely token. (The value changes depending on the confidence level of the most probable token.) If your Min-P is set to 0.1, that means it will only allow for tokens that are at least 1/10th as probable as the best possible option."),
865
+ provider: EnsembleLlm.ProviderSchema.optional().nullable(),
866
+ reasoning: EnsembleLlm.ReasoningSchema.optional().nullable(),
867
+ repetition_penalty: z
868
+ .number()
869
+ .min(0.0)
870
+ .max(2.0)
871
+ .optional()
872
+ .nullable()
873
+ .describe("Helps to reduce the repetition of tokens from the input. A higher value makes the model less likely to repeat tokens, but too high a value can make the output less coherent (often with run-on sentences that lack small words). Token penalty scales based on original token’s probability."),
874
+ top_a: z
875
+ .number()
876
+ .min(0.0)
877
+ .max(1.0)
878
+ .optional()
879
+ .nullable()
880
+ .describe("Consider only the top tokens with “sufficiently high” probabilities based on the probability of the most likely token. Think of it like a dynamic Top-P. A lower Top-A value focuses the choices based on the highest probability token but with a narrower scope. A higher Top-A value does not necessarily affect the creativity of the output, but rather refines the filtering process based on the maximum probability."),
881
+ top_k: z
882
+ .int()
883
+ .min(0)
884
+ .max(2147483647)
885
+ .optional()
886
+ .nullable()
887
+ .describe("This limits the model’s choice of tokens at each step, making it choose from a smaller set. A value of 1 means the model will always pick the most likely next token, leading to predictable results. By default this setting is disabled, making the model to consider all choices."),
888
+ verbosity: EnsembleLlm.VerbositySchema.optional().nullable(),
889
+ })
890
+ .describe("An LLM to be used within an Ensemble or standalone with Chat Completions.");
891
+ export const EnsembleLlmBaseWithFallbacksAndCountSchema = EnsembleLlmBaseSchema.extend({
892
+ count: z
893
+ .uint32()
894
+ .min(1)
895
+ .optional()
896
+ .nullable()
897
+ .describe("A count greater than one effectively means that there are multiple instances of this LLM in an ensemble."),
898
+ fallbacks: z
899
+ .array(EnsembleLlmBaseSchema)
900
+ .optional()
901
+ .nullable()
902
+ .describe("A list of fallback LLMs to use if the primary LLM fails."),
903
+ }).describe("An LLM to be used within an Ensemble, including optional fallbacks and count.");
904
+ export const EnsembleLlmSchema = EnsembleLlmBaseSchema.extend({
905
+ id: z.string().describe("The unique identifier for the Ensemble LLM."),
906
+ }).describe("An LLM to be used within an Ensemble or standalone with Chat Completions, including its unique identifier.");
907
+ export const EnsembleLlmWithFallbacksAndCountSchema = EnsembleLlmSchema.extend({
908
+ count: EnsembleLlmBaseWithFallbacksAndCountSchema.shape.count,
909
+ fallbacks: z
910
+ .array(EnsembleLlmSchema)
911
+ .optional()
912
+ .nullable()
913
+ .describe(EnsembleLlmBaseWithFallbacksAndCountSchema.shape.fallbacks.description),
914
+ }).describe("An LLM to be used within an Ensemble, including its unique identifier, optional fallbacks, and count.");
915
+ // Ensemble
916
+ export const EnsembleBaseSchema = z
917
+ .object({
918
+ llms: z
919
+ .array(EnsembleLlmBaseWithFallbacksAndCountSchema)
920
+ .describe("The list of LLMs that make up the ensemble."),
921
+ })
922
+ .describe("An ensemble of LLMs.");
923
+ export const EnsembleSchema = z
924
+ .object({
925
+ id: z.string().describe("The unique identifier for the Ensemble."),
926
+ llms: z
927
+ .array(EnsembleLlmWithFallbacksAndCountSchema)
928
+ .describe(EnsembleBaseSchema.shape.llms.description),
929
+ })
930
+ .describe("An ensemble of LLMs with a unique identifier.");
931
+ export var Ensemble;
932
+ (function (Ensemble) {
933
+ Ensemble.ListItemSchema = z.object({
934
+ id: z.string().describe("The unique identifier for the Ensemble."),
935
+ });
936
+ async function list(openai, options) {
937
+ const response = await openai.get("/ensembles", options);
938
+ return response;
939
+ }
940
+ Ensemble.list = list;
941
+ Ensemble.RetrieveItemSchema = EnsembleSchema.extend({
942
+ created: z
943
+ .uint32()
944
+ .describe("The Unix timestamp (in seconds) when the Ensemble was created."),
945
+ });
946
+ async function retrieve(openai, id, options) {
947
+ const response = await openai.get(`/ensembles/${id}`, options);
948
+ return response;
949
+ }
950
+ Ensemble.retrieve = retrieve;
951
+ Ensemble.HistoricalUsageSchema = z.object({
952
+ requests: z
953
+ .uint32()
954
+ .describe("The total number of requests made to this Ensemble."),
955
+ completion_tokens: z
956
+ .uint32()
957
+ .describe("The total number of completion tokens generated by this Ensemble."),
958
+ prompt_tokens: z
959
+ .uint32()
960
+ .describe("The total number of prompt tokens sent to this Ensemble."),
961
+ total_cost: z
962
+ .number()
963
+ .describe("The total cost incurred by using this Ensemble."),
964
+ });
965
+ async function retrieveUsage(openai, id, options) {
966
+ const response = await openai.get(`/ensembles/${id}/usage`, options);
967
+ return response;
968
+ }
969
+ Ensemble.retrieveUsage = retrieveUsage;
970
+ })(Ensemble || (Ensemble = {}));
971
+ // Chat Completions
1
972
  export var Chat;
2
973
  (function (Chat) {
3
974
  let Completions;
4
975
  (function (Completions) {
976
+ let Request;
977
+ (function (Request) {
978
+ let Provider;
979
+ (function (Provider) {
980
+ Provider.DataCollectionSchema = z
981
+ .enum(["allow", "deny"])
982
+ .describe("Specifies whether to allow providers which collect data.");
983
+ Provider.SortSchema = z
984
+ .enum(["price", "throughput", "latency"])
985
+ .describe("Specifies the sorting strategy for provider selection.");
986
+ Provider.MaxPriceSchema = z.object({
987
+ prompt: z
988
+ .number()
989
+ .optional()
990
+ .nullable()
991
+ .describe("Maximum price for prompt tokens."),
992
+ completion: z
993
+ .number()
994
+ .optional()
995
+ .nullable()
996
+ .describe("Maximum price for completion tokens."),
997
+ image: z
998
+ .number()
999
+ .optional()
1000
+ .nullable()
1001
+ .describe("Maximum price for image generation."),
1002
+ audio: z
1003
+ .number()
1004
+ .optional()
1005
+ .nullable()
1006
+ .describe("Maximum price for audio generation."),
1007
+ request: z
1008
+ .number()
1009
+ .optional()
1010
+ .nullable()
1011
+ .describe("Maximum price per request."),
1012
+ });
1013
+ })(Provider = Request.Provider || (Request.Provider = {}));
1014
+ Request.ProviderSchema = z
1015
+ .object({
1016
+ data_collection: Provider.DataCollectionSchema.optional().nullable(),
1017
+ zdr: z
1018
+ .boolean()
1019
+ .optional()
1020
+ .nullable()
1021
+ .describe("Whether to enforce Zero Data Retention (ZDR) policies when selecting providers."),
1022
+ sort: Provider.SortSchema.optional().nullable(),
1023
+ max_price: Provider.MaxPriceSchema.optional().nullable(),
1024
+ preferred_min_throughput: z
1025
+ .number()
1026
+ .optional()
1027
+ .nullable()
1028
+ .describe("Preferred minimum throughput for the provider."),
1029
+ preferred_max_latency: z
1030
+ .number()
1031
+ .optional()
1032
+ .nullable()
1033
+ .describe("Preferred maximum latency for the provider."),
1034
+ min_throughput: z
1035
+ .number()
1036
+ .optional()
1037
+ .nullable()
1038
+ .describe("Minimum throughput for the provider."),
1039
+ max_latency: z
1040
+ .number()
1041
+ .optional()
1042
+ .nullable()
1043
+ .describe("Maximum latency for the provider."),
1044
+ })
1045
+ .describe("Options for selecting the upstream provider of this completion.");
1046
+ Request.ModelSchema = z
1047
+ .union([z.string(), EnsembleLlmBaseSchema])
1048
+ .describe("The Ensemble LLM to use for this completion. May be a unique ID or an inline definition.");
1049
+ let ResponseFormat;
1050
+ (function (ResponseFormat) {
1051
+ ResponseFormat.TextSchema = z
1052
+ .object({
1053
+ type: z.literal("text"),
1054
+ })
1055
+ .describe("The response will be arbitrary text.")
1056
+ .meta({ title: "ResponseFormatText" });
1057
+ ResponseFormat.JsonObjectSchema = z
1058
+ .object({
1059
+ type: z.literal("json_object"),
1060
+ })
1061
+ .describe("The response will be a JSON object.")
1062
+ .meta({ title: "ResponseFormatJsonObject" });
1063
+ let JsonSchema;
1064
+ (function (JsonSchema) {
1065
+ JsonSchema.JsonSchemaSchema = z
1066
+ .object({
1067
+ name: z.string().describe("The name of the JSON schema."),
1068
+ description: z
1069
+ .string()
1070
+ .optional()
1071
+ .nullable()
1072
+ .describe("The description of the JSON schema."),
1073
+ schema: z
1074
+ .any()
1075
+ .optional()
1076
+ .describe("The JSON schema definition."),
1077
+ strict: z
1078
+ .boolean()
1079
+ .optional()
1080
+ .nullable()
1081
+ .describe("Whether to enforce strict adherence to the JSON schema."),
1082
+ })
1083
+ .describe("A JSON schema definition for constraining model output.")
1084
+ .meta({ title: "ResponseFormatJsonSchemaJsonSchema" });
1085
+ })(JsonSchema = ResponseFormat.JsonSchema || (ResponseFormat.JsonSchema = {}));
1086
+ ResponseFormat.JsonSchemaSchema = z
1087
+ .object({
1088
+ type: z.literal("json_schema"),
1089
+ json_schema: JsonSchema.JsonSchemaSchema,
1090
+ })
1091
+ .describe("The response will conform to the provided JSON schema.")
1092
+ .meta({ title: "ResponseFormatJsonSchema" });
1093
+ ResponseFormat.GrammarSchema = z
1094
+ .object({
1095
+ type: z.literal("grammar"),
1096
+ grammar: z
1097
+ .string()
1098
+ .describe("The grammar definition to constrain the response."),
1099
+ })
1100
+ .describe("The response will conform to the provided grammar definition.")
1101
+ .meta({ title: "ResponseFormatGrammar" });
1102
+ ResponseFormat.PythonSchema = z
1103
+ .object({
1104
+ type: z.literal("python"),
1105
+ })
1106
+ .describe("The response will be Python code.")
1107
+ .meta({ title: "ResponseFormatPython" });
1108
+ })(ResponseFormat = Request.ResponseFormat || (Request.ResponseFormat = {}));
1109
+ Request.ResponseFormatSchema = z
1110
+ .union([
1111
+ ResponseFormat.TextSchema,
1112
+ ResponseFormat.JsonObjectSchema,
1113
+ ResponseFormat.JsonSchemaSchema,
1114
+ ResponseFormat.GrammarSchema,
1115
+ ResponseFormat.PythonSchema,
1116
+ ])
1117
+ .describe("The desired format of the model's response.")
1118
+ .meta({ title: "ResponseFormat" });
1119
+ let ToolChoice;
1120
+ (function (ToolChoice) {
1121
+ let Function;
1122
+ (function (Function) {
1123
+ Function.FunctionSchema = z
1124
+ .object({
1125
+ name: z
1126
+ .string()
1127
+ .describe("The name of the function the assistant will call."),
1128
+ })
1129
+ .meta({ title: "ToolChoiceFunctionFunction" });
1130
+ })(Function = ToolChoice.Function || (ToolChoice.Function = {}));
1131
+ ToolChoice.FunctionSchema = z
1132
+ .object({
1133
+ type: z.literal("function"),
1134
+ function: Function.FunctionSchema,
1135
+ })
1136
+ .describe("Specify a function for the assistant to call.")
1137
+ .meta({ title: "ToolChoiceFunction" });
1138
+ })(ToolChoice = Request.ToolChoice || (Request.ToolChoice = {}));
1139
+ Request.ToolChoiceSchema = z
1140
+ .union([
1141
+ z.literal("none"),
1142
+ z.literal("auto"),
1143
+ z.literal("required"),
1144
+ ToolChoice.FunctionSchema,
1145
+ ])
1146
+ .describe("Specifies tool call behavior for the assistant.")
1147
+ .meta({ title: "ToolChoice" });
1148
+ let Prediction;
1149
+ (function (Prediction) {
1150
+ let Content;
1151
+ (function (Content) {
1152
+ Content.PartSchema = z
1153
+ .object({
1154
+ type: z.literal("text"),
1155
+ text: z.string(),
1156
+ })
1157
+ .describe("A part of the predicted content.")
1158
+ .meta({ title: "PredictionContentPart" });
1159
+ })(Content = Prediction.Content || (Prediction.Content = {}));
1160
+ Prediction.ContentSchema = z.union([
1161
+ z.string().meta({ title: "PredictionContentText" }),
1162
+ z.array(Content.PartSchema).meta({ title: "PredictionContentParts" }),
1163
+ ]);
1164
+ })(Prediction = Request.Prediction || (Request.Prediction = {}));
1165
+ Request.PredictionSchema = z
1166
+ .object({
1167
+ type: z.literal("content"),
1168
+ content: Prediction.ContentSchema,
1169
+ })
1170
+ .describe("Configuration for a Predicted Output, which can greatly improve response times when large parts of the model response are known ahead of time. This is most common when you are regenerating a file with only minor changes to most of the content.");
1171
+ Request.SeedSchema = z
1172
+ .bigint()
1173
+ .describe("If specified, upstream systems will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result.");
1174
+ Request.BackoffMaxElapsedTimeSchema = z
1175
+ .uint32()
1176
+ .describe("The maximum total time in milliseconds to spend on retries when a transient error occurs.");
1177
+ Request.FirstChunkTimeoutSchema = z
1178
+ .uint32()
1179
+ .describe("The maximum time in milliseconds to wait for the first chunk of a streaming response.");
1180
+ Request.OtherChunkTimeoutSchema = z
1181
+ .uint32()
1182
+ .describe("The maximum time in milliseconds to wait between subsequent chunks of a streaming response.");
1183
+ Request.ChatCompletionCreateParamsBaseSchema = z
1184
+ .object({
1185
+ messages: MessagesSchema,
1186
+ provider: Request.ProviderSchema.optional().nullable(),
1187
+ model: Request.ModelSchema,
1188
+ models: z
1189
+ .array(Request.ModelSchema)
1190
+ .optional()
1191
+ .nullable()
1192
+ .describe("Fallback Ensemble LLMs to use if the primary Ensemble LLM fails."),
1193
+ top_logprobs: z
1194
+ .int()
1195
+ .min(0)
1196
+ .max(20)
1197
+ .optional()
1198
+ .nullable()
1199
+ .describe("An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability."),
1200
+ response_format: Request.ResponseFormatSchema.optional().nullable(),
1201
+ seed: Request.SeedSchema.optional().nullable(),
1202
+ tool_choice: Request.ToolChoiceSchema.optional().nullable(),
1203
+ tools: ToolsSchema,
1204
+ parallel_tool_calls: z
1205
+ .boolean()
1206
+ .optional()
1207
+ .nullable()
1208
+ .describe("Whether to allow the model to make multiple tool calls in parallel."),
1209
+ prediction: Request.PredictionSchema.optional().nullable(),
1210
+ backoff_max_elapsed_time: Request.BackoffMaxElapsedTimeSchema.optional().nullable(),
1211
+ first_chunk_timeout: Request.FirstChunkTimeoutSchema.optional().nullable(),
1212
+ other_chunk_timeout: Request.OtherChunkTimeoutSchema.optional().nullable(),
1213
+ })
1214
+ .describe("Base parameters for creating a chat completion.");
1215
+ Request.StreamTrueSchema = z
1216
+ .literal(true)
1217
+ .describe("Whether to stream the response as a series of chunks.");
1218
+ Request.ChatCompletionCreateParamsStreamingSchema = Request.ChatCompletionCreateParamsBaseSchema.extend({
1219
+ stream: Request.StreamTrueSchema,
1220
+ })
1221
+ .describe("Parameters for creating a streaming chat completion.")
1222
+ .meta({ title: "ChatCompletionCreateParamsStreaming" });
1223
+ Request.StreamFalseSchema = z
1224
+ .literal(false)
1225
+ .describe("Whether to stream the response as a series of chunks.");
1226
+ Request.ChatCompletionCreateParamsNonStreamingSchema = Request.ChatCompletionCreateParamsBaseSchema.extend({
1227
+ stream: Request.StreamFalseSchema.optional().nullable(),
1228
+ })
1229
+ .describe("Parameters for creating a unary chat completion.")
1230
+ .meta({ title: "ChatCompletionCreateParamsNonStreaming" });
1231
+ Request.ChatCompletionCreateParamsSchema = z
1232
+ .union([
1233
+ Request.ChatCompletionCreateParamsStreamingSchema,
1234
+ Request.ChatCompletionCreateParamsNonStreamingSchema,
1235
+ ])
1236
+ .describe("Parameters for creating a chat completion.")
1237
+ .meta({ title: "ChatCompletionCreateParams" });
1238
+ })(Request = Completions.Request || (Completions.Request = {}));
5
1239
  let Response;
6
1240
  (function (Response) {
7
- let Streaming;
8
- (function (Streaming) {
9
- let ChatCompletionChunk;
10
- (function (ChatCompletionChunk) {
11
- function merged(a, b) {
12
- const id = a.id;
13
- const [choices, choicesChanged] = Choice.mergedList(a.choices, b.choices);
14
- const created = a.created;
15
- const model = a.model;
16
- const object = a.object;
17
- const [service_tier, service_tierChanged] = merge(a.service_tier, b.service_tier);
18
- const [system_fingerprint, system_fingerprintChanged] = merge(a.system_fingerprint, b.system_fingerprint);
19
- const [usage, usageChanged] = merge(a.usage, b.usage, Usage.merged);
20
- const [provider, providerChanged] = merge(a.provider, b.provider);
21
- if (choicesChanged ||
22
- service_tierChanged ||
23
- system_fingerprintChanged ||
24
- usageChanged ||
25
- providerChanged) {
26
- return [
27
- Object.assign(Object.assign(Object.assign(Object.assign({ id,
28
- choices,
29
- created,
30
- model,
31
- object }, (service_tier !== undefined ? { service_tier } : {})), (system_fingerprint !== undefined
32
- ? { system_fingerprint }
33
- : {})), (usage !== undefined ? { usage } : {})), (provider !== undefined ? { provider } : {})),
34
- true,
35
- ];
36
- }
37
- else {
38
- return [a, false];
39
- }
1241
+ Response.FinishReasonSchema = z
1242
+ .enum(["stop", "length", "tool_calls", "content_filter", "error"])
1243
+ .describe("The reason why the assistant ceased to generate further tokens.");
1244
+ let Usage;
1245
+ (function (Usage) {
1246
+ Usage.CompletionTokensDetailsSchema = z
1247
+ .object({
1248
+ accepted_prediction_tokens: z
1249
+ .uint32()
1250
+ .optional()
1251
+ .describe("The number of accepted prediction tokens in the completion."),
1252
+ audio_tokens: z
1253
+ .uint32()
1254
+ .optional()
1255
+ .describe("The number of generated audio tokens in the completion."),
1256
+ reasoning_tokens: z
1257
+ .uint32()
1258
+ .optional()
1259
+ .describe("The number of generated reasoning tokens in the completion."),
1260
+ rejected_prediction_tokens: z
1261
+ .uint32()
1262
+ .optional()
1263
+ .describe("The number of rejected prediction tokens in the completion."),
1264
+ })
1265
+ .describe("Detailed breakdown of generated completion tokens.");
1266
+ Usage.PromptTokensDetailsSchema = z
1267
+ .object({
1268
+ audio_tokens: z
1269
+ .uint32()
1270
+ .optional()
1271
+ .describe("The number of audio tokens in the prompt."),
1272
+ cached_tokens: z
1273
+ .uint32()
1274
+ .optional()
1275
+ .describe("The number of cached tokens in the prompt."),
1276
+ cache_write_tokens: z
1277
+ .uint32()
1278
+ .optional()
1279
+ .describe("The number of prompt tokens written to cache."),
1280
+ video_tokens: z
1281
+ .uint32()
1282
+ .optional()
1283
+ .describe("The number of video tokens in the prompt."),
1284
+ })
1285
+ .describe("Detailed breakdown of prompt tokens.");
1286
+ Usage.CostDetailsSchema = z
1287
+ .object({
1288
+ upstream_inference_cost: z
1289
+ .number()
1290
+ .optional()
1291
+ .describe("The cost incurred upstream."),
1292
+ upstream_upstream_inference_cost: z
1293
+ .number()
1294
+ .optional()
1295
+ .describe("The cost incurred by upstream's upstream."),
1296
+ })
1297
+ .describe("Detailed breakdown of upstream costs incurred.");
1298
+ })(Usage = Response.Usage || (Response.Usage = {}));
1299
+ Response.UsageSchema = z
1300
+ .object({
1301
+ completion_tokens: z
1302
+ .uint32()
1303
+ .describe("The number of tokens generated in the completion."),
1304
+ prompt_tokens: z
1305
+ .uint32()
1306
+ .describe("The number of tokens in the prompt."),
1307
+ total_tokens: z
1308
+ .uint32()
1309
+ .describe("The total number of tokens used in the prompt or generated in the completion."),
1310
+ completion_tokens_details: Usage.CompletionTokensDetailsSchema.optional(),
1311
+ prompt_tokens_details: Usage.PromptTokensDetailsSchema.optional(),
1312
+ cost: z
1313
+ .number()
1314
+ .describe("The cost in credits incurred for this completion."),
1315
+ cost_details: Usage.CostDetailsSchema.optional(),
1316
+ total_cost: z
1317
+ .number()
1318
+ .describe("The total cost in credits incurred including upstream costs."),
1319
+ cost_multiplier: z
1320
+ .number()
1321
+ .describe("The cost multiplier applied to upstream costs for computing ObjectiveAI costs."),
1322
+ is_byok: z
1323
+ .boolean()
1324
+ .describe("Whether the completion used a BYOK (Bring Your Own Key) API Key."),
1325
+ })
1326
+ .describe("Token and cost usage statistics for the completion.");
1327
+ let Logprobs;
1328
+ (function (Logprobs) {
1329
+ function merged(a, b) {
1330
+ const [content, contentChanged] = merge(a.content, b.content, Logprob.mergedList);
1331
+ const [refusal, refusalChanged] = merge(a.refusal, b.refusal, Logprob.mergedList);
1332
+ if (contentChanged || refusalChanged) {
1333
+ return [{ content, refusal }, true];
40
1334
  }
41
- ChatCompletionChunk.merged = merged;
42
- })(ChatCompletionChunk = Streaming.ChatCompletionChunk || (Streaming.ChatCompletionChunk = {}));
43
- let Choice;
44
- (function (Choice) {
45
- function merged(a, b) {
46
- const [delta, deltaChanged] = merge(a.delta, b.delta, Delta.merged);
47
- const [finish_reason, finish_reasonChanged] = merge(a.finish_reason, b.finish_reason);
48
- const index = a.index;
49
- const [logprobs, logprobsChanged] = merge(a.logprobs, b.logprobs, Logprobs.merged);
50
- if (deltaChanged || finish_reasonChanged || logprobsChanged) {
51
- return [
52
- Object.assign({ delta,
53
- finish_reason,
54
- index }, (logprobs !== undefined ? { logprobs } : {})),
55
- true,
56
- ];
57
- }
58
- else {
59
- return [a, false];
60
- }
1335
+ else {
1336
+ return [a, false];
61
1337
  }
62
- Choice.merged = merged;
1338
+ }
1339
+ Logprobs.merged = merged;
1340
+ let Logprob;
1341
+ (function (Logprob) {
63
1342
  function mergedList(a, b) {
64
- let merged = undefined;
65
- for (const choice of b) {
66
- const existingIndex = a.findIndex(({ index }) => index === choice.index);
67
- if (existingIndex === -1) {
68
- if (merged === undefined) {
69
- merged = [...a, choice];
70
- }
71
- else {
72
- merged.push(choice);
73
- }
74
- }
75
- else {
76
- const [mergedChoice, choiceChanged] = Choice.merged(a[existingIndex], choice);
77
- if (choiceChanged) {
78
- if (merged === undefined) {
79
- merged = [...a];
80
- }
81
- merged[existingIndex] = mergedChoice;
82
- }
83
- }
1343
+ if (b.length === 0) {
1344
+ return [a, false];
84
1345
  }
85
- return merged ? [merged, true] : [a, false];
86
- }
87
- Choice.mergedList = mergedList;
88
- })(Choice = Streaming.Choice || (Streaming.Choice = {}));
89
- let Delta;
90
- (function (Delta) {
91
- function merged(a, b) {
92
- const [content, contentChanged] = merge(a.content, b.content, mergedString);
93
- const [refusal, refusalChanged] = merge(a.refusal, b.refusal, mergedString);
94
- const [role, roleChanged] = merge(a.role, b.role);
95
- const [tool_calls, tool_callsChanged] = merge(a.tool_calls, b.tool_calls, ToolCall.mergedList);
96
- const [reasoning, reasoningChanged] = merge(a.reasoning, b.reasoning, mergedString);
97
- const [images, imagesChanged] = merge(a.images, b.images, Image.mergedList);
98
- if (contentChanged ||
99
- reasoningChanged ||
100
- refusalChanged ||
101
- roleChanged ||
102
- tool_callsChanged ||
103
- imagesChanged) {
104
- return [
105
- Object.assign(Object.assign(Object.assign(Object.assign(Object.assign(Object.assign({}, (content !== undefined ? { content } : {})), (reasoning !== undefined ? { reasoning } : {})), (refusal !== undefined ? { refusal } : {})), (role !== undefined ? { role } : {})), (tool_calls !== undefined ? { tool_calls } : {})), (images !== undefined ? { images } : {})),
106
- true,
107
- ];
1346
+ else if (a.length === 0) {
1347
+ return [b, true];
108
1348
  }
109
1349
  else {
110
- return [a, false];
1350
+ return [[...a, ...b], true];
111
1351
  }
112
1352
  }
113
- Delta.merged = merged;
114
- })(Delta = Streaming.Delta || (Streaming.Delta = {}));
1353
+ Logprob.mergedList = mergedList;
1354
+ Logprob.TopLogprobSchema = z
1355
+ .object({
1356
+ token: z.string().describe("The token string."),
1357
+ bytes: z
1358
+ .array(z.uint32())
1359
+ .optional()
1360
+ .nullable()
1361
+ .describe("The byte representation of the token."),
1362
+ logprob: z
1363
+ .number()
1364
+ .optional()
1365
+ .nullable()
1366
+ .describe("The log probability of the token."),
1367
+ })
1368
+ .describe("The log probability of a token in the list of top tokens.");
1369
+ })(Logprob = Logprobs.Logprob || (Logprobs.Logprob = {}));
1370
+ Logprobs.LogprobSchema = z
1371
+ .object({
1372
+ token: z
1373
+ .string()
1374
+ .describe("The token string which was selected by the sampler."),
1375
+ bytes: z
1376
+ .array(z.uint32())
1377
+ .optional()
1378
+ .nullable()
1379
+ .describe("The byte representation of the token which was selected by the sampler."),
1380
+ logprob: z
1381
+ .number()
1382
+ .describe("The log probability of the token which was selected by the sampler."),
1383
+ top_logprobs: z
1384
+ .array(Logprob.TopLogprobSchema)
1385
+ .describe("The log probabilities of the top tokens for this position."),
1386
+ })
1387
+ .describe("The token which was selected by the sampler for this position as well as the logprobabilities of the top options.");
1388
+ })(Logprobs = Response.Logprobs || (Response.Logprobs = {}));
1389
+ Response.LogprobsSchema = z
1390
+ .object({
1391
+ content: z
1392
+ .array(Logprobs.LogprobSchema)
1393
+ .optional()
1394
+ .nullable()
1395
+ .describe("The log probabilities of the tokens in the content."),
1396
+ refusal: z
1397
+ .array(Logprobs.LogprobSchema)
1398
+ .optional()
1399
+ .nullable()
1400
+ .describe("The log probabilities of the tokens in the refusal."),
1401
+ })
1402
+ .describe("The log probabilities of the tokens generated by the model.");
1403
+ Response.RoleSchema = z
1404
+ .enum(["assistant"])
1405
+ .describe("The role of the message author.");
1406
+ let Image;
1407
+ (function (Image) {
1408
+ function mergedList(a, b) {
1409
+ if (b.length === 0) {
1410
+ return [a, false];
1411
+ }
1412
+ else if (a.length === 0) {
1413
+ return [b, true];
1414
+ }
1415
+ else {
1416
+ return [[...a, ...b], true];
1417
+ }
1418
+ }
1419
+ Image.mergedList = mergedList;
1420
+ Image.ImageUrlSchema = z.object({
1421
+ type: z.literal("image_url"),
1422
+ image_url: z.object({
1423
+ url: z.string().describe("The Base64 URL of the generated image."),
1424
+ }),
1425
+ });
1426
+ })(Image = Response.Image || (Response.Image = {}));
1427
+ Response.ImageSchema = z
1428
+ .union([Image.ImageUrlSchema])
1429
+ .describe("An image generated by the model.");
1430
+ let Streaming;
1431
+ (function (Streaming) {
115
1432
  let ToolCall;
116
1433
  (function (ToolCall) {
117
1434
  function merged(a, b) {
@@ -147,9 +1464,9 @@ export var Chat;
147
1464
  (function (Function) {
148
1465
  function merged(a, b) {
149
1466
  const index = a.index;
1467
+ const [type, typeChanged] = merge(a.type, b.type);
150
1468
  const [id, idChanged] = merge(a.id, b.id);
151
1469
  const [function_, functionChanged] = merge(a.function, b.function, Definition.merged);
152
- const [type, typeChanged] = merge(a.type, b.type);
153
1470
  if (idChanged || functionChanged || typeChanged) {
154
1471
  return [
155
1472
  Object.assign(Object.assign(Object.assign({ index }, (id !== undefined ? { id } : {})), (function_ !== undefined ? { function: function_ } : {})), (type !== undefined ? { type } : {})),
@@ -180,61 +1497,73 @@ export var Chat;
180
1497
  }
181
1498
  Definition.merged = merged;
182
1499
  })(Definition = Function.Definition || (Function.Definition = {}));
1500
+ Function.DefinitionSchema = z.object({
1501
+ name: z.string().optional().describe("The name of the function."),
1502
+ arguments: z
1503
+ .string()
1504
+ .optional()
1505
+ .describe("The arguments passed to the function."),
1506
+ });
183
1507
  })(Function = ToolCall.Function || (ToolCall.Function = {}));
1508
+ ToolCall.FunctionSchema = z
1509
+ .object({
1510
+ index: z
1511
+ .uint32()
1512
+ .describe("The index of the tool call in the sequence of tool calls."),
1513
+ type: z.literal("function").optional(),
1514
+ id: z
1515
+ .string()
1516
+ .optional()
1517
+ .describe("The unique identifier of the function tool."),
1518
+ function: Function.DefinitionSchema.optional(),
1519
+ })
1520
+ .describe("A function tool call made by the assistant.");
184
1521
  })(ToolCall = Streaming.ToolCall || (Streaming.ToolCall = {}));
185
- })(Streaming = Response.Streaming || (Response.Streaming = {}));
186
- let Usage;
187
- (function (Usage) {
188
- function merged(a, b) {
189
- const [completion_tokens, completion_tokensChanged] = merge(a.completion_tokens, b.completion_tokens, mergedNumber);
190
- const [prompt_tokens, prompt_tokensChanged] = merge(a.prompt_tokens, b.prompt_tokens, mergedNumber);
191
- const [total_tokens, total_tokensChanged] = merge(a.total_tokens, b.total_tokens, mergedNumber);
192
- const [completion_tokens_details, completion_tokens_detailsChanged] = merge(a.completion_tokens_details, b.completion_tokens_details, CompletionTokensDetails.merged);
193
- const [prompt_tokens_details, prompt_tokens_detailsChanged] = merge(a.prompt_tokens_details, b.prompt_tokens_details, PromptTokensDetails.merged);
194
- const [cost, costChanged] = merge(a.cost, b.cost, mergedNumber);
195
- const [cost_details, cost_detailsChanged] = merge(a.cost_details, b.cost_details, CostDetails.merged);
196
- if (completion_tokensChanged ||
197
- prompt_tokensChanged ||
198
- total_tokensChanged ||
199
- completion_tokens_detailsChanged ||
200
- prompt_tokens_detailsChanged ||
201
- costChanged ||
202
- cost_detailsChanged) {
203
- return [
204
- Object.assign(Object.assign(Object.assign(Object.assign({ completion_tokens,
205
- prompt_tokens,
206
- total_tokens }, (completion_tokens_details !== undefined
207
- ? { completion_tokens_details }
208
- : {})), (prompt_tokens_details !== undefined
209
- ? { prompt_tokens_details }
210
- : {})), (cost !== undefined ? { cost } : {})), (cost_details !== undefined ? { cost_details } : {})),
211
- true,
212
- ];
213
- }
214
- else {
215
- return [a, false];
216
- }
217
- }
218
- Usage.merged = merged;
219
- let CompletionTokensDetails;
220
- (function (CompletionTokensDetails) {
1522
+ Streaming.ToolCallSchema = z
1523
+ .union([ToolCall.FunctionSchema])
1524
+ .describe("A tool call made by the assistant.");
1525
+ Streaming.DeltaSchema = z
1526
+ .object({
1527
+ content: z
1528
+ .string()
1529
+ .optional()
1530
+ .describe("The content added in this delta."),
1531
+ refusal: z
1532
+ .string()
1533
+ .optional()
1534
+ .describe("The refusal message added in this delta."),
1535
+ role: Response.RoleSchema.optional(),
1536
+ tool_calls: z
1537
+ .array(Streaming.ToolCallSchema)
1538
+ .optional()
1539
+ .describe("Tool calls made in this delta."),
1540
+ reasoning: z
1541
+ .string()
1542
+ .optional()
1543
+ .describe("The reasoning added in this delta."),
1544
+ images: z
1545
+ .array(Response.ImageSchema)
1546
+ .optional()
1547
+ .describe("Images added in this delta."),
1548
+ })
1549
+ .describe("A delta in a streaming chat completion response.");
1550
+ let Delta;
1551
+ (function (Delta) {
221
1552
  function merged(a, b) {
222
- const [accepted_prediction_tokens, accepted_prediction_tokensChanged,] = merge(a.accepted_prediction_tokens, b.accepted_prediction_tokens, mergedNumber);
223
- const [audio_tokens, audio_tokensChanged] = merge(a.audio_tokens, b.audio_tokens, mergedNumber);
224
- const [reasoning_tokens, reasoning_tokensChanged] = merge(a.reasoning_tokens, b.reasoning_tokens, mergedNumber);
225
- const [rejected_prediction_tokens, rejected_prediction_tokensChanged,] = merge(a.rejected_prediction_tokens, b.rejected_prediction_tokens, mergedNumber);
226
- if (accepted_prediction_tokensChanged ||
227
- audio_tokensChanged ||
228
- reasoning_tokensChanged ||
229
- rejected_prediction_tokensChanged) {
1553
+ const [content, contentChanged] = merge(a.content, b.content, mergedString);
1554
+ const [refusal, refusalChanged] = merge(a.refusal, b.refusal, mergedString);
1555
+ const [role, roleChanged] = merge(a.role, b.role);
1556
+ const [tool_calls, tool_callsChanged] = merge(a.tool_calls, b.tool_calls, ToolCall.mergedList);
1557
+ const [reasoning, reasoningChanged] = merge(a.reasoning, b.reasoning, mergedString);
1558
+ const [images, imagesChanged] = merge(a.images, b.images, Image.mergedList);
1559
+ if (contentChanged ||
1560
+ reasoningChanged ||
1561
+ refusalChanged ||
1562
+ roleChanged ||
1563
+ tool_callsChanged ||
1564
+ imagesChanged) {
230
1565
  return [
231
- Object.assign(Object.assign(Object.assign(Object.assign({}, (accepted_prediction_tokens !== undefined
232
- ? { accepted_prediction_tokens }
233
- : {})), (audio_tokens !== undefined ? { audio_tokens } : {})), (reasoning_tokens !== undefined
234
- ? { reasoning_tokens }
235
- : {})), (rejected_prediction_tokens !== undefined
236
- ? { rejected_prediction_tokens }
237
- : {})),
1566
+ Object.assign(Object.assign(Object.assign(Object.assign(Object.assign(Object.assign({}, (content !== undefined ? { content } : {})), (reasoning !== undefined ? { reasoning } : {})), (refusal !== undefined ? { refusal } : {})), (role !== undefined ? { role } : {})), (tool_calls !== undefined ? { tool_calls } : {})), (images !== undefined ? { images } : {})),
238
1567
  true,
239
1568
  ];
240
1569
  }
@@ -242,16 +1571,30 @@ export var Chat;
242
1571
  return [a, false];
243
1572
  }
244
1573
  }
245
- CompletionTokensDetails.merged = merged;
246
- })(CompletionTokensDetails = Usage.CompletionTokensDetails || (Usage.CompletionTokensDetails = {}));
247
- let PromptTokensDetails;
248
- (function (PromptTokensDetails) {
1574
+ Delta.merged = merged;
1575
+ })(Delta = Streaming.Delta || (Streaming.Delta = {}));
1576
+ Streaming.ChoiceSchema = z
1577
+ .object({
1578
+ delta: Streaming.DeltaSchema,
1579
+ finish_reason: Response.FinishReasonSchema.optional(),
1580
+ index: z
1581
+ .uint32()
1582
+ .describe("The index of the choice in the list of choices."),
1583
+ logprobs: Response.LogprobsSchema.optional(),
1584
+ })
1585
+ .describe("A choice in a streaming chat completion response.");
1586
+ let Choice;
1587
+ (function (Choice) {
249
1588
  function merged(a, b) {
250
- const [audio_tokens, audio_tokensChanged] = merge(a.audio_tokens, b.audio_tokens, mergedNumber);
251
- const [cached_tokens, cached_tokensChanged] = merge(a.cached_tokens, b.cached_tokens, mergedNumber);
252
- if (audio_tokensChanged || cached_tokensChanged) {
1589
+ const [delta, deltaChanged] = merge(a.delta, b.delta, Delta.merged);
1590
+ const [finish_reason, finish_reasonChanged] = merge(a.finish_reason, b.finish_reason);
1591
+ const index = a.index;
1592
+ const [logprobs, logprobsChanged] = merge(a.logprobs, b.logprobs, Logprobs.merged);
1593
+ if (deltaChanged || finish_reasonChanged || logprobsChanged) {
253
1594
  return [
254
- Object.assign(Object.assign({}, (audio_tokens !== undefined ? { audio_tokens } : {})), (cached_tokens !== undefined ? { cached_tokens } : {})),
1595
+ Object.assign({ delta,
1596
+ finish_reason,
1597
+ index }, (logprobs !== undefined ? { logprobs } : {})),
255
1598
  true,
256
1599
  ];
257
1600
  }
@@ -259,21 +1602,92 @@ export var Chat;
259
1602
  return [a, false];
260
1603
  }
261
1604
  }
262
- PromptTokensDetails.merged = merged;
263
- })(PromptTokensDetails = Usage.PromptTokensDetails || (Usage.PromptTokensDetails = {}));
264
- let CostDetails;
265
- (function (CostDetails) {
1605
+ Choice.merged = merged;
1606
+ function mergedList(a, b) {
1607
+ let merged = undefined;
1608
+ for (const choice of b) {
1609
+ const existingIndex = a.findIndex(({ index }) => index === choice.index);
1610
+ if (existingIndex === -1) {
1611
+ if (merged === undefined) {
1612
+ merged = [...a, choice];
1613
+ }
1614
+ else {
1615
+ merged.push(choice);
1616
+ }
1617
+ }
1618
+ else {
1619
+ const [mergedChoice, choiceChanged] = Choice.merged(a[existingIndex], choice);
1620
+ if (choiceChanged) {
1621
+ if (merged === undefined) {
1622
+ merged = [...a];
1623
+ }
1624
+ merged[existingIndex] = mergedChoice;
1625
+ }
1626
+ }
1627
+ }
1628
+ return merged ? [merged, true] : [a, false];
1629
+ }
1630
+ Choice.mergedList = mergedList;
1631
+ })(Choice = Streaming.Choice || (Streaming.Choice = {}));
1632
+ Streaming.ChatCompletionChunkSchema = z
1633
+ .object({
1634
+ id: z
1635
+ .string()
1636
+ .describe("The unique identifier of the chat completion."),
1637
+ upstream_id: z
1638
+ .string()
1639
+ .describe("The unique identifier of the upstream chat completion."),
1640
+ choices: z
1641
+ .array(Streaming.ChoiceSchema)
1642
+ .describe("The list of choices in this chunk."),
1643
+ created: z
1644
+ .uint32()
1645
+ .describe("The Unix timestamp (in seconds) when the chat completion was created."),
1646
+ model: z
1647
+ .string()
1648
+ .describe("The unique identifier of the Ensemble LLM used for this chat completion."),
1649
+ upstream_model: z
1650
+ .string()
1651
+ .describe("The upstream model used for this chat completion."),
1652
+ object: z.literal("chat.completion.chunk"),
1653
+ service_tier: z.string().optional(),
1654
+ system_fingerprint: z.string().optional(),
1655
+ usage: Response.UsageSchema.optional(),
1656
+ provider: z
1657
+ .string()
1658
+ .optional()
1659
+ .describe("The provider used for this chat completion."),
1660
+ })
1661
+ .describe("A chunk in a streaming chat completion response.");
1662
+ let ChatCompletionChunk;
1663
+ (function (ChatCompletionChunk) {
266
1664
  function merged(a, b) {
267
- const [upstream_inference_cost, upstream_inference_costChanged] = merge(a.upstream_inference_cost, b.upstream_inference_cost, mergedNumber);
268
- const [upstream_upstream_inference_cost, upstream_upstream_inference_costChanged,] = merge(a.upstream_upstream_inference_cost, b.upstream_upstream_inference_cost, mergedNumber);
269
- if (upstream_inference_costChanged ||
270
- upstream_upstream_inference_costChanged) {
1665
+ const id = a.id;
1666
+ const upstream_id = a.upstream_id;
1667
+ const [choices, choicesChanged] = Choice.mergedList(a.choices, b.choices);
1668
+ const created = a.created;
1669
+ const model = a.model;
1670
+ const upstream_model = a.upstream_model;
1671
+ const object = a.object;
1672
+ const [service_tier, service_tierChanged] = merge(a.service_tier, b.service_tier);
1673
+ const [system_fingerprint, system_fingerprintChanged] = merge(a.system_fingerprint, b.system_fingerprint);
1674
+ const [usage, usageChanged] = merge(a.usage, b.usage);
1675
+ const [provider, providerChanged] = merge(a.provider, b.provider);
1676
+ if (choicesChanged ||
1677
+ service_tierChanged ||
1678
+ system_fingerprintChanged ||
1679
+ usageChanged ||
1680
+ providerChanged) {
271
1681
  return [
272
- Object.assign(Object.assign({}, (upstream_inference_cost !== undefined
273
- ? { upstream_inference_cost }
274
- : {})), (upstream_upstream_inference_cost !== undefined
275
- ? { upstream_upstream_inference_cost }
276
- : {})),
1682
+ Object.assign(Object.assign(Object.assign(Object.assign({ id,
1683
+ upstream_id,
1684
+ choices,
1685
+ created,
1686
+ model,
1687
+ upstream_model,
1688
+ object }, (service_tier !== undefined ? { service_tier } : {})), (system_fingerprint !== undefined
1689
+ ? { system_fingerprint }
1690
+ : {})), (usage !== undefined ? { usage } : {})), (provider !== undefined ? { provider } : {})),
277
1691
  true,
278
1692
  ];
279
1693
  }
@@ -281,68 +1695,106 @@ export var Chat;
281
1695
  return [a, false];
282
1696
  }
283
1697
  }
284
- CostDetails.merged = merged;
285
- })(CostDetails = Usage.CostDetails || (Usage.CostDetails = {}));
286
- })(Usage = Response.Usage || (Response.Usage = {}));
287
- let Logprobs;
288
- (function (Logprobs) {
289
- function merged(a, b) {
290
- const [content, contentChanged] = merge(a.content, b.content, Logprob.mergedList);
291
- const [refusal, refusalChanged] = merge(a.refusal, b.refusal, Logprob.mergedList);
292
- if (contentChanged || refusalChanged) {
293
- return [{ content, refusal }, true];
294
- }
295
- else {
296
- return [a, false];
297
- }
298
- }
299
- Logprobs.merged = merged;
300
- let Logprob;
301
- (function (Logprob) {
302
- function mergedList(a, b) {
303
- if (b.length === 0) {
304
- return [a, false];
305
- }
306
- else if (a.length === 0) {
307
- return [b, true];
308
- }
309
- else {
310
- return [[...a, ...b], true];
311
- }
312
- }
313
- Logprob.mergedList = mergedList;
314
- })(Logprob = Logprobs.Logprob || (Logprobs.Logprob = {}));
315
- })(Logprobs = Response.Logprobs || (Response.Logprobs = {}));
316
- let Image;
317
- (function (Image) {
318
- function mergedList(a, b) {
319
- if (b.length === 0) {
320
- return [a, false];
321
- }
322
- else if (a.length === 0) {
323
- return [b, true];
324
- }
325
- else {
326
- return [[...a, ...b], true];
327
- }
328
- }
329
- Image.mergedList = mergedList;
330
- })(Image = Response.Image || (Response.Image = {}));
1698
+ ChatCompletionChunk.merged = merged;
1699
+ })(ChatCompletionChunk = Streaming.ChatCompletionChunk || (Streaming.ChatCompletionChunk = {}));
1700
+ })(Streaming = Response.Streaming || (Response.Streaming = {}));
1701
+ let Unary;
1702
+ (function (Unary) {
1703
+ let ToolCall;
1704
+ (function (ToolCall) {
1705
+ let Function;
1706
+ (function (Function) {
1707
+ Function.DefinitionSchema = z.object({
1708
+ name: z
1709
+ .string()
1710
+ .describe(Streaming.ToolCall.Function.DefinitionSchema.shape.name
1711
+ .description),
1712
+ arguments: z
1713
+ .string()
1714
+ .describe(Streaming.ToolCall.Function.DefinitionSchema.shape.arguments
1715
+ .description),
1716
+ });
1717
+ })(Function = ToolCall.Function || (ToolCall.Function = {}));
1718
+ ToolCall.FunctionSchema = z
1719
+ .object({
1720
+ type: z.literal("function"),
1721
+ id: z
1722
+ .string()
1723
+ .describe(Streaming.ToolCall.FunctionSchema.shape.id.description),
1724
+ function: Function.DefinitionSchema,
1725
+ })
1726
+ .describe(Streaming.ToolCall.FunctionSchema.description);
1727
+ })(ToolCall = Unary.ToolCall || (Unary.ToolCall = {}));
1728
+ Unary.ToolCallSchema = z
1729
+ .union([ToolCall.FunctionSchema])
1730
+ .describe(Streaming.ToolCallSchema.description);
1731
+ Unary.MessageSchema = z
1732
+ .object({
1733
+ content: z
1734
+ .string()
1735
+ .nullable()
1736
+ .describe("The content of the message."),
1737
+ refusal: z
1738
+ .string()
1739
+ .nullable()
1740
+ .describe("The refusal message, if any."),
1741
+ role: Response.RoleSchema,
1742
+ tool_calls: z
1743
+ .array(Unary.ToolCallSchema)
1744
+ .nullable()
1745
+ .describe("The tool calls made by the assistant, if any."),
1746
+ reasoning: z
1747
+ .string()
1748
+ .optional()
1749
+ .describe("The reasoning provided by the assistant, if any."),
1750
+ images: z
1751
+ .array(Response.ImageSchema)
1752
+ .optional()
1753
+ .describe("The images generated by the assistant, if any."),
1754
+ })
1755
+ .describe("A message generated by the assistant.");
1756
+ Unary.ChoiceSchema = z
1757
+ .object({
1758
+ message: Unary.MessageSchema,
1759
+ finish_reason: Response.FinishReasonSchema,
1760
+ index: z
1761
+ .uint32()
1762
+ .describe(Streaming.ChoiceSchema.shape.index.description),
1763
+ logprobs: Response.LogprobsSchema.nullable(),
1764
+ })
1765
+ .describe("A choice in a unary chat completion response.");
1766
+ Unary.ChatCompletionSchema = z
1767
+ .object({
1768
+ id: z
1769
+ .string()
1770
+ .describe("The unique identifier of the chat completion."),
1771
+ upstream_id: z
1772
+ .string()
1773
+ .describe("The unique identifier of the upstream chat completion."),
1774
+ choices: z
1775
+ .array(Unary.ChoiceSchema)
1776
+ .describe("The list of choices in this chat completion."),
1777
+ created: z
1778
+ .uint32()
1779
+ .describe("The Unix timestamp (in seconds) when the chat completion was created."),
1780
+ model: z
1781
+ .string()
1782
+ .describe("The unique identifier of the Ensemble LLM used for this chat completion."),
1783
+ upstream_model: z
1784
+ .string()
1785
+ .describe("The upstream model used for this chat completion."),
1786
+ object: z.literal("chat.completion"),
1787
+ service_tier: z.string().optional(),
1788
+ system_fingerprint: z.string().optional(),
1789
+ usage: Response.UsageSchema,
1790
+ provider: z
1791
+ .string()
1792
+ .optional()
1793
+ .describe("The provider used for this chat completion."),
1794
+ })
1795
+ .describe("A unary chat completion response.");
1796
+ })(Unary = Response.Unary || (Response.Unary = {}));
331
1797
  })(Response = Completions.Response || (Completions.Response = {}));
332
- async function list(openai, listOptions, options) {
333
- const response = await openai.chat.completions.list(Object.assign({ query: listOptions }, options));
334
- return response;
335
- }
336
- Completions.list = list;
337
- async function publish(openai, id, options) {
338
- await openai.post(`/chat/completions/${id}/publish`, options);
339
- }
340
- Completions.publish = publish;
341
- async function retrieve(openai, id, options) {
342
- const response = await openai.chat.completions.retrieve(id, options);
343
- return response;
344
- }
345
- Completions.retrieve = retrieve;
346
1798
  async function create(openai, body, options) {
347
1799
  var _a;
348
1800
  const response = await openai.post("/chat/completions", Object.assign({ body, stream: (_a = body.stream) !== null && _a !== void 0 ? _a : false }, options));
@@ -351,68 +1803,169 @@ export var Chat;
351
1803
  Completions.create = create;
352
1804
  })(Completions = Chat.Completions || (Chat.Completions = {}));
353
1805
  })(Chat || (Chat = {}));
354
- export var Score;
355
- (function (Score) {
1806
+ // Vector Completions
1807
+ export var Vector;
1808
+ (function (Vector) {
356
1809
  let Completions;
357
1810
  (function (Completions) {
1811
+ let Request;
1812
+ (function (Request) {
1813
+ Request.EnsembleSchema = z
1814
+ .union([z.string(), EnsembleBaseSchema])
1815
+ .describe("The Ensemble to use for this completion. May be a unique ID or an inline definition.");
1816
+ Request.ProfileSchema = z
1817
+ .array(z.number())
1818
+ .describe('The profile to use for the completion. Must be of the same length as the Ensemble\'s "LLMs" field, ignoring count.');
1819
+ Request.VectorCompletionCreateParamsBaseSchema = z
1820
+ .object({
1821
+ retry: z
1822
+ .string()
1823
+ .optional()
1824
+ .nullable()
1825
+ .describe("The unique ID of a previous incomplete or failed completion."),
1826
+ messages: MessagesSchema,
1827
+ provider: Chat.Completions.Request.ProviderSchema.optional().nullable(),
1828
+ ensemble: Request.EnsembleSchema,
1829
+ profile: Request.ProfileSchema,
1830
+ seed: Chat.Completions.Request.SeedSchema.optional().nullable(),
1831
+ tools: ToolsSchema.optional()
1832
+ .nullable()
1833
+ .describe(`${ToolsSchema.description} These are readonly and will only be useful for explaining prior tool calls or otherwise influencing behavior.`),
1834
+ responses: VectorResponsesSchema,
1835
+ backoff_max_elapsed_time: Chat.Completions.Request.BackoffMaxElapsedTimeSchema.optional().nullable(),
1836
+ first_chunk_timeout: Chat.Completions.Request.FirstChunkTimeoutSchema.optional().nullable(),
1837
+ other_chunk_timeout: Chat.Completions.Request.OtherChunkTimeoutSchema.optional().nullable(),
1838
+ })
1839
+ .describe("Base parameters for creating a vector completion.");
1840
+ Request.VectorCompletionCreateParamsStreamingSchema = Request.VectorCompletionCreateParamsBaseSchema.extend({
1841
+ stream: Chat.Completions.Request.StreamTrueSchema,
1842
+ })
1843
+ .describe("Parameters for creating a streaming vector completion.")
1844
+ .meta({ title: "VectorCompletionCreateParamsStreaming" });
1845
+ Request.VectorCompletionCreateParamsNonStreamingSchema = Request.VectorCompletionCreateParamsBaseSchema.extend({
1846
+ stream: Chat.Completions.Request.StreamFalseSchema.optional().nullable(),
1847
+ })
1848
+ .describe("Parameters for creating a unary vector completion.")
1849
+ .meta({ title: "VectorCompletionCreateParamsNonStreaming" });
1850
+ Request.VectorCompletionCreateParamsSchema = z
1851
+ .union([
1852
+ Request.VectorCompletionCreateParamsStreamingSchema,
1853
+ Request.VectorCompletionCreateParamsNonStreamingSchema,
1854
+ ])
1855
+ .describe("Parameters for creating a vector completion.")
1856
+ .meta({ title: "VectorCompletionCreateParams" });
1857
+ })(Request = Completions.Request || (Completions.Request = {}));
358
1858
  let Response;
359
1859
  (function (Response) {
1860
+ let Vote;
1861
+ (function (Vote) {
1862
+ function mergedList(a, b) {
1863
+ let merged = undefined;
1864
+ for (const vote of b) {
1865
+ const existingIndex = a.findIndex(({ flat_ensemble_index }) => flat_ensemble_index === vote.flat_ensemble_index);
1866
+ if (existingIndex === -1) {
1867
+ if (merged === undefined) {
1868
+ merged = [...a, vote];
1869
+ }
1870
+ else {
1871
+ merged.push(vote);
1872
+ }
1873
+ }
1874
+ }
1875
+ return merged ? [merged, true] : [a, false];
1876
+ }
1877
+ Vote.mergedList = mergedList;
1878
+ })(Vote = Response.Vote || (Response.Vote = {}));
1879
+ Response.VoteSchema = z
1880
+ .object({
1881
+ model: z
1882
+ .string()
1883
+ .describe("The unique identifier of the Ensemble LLM which generated this vote."),
1884
+ ensemble_index: z
1885
+ .uint32()
1886
+ .describe("The index of the Ensemble LLM in the Ensemble."),
1887
+ flat_ensemble_index: z
1888
+ .uint32()
1889
+ .describe("The flat index of the Ensemble LLM in the expanded Ensemble, accounting for counts."),
1890
+ vote: z
1891
+ .array(z.number())
1892
+ .describe("The vote generated by this Ensemble LLM. It is of the same length of the number of responses provided in the request. If the Ensemble LLM used logprobs, may be a probability distribution; otherwise, one of the responses will have a value of 1 and the rest 0."),
1893
+ weight: z.number().describe("The weight assigned to this vote."),
1894
+ retry: z
1895
+ .boolean()
1896
+ .optional()
1897
+ .describe("Whether this vote came from a previous Vector Completion which was retried."),
1898
+ })
1899
+ .describe("A vote from an Ensemble LLM within a Vector Completion.");
1900
+ Response.VotesSchema = z
1901
+ .array(Response.VoteSchema)
1902
+ .describe("The list of votes for responses in the request from the Ensemble LLMs within the provided Ensemble.");
1903
+ let Scores;
1904
+ (function (Scores) {
1905
+ function merged(a, b) {
1906
+ if (a.length === b.length) {
1907
+ for (let i = 0; i < a.length; i++) {
1908
+ if (a[i] !== b[i]) {
1909
+ return [b, true];
1910
+ }
1911
+ }
1912
+ return [a, false];
1913
+ }
1914
+ else {
1915
+ return [b, true];
1916
+ }
1917
+ }
1918
+ Scores.merged = merged;
1919
+ })(Scores = Response.Scores || (Response.Scores = {}));
1920
+ Response.ScoresSchema = z
1921
+ .array(z.number())
1922
+ .describe("The scores for each response in the request, aggregated from the votes of the Ensemble LLMs.");
1923
+ let Weights;
1924
+ (function (Weights) {
1925
+ function merged(a, b) {
1926
+ return Scores.merged(a, b);
1927
+ }
1928
+ Weights.merged = merged;
1929
+ })(Weights = Response.Weights || (Response.Weights = {}));
1930
+ Response.WeightsSchema = z
1931
+ .array(z.number())
1932
+ .describe("The weights assigned to each response in the request, aggregated from the votes of the Ensemble LLMs.");
1933
+ Response.EnsembleSchema = z
1934
+ .string()
1935
+ .describe("The unique identifier of the Ensemble used for this vector completion.");
1936
+ Response.UsageSchema = z
1937
+ .object({
1938
+ completion_tokens: z
1939
+ .uint32()
1940
+ .describe("The number of tokens generated in the completion."),
1941
+ prompt_tokens: z
1942
+ .uint32()
1943
+ .describe("The number of tokens in the prompt."),
1944
+ total_tokens: z
1945
+ .uint32()
1946
+ .describe("The total number of tokens used in the prompt or generated in the completion."),
1947
+ completion_tokens_details: Chat.Completions.Response.Usage.CompletionTokensDetailsSchema.optional(),
1948
+ prompt_tokens_details: Chat.Completions.Response.Usage.PromptTokensDetailsSchema.optional(),
1949
+ cost: z
1950
+ .number()
1951
+ .describe("The cost in credits incurred for this completion."),
1952
+ cost_details: Chat.Completions.Response.Usage.CostDetailsSchema.optional(),
1953
+ total_cost: z
1954
+ .number()
1955
+ .describe("The total cost in credits incurred including upstream costs."),
1956
+ })
1957
+ .describe("Token and cost usage statistics for the completion.");
360
1958
  let Streaming;
361
1959
  (function (Streaming) {
362
1960
  let ChatCompletionChunk;
363
1961
  (function (ChatCompletionChunk) {
364
1962
  function merged(a, b) {
365
- const id = a.id;
366
- const [choices, choicesChanged] = Choice.mergedList(a.choices, b.choices);
367
- const created = a.created;
368
- const model = a.model;
369
- const object = a.object;
370
- const [usage, usageChanged] = merge(a.usage, b.usage, Chat.Completions.Response.Usage.merged);
371
- const [weight_data, weight_dataChanged] = merge(a.weight_data, b.weight_data);
372
- if (choicesChanged || usageChanged || weight_dataChanged) {
373
- return [
374
- Object.assign(Object.assign({ id,
375
- choices,
376
- created,
377
- model,
378
- object }, (usage !== undefined ? { usage } : {})), (weight_data !== undefined ? { weight_data } : {})),
379
- true,
380
- ];
381
- }
382
- else {
383
- return [a, false];
384
- }
385
- }
386
- ChatCompletionChunk.merged = merged;
387
- })(ChatCompletionChunk = Streaming.ChatCompletionChunk || (Streaming.ChatCompletionChunk = {}));
388
- let Choice;
389
- (function (Choice) {
390
- function merged(a, b) {
391
- const [delta, deltaChanged] = merge(a.delta, b.delta, Delta.merged);
392
- const [finish_reason, finish_reasonChanged] = merge(a.finish_reason, b.finish_reason);
393
1963
  const index = a.index;
394
- const [logprobs, logprobsChanged] = merge(a.logprobs, b.logprobs, Chat.Completions.Response.Logprobs.merged);
395
- const [weight, weightChanged] = merge(a.weight, b.weight);
396
- const [confidence, confidenceChanged] = merge(a.confidence, b.confidence);
1964
+ const [base, baseChanged] = Chat.Completions.Response.Streaming.ChatCompletionChunk.merged(a, b);
397
1965
  const [error, errorChanged] = merge(a.error, b.error);
398
- const [model, modelChanged] = merge(a.model, b.model);
399
- const [model_index, model_indexChanged] = merge(a.model_index, b.model_index);
400
- const [completion_metadata, completion_metadataChanged] = merge(a.completion_metadata, b.completion_metadata, CompletionMetadata.merged);
401
- if (deltaChanged ||
402
- finish_reasonChanged ||
403
- logprobsChanged ||
404
- weightChanged ||
405
- confidenceChanged ||
406
- errorChanged ||
407
- modelChanged ||
408
- model_indexChanged ||
409
- completion_metadataChanged) {
1966
+ if (baseChanged || errorChanged) {
410
1967
  return [
411
- Object.assign(Object.assign(Object.assign(Object.assign(Object.assign(Object.assign(Object.assign({ delta,
412
- finish_reason,
413
- index }, (logprobs !== undefined ? { logprobs } : {})), (weight !== undefined ? { weight } : {})), (confidence !== undefined ? { confidence } : {})), (error !== undefined ? { error } : {})), (model !== undefined ? { model } : {})), (model_index !== undefined ? { model_index } : {})), (completion_metadata !== undefined
414
- ? { completion_metadata }
415
- : {})),
1968
+ Object.assign(Object.assign({ index }, base), (error !== undefined ? { error } : {})),
416
1969
  true,
417
1970
  ];
418
1971
  }
@@ -420,52 +1973,65 @@ export var Score;
420
1973
  return [a, false];
421
1974
  }
422
1975
  }
423
- Choice.merged = merged;
1976
+ ChatCompletionChunk.merged = merged;
424
1977
  function mergedList(a, b) {
425
1978
  let merged = undefined;
426
- for (const choice of b) {
427
- const existingIndex = a.findIndex(({ index }) => index === choice.index);
1979
+ for (const chunk of b) {
1980
+ const existingIndex = a.findIndex(({ index }) => index === chunk.index);
428
1981
  if (existingIndex === -1) {
429
1982
  if (merged === undefined) {
430
- merged = [...a, choice];
1983
+ merged = [...a, chunk];
431
1984
  }
432
1985
  else {
433
- merged.push(choice);
1986
+ merged.push(chunk);
434
1987
  }
435
1988
  }
436
1989
  else {
437
- const [mergedChoice, choiceChanged] = Choice.merged(a[existingIndex], choice);
438
- if (choiceChanged) {
1990
+ const [mergedChunk, chunkChanged] = ChatCompletionChunk.merged(a[existingIndex], chunk);
1991
+ if (chunkChanged) {
439
1992
  if (merged === undefined) {
440
1993
  merged = [...a];
441
1994
  }
442
- merged[existingIndex] = mergedChoice;
1995
+ merged[existingIndex] = mergedChunk;
443
1996
  }
444
1997
  }
445
1998
  }
446
1999
  return merged ? [merged, true] : [a, false];
447
2000
  }
448
- Choice.mergedList = mergedList;
449
- })(Choice = Streaming.Choice || (Streaming.Choice = {}));
450
- let Delta;
451
- (function (Delta) {
2001
+ ChatCompletionChunk.mergedList = mergedList;
2002
+ })(ChatCompletionChunk = Streaming.ChatCompletionChunk || (Streaming.ChatCompletionChunk = {}));
2003
+ Streaming.ChatCompletionChunkSchema = Chat.Completions.Response.Streaming.ChatCompletionChunkSchema.extend({
2004
+ index: z
2005
+ .uint32()
2006
+ .describe("The index of the completion amongst all chat completions."),
2007
+ error: ObjectiveAIErrorSchema.optional().describe("An error encountered during the generation of this chat completion."),
2008
+ }).describe("A chat completion chunk generated in the pursuit of a vector completion.");
2009
+ let VectorCompletionChunk;
2010
+ (function (VectorCompletionChunk) {
452
2011
  function merged(a, b) {
453
- const [content, contentChanged] = merge(a.content, b.content, mergedString);
454
- const [refusal, refusalChanged] = merge(a.refusal, b.refusal, mergedString);
455
- const [role, roleChanged] = merge(a.role, b.role);
456
- const [tool_calls, tool_callsChanged] = merge(a.tool_calls, b.tool_calls, Chat.Completions.Response.Streaming.ToolCall.mergedList);
457
- const [reasoning, reasoningChanged] = merge(a.reasoning, b.reasoning, mergedString);
458
- const [images, imagesChanged] = merge(a.images, b.images, Chat.Completions.Response.Image.mergedList);
459
- const [vote, voteChanged] = merge(a.vote, b.vote);
460
- if (contentChanged ||
461
- reasoningChanged ||
462
- refusalChanged ||
463
- roleChanged ||
464
- tool_callsChanged ||
465
- imagesChanged ||
466
- voteChanged) {
2012
+ const id = a.id;
2013
+ const [completions, completionsChanged] = ChatCompletionChunk.mergedList(a.completions, b.completions);
2014
+ const [votes, votesChanged] = Vote.mergedList(a.votes, b.votes);
2015
+ const [scores, scoresChanged] = Scores.merged(a.scores, b.scores);
2016
+ const [weights, weightsChanged] = Weights.merged(a.weights, b.weights);
2017
+ const created = a.created;
2018
+ const ensemble = a.ensemble;
2019
+ const object = a.object;
2020
+ const [usage, usageChanged] = merge(a.usage, b.usage);
2021
+ if (completionsChanged ||
2022
+ votesChanged ||
2023
+ scoresChanged ||
2024
+ weightsChanged ||
2025
+ usageChanged) {
467
2026
  return [
468
- Object.assign(Object.assign(Object.assign(Object.assign(Object.assign(Object.assign(Object.assign({}, (content !== undefined ? { content } : {})), (reasoning !== undefined ? { reasoning } : {})), (refusal !== undefined ? { refusal } : {})), (role !== undefined ? { role } : {})), (tool_calls !== undefined ? { tool_calls } : {})), (images !== undefined ? { images } : {})), (vote !== undefined ? { vote } : {})),
2027
+ Object.assign({ id,
2028
+ completions,
2029
+ votes,
2030
+ scores,
2031
+ weights,
2032
+ created,
2033
+ ensemble,
2034
+ object }, (usage !== undefined ? { usage } : {})),
469
2035
  true,
470
2036
  ];
471
2037
  }
@@ -473,91 +2039,846 @@ export var Score;
473
2039
  return [a, false];
474
2040
  }
475
2041
  }
476
- Delta.merged = merged;
477
- })(Delta = Streaming.Delta || (Streaming.Delta = {}));
2042
+ VectorCompletionChunk.merged = merged;
2043
+ })(VectorCompletionChunk = Streaming.VectorCompletionChunk || (Streaming.VectorCompletionChunk = {}));
2044
+ Streaming.VectorCompletionChunkSchema = z
2045
+ .object({
2046
+ id: z
2047
+ .string()
2048
+ .describe("The unique identifier of the vector completion."),
2049
+ completions: z
2050
+ .array(Streaming.ChatCompletionChunkSchema)
2051
+ .describe("The list of chat completion chunks created for this vector completion."),
2052
+ votes: Response.VotesSchema,
2053
+ scores: Response.ScoresSchema,
2054
+ weights: Response.WeightsSchema,
2055
+ created: z
2056
+ .uint32()
2057
+ .describe("The Unix timestamp (in seconds) when the vector completion was created."),
2058
+ ensemble: Response.EnsembleSchema,
2059
+ object: z.literal("vector.completion.chunk"),
2060
+ usage: Response.UsageSchema.optional(),
2061
+ })
2062
+ .describe("A chunk in a streaming vector completion response.");
478
2063
  })(Streaming = Response.Streaming || (Response.Streaming = {}));
479
- let CompletionMetadata;
480
- (function (CompletionMetadata) {
481
- function merged(a, b) {
482
- const id = a.id;
483
- const created = a.created;
484
- const model = a.model;
485
- const [service_tier, service_tierChanged] = merge(a.service_tier, b.service_tier);
486
- const [system_fingerprint, system_fingerprintChanged] = merge(a.system_fingerprint, b.system_fingerprint);
487
- const [usage, usageChanged] = merge(a.usage, b.usage, Chat.Completions.Response.Usage.merged);
488
- if (service_tierChanged ||
489
- system_fingerprintChanged ||
490
- usageChanged) {
491
- return [
492
- Object.assign(Object.assign(Object.assign({ id,
493
- created,
494
- model }, (service_tier !== undefined ? { service_tier } : {})), (system_fingerprint !== undefined
495
- ? { system_fingerprint }
496
- : {})), (usage !== undefined ? { usage } : {})),
497
- true,
498
- ];
499
- }
500
- else {
501
- return [a, false];
502
- }
503
- }
504
- CompletionMetadata.merged = merged;
505
- })(CompletionMetadata = Response.CompletionMetadata || (Response.CompletionMetadata = {}));
2064
+ let Unary;
2065
+ (function (Unary) {
2066
+ Unary.ChatCompletionSchema = Chat.Completions.Response.Unary.ChatCompletionSchema.extend({
2067
+ index: z
2068
+ .uint32()
2069
+ .describe("The index of the completion amongst all chat completions."),
2070
+ error: ObjectiveAIErrorSchema.optional().describe("An error encountered during the generation of this chat completion."),
2071
+ }).describe("A chat completion generated in the pursuit of a vector completion.");
2072
+ Unary.VectorCompletionSchema = z
2073
+ .object({
2074
+ id: z
2075
+ .string()
2076
+ .describe("The unique identifier of the vector completion."),
2077
+ completions: z
2078
+ .array(Unary.ChatCompletionSchema)
2079
+ .describe("The list of chat completions created for this vector completion."),
2080
+ votes: Response.VotesSchema,
2081
+ scores: Response.ScoresSchema,
2082
+ weights: Response.WeightsSchema,
2083
+ created: z
2084
+ .uint32()
2085
+ .describe("The Unix timestamp (in seconds) when the vector completion was created."),
2086
+ ensemble: Response.EnsembleSchema,
2087
+ object: z.literal("vector.completion"),
2088
+ usage: Response.UsageSchema,
2089
+ })
2090
+ .describe("A unary vector completion response.");
2091
+ })(Unary = Response.Unary || (Response.Unary = {}));
506
2092
  })(Response = Completions.Response || (Completions.Response = {}));
507
- async function list(openai, listOptions, options) {
508
- const response = await openai.get("/score/completions", Object.assign({ query: listOptions }, options));
509
- return response;
510
- }
511
- Completions.list = list;
512
- async function publish(openai, id, options) {
513
- await openai.post(`/score/completions/${id}/publish`, options);
514
- }
515
- Completions.publish = publish;
516
- async function retrieve(openai, id, options) {
517
- const response = await openai.get(`/score/completions/${id}`, options);
518
- return response;
519
- }
520
- Completions.retrieve = retrieve;
521
- async function trainingTableAdd(openai, id, correctVote, options) {
522
- await openai.post(`/score/completions/${id}/training_table`, Object.assign({ body: { correct_vote: correctVote } }, options));
523
- }
524
- Completions.trainingTableAdd = trainingTableAdd;
525
- async function trainingTableDelete(openai, id, options) {
526
- await openai.delete(`/score/completions/${id}/training_table`, options);
527
- }
528
- Completions.trainingTableDelete = trainingTableDelete;
529
2093
  async function create(openai, body, options) {
530
2094
  var _a;
531
- const response = await openai.post("/score/completions", Object.assign({ body, stream: (_a = body.stream) !== null && _a !== void 0 ? _a : false }, options));
2095
+ const response = await openai.post("/vector/completions", Object.assign({ body, stream: (_a = body.stream) !== null && _a !== void 0 ? _a : false }, options));
532
2096
  return response;
533
2097
  }
534
2098
  Completions.create = create;
535
- })(Completions = Score.Completions || (Score.Completions = {}));
536
- })(Score || (Score = {}));
537
- export var Multichat;
538
- (function (Multichat) {
539
- let Completions;
540
- (function (Completions) {
2099
+ })(Completions = Vector.Completions || (Vector.Completions = {}));
2100
+ })(Vector || (Vector = {}));
2101
+ // Function
2102
+ export var Function;
2103
+ (function (Function_1) {
2104
+ Function_1.VectorCompletionProfileSchema = z
2105
+ .object({
2106
+ ensemble: Vector.Completions.Request.EnsembleSchema,
2107
+ profile: Vector.Completions.Request.ProfileSchema,
2108
+ })
2109
+ .describe("A vector completion profile containing an Ensemble and array of weights.");
2110
+ Function_1.FunctionProfileVersionRequiredSchema = z
2111
+ .union([
2112
+ z.object({
2113
+ function_author: z
2114
+ .string()
2115
+ .describe("The author of the function the profile was published to."),
2116
+ function_id: z
2117
+ .string()
2118
+ .describe("The unique identifier of the function the profile was published to."),
2119
+ author: z.string().describe("The author of the profile."),
2120
+ id: z.string().describe("The unique identifier of the profile."),
2121
+ version: z.uint32().describe("The version of the profile."),
2122
+ }),
2123
+ z.lazy(() => z.array(Function_1.ProfileVersionRequiredSchema).meta({
2124
+ title: "ProfileVersionRequiredArray",
2125
+ recursive: true,
2126
+ })),
2127
+ ])
2128
+ .describe("A function profile where remote profiles must specify a version.");
2129
+ Function_1.FunctionProfileVersionOptionalSchema = z
2130
+ .union([
2131
+ z.object({
2132
+ function_author: z
2133
+ .string()
2134
+ .describe("The author of the function the profile was published to."),
2135
+ function_id: z
2136
+ .string()
2137
+ .describe("The unique identifier of the function the profile was published to."),
2138
+ author: z.string().describe("The author of the profile."),
2139
+ id: z.string().describe("The unique identifier of the profile."),
2140
+ version: z
2141
+ .uint32()
2142
+ .optional()
2143
+ .nullable()
2144
+ .describe("The version of the profile."),
2145
+ }),
2146
+ z
2147
+ .lazy(() => z.array(Function_1.ProfileVersionOptionalSchema))
2148
+ .meta({
2149
+ title: "ProfileVersionOptionalArray",
2150
+ recursive: true,
2151
+ }),
2152
+ ])
2153
+ .describe("A function profile where remote profiles may omit a version.");
2154
+ Function_1.ProfileVersionRequiredSchema = z
2155
+ .union([
2156
+ Function_1.FunctionProfileVersionRequiredSchema,
2157
+ Function_1.VectorCompletionProfileSchema,
2158
+ ])
2159
+ .describe("A profile where remote function profiles must specify a version.");
2160
+ Function_1.ProfileVersionOptionalSchema = z
2161
+ .union([
2162
+ Function_1.FunctionProfileVersionOptionalSchema,
2163
+ Function_1.VectorCompletionProfileSchema,
2164
+ ])
2165
+ .describe("A profile where remote function profiles may omit a version.");
2166
+ Function_1.InputSchemaSchema = z.lazy(() => z
2167
+ .union([
2168
+ InputSchema.ObjectSchema,
2169
+ InputSchema.ArraySchema,
2170
+ InputSchema.StringSchema,
2171
+ InputSchema.NumberSchema,
2172
+ InputSchema.IntegerSchema,
2173
+ InputSchema.BooleanSchema,
2174
+ InputSchema.ImageSchema,
2175
+ InputSchema.AudioSchema,
2176
+ InputSchema.VideoSchema,
2177
+ InputSchema.FileSchema,
2178
+ ])
2179
+ .describe("An input schema defining the structure of function inputs."));
2180
+ let InputSchema;
2181
+ (function (InputSchema) {
2182
+ InputSchema.ObjectSchema = z
2183
+ .object({
2184
+ type: z.literal("object"),
2185
+ description: z
2186
+ .string()
2187
+ .optional()
2188
+ .nullable()
2189
+ .describe("The description of the object input."),
2190
+ properties: z
2191
+ .record(z.string(), Function_1.InputSchemaSchema.meta({
2192
+ title: "InputSchema",
2193
+ recursive: true,
2194
+ }))
2195
+ .describe("The properties of the object input."),
2196
+ required: z
2197
+ .array(z.string())
2198
+ .optional()
2199
+ .nullable()
2200
+ .describe("The required properties of the object input."),
2201
+ })
2202
+ .describe("An object input schema.");
2203
+ InputSchema.ArraySchema = z
2204
+ .object({
2205
+ type: z.literal("array"),
2206
+ description: z
2207
+ .string()
2208
+ .optional()
2209
+ .nullable()
2210
+ .describe("The description of the array input."),
2211
+ minItems: z
2212
+ .uint32()
2213
+ .optional()
2214
+ .nullable()
2215
+ .describe("The minimum number of items in the array input."),
2216
+ maxItems: z
2217
+ .uint32()
2218
+ .optional()
2219
+ .nullable()
2220
+ .describe("The maximum number of items in the array input."),
2221
+ items: Function_1.InputSchemaSchema.describe("The schema of the items in the array input.").meta({
2222
+ title: "InputSchema",
2223
+ recursive: true,
2224
+ }),
2225
+ })
2226
+ .describe("An array input schema.");
2227
+ InputSchema.StringSchema = z
2228
+ .object({
2229
+ type: z.literal("string"),
2230
+ description: z
2231
+ .string()
2232
+ .optional()
2233
+ .nullable()
2234
+ .describe("The description of the string input."),
2235
+ enum: z
2236
+ .array(z.string())
2237
+ .optional()
2238
+ .nullable()
2239
+ .describe("The enumeration of allowed string values."),
2240
+ })
2241
+ .describe("A string input schema.");
2242
+ InputSchema.NumberSchema = z
2243
+ .object({
2244
+ type: z.literal("number"),
2245
+ description: z
2246
+ .string()
2247
+ .optional()
2248
+ .nullable()
2249
+ .describe("The description of the number input."),
2250
+ minimum: z
2251
+ .number()
2252
+ .optional()
2253
+ .nullable()
2254
+ .describe("The minimum allowed value for the number input."),
2255
+ maximum: z
2256
+ .number()
2257
+ .optional()
2258
+ .nullable()
2259
+ .describe("The maximum allowed value for the number input."),
2260
+ })
2261
+ .describe("A number input schema.");
2262
+ InputSchema.IntegerSchema = z
2263
+ .object({
2264
+ type: z.literal("integer"),
2265
+ description: z
2266
+ .string()
2267
+ .optional()
2268
+ .nullable()
2269
+ .describe("The description of the integer input."),
2270
+ minimum: z
2271
+ .uint32()
2272
+ .optional()
2273
+ .nullable()
2274
+ .describe("The minimum allowed value for the integer input."),
2275
+ maximum: z
2276
+ .uint32()
2277
+ .optional()
2278
+ .nullable()
2279
+ .describe("The maximum allowed value for the integer input."),
2280
+ })
2281
+ .describe("An integer input schema.");
2282
+ InputSchema.BooleanSchema = z
2283
+ .object({
2284
+ type: z.literal("boolean"),
2285
+ description: z
2286
+ .string()
2287
+ .optional()
2288
+ .nullable()
2289
+ .describe("The description of the boolean input."),
2290
+ })
2291
+ .describe("A boolean input schema.");
2292
+ InputSchema.ImageSchema = z
2293
+ .object({
2294
+ type: z.literal("image"),
2295
+ description: z
2296
+ .string()
2297
+ .optional()
2298
+ .nullable()
2299
+ .describe("The description of the image input."),
2300
+ })
2301
+ .describe("An image input schema.");
2302
+ InputSchema.AudioSchema = z
2303
+ .object({
2304
+ type: z.literal("audio"),
2305
+ description: z
2306
+ .string()
2307
+ .optional()
2308
+ .nullable()
2309
+ .describe("The description of the audio input."),
2310
+ })
2311
+ .describe("An audio input schema.");
2312
+ InputSchema.VideoSchema = z
2313
+ .object({
2314
+ type: z.literal("video"),
2315
+ description: z
2316
+ .string()
2317
+ .optional()
2318
+ .nullable()
2319
+ .describe("The description of the video input."),
2320
+ })
2321
+ .describe("A video input schema.");
2322
+ InputSchema.FileSchema = z
2323
+ .object({
2324
+ type: z.literal("file"),
2325
+ description: z
2326
+ .string()
2327
+ .optional()
2328
+ .nullable()
2329
+ .describe("The description of the file input."),
2330
+ })
2331
+ .describe("A file input schema.");
2332
+ })(InputSchema = Function_1.InputSchema || (Function_1.InputSchema = {}));
2333
+ Function_1.InputSchema_ = z
2334
+ .lazy(() => z.union([
2335
+ Message.RichContent.PartSchema,
2336
+ z.record(z.string(), Function_1.InputSchema_.meta({
2337
+ title: "Input",
2338
+ recursive: true,
2339
+ })),
2340
+ z.array(Function_1.InputSchema_.meta({
2341
+ title: "Input",
2342
+ recursive: true,
2343
+ })),
2344
+ z.string(),
2345
+ z.number(),
2346
+ z.boolean(),
2347
+ ]))
2348
+ .describe("The input provided to the function.");
2349
+ Function_1.InputExpressionSchema = z.lazy(() => z
2350
+ .union([
2351
+ Message.RichContent.PartSchema,
2352
+ z.record(z.string(), Function_1.InputExpressionSchema.meta({
2353
+ title: "InputExpression",
2354
+ recursive: true,
2355
+ })),
2356
+ z.array(Function_1.InputExpressionSchema.meta({
2357
+ title: "InputExpression",
2358
+ recursive: true,
2359
+ })),
2360
+ z.string(),
2361
+ z.number(),
2362
+ z.boolean(),
2363
+ ExpressionSchema.describe("An expression which evaluates to an input."),
2364
+ ])
2365
+ .describe(Function_1.InputSchema_.description));
2366
+ Function_1.InputMapsExpressionSchema = z
2367
+ .union([
2368
+ ExpressionSchema.describe("An expression which evaluates to a 2D array of Inputs."),
2369
+ z
2370
+ .array(ExpressionSchema.describe("An expression which evaluates to a 1D array of Inputs."))
2371
+ .describe("A list of expressions which each evaluate to a 1D array of Inputs."),
2372
+ ])
2373
+ .describe("An expression or list of expressions which evaluate to a 2D array of Inputs. Each sub-array will be fed into Tasks which specify an index of this input map.");
2374
+ let TaskExpression;
2375
+ (function (TaskExpression) {
2376
+ TaskExpression.SkipSchema = ExpressionSchema.describe("An expression which evaluates to a boolean indicating whether to skip this task.");
2377
+ TaskExpression.MapSchema = z
2378
+ .uint32()
2379
+ .describe("If present, indicates that this task should be ran once for each entry in the specified input map (input map is a 2D array indexed by this value).");
2380
+ TaskExpression.ScalarFunctionSchema = z
2381
+ .object({
2382
+ type: z.literal("scalar.function"),
2383
+ author: z
2384
+ .string()
2385
+ .describe("The author of the remote published scalar function."),
2386
+ id: z
2387
+ .string()
2388
+ .describe("The unique identifier of the remote published scalar function."),
2389
+ version: z
2390
+ .uint32()
2391
+ .describe("The version of the remote published scalar function."),
2392
+ skip: TaskExpression.SkipSchema.optional().nullable(),
2393
+ map: TaskExpression.MapSchema.optional().nullable(),
2394
+ input: Function_1.InputExpressionSchema,
2395
+ })
2396
+ .describe("A remote published scalar function task.");
2397
+ TaskExpression.VectorFunctionSchema = z
2398
+ .object({
2399
+ type: z.literal("vector.function"),
2400
+ author: z
2401
+ .string()
2402
+ .describe("The author of the remote published vector function."),
2403
+ id: z
2404
+ .string()
2405
+ .describe("The unique identifier of the remote published vector function."),
2406
+ version: z
2407
+ .uint32()
2408
+ .describe("The version of the remote published vector function."),
2409
+ skip: TaskExpression.SkipSchema.optional().nullable(),
2410
+ map: TaskExpression.MapSchema.optional().nullable(),
2411
+ input: Function_1.InputExpressionSchema,
2412
+ })
2413
+ .describe("A remote published vector function task.");
2414
+ TaskExpression.VectorCompletionSchema = z
2415
+ .object({
2416
+ type: z.literal("vector.completion"),
2417
+ skip: TaskExpression.SkipSchema.optional().nullable(),
2418
+ map: TaskExpression.MapSchema.optional().nullable(),
2419
+ messages: MessagesExpressionSchema,
2420
+ tools: ToolsExpressionSchema.optional()
2421
+ .nullable()
2422
+ .describe(`${ToolsExpressionSchema.description} These are readonly and will only be useful for explaining prior tool calls or otherwise influencing behavior.`),
2423
+ responses: VectorResponsesExpressionSchema,
2424
+ })
2425
+ .describe("A vector completion task.");
2426
+ })(TaskExpression = Function_1.TaskExpression || (Function_1.TaskExpression = {}));
2427
+ Function_1.TaskExpressionSchema = z
2428
+ .discriminatedUnion("type", [
2429
+ TaskExpression.ScalarFunctionSchema,
2430
+ TaskExpression.VectorFunctionSchema,
2431
+ TaskExpression.VectorCompletionSchema,
2432
+ ])
2433
+ .describe("A task to be executed as part of the function. Will first be compiled using the parent function's input. May be skipped or mapped.");
2434
+ Function_1.TaskExpressionsSchema = z
2435
+ .array(Function_1.TaskExpressionSchema)
2436
+ .describe("The list of tasks to be executed as part of the function.");
2437
+ Function_1.ScalarSchema = z
2438
+ .object({
2439
+ type: z.literal("scalar.function"),
2440
+ author: z.string().describe("The author of the scalar function."),
2441
+ id: z.string().describe("The unique identifier of the scalar function."),
2442
+ version: z.uint32().describe("The version of the scalar function."),
2443
+ description: z
2444
+ .string()
2445
+ .describe("The description of the scalar function."),
2446
+ changelog: z
2447
+ .string()
2448
+ .optional()
2449
+ .nullable()
2450
+ .describe("When present, describes changes from the previous version or versions."),
2451
+ input_schema: Function_1.InputSchemaSchema,
2452
+ input_maps: Function_1.InputMapsExpressionSchema.optional().nullable(),
2453
+ tasks: Function_1.TaskExpressionsSchema,
2454
+ output: ExpressionSchema.describe("An expression which evaluates to a single number. This is the output of the scalar function. Will be provided with the outputs of all tasks."),
2455
+ })
2456
+ .describe("A scalar function.")
2457
+ .meta({ title: "ScalarFunction" });
2458
+ Function_1.VectorSchema = z
2459
+ .object({
2460
+ type: z.literal("vector.function"),
2461
+ author: z.string().describe("The author of the vector function."),
2462
+ id: z.string().describe("The unique identifier of the vector function."),
2463
+ version: z.uint32().describe("The version of the vector function."),
2464
+ description: z
2465
+ .string()
2466
+ .describe("The description of the vector function."),
2467
+ changelog: z
2468
+ .string()
2469
+ .optional()
2470
+ .nullable()
2471
+ .describe("When present, describes changes from the previous version or versions."),
2472
+ input_schema: Function_1.InputSchemaSchema,
2473
+ input_maps: Function_1.InputMapsExpressionSchema.optional().nullable(),
2474
+ tasks: Function_1.TaskExpressionsSchema,
2475
+ output: ExpressionSchema.describe("An expressions which evaluates to an array of numbers. This is the output of the vector function. Will be provided with the outputs of all tasks."),
2476
+ output_length: z
2477
+ .union([
2478
+ z.uint32().describe("The fixed length of the output vector."),
2479
+ ExpressionSchema.describe("An expression which evaluates to the length of the output vector. Will only be provided with the function input. The output length must be determinable from the input alone."),
2480
+ ])
2481
+ .describe("The length of the output vector."),
2482
+ })
2483
+ .describe("A vector function.")
2484
+ .meta({ title: "VectorFunction" });
2485
+ let Executions;
2486
+ (function (Executions) {
2487
+ let Request;
2488
+ (function (Request) {
2489
+ Request.FunctionExecutionParamsBaseSchema = z
2490
+ .object({
2491
+ retry_token: z
2492
+ .string()
2493
+ .optional()
2494
+ .nullable()
2495
+ .describe("The retry token provided by a previous incomplete or failed function execution."),
2496
+ input: Function_1.InputSchema_,
2497
+ provider: Chat.Completions.Request.ProviderSchema.optional().nullable(),
2498
+ seed: Chat.Completions.Request.SeedSchema.optional().nullable(),
2499
+ backoff_max_elapsed_time: Chat.Completions.Request.BackoffMaxElapsedTimeSchema.optional().nullable(),
2500
+ first_chunk_timeout: Chat.Completions.Request.FirstChunkTimeoutSchema.optional().nullable(),
2501
+ other_chunk_timeout: Chat.Completions.Request.OtherChunkTimeoutSchema.optional().nullable(),
2502
+ })
2503
+ .describe("Base parameters for executing a function.");
2504
+ // Execute Inline Function
2505
+ Request.FunctionExecutionParamsExecuteInlineBaseSchema = Request.FunctionExecutionParamsBaseSchema.extend({
2506
+ function: z.lazy(() => FunctionSchema),
2507
+ profile: Function_1.FunctionProfileVersionOptionalSchema,
2508
+ }).describe("Base parameters for executing an inline function.");
2509
+ Request.FunctionExecutionParamsExecuteInlineStreamingSchema = Request.FunctionExecutionParamsExecuteInlineBaseSchema.extend({
2510
+ stream: Chat.Completions.Request.StreamTrueSchema,
2511
+ })
2512
+ .describe("Parameters for executing an inline function and streaming the response.")
2513
+ .meta({ title: "FunctionExecutionParamsExecuteInlineStreaming" });
2514
+ Request.FunctionExecutionParamsExecuteInlineNonStreamingSchema = Request.FunctionExecutionParamsExecuteInlineBaseSchema.extend({
2515
+ stream: Chat.Completions.Request.StreamFalseSchema.optional().nullable(),
2516
+ })
2517
+ .describe("Parameters for executing an inline function with a unary response.")
2518
+ .meta({ title: "FunctionExecutionParamsExecuteInlineNonStreaming" });
2519
+ Request.FunctionExecutionParamsExecuteInlineSchema = z
2520
+ .union([
2521
+ Request.FunctionExecutionParamsExecuteInlineStreamingSchema,
2522
+ Request.FunctionExecutionParamsExecuteInlineNonStreamingSchema,
2523
+ ])
2524
+ .describe("Parameters for executing an inline function.")
2525
+ .meta({ title: "FunctionExecutionParamsExecuteInline" });
2526
+ // Execute Published Function
2527
+ Request.FunctionExecutionParamsExecuteBaseSchema = Request.FunctionExecutionParamsBaseSchema.extend({
2528
+ profile: Function_1.FunctionProfileVersionOptionalSchema.optional().nullable(),
2529
+ }).describe("Base parameters for executing a remote published function.");
2530
+ Request.FunctionExecutionParamsExecuteStreamingSchema = Request.FunctionExecutionParamsExecuteBaseSchema.extend({
2531
+ stream: Chat.Completions.Request.StreamTrueSchema,
2532
+ })
2533
+ .describe("Parameters for executing a remote published function and streaming the response.")
2534
+ .meta({ title: "FunctionExecutionParamsExecuteStreaming" });
2535
+ Request.FunctionExecutionParamsExecuteNonStreamingSchema = Request.FunctionExecutionParamsExecuteBaseSchema.extend({
2536
+ stream: Chat.Completions.Request.StreamFalseSchema.optional().nullable(),
2537
+ })
2538
+ .describe("Parameters for executing a remote published function with a unary response.")
2539
+ .meta({ title: "FunctionExecutionParamsExecuteNonStreaming" });
2540
+ Request.FunctionExecutionParamsExecuteSchema = z
2541
+ .union([
2542
+ Request.FunctionExecutionParamsExecuteStreamingSchema,
2543
+ Request.FunctionExecutionParamsExecuteNonStreamingSchema,
2544
+ ])
2545
+ .describe("Parameters for executing a remote published function.")
2546
+ .meta({ title: "FunctionExecutionParamsExecute" });
2547
+ // Publish Scalar Function
2548
+ Request.FunctionExecutionParamsPublishScalarFunctionBaseSchema = Request.FunctionExecutionParamsBaseSchema.extend({
2549
+ function: Function_1.ScalarSchema,
2550
+ publish_function: z
2551
+ .object({
2552
+ description: z
2553
+ .string()
2554
+ .describe("The description of the published scalar function."),
2555
+ changelog: z
2556
+ .string()
2557
+ .optional()
2558
+ .nullable()
2559
+ .describe("When present, describes changes from the previous version or versions."),
2560
+ input_schema: Function_1.InputSchemaSchema,
2561
+ })
2562
+ .describe("Details about the scalar function to be published."),
2563
+ profile: Function_1.FunctionProfileVersionRequiredSchema,
2564
+ publish_profile: z
2565
+ .object({
2566
+ id: z
2567
+ .literal("default")
2568
+ .describe('The identifier of the profile to publish. Must be "default" when publishing a function.'),
2569
+ version: z
2570
+ .uint32()
2571
+ .describe("The version of the profile to publish. Must match the function's version."),
2572
+ description: z
2573
+ .string()
2574
+ .describe("The description of the published profile."),
2575
+ changelog: z
2576
+ .string()
2577
+ .optional()
2578
+ .nullable()
2579
+ .describe("When present, describes changes from the previous version or versions."),
2580
+ })
2581
+ .describe("Details about the profile to be published."),
2582
+ }).describe("Base parameters for executing and publishing an inline scalar function.");
2583
+ Request.FunctionExecutionParamsPublishScalarFunctionStreamingSchema = Request.FunctionExecutionParamsPublishScalarFunctionBaseSchema.extend({
2584
+ stream: Chat.Completions.Request.StreamTrueSchema,
2585
+ })
2586
+ .describe("Parameters for executing and publishing an inline scalar function and streaming the response.")
2587
+ .meta({
2588
+ title: "FunctionExecutionParamsPublishScalarFunctionStreaming",
2589
+ });
2590
+ Request.FunctionExecutionParamsPublishScalarFunctionNonStreamingSchema = Request.FunctionExecutionParamsPublishScalarFunctionBaseSchema.extend({
2591
+ stream: Chat.Completions.Request.StreamFalseSchema.optional().nullable(),
2592
+ })
2593
+ .describe("Parameters for executing and publishing an inline scalar function with a unary response.")
2594
+ .meta({
2595
+ title: "FunctionExecutionParamsPublishScalarFunctionNonStreaming",
2596
+ });
2597
+ Request.FunctionExecutionParamsPublishScalarFunctionSchema = z
2598
+ .union([
2599
+ Request.FunctionExecutionParamsPublishScalarFunctionStreamingSchema,
2600
+ Request.FunctionExecutionParamsPublishScalarFunctionNonStreamingSchema,
2601
+ ])
2602
+ .describe("Parameters for executing and publishing an inline scalar function.")
2603
+ .meta({ title: "FunctionExecutionParamsPublishScalarFunction" });
2604
+ // Publish Vector Function
2605
+ Request.FunctionExecutionParamsPublishVectorFunctionBaseSchema = Request.FunctionExecutionParamsBaseSchema.extend({
2606
+ function: Function_1.VectorSchema,
2607
+ publish_function: z
2608
+ .object({
2609
+ description: z
2610
+ .string()
2611
+ .describe("The description of the published vector function."),
2612
+ changelog: z
2613
+ .string()
2614
+ .optional()
2615
+ .nullable()
2616
+ .describe("When present, describes changes from the previous version or versions."),
2617
+ input_schema: Function_1.InputSchemaSchema,
2618
+ output_length: z
2619
+ .union([
2620
+ z.uint32().describe("The fixed length of the output vector."),
2621
+ ExpressionSchema.describe("An expression which evaluates to the length of the output vector. Will only be provided with the function input. The output length must be determinable from the input alone."),
2622
+ ])
2623
+ .describe("The length of the output vector."),
2624
+ })
2625
+ .describe("Details about the vector function to be published."),
2626
+ profile: Function_1.FunctionProfileVersionRequiredSchema,
2627
+ publish_profile: z
2628
+ .object({
2629
+ id: z
2630
+ .literal("default")
2631
+ .describe('The identifier of the profile to publish. Must be "default" when publishing a function.'),
2632
+ version: z
2633
+ .uint32()
2634
+ .describe("The version of the profile to publish. Must match the function's version."),
2635
+ description: z
2636
+ .string()
2637
+ .describe("The description of the published profile."),
2638
+ changelog: z
2639
+ .string()
2640
+ .optional()
2641
+ .nullable()
2642
+ .describe("When present, describes changes from the previous version or versions."),
2643
+ })
2644
+ .describe("Details about the profile to be published."),
2645
+ }).describe("Base parameters for executing and publishing an inline vector function.");
2646
+ Request.FunctionExecutionParamsPublishVectorFunctionStreamingSchema = Request.FunctionExecutionParamsPublishVectorFunctionBaseSchema.extend({
2647
+ stream: Chat.Completions.Request.StreamTrueSchema,
2648
+ })
2649
+ .describe("Parameters for executing and publishing an inline vector function and streaming the response.")
2650
+ .meta({
2651
+ title: "FunctionExecutionParamsPublishVectorFunctionStreaming",
2652
+ });
2653
+ Request.FunctionExecutionParamsPublishVectorFunctionNonStreamingSchema = Request.FunctionExecutionParamsPublishVectorFunctionBaseSchema.extend({
2654
+ stream: Chat.Completions.Request.StreamFalseSchema.optional().nullable(),
2655
+ })
2656
+ .describe("Parameters for executing and publishing an inline vector function with a unary response.")
2657
+ .meta({
2658
+ title: "FunctionExecutionParamsPublishVectorFunctionNonStreaming",
2659
+ });
2660
+ Request.FunctionExecutionParamsPublishVectorFunctionSchema = z
2661
+ .union([
2662
+ Request.FunctionExecutionParamsPublishVectorFunctionStreamingSchema,
2663
+ Request.FunctionExecutionParamsPublishVectorFunctionNonStreamingSchema,
2664
+ ])
2665
+ .describe("Parameters for executing and publishing an inline vector function.")
2666
+ .meta({ title: "FunctionExecutionParamsPublishVectorFunction" });
2667
+ // Publish Function
2668
+ Request.FunctionExecutionParamsPublishFunctionStreamingSchema = z
2669
+ .union([
2670
+ Request.FunctionExecutionParamsPublishScalarFunctionStreamingSchema,
2671
+ Request.FunctionExecutionParamsPublishVectorFunctionStreamingSchema,
2672
+ ])
2673
+ .describe("Parameters for executing and publishing an inline function and streaming the response.")
2674
+ .meta({ title: "FunctionExecutionParamsPublishFunctionStreaming" });
2675
+ Request.FunctionExecutionParamsPublishFunctionNonStreamingSchema = z
2676
+ .union([
2677
+ Request.FunctionExecutionParamsPublishScalarFunctionNonStreamingSchema,
2678
+ Request.FunctionExecutionParamsPublishVectorFunctionNonStreamingSchema,
2679
+ ])
2680
+ .describe("Parameters for executing and publishing an inline function with a unary response.")
2681
+ .meta({ title: "FunctionExecutionParamsPublishFunctionNonStreaming" });
2682
+ Request.FunctionExecutionParamsPublishFunctionSchema = z
2683
+ .union([
2684
+ Request.FunctionExecutionParamsPublishScalarFunctionSchema,
2685
+ Request.FunctionExecutionParamsPublishVectorFunctionSchema,
2686
+ ])
2687
+ .describe("Parameters for executing and publishing an inline function.")
2688
+ .meta({ title: "FunctionExecutionParamsPublishFunction" });
2689
+ // Publish Profile
2690
+ Request.FunctionExecutionParamsPublishProfileBaseSchema = Request.FunctionExecutionParamsBaseSchema.extend({
2691
+ profile: z
2692
+ .array(Function_1.ProfileVersionRequiredSchema)
2693
+ .describe("The profile to publish."),
2694
+ publish_profile: z
2695
+ .object({
2696
+ id: z
2697
+ .string()
2698
+ .describe("The unique identifier of the profile to publish."),
2699
+ version: z
2700
+ .uint32()
2701
+ .describe("The version of the profile to publish."),
2702
+ description: z
2703
+ .string()
2704
+ .describe("The description of the published profile."),
2705
+ changelog: z
2706
+ .string()
2707
+ .optional()
2708
+ .nullable()
2709
+ .describe("When present, describes changes from the previous version or versions."),
2710
+ })
2711
+ .describe("Details about the profile to be published."),
2712
+ }).describe("Base parameters for executing a remote published function and publishing a profile.");
2713
+ Request.FunctionExecutionParamsPublishProfileStreamingSchema = Request.FunctionExecutionParamsPublishProfileBaseSchema.extend({
2714
+ stream: Chat.Completions.Request.StreamTrueSchema,
2715
+ })
2716
+ .describe("Parameters for executing a remote published function, publishing a profile, and streaming the response.")
2717
+ .meta({ title: "FunctionExecutionParamsPublishProfileStreaming" });
2718
+ Request.FunctionExecutionParamsPublishProfileNonStreamingSchema = Request.FunctionExecutionParamsPublishProfileBaseSchema.extend({
2719
+ stream: Chat.Completions.Request.StreamFalseSchema.optional().nullable(),
2720
+ })
2721
+ .describe("Parameters for executing a remote published function and publishing a profile with a unary response.")
2722
+ .meta({ title: "FunctionExecutionParamsPublishProfileNonStreaming" });
2723
+ Request.FunctionExecutionParamsPublishProfileSchema = z
2724
+ .union([
2725
+ Request.FunctionExecutionParamsPublishProfileStreamingSchema,
2726
+ Request.FunctionExecutionParamsPublishProfileNonStreamingSchema,
2727
+ ])
2728
+ .describe("Parameters for executing a remote published function and publishing a profile.")
2729
+ .meta({ title: "FunctionExecutionParamsPublishProfile" });
2730
+ })(Request = Executions.Request || (Executions.Request = {}));
541
2731
  let Response;
542
2732
  (function (Response) {
2733
+ let Task;
2734
+ (function (Task) {
2735
+ Task.IndexSchema = z
2736
+ .uint32()
2737
+ .describe("The index of the task in the sequence of tasks.");
2738
+ Task.TaskIndexSchema = z
2739
+ .uint32()
2740
+ .describe("The index of the task amongst all mapped and non-skipped compiled tasks. Used internally.");
2741
+ Task.TaskPathSchema = z
2742
+ .array(z.uint32())
2743
+ .describe("The path of this task which may be used to navigate which nested task this is amongst the root functions tasks and sub-tasks.");
2744
+ })(Task = Response.Task || (Response.Task = {}));
543
2745
  let Streaming;
544
2746
  (function (Streaming) {
545
- let ChatCompletionChunk;
546
- (function (ChatCompletionChunk) {
2747
+ let TaskChunk;
2748
+ (function (TaskChunk) {
2749
+ function merged(a, b) {
2750
+ if ("scores" in a) {
2751
+ return VectorCompletion.merged(a, b);
2752
+ }
2753
+ else {
2754
+ return Function.merged(a, b);
2755
+ }
2756
+ }
2757
+ TaskChunk.merged = merged;
2758
+ function mergedList(a, b) {
2759
+ let merged = undefined;
2760
+ for (const chunk of b) {
2761
+ const existingIndex = a.findIndex(({ index }) => index === chunk.index);
2762
+ if (existingIndex === -1) {
2763
+ if (merged === undefined) {
2764
+ merged = [...a, chunk];
2765
+ }
2766
+ else {
2767
+ merged.push(chunk);
2768
+ }
2769
+ }
2770
+ else {
2771
+ const [mergedChunk, chunkChanged] = TaskChunk.merged(a[existingIndex], chunk);
2772
+ if (chunkChanged) {
2773
+ if (merged === undefined) {
2774
+ merged = [...a];
2775
+ }
2776
+ merged[existingIndex] = mergedChunk;
2777
+ }
2778
+ }
2779
+ }
2780
+ return merged ? [merged, true] : [a, false];
2781
+ }
2782
+ TaskChunk.mergedList = mergedList;
2783
+ let Function;
2784
+ (function (Function) {
2785
+ function merged(a, b) {
2786
+ const index = a.index;
2787
+ const task_index = a.task_index;
2788
+ const task_path = a.task_path;
2789
+ const [base, baseChanged] = FunctionExecutionChunk.merged(a, b);
2790
+ if (baseChanged) {
2791
+ return [
2792
+ Object.assign({ index,
2793
+ task_index,
2794
+ task_path }, base),
2795
+ true,
2796
+ ];
2797
+ }
2798
+ else {
2799
+ return [a, false];
2800
+ }
2801
+ }
2802
+ Function.merged = merged;
2803
+ })(Function = TaskChunk.Function || (TaskChunk.Function = {}));
2804
+ TaskChunk.FunctionSchema = z
2805
+ .lazy(() => Streaming.FunctionExecutionChunkSchema.extend({
2806
+ index: Task.IndexSchema,
2807
+ task_index: Task.TaskIndexSchema,
2808
+ task_path: Task.TaskPathSchema,
2809
+ tasks: z
2810
+ .array(Streaming.TaskChunkSchema)
2811
+ .meta({
2812
+ title: "TaskChunkArray",
2813
+ recursive: true,
2814
+ })
2815
+ .describe("The tasks executed as part of the function execution."),
2816
+ }))
2817
+ .describe("A chunk of a function execution task.");
2818
+ let VectorCompletion;
2819
+ (function (VectorCompletion) {
2820
+ function merged(a, b) {
2821
+ const index = a.index;
2822
+ const task_index = a.task_index;
2823
+ const task_path = a.task_path;
2824
+ const [base, baseChanged] = Vector.Completions.Response.Streaming.VectorCompletionChunk.merged(a, b);
2825
+ const [error, errorChanged] = merge(a.error, b.error);
2826
+ if (baseChanged || errorChanged) {
2827
+ return [
2828
+ Object.assign(Object.assign({ index,
2829
+ task_index,
2830
+ task_path }, base), (error !== undefined ? { error } : {})),
2831
+ true,
2832
+ ];
2833
+ }
2834
+ else {
2835
+ return [a, false];
2836
+ }
2837
+ }
2838
+ VectorCompletion.merged = merged;
2839
+ })(VectorCompletion = TaskChunk.VectorCompletion || (TaskChunk.VectorCompletion = {}));
2840
+ TaskChunk.VectorCompletionSchema = Vector.Completions.Response.Streaming.VectorCompletionChunkSchema.extend({
2841
+ index: Task.IndexSchema,
2842
+ task_index: Task.TaskIndexSchema,
2843
+ task_path: Task.TaskPathSchema,
2844
+ error: ObjectiveAIErrorSchema.optional().describe("When present, indicates that an error occurred during the vector completion task."),
2845
+ }).describe("A chunk of a vector completion task.");
2846
+ })(TaskChunk = Streaming.TaskChunk || (Streaming.TaskChunk = {}));
2847
+ Streaming.TaskChunkSchema = z
2848
+ .union([TaskChunk.FunctionSchema, TaskChunk.VectorCompletionSchema])
2849
+ .describe("A chunk of a task execution.");
2850
+ let FunctionExecutionChunk;
2851
+ (function (FunctionExecutionChunk) {
547
2852
  function merged(a, b) {
548
2853
  const id = a.id;
549
- const [choices, choicesChanged] = Choice.mergedList(a.choices, b.choices);
2854
+ const [tasks, tasksChanged] = TaskChunk.mergedList(a.tasks, b.tasks);
2855
+ const [tasks_errors, tasks_errorsChanged] = merge(a.tasks_errors, b.tasks_errors);
2856
+ const [output, outputChanged] = merge(a.output, b.output);
2857
+ const [error, errorChanged] = merge(a.error, b.error);
2858
+ const [retry_token, retry_tokenChanged] = merge(a.retry_token, b.retry_token);
2859
+ const [function_published, function_publishedChanged] = merge(a.function_published, b.function_published);
2860
+ const [profile_published, profile_publishedChanged] = merge(a.profile_published, b.profile_published);
550
2861
  const created = a.created;
551
- const model = a.model;
2862
+ const function_ = a.function;
2863
+ const profile = a.profile;
552
2864
  const object = a.object;
553
- const [usage, usageChanged] = merge(a.usage, b.usage, Chat.Completions.Response.Usage.merged);
554
- if (choicesChanged || usageChanged) {
2865
+ const [usage, usageChanged] = merge(a.usage, b.usage);
2866
+ if (tasksChanged ||
2867
+ tasks_errorsChanged ||
2868
+ outputChanged ||
2869
+ errorChanged ||
2870
+ retry_tokenChanged ||
2871
+ function_publishedChanged ||
2872
+ profile_publishedChanged ||
2873
+ usageChanged) {
555
2874
  return [
556
- Object.assign({ id,
557
- choices,
558
- created,
559
- model,
560
- object }, (usage !== undefined ? { usage } : {})),
2875
+ Object.assign(Object.assign(Object.assign(Object.assign(Object.assign(Object.assign(Object.assign(Object.assign({ id,
2876
+ tasks }, (tasks_errors !== undefined ? { tasks_errors } : {})), (output !== undefined ? { output } : {})), (error !== undefined ? { error } : {})), (retry_token !== undefined ? { retry_token } : {})), (function_published !== undefined
2877
+ ? { function_published }
2878
+ : {})), (profile_published !== undefined
2879
+ ? { profile_published }
2880
+ : {})), { created, function: function_, profile,
2881
+ object }), (usage !== undefined ? { usage } : {})),
561
2882
  true,
562
2883
  ];
563
2884
  }
@@ -565,32 +2886,271 @@ export var Multichat;
565
2886
  return [a, false];
566
2887
  }
567
2888
  }
568
- ChatCompletionChunk.merged = merged;
569
- })(ChatCompletionChunk = Streaming.ChatCompletionChunk || (Streaming.ChatCompletionChunk = {}));
570
- let Choice;
571
- (function (Choice) {
2889
+ FunctionExecutionChunk.merged = merged;
2890
+ })(FunctionExecutionChunk = Streaming.FunctionExecutionChunk || (Streaming.FunctionExecutionChunk = {}));
2891
+ Streaming.FunctionExecutionChunkSchema = z
2892
+ .object({
2893
+ id: z
2894
+ .string()
2895
+ .describe("The unique identifier of the function execution."),
2896
+ tasks: z
2897
+ .array(Streaming.TaskChunkSchema)
2898
+ .describe("The tasks executed as part of the function execution."),
2899
+ tasks_errors: z
2900
+ .boolean()
2901
+ .optional()
2902
+ .describe("When true, indicates that one or more tasks encountered errors during execution."),
2903
+ output: z
2904
+ .union([
2905
+ z
2906
+ .number()
2907
+ .describe("The scalar output of the function execution."),
2908
+ z
2909
+ .array(z.number())
2910
+ .describe("The vector output of the function execution."),
2911
+ JsonValueSchema.describe("The erroneous output of the function execution."),
2912
+ ])
2913
+ .optional()
2914
+ .describe("The output of the function execution."),
2915
+ error: ObjectiveAIErrorSchema.optional().describe("When present, indicates that an error occurred during the function execution."),
2916
+ retry_token: z
2917
+ .string()
2918
+ .optional()
2919
+ .describe("A token which may be used to retry the function execution."),
2920
+ function_published: z
2921
+ .boolean()
2922
+ .optional()
2923
+ .describe("When true, indicates that a function was published as part of this execution."),
2924
+ profile_published: z
2925
+ .boolean()
2926
+ .optional()
2927
+ .describe("When true, indicates that a profile was published as part of this execution."),
2928
+ created: z
2929
+ .uint32()
2930
+ .describe("The UNIX timestamp (in seconds) when the function execution chunk was created."),
2931
+ function: z
2932
+ .string()
2933
+ .nullable()
2934
+ .describe("The unique identifier of the function being executed."),
2935
+ profile: z
2936
+ .string()
2937
+ .nullable()
2938
+ .describe("The unique identifier of the profile being used."),
2939
+ object: z
2940
+ .enum([
2941
+ "scalar.function.execution.chunk",
2942
+ "vector.function.execution.chunk",
2943
+ ])
2944
+ .describe("The object type."),
2945
+ usage: Vector.Completions.Response.UsageSchema.optional(),
2946
+ })
2947
+ .describe("A chunk of a function execution.");
2948
+ })(Streaming = Response.Streaming || (Response.Streaming = {}));
2949
+ let Unary;
2950
+ (function (Unary) {
2951
+ let Task;
2952
+ (function (Task) {
2953
+ Task.FunctionSchema = z
2954
+ .lazy(() => Unary.FunctionExecutionSchema.extend({
2955
+ index: Response.Task.IndexSchema,
2956
+ task_index: Response.Task.TaskIndexSchema,
2957
+ task_path: Response.Task.TaskPathSchema,
2958
+ tasks: z
2959
+ .array(Unary.TaskSchema)
2960
+ .meta({
2961
+ title: "TaskArray",
2962
+ recursive: true,
2963
+ })
2964
+ .describe("The tasks executed as part of the function execution."),
2965
+ }))
2966
+ .describe("A function execution task.");
2967
+ Task.VectorCompletionSchema = Vector.Completions.Response.Unary.VectorCompletionSchema.extend({
2968
+ index: Response.Task.IndexSchema,
2969
+ task_index: Response.Task.TaskIndexSchema,
2970
+ task_path: Response.Task.TaskPathSchema,
2971
+ error: ObjectiveAIErrorSchema.nullable().describe("When non-null, indicates that an error occurred during the vector completion task."),
2972
+ }).describe("A vector completion task.");
2973
+ })(Task = Unary.Task || (Unary.Task = {}));
2974
+ Unary.TaskSchema = z
2975
+ .union([Task.FunctionSchema, Task.VectorCompletionSchema])
2976
+ .describe("A task execution.");
2977
+ Unary.FunctionExecutionSchema = z
2978
+ .object({
2979
+ id: z
2980
+ .string()
2981
+ .describe("The unique identifier of the function execution."),
2982
+ tasks: z
2983
+ .array(Unary.TaskSchema)
2984
+ .describe("The tasks executed as part of the function execution."),
2985
+ tasks_errors: z
2986
+ .boolean()
2987
+ .describe("When true, indicates that one or more tasks encountered errors during execution."),
2988
+ output: z
2989
+ .union([
2990
+ z
2991
+ .number()
2992
+ .describe("The scalar output of the function execution."),
2993
+ z
2994
+ .array(z.number())
2995
+ .describe("The vector output of the function execution."),
2996
+ JsonValueSchema.describe("The erroneous output of the function execution."),
2997
+ ])
2998
+ .describe("The output of the function execution."),
2999
+ error: ObjectiveAIErrorSchema.nullable().describe("When non-null, indicates that an error occurred during the function execution."),
3000
+ retry_token: z
3001
+ .string()
3002
+ .nullable()
3003
+ .describe("A token which may be used to retry the function execution."),
3004
+ function_published: z
3005
+ .boolean()
3006
+ .optional()
3007
+ .describe("When true, indicates that a function was published as part of this execution."),
3008
+ profile_published: z
3009
+ .boolean()
3010
+ .optional()
3011
+ .describe("When true, indicates that a profile was published as part of this execution."),
3012
+ created: z
3013
+ .uint32()
3014
+ .describe("The UNIX timestamp (in seconds) when the function execution chunk was created."),
3015
+ function: z
3016
+ .string()
3017
+ .nullable()
3018
+ .describe("The unique identifier of the function being executed."),
3019
+ profile: z
3020
+ .string()
3021
+ .nullable()
3022
+ .describe("The unique identifier of the profile being used."),
3023
+ object: z
3024
+ .enum(["scalar.function.execution", "vector.function.execution"])
3025
+ .describe("The object type."),
3026
+ usage: Vector.Completions.Response.UsageSchema,
3027
+ })
3028
+ .describe("A function execution.");
3029
+ })(Unary = Response.Unary || (Response.Unary = {}));
3030
+ })(Response = Executions.Response || (Executions.Response = {}));
3031
+ })(Executions = Function_1.Executions || (Function_1.Executions = {}));
3032
+ let ComputeProfile;
3033
+ (function (ComputeProfile) {
3034
+ let Request;
3035
+ (function (Request) {
3036
+ let DatasetItem;
3037
+ (function (DatasetItem) {
3038
+ let Target;
3039
+ (function (Target) {
3040
+ Target.ScalarSchema = z
3041
+ .object({
3042
+ type: z.literal("scalar"),
3043
+ value: z.number(),
3044
+ })
3045
+ .describe("A scalar target output. The desired output is this exact scalar.");
3046
+ Target.VectorSchema = z
3047
+ .object({
3048
+ type: z.literal("vector"),
3049
+ value: z.array(z.number()),
3050
+ })
3051
+ .describe("A vector target output. The desired output is this exact vector.");
3052
+ Target.VectorWinnerSchema = z
3053
+ .object({
3054
+ type: z.literal("vector_winner"),
3055
+ value: z.uint32(),
3056
+ })
3057
+ .describe("A vector winner target output. The desired output is a vector where the highest value is at the specified index.");
3058
+ })(Target = DatasetItem.Target || (DatasetItem.Target = {}));
3059
+ DatasetItem.TargetSchema = z
3060
+ .discriminatedUnion("type", [
3061
+ Target.ScalarSchema,
3062
+ Target.VectorSchema,
3063
+ Target.VectorWinnerSchema,
3064
+ ])
3065
+ .describe("The target output for a given function input.");
3066
+ })(DatasetItem = Request.DatasetItem || (Request.DatasetItem = {}));
3067
+ Request.DatasetItemSchema = z
3068
+ .object({
3069
+ input: Function_1.InputSchema_,
3070
+ target: DatasetItem.TargetSchema,
3071
+ })
3072
+ .describe("A Function input and its corresponding target output.");
3073
+ Request.FunctionComputeProfileParamsBaseSchema = z
3074
+ .object({
3075
+ retry_token: z
3076
+ .string()
3077
+ .optional()
3078
+ .nullable()
3079
+ .describe("The retry token provided by a previous incomplete or failed profile computation."),
3080
+ max_retries: z
3081
+ .uint32()
3082
+ .optional()
3083
+ .nullable()
3084
+ .describe("The maximum number of retries to attempt when a function execution fails during profile computation."),
3085
+ n: z
3086
+ .uint32()
3087
+ .describe("The number of function executions to perform per dataset item. Generally speaking, higher N values increase the quality of the computed profile."),
3088
+ dataset: z
3089
+ .array(Request.DatasetItemSchema)
3090
+ .describe("The dataset of input and target output pairs to use for computing the profile."),
3091
+ ensemble: Vector.Completions.Request.EnsembleSchema,
3092
+ provider: Chat.Completions.Request.ProviderSchema.optional().nullable(),
3093
+ seed: Chat.Completions.Request.SeedSchema.optional().nullable(),
3094
+ backoff_max_elapsed_time: Chat.Completions.Request.BackoffMaxElapsedTimeSchema.optional().nullable(),
3095
+ first_chunk_timeout: Chat.Completions.Request.FirstChunkTimeoutSchema.optional().nullable(),
3096
+ other_chunk_timeout: Chat.Completions.Request.OtherChunkTimeoutSchema.optional().nullable(),
3097
+ })
3098
+ .describe("Base parameters for computing a function profile.");
3099
+ Request.FunctionComputeProfileParamsStreamingSchema = Request.FunctionComputeProfileParamsBaseSchema.extend({
3100
+ stream: Chat.Completions.Request.StreamTrueSchema,
3101
+ })
3102
+ .describe("Parameters for computing a function profile and streaming the response.")
3103
+ .meta({ title: "FunctionComputeProfileParamsStreaming" });
3104
+ Request.FunctionComputeProfileParamsNonStreamingSchema = Request.FunctionComputeProfileParamsBaseSchema.extend({
3105
+ stream: Chat.Completions.Request.StreamFalseSchema.optional().nullable(),
3106
+ })
3107
+ .describe("Parameters for computing a function profile with a unary response.")
3108
+ .meta({ title: "FunctionComputeProfileParamsNonStreaming" });
3109
+ Request.FunctionComputeProfileParamsSchema = z
3110
+ .union([
3111
+ Request.FunctionComputeProfileParamsStreamingSchema,
3112
+ Request.FunctionComputeProfileParamsNonStreamingSchema,
3113
+ ])
3114
+ .describe("Parameters for computing a function profile.")
3115
+ .meta({ title: "FunctionComputeProfileParams" });
3116
+ })(Request = ComputeProfile.Request || (ComputeProfile.Request = {}));
3117
+ let Response;
3118
+ (function (Response) {
3119
+ Response.FittingStatsSchema = z
3120
+ .object({
3121
+ loss: z
3122
+ .number()
3123
+ .describe("The final sum loss achieved during weights fitting."),
3124
+ executions: z
3125
+ .uint32()
3126
+ .describe("The total number of function executions used during weights fitting."),
3127
+ starts: z
3128
+ .uint32()
3129
+ .describe("The number of fitting starts attempted. Each start begins with a randomized weight vector."),
3130
+ rounds: z
3131
+ .uint32()
3132
+ .describe("The number of fitting rounds performed across all starts."),
3133
+ errors: z
3134
+ .uint32()
3135
+ .describe("The number of errors which occured while computing outputs during fitting."),
3136
+ })
3137
+ .describe("Statistics about the fitting process used to compute the weights for the profile.");
3138
+ let Streaming;
3139
+ (function (Streaming) {
3140
+ let FunctionExecutionChunk;
3141
+ (function (FunctionExecutionChunk) {
572
3142
  function merged(a, b) {
573
- const [delta, deltaChanged] = merge(a.delta, b.delta, Chat.Completions.Response.Streaming.Delta.merged);
574
- const [finish_reason, finish_reasonChanged] = merge(a.finish_reason, b.finish_reason);
575
3143
  const index = a.index;
576
- const [logprobs, logprobsChanged] = merge(a.logprobs, b.logprobs, Chat.Completions.Response.Logprobs.merged);
577
- const [error, errorChanged] = merge(a.error, b.error);
578
- const [model, modelChanged] = merge(a.model, b.model);
579
- const [model_index, model_indexChanged] = merge(a.model_index, b.model_index);
580
- const [completion_metadata, completion_metadataChanged] = merge(a.completion_metadata, b.completion_metadata, Score.Completions.Response.CompletionMetadata.merged);
581
- if (deltaChanged ||
582
- finish_reasonChanged ||
583
- logprobsChanged ||
584
- errorChanged ||
585
- modelChanged ||
586
- model_indexChanged ||
587
- completion_metadataChanged) {
3144
+ const dataset = a.dataset;
3145
+ const n = a.n;
3146
+ const retry = a.retry;
3147
+ const [base, baseChanged] = Executions.Response.Streaming.FunctionExecutionChunk.merged(a, b);
3148
+ if (baseChanged) {
588
3149
  return [
589
- Object.assign(Object.assign(Object.assign(Object.assign(Object.assign({ delta,
590
- finish_reason,
591
- index }, (logprobs !== undefined ? { logprobs } : {})), (error !== undefined ? { error } : {})), (model !== undefined ? { model } : {})), (model_index !== undefined ? { model_index } : {})), (completion_metadata !== undefined
592
- ? { completion_metadata }
593
- : {})),
3150
+ Object.assign({ index,
3151
+ dataset,
3152
+ n,
3153
+ retry }, base),
594
3154
  true,
595
3155
  ];
596
3156
  }
@@ -598,205 +3158,343 @@ export var Multichat;
598
3158
  return [a, false];
599
3159
  }
600
3160
  }
601
- Choice.merged = merged;
3161
+ FunctionExecutionChunk.merged = merged;
602
3162
  function mergedList(a, b) {
603
3163
  let merged = undefined;
604
- for (const choice of b) {
605
- const existingIndex = a.findIndex(({ index }) => index === choice.index);
3164
+ for (const chunk of b) {
3165
+ const existingIndex = a.findIndex(({ index }) => index === chunk.index);
606
3166
  if (existingIndex === -1) {
607
3167
  if (merged === undefined) {
608
- merged = [...a, choice];
3168
+ merged = [...a, chunk];
609
3169
  }
610
3170
  else {
611
- merged.push(choice);
3171
+ merged.push(chunk);
612
3172
  }
613
3173
  }
614
3174
  else {
615
- const [mergedChoice, choiceChanged] = Choice.merged(a[existingIndex], choice);
616
- if (choiceChanged) {
3175
+ const [mergedChunk, chunkChanged] = FunctionExecutionChunk.merged(a[existingIndex], chunk);
3176
+ if (chunkChanged) {
617
3177
  if (merged === undefined) {
618
3178
  merged = [...a];
619
3179
  }
620
- merged[existingIndex] = mergedChoice;
3180
+ merged[existingIndex] = mergedChunk;
621
3181
  }
622
3182
  }
623
3183
  }
624
3184
  return merged ? [merged, true] : [a, false];
625
3185
  }
626
- Choice.mergedList = mergedList;
627
- })(Choice = Streaming.Choice || (Streaming.Choice = {}));
3186
+ FunctionExecutionChunk.mergedList = mergedList;
3187
+ })(FunctionExecutionChunk = Streaming.FunctionExecutionChunk || (Streaming.FunctionExecutionChunk = {}));
3188
+ Streaming.FunctionExecutionChunkSchema = Executions.Response.Streaming.FunctionExecutionChunkSchema.extend({
3189
+ index: z
3190
+ .uint32()
3191
+ .describe("The index of the function execution chunk in the list of executions."),
3192
+ dataset: z
3193
+ .uint32()
3194
+ .describe("The index of the dataset item this function execution chunk corresponds to."),
3195
+ n: z
3196
+ .uint32()
3197
+ .describe("The N index for this function execution chunk. There will be N function executions, and N comes from the request parameters."),
3198
+ retry: z
3199
+ .uint32()
3200
+ .describe("The retry index for this function execution chunk. There may be multiple retries for a given dataset item and N index."),
3201
+ }).describe("A chunk of a function execution ran during profile computation.");
3202
+ let FunctionComputeProfileChunk;
3203
+ (function (FunctionComputeProfileChunk) {
3204
+ function merged(a, b) {
3205
+ const id = a.id;
3206
+ const [executions, executionsChanged] = FunctionExecutionChunk.mergedList(a.executions, b.executions);
3207
+ const [executions_errors, executions_errorsChanged] = merge(a.executions_errors, b.executions_errors);
3208
+ const [profile, profileChanged] = merge(a.profile, b.profile);
3209
+ const [fitting_stats, fitting_statsChanged] = merge(a.fitting_stats, b.fitting_stats);
3210
+ const created = a.created;
3211
+ const function_ = a.function;
3212
+ const object = a.object;
3213
+ const [usage, usageChanged] = merge(a.usage, b.usage);
3214
+ if (executionsChanged ||
3215
+ executions_errorsChanged ||
3216
+ profileChanged ||
3217
+ fitting_statsChanged ||
3218
+ usageChanged) {
3219
+ return [
3220
+ Object.assign(Object.assign(Object.assign(Object.assign(Object.assign({ id,
3221
+ executions }, (executions_errors !== undefined
3222
+ ? { executions_errors }
3223
+ : {})), (profile !== undefined ? { profile } : {})), (fitting_stats !== undefined ? { fitting_stats } : {})), { created, function: function_, object }), (usage !== undefined ? { usage } : {})),
3224
+ true,
3225
+ ];
3226
+ }
3227
+ else {
3228
+ return [a, false];
3229
+ }
3230
+ }
3231
+ FunctionComputeProfileChunk.merged = merged;
3232
+ })(FunctionComputeProfileChunk = Streaming.FunctionComputeProfileChunk || (Streaming.FunctionComputeProfileChunk = {}));
3233
+ Streaming.FunctionComputeProfileChunkSchema = z
3234
+ .object({
3235
+ id: z
3236
+ .string()
3237
+ .describe("The unique identifier of the function profile computation chunk."),
3238
+ executions: z
3239
+ .array(Streaming.FunctionExecutionChunkSchema)
3240
+ .describe("The function executions performed as part of computing the profile."),
3241
+ executions_errors: z
3242
+ .boolean()
3243
+ .optional()
3244
+ .describe("When true, indicates that one or more function executions encountered errors during profile computation."),
3245
+ profile: z
3246
+ .array(Function_1.ProfileVersionRequiredSchema)
3247
+ .optional()
3248
+ .describe("The computed function profile."),
3249
+ fitting_stats: Response.FittingStatsSchema.optional(),
3250
+ created: z
3251
+ .uint32()
3252
+ .describe("The UNIX timestamp (in seconds) when the function profile computation was created."),
3253
+ function: z
3254
+ .string()
3255
+ .describe("The unique identifier of the function for which the profile is being computed."),
3256
+ object: z.literal("function.compute.profile.chunk"),
3257
+ usage: Vector.Completions.Response.UsageSchema.optional(),
3258
+ })
3259
+ .describe("A chunk of a function profile computation.");
628
3260
  })(Streaming = Response.Streaming || (Response.Streaming = {}));
629
- })(Response = Completions.Response || (Completions.Response = {}));
630
- async function list(openai, listOptions, options) {
631
- const response = await openai.get("/multichat/completions", Object.assign({ query: listOptions }, options));
632
- return response;
633
- }
634
- Completions.list = list;
635
- async function publish(openai, id, options) {
636
- await openai.post(`/multichat/completions/${id}/publish`, options);
637
- }
638
- Completions.publish = publish;
639
- async function retrieve(openai, id, options) {
640
- const response = await openai.get(`/multichat/completions/${id}`, options);
3261
+ let Unary;
3262
+ (function (Unary) {
3263
+ Unary.FunctionExecutionSchema = Executions.Response.Unary.FunctionExecutionSchema.extend({
3264
+ index: z
3265
+ .uint32()
3266
+ .describe("The index of the function execution in the list of executions."),
3267
+ dataset: z
3268
+ .uint32()
3269
+ .describe("The index of the dataset item this function execution corresponds to."),
3270
+ n: z
3271
+ .uint32()
3272
+ .describe("The N index for this function execution. There will be N function executions, and N comes from the request parameters."),
3273
+ retry: z
3274
+ .uint32()
3275
+ .describe("The retry index for this function execution. There may be multiple retries for a given dataset item and N index."),
3276
+ }).describe("A function execution ran during profile computation.");
3277
+ Unary.FunctionComputeProfileSchema = z
3278
+ .object({
3279
+ id: z
3280
+ .string()
3281
+ .describe("The unique identifier of the function profile computation."),
3282
+ executions: z
3283
+ .array(Unary.FunctionExecutionSchema)
3284
+ .describe("The function executions performed as part of computing the profile."),
3285
+ executions_errors: z
3286
+ .boolean()
3287
+ .describe("When true, indicates that one or more function executions encountered errors during profile computation."),
3288
+ profile: z
3289
+ .array(Function_1.ProfileVersionRequiredSchema)
3290
+ .describe("The computed function profile."),
3291
+ fitting_stats: Response.FittingStatsSchema,
3292
+ created: z
3293
+ .uint32()
3294
+ .describe("The UNIX timestamp (in seconds) when the function profile computation was created."),
3295
+ function: z
3296
+ .string()
3297
+ .describe("The unique identifier of the function for which the profile is being computed."),
3298
+ object: z.literal("function.compute.profile"),
3299
+ usage: Vector.Completions.Response.UsageSchema,
3300
+ })
3301
+ .describe("A function profile computation.");
3302
+ })(Unary = Response.Unary || (Response.Unary = {}));
3303
+ })(Response = ComputeProfile.Response || (ComputeProfile.Response = {}));
3304
+ })(ComputeProfile = Function_1.ComputeProfile || (Function_1.ComputeProfile = {}));
3305
+ let Profile;
3306
+ (function (Profile) {
3307
+ Profile.ListItemSchema = z.object({
3308
+ function_author: z
3309
+ .string()
3310
+ .describe("The author of the function the profile was published to."),
3311
+ function_id: z
3312
+ .string()
3313
+ .describe("The unique identifier of the function the profile was published to."),
3314
+ author: z.string().describe("The author of the profile."),
3315
+ id: z.string().describe("The unique identifier of the profile."),
3316
+ version: z.uint32().describe("The version of the profile."),
3317
+ });
3318
+ async function list(openai, options) {
3319
+ const response = await openai.get("/functions/profiles", options);
641
3320
  return response;
642
3321
  }
643
- Completions.retrieve = retrieve;
644
- async function create(openai, body, options) {
645
- var _a;
646
- const response = await openai.post("/multichat/completions", Object.assign({ body, stream: (_a = body.stream) !== null && _a !== void 0 ? _a : false }, options));
3322
+ Profile.list = list;
3323
+ Profile.RetrieveItemSchema = z.object({
3324
+ created: z
3325
+ .uint32()
3326
+ .describe("The UNIX timestamp (in seconds) when the profile was created."),
3327
+ shape: z
3328
+ .string()
3329
+ .describe("The shape of the profile. Unless Task Skip expressions work out favorably, profiles only work for functions with the same shape."),
3330
+ function_author: z
3331
+ .string()
3332
+ .describe("The author of the function the profile was published to."),
3333
+ function_id: z
3334
+ .string()
3335
+ .describe("The unique identifier of the function the profile was published to."),
3336
+ author: z.string().describe("The author of the profile."),
3337
+ id: z.string().describe("The unique identifier of the profile."),
3338
+ version: z.uint32().describe("The version of the profile."),
3339
+ profile: z
3340
+ .array(Function.ProfileVersionRequiredSchema)
3341
+ .describe("The function profile."),
3342
+ });
3343
+ async function retrieve(openai, function_author, function_id, author, id, version, options) {
3344
+ const response = await openai.get(version !== null && version !== undefined
3345
+ ? `/functions/${function_author}/${function_id}/profiles/${author}/${id}/${version}`
3346
+ : `/functions/${function_author}/${function_id}/profiles/${author}/${id}`, options);
647
3347
  return response;
648
3348
  }
649
- Completions.create = create;
650
- })(Completions = Multichat.Completions || (Multichat.Completions = {}));
651
- })(Multichat || (Multichat = {}));
652
- export var Conversation;
653
- (function (Conversation) {
654
- let Completions;
655
- (function (Completions) {
656
- async function list(openai, listOptions, options) {
657
- const response = await openai.get("/conversation/completions", Object.assign({ query: listOptions }, options));
3349
+ Profile.retrieve = retrieve;
3350
+ Profile.HistoricalUsageSchema = z.object({
3351
+ requests: z
3352
+ .uint32()
3353
+ .describe("The total number of requests made to Functions while using this Profile."),
3354
+ completion_tokens: z
3355
+ .uint32()
3356
+ .describe("The total number of completion tokens generated by Functions while using this Profile."),
3357
+ prompt_tokens: z
3358
+ .uint32()
3359
+ .describe("The total number of prompt tokens sent to Functions while using this Profile."),
3360
+ total_cost: z
3361
+ .number()
3362
+ .describe("The total cost incurred by using this Profile."),
3363
+ });
3364
+ async function retrieveUsage(openai, function_author, function_id, author, id, version, options) {
3365
+ const response = await openai.get(version !== null && version !== undefined
3366
+ ? `/functions/${function_author}/${function_id}/profiles/${author}/${id}/${version}/usage`
3367
+ : `/functions/${function_author}/${function_id}/profiles/${author}/${id}/usage`, options);
658
3368
  return response;
659
3369
  }
660
- Completions.list = list;
661
- })(Completions = Conversation.Completions || (Conversation.Completions = {}));
662
- })(Conversation || (Conversation = {}));
663
- export var Functions;
664
- (function (Functions) {
665
- let Response;
666
- (function (Response) {
667
- let Streaming;
668
- (function (Streaming) {
669
- let CompletionChunk;
670
- (function (CompletionChunk) {
671
- let ScoreCompletionChunk;
672
- (function (ScoreCompletionChunk) {
673
- function merged(a, b) {
674
- const [base, baseChanged] = Score.Completions.Response.Streaming.ChatCompletionChunk.merged(a, b);
675
- return baseChanged
676
- ? [Object.assign(Object.assign({ type: a.type }, base), { index: a.index }), true]
677
- : [a, false];
678
- }
679
- ScoreCompletionChunk.merged = merged;
680
- })(ScoreCompletionChunk = CompletionChunk.ScoreCompletionChunk || (CompletionChunk.ScoreCompletionChunk = {}));
681
- let MultichatCompletionChunk;
682
- (function (MultichatCompletionChunk) {
683
- function merged(a, b) {
684
- const [base, baseChanged] = Multichat.Completions.Response.Streaming.ChatCompletionChunk.merged(a, b);
685
- return baseChanged
686
- ? [Object.assign(Object.assign({ type: a.type }, base), { index: a.index }), true]
687
- : [a, false];
688
- }
689
- MultichatCompletionChunk.merged = merged;
690
- })(MultichatCompletionChunk = CompletionChunk.MultichatCompletionChunk || (CompletionChunk.MultichatCompletionChunk = {}));
691
- function mergedList(a, b) {
692
- let merged = undefined;
693
- for (const chunk of b) {
694
- const existingIndex = a.findIndex((c) => c.index === chunk.index && c.type === chunk.type);
695
- if (existingIndex === -1) {
696
- if (merged === undefined) {
697
- merged = [...a, chunk];
698
- }
699
- else {
700
- merged.push(chunk);
701
- }
702
- }
703
- else if (chunk.type === "score") {
704
- const [mergedChunk, chunkChanged] = ScoreCompletionChunk.merged(a[existingIndex], chunk);
705
- if (chunkChanged) {
706
- if (merged === undefined) {
707
- merged = [...a];
708
- }
709
- merged[existingIndex] = mergedChunk;
710
- }
711
- }
712
- else if (chunk.type === "multichat") {
713
- const [mergedChunk, chunkChanged] = MultichatCompletionChunk.merged(a[existingIndex], chunk);
714
- if (chunkChanged) {
715
- if (merged === undefined) {
716
- merged = [...a];
717
- }
718
- merged[existingIndex] = mergedChunk;
719
- }
720
- }
721
- }
722
- return merged ? [merged, true] : [a, false];
723
- }
724
- CompletionChunk.mergedList = mergedList;
725
- })(CompletionChunk = Streaming.CompletionChunk || (Streaming.CompletionChunk = {}));
726
- function merged(a, b) {
727
- const [completions, completionsChanged] = CompletionChunk.mergedList(a.completions, b.completions);
728
- const [output, outputChanged] = merge(a.output, b.output);
729
- const [retry_token, retry_tokenChanged] = merge(a.retry_token, b.retry_token);
730
- const [error, errorChanged] = merge(a.error, b.error);
731
- const [function_published, function_publishedChanged] = merge(a.function_published, b.function_published);
732
- if (completionsChanged ||
733
- outputChanged ||
734
- retry_tokenChanged ||
735
- errorChanged ||
736
- function_publishedChanged) {
737
- return [
738
- Object.assign(Object.assign(Object.assign(Object.assign({ completions }, (output !== undefined ? { output } : {})), (retry_token !== undefined ? { retry_token } : {})), (error !== undefined ? { error } : {})), (function_published !== undefined
739
- ? { function_published }
740
- : {})),
741
- true,
742
- ];
743
- }
744
- else {
745
- return [a, false];
746
- }
747
- }
748
- Streaming.merged = merged;
749
- })(Streaming = Response.Streaming || (Response.Streaming = {}));
750
- })(Response = Functions.Response || (Functions.Response = {}));
751
- async function list(openai, listOptions, options) {
752
- const response = await openai.get("/functions", Object.assign({ query: listOptions }, options));
3370
+ Profile.retrieveUsage = retrieveUsage;
3371
+ })(Profile = Function_1.Profile || (Function_1.Profile = {}));
3372
+ async function executeInline(openai, body, options) {
3373
+ var _a;
3374
+ const response = await openai.post("/functions", Object.assign({ body, stream: (_a = body.stream) !== null && _a !== void 0 ? _a : false }, options));
753
3375
  return response;
754
3376
  }
755
- Functions.list = list;
756
- async function count(openai, options) {
757
- const response = await openai.get("/functions/count", options);
3377
+ Function_1.executeInline = executeInline;
3378
+ async function execute(openai, author, id, version, body, options) {
3379
+ var _a;
3380
+ const response = await openai.post(version !== null && version !== undefined
3381
+ ? `/functions/${author}/${id}/${version}`
3382
+ : `/functions/${author}/${id}`, Object.assign({ body, stream: (_a = body.stream) !== null && _a !== void 0 ? _a : false }, options));
758
3383
  return response;
759
3384
  }
760
- Functions.count = count;
761
- async function retrieve(openai, author, id, version, retrieveOptions, options) {
762
- const url = version !== null && version !== undefined
763
- ? `/functions/${author}/${id}/${version}`
764
- : `/functions/${author}/${id}`;
765
- const response = await openai.get(url, Object.assign({ query: retrieveOptions }, options));
3385
+ Function_1.execute = execute;
3386
+ async function publishFunction(openai, author, id, version, body, options) {
3387
+ var _a;
3388
+ const response = await openai.post(`/functions/${author}/${id}/${version}/publish`, Object.assign({ body, stream: (_a = body.stream) !== null && _a !== void 0 ? _a : false }, options));
766
3389
  return response;
767
3390
  }
768
- Functions.retrieve = retrieve;
769
- async function executeById(openai, author, id, version, body, options) {
3391
+ Function_1.publishFunction = publishFunction;
3392
+ async function publishProfile(openai, function_author, function_id, body, options) {
770
3393
  var _a;
771
- const url = version !== null && version !== undefined
772
- ? `/functions/${author}/${id}/${version}`
773
- : `/functions/${author}/${id}`;
774
- const response = await openai.post(url, Object.assign({ body, stream: (_a = body.stream) !== null && _a !== void 0 ? _a : false }, options));
3394
+ const response = await openai.post(`/functions/${function_author}/${function_id}/profiles/publish`, Object.assign({ body, stream: (_a = body.stream) !== null && _a !== void 0 ? _a : false }, options));
775
3395
  return response;
776
3396
  }
777
- Functions.executeById = executeById;
778
- async function executeByDefinition(openai, body, options) {
3397
+ Function_1.publishProfile = publishProfile;
3398
+ async function computeProfile(openai, author, id, version, body, options) {
779
3399
  var _a;
780
- const response = await openai.post("/functions", Object.assign({ body, stream: (_a = body.stream) !== null && _a !== void 0 ? _a : false }, options));
3400
+ const response = await openai.post(version !== null && version !== undefined
3401
+ ? `/functions/${author}/${id}/${version}/profiles/compute`
3402
+ : `/functions/${author}/${id}/profiles/compute`, Object.assign({ body, stream: (_a = body.stream) !== null && _a !== void 0 ? _a : false }, options));
3403
+ return response;
3404
+ }
3405
+ Function_1.computeProfile = computeProfile;
3406
+ Function_1.ListItemSchema = z.object({
3407
+ author: z.string().describe("The author of the function."),
3408
+ id: z.string().describe("The unique identifier of the function."),
3409
+ version: z.uint32().describe("The version of the function."),
3410
+ });
3411
+ async function list(openai, options) {
3412
+ const response = await openai.get("/functions", options);
781
3413
  return response;
782
3414
  }
783
- Functions.executeByDefinition = executeByDefinition;
784
- })(Functions || (Functions = {}));
785
- export var Models;
786
- (function (Models) {
787
- async function list(openai, listOptions, options) {
788
- const response = await openai.models.list(Object.assign({ query: listOptions }, options));
3415
+ Function_1.list = list;
3416
+ Function_1.ScalarRetrieveItemSchema = Function_1.ScalarSchema.extend({
3417
+ created: z
3418
+ .uint32()
3419
+ .describe("The UNIX timestamp (in seconds) when the function was created."),
3420
+ shape: z
3421
+ .string()
3422
+ .describe("The shape of the function. Unless Task Skip expressions work out favorably, functions only work with profiles that have the same shape."),
3423
+ })
3424
+ .describe("A retrieved scalar function.")
3425
+ .meta({ title: "RetrievedScalarFunction" });
3426
+ Function_1.VectorRetrieveItemSchema = Function_1.VectorSchema.extend({
3427
+ created: z
3428
+ .uint32()
3429
+ .describe("The UNIX timestamp (in seconds) when the function was created."),
3430
+ shape: z
3431
+ .string()
3432
+ .describe("The shape of the function. Unless Task Skip expressions work out favorably, functions only work with profiles that have the same shape."),
3433
+ })
3434
+ .describe("A retrieved vector function.")
3435
+ .meta({ title: "RetrievedVectorFunction" });
3436
+ Function_1.RetrieveItemSchema = z.discriminatedUnion("type", [
3437
+ Function_1.ScalarRetrieveItemSchema,
3438
+ Function_1.VectorRetrieveItemSchema,
3439
+ ]);
3440
+ async function retrieve(openai, author, id, version, options) {
3441
+ const response = await openai.get(version !== null && version !== undefined
3442
+ ? `/functions/${author}/${id}/${version}`
3443
+ : `/functions/${author}/${id}`, options);
789
3444
  return response;
790
3445
  }
791
- Models.list = list;
792
- async function retrieve(openai, model, retrieveOptions, options) {
793
- const response = await openai.models.retrieve(model, Object.assign({ query: retrieveOptions }, options));
3446
+ Function_1.retrieve = retrieve;
3447
+ Function_1.HistoricalUsageSchema = z.object({
3448
+ requests: z
3449
+ .uint32()
3450
+ .describe("The total number of requests made to this Function."),
3451
+ completion_tokens: z
3452
+ .uint32()
3453
+ .describe("The total number of completion tokens generated by this Function."),
3454
+ prompt_tokens: z
3455
+ .uint32()
3456
+ .describe("The total number of prompt tokens sent to this Function."),
3457
+ total_cost: z
3458
+ .number()
3459
+ .describe("The total cost incurred by using this Function."),
3460
+ });
3461
+ async function retrieveUsage(openai, author, id, version, options) {
3462
+ const response = await openai.get(version !== null && version !== undefined
3463
+ ? `/functions/${author}/${id}/${version}/usage`
3464
+ : `/functions/${author}/${id}/usage`, options);
794
3465
  return response;
795
3466
  }
796
- Models.retrieve = retrieve;
797
- })(Models || (Models = {}));
3467
+ Function_1.retrieveUsage = retrieveUsage;
3468
+ })(Function || (Function = {}));
3469
+ export const FunctionSchema = z
3470
+ .discriminatedUnion("type", [Function.ScalarSchema, Function.VectorSchema])
3471
+ .describe("A function.");
798
3472
  export var Auth;
799
3473
  (function (Auth) {
3474
+ Auth.ApiKeySchema = z.object({
3475
+ api_key: z.string().describe("The API key."),
3476
+ created: z
3477
+ .string()
3478
+ .describe("The RFC 3339 timestamp when the API key was created."),
3479
+ expires: z
3480
+ .string()
3481
+ .nullable()
3482
+ .describe("The RFC 3339 timestamp when the API key expires, or null if it does not expire."),
3483
+ disabled: z
3484
+ .string()
3485
+ .nullable()
3486
+ .describe("The RFC 3339 timestamp when the API key was disabled, or null if it is not disabled."),
3487
+ name: z.string().describe("The name of the API key."),
3488
+ description: z
3489
+ .string()
3490
+ .nullable()
3491
+ .describe("The description of the API key, or null if no description was provided."),
3492
+ });
3493
+ Auth.ApiKeyWithCostSchema = Auth.ApiKeySchema.extend({
3494
+ cost: z
3495
+ .number()
3496
+ .describe("The total cost incurred while using this API key."),
3497
+ });
800
3498
  let ApiKey;
801
3499
  (function (ApiKey) {
802
3500
  async function list(openai, options) {
@@ -821,6 +3519,9 @@ export var Auth;
821
3519
  }
822
3520
  ApiKey.remove = remove;
823
3521
  })(ApiKey = Auth.ApiKey || (Auth.ApiKey = {}));
3522
+ Auth.OpenRouterApiKeySchema = z.object({
3523
+ api_key: z.string().describe("The OpenRouter API key."),
3524
+ });
824
3525
  let OpenRouterApiKey;
825
3526
  (function (OpenRouterApiKey) {
826
3527
  async function retrieve(openai, options) {
@@ -841,6 +3542,15 @@ export var Auth;
841
3542
  }
842
3543
  OpenRouterApiKey.remove = remove;
843
3544
  })(OpenRouterApiKey = Auth.OpenRouterApiKey || (Auth.OpenRouterApiKey = {}));
3545
+ Auth.CreditsSchema = z.object({
3546
+ credits: z.number().describe("The current number of credits available."),
3547
+ total_credits_purchased: z
3548
+ .number()
3549
+ .describe("The total number of credits ever purchased."),
3550
+ total_credits_used: z
3551
+ .number()
3552
+ .describe("The total number of credits ever used."),
3553
+ });
844
3554
  let Credits;
845
3555
  (function (Credits) {
846
3556
  async function retrieve(openai, options) {
@@ -863,106 +3573,6 @@ export var Auth;
863
3573
  Username.set = set;
864
3574
  })(Username = Auth.Username || (Auth.Username = {}));
865
3575
  })(Auth || (Auth = {}));
866
- export var Metadata;
867
- (function (Metadata) {
868
- async function get(openai, options) {
869
- const response = await openai.get("/metadata", options);
870
- return response;
871
- }
872
- Metadata.get = get;
873
- })(Metadata || (Metadata = {}));
874
- export var ScoreLlm;
875
- (function (ScoreLlm) {
876
- async function list(openai, listOptions, options) {
877
- const response = await openai.get("/score/llms", Object.assign({ query: listOptions }, options));
878
- return response;
879
- }
880
- ScoreLlm.list = list;
881
- async function count(openai, options) {
882
- const response = await openai.get("/score/llms/count", options);
883
- return response;
884
- }
885
- ScoreLlm.count = count;
886
- async function retrieve(openai, model, retrieveOptions, options) {
887
- const response = await openai.get(`/score/llms/${model}`, Object.assign({ query: retrieveOptions }, options));
888
- return response;
889
- }
890
- ScoreLlm.retrieve = retrieve;
891
- async function retrieveValidate(openai, model, retrieveOptions, options) {
892
- const response = await openai.post("/score/llms", Object.assign({ query: retrieveOptions, body: model }, options));
893
- return response;
894
- }
895
- ScoreLlm.retrieveValidate = retrieveValidate;
896
- })(ScoreLlm || (ScoreLlm = {}));
897
- export var MultichatLlm;
898
- (function (MultichatLlm) {
899
- async function list(openai, listOptions, options) {
900
- const response = await openai.get("/multichat/llms", Object.assign({ query: listOptions }, options));
901
- return response;
902
- }
903
- MultichatLlm.list = list;
904
- async function count(openai, options) {
905
- const response = await openai.get("/multichat/llms/count", options);
906
- return response;
907
- }
908
- MultichatLlm.count = count;
909
- async function retrieve(openai, model, retrieveOptions, options) {
910
- const response = await openai.get(`/multichat/llms/${model}`, Object.assign({ query: retrieveOptions }, options));
911
- return response;
912
- }
913
- MultichatLlm.retrieve = retrieve;
914
- async function retrieveValidate(openai, model, retrieveOptions, options) {
915
- const response = await openai.post("/multichat/llms", Object.assign({ query: retrieveOptions, body: model }, options));
916
- return response;
917
- }
918
- MultichatLlm.retrieveValidate = retrieveValidate;
919
- })(MultichatLlm || (MultichatLlm = {}));
920
- export var ScoreModel;
921
- (function (ScoreModel) {
922
- async function list(openai, listOptions, options) {
923
- const response = await openai.get("/score/models", Object.assign({ query: listOptions }, options));
924
- return response;
925
- }
926
- ScoreModel.list = list;
927
- async function count(openai, options) {
928
- const response = await openai.get("/score/models/count", options);
929
- return response;
930
- }
931
- ScoreModel.count = count;
932
- async function retrieve(openai, model, retrieveOptions, options) {
933
- const response = await openai.get(`/score/models/${model}`, Object.assign({ query: retrieveOptions }, options));
934
- return response;
935
- }
936
- ScoreModel.retrieve = retrieve;
937
- async function retrieveValidate(openai, model, retrieveOptions, options) {
938
- const response = await openai.post("/score/models", Object.assign({ query: retrieveOptions, body: model }, options));
939
- return response;
940
- }
941
- ScoreModel.retrieveValidate = retrieveValidate;
942
- })(ScoreModel || (ScoreModel = {}));
943
- export var MultichatModel;
944
- (function (MultichatModel) {
945
- async function list(openai, listOptions, options) {
946
- const response = await openai.get("/multichat/models", Object.assign({ query: listOptions }, options));
947
- return response;
948
- }
949
- MultichatModel.list = list;
950
- async function count(openai, options) {
951
- const response = await openai.get("/multichat/models/count", options);
952
- return response;
953
- }
954
- MultichatModel.count = count;
955
- async function retrieve(openai, model, retrieveOptions, options) {
956
- const response = await openai.get(`/multichat/models/${model}`, Object.assign({ query: retrieveOptions }, options));
957
- return response;
958
- }
959
- MultichatModel.retrieve = retrieve;
960
- async function retrieveValidate(openai, model, retrieveOptions, options) {
961
- const response = await openai.post("/multichat/models", Object.assign({ query: retrieveOptions, body: model }, options));
962
- return response;
963
- }
964
- MultichatModel.retrieveValidate = retrieveValidate;
965
- })(MultichatModel || (MultichatModel = {}));
966
3576
  function merge(a, b, combine) {
967
3577
  if (a !== null && a !== undefined && b !== null && b !== undefined) {
968
3578
  return combine ? combine(a, b) : [a, false];
@@ -983,6 +3593,6 @@ function merge(a, b, combine) {
983
3593
  function mergedString(a, b) {
984
3594
  return b === "" ? [a, false] : [a + b, true];
985
3595
  }
986
- function mergedNumber(a, b) {
987
- return b === 0 ? [a, false] : [a + b, true];
988
- }
3596
+ // function mergedNumber(a: number, b: number): [number, boolean] {
3597
+ // return b === 0 ? [a, false] : [a + b, true];
3598
+ // }