llama-stack-client 0.2.2 → 0.2.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (77)
  1. package/core.d.ts +1 -1
  2. package/core.d.ts.map +1 -1
  3. package/core.js +5 -5
  4. package/core.js.map +1 -1
  5. package/core.mjs +5 -5
  6. package/core.mjs.map +1 -1
  7. package/index.d.mts +6 -0
  8. package/index.d.ts +6 -0
  9. package/index.d.ts.map +1 -1
  10. package/index.js +6 -0
  11. package/index.js.map +1 -1
  12. package/index.mjs +6 -0
  13. package/index.mjs.map +1 -1
  14. package/internal/decoders/line.d.ts +0 -1
  15. package/internal/decoders/line.d.ts.map +1 -1
  16. package/package.json +2 -3
  17. package/resources/chat/chat.d.ts +153 -0
  18. package/resources/chat/chat.d.ts.map +1 -0
  19. package/resources/chat/chat.js +39 -0
  20. package/resources/chat/chat.js.map +1 -0
  21. package/resources/chat/chat.mjs +12 -0
  22. package/resources/chat/chat.mjs.map +1 -0
  23. package/resources/chat/completions.d.ts +632 -0
  24. package/resources/chat/completions.d.ts.map +1 -0
  25. package/resources/chat/completions.js +16 -0
  26. package/resources/chat/completions.js.map +1 -0
  27. package/resources/chat/completions.mjs +12 -0
  28. package/resources/chat/completions.mjs.map +1 -0
  29. package/resources/chat/index.d.ts +3 -0
  30. package/resources/chat/index.d.ts.map +1 -0
  31. package/resources/chat/index.js +9 -0
  32. package/resources/chat/index.js.map +1 -0
  33. package/resources/chat/index.mjs +4 -0
  34. package/resources/chat/index.mjs.map +1 -0
  35. package/resources/chat.d.ts +2 -0
  36. package/resources/chat.d.ts.map +1 -0
  37. package/resources/chat.js +19 -0
  38. package/resources/chat.js.map +1 -0
  39. package/resources/chat.mjs +3 -0
  40. package/resources/chat.mjs.map +1 -0
  41. package/resources/completions.d.ts +193 -0
  42. package/resources/completions.d.ts.map +1 -0
  43. package/resources/completions.js +16 -0
  44. package/resources/completions.js.map +1 -0
  45. package/resources/completions.mjs +12 -0
  46. package/resources/completions.mjs.map +1 -0
  47. package/resources/index.d.ts +2 -0
  48. package/resources/index.d.ts.map +1 -1
  49. package/resources/index.js +5 -1
  50. package/resources/index.js.map +1 -1
  51. package/resources/index.mjs +2 -0
  52. package/resources/index.mjs.map +1 -1
  53. package/resources/inspect.d.ts +2 -1
  54. package/resources/inspect.d.ts.map +1 -1
  55. package/resources/post-training/job.d.ts.map +1 -1
  56. package/resources/post-training/job.js.map +1 -1
  57. package/resources/post-training/job.mjs.map +1 -1
  58. package/resources/post-training/post-training.d.ts +19 -19
  59. package/resources/post-training/post-training.d.ts.map +1 -1
  60. package/resources/shared.d.ts +17 -1
  61. package/resources/shared.d.ts.map +1 -1
  62. package/src/core.ts +4 -4
  63. package/src/index.ts +22 -0
  64. package/src/resources/chat/chat.ts +206 -0
  65. package/src/resources/chat/completions.ts +890 -0
  66. package/src/resources/chat/index.ts +10 -0
  67. package/src/resources/chat.ts +3 -0
  68. package/src/resources/completions.ts +268 -0
  69. package/src/resources/index.ts +8 -0
  70. package/src/resources/inspect.ts +3 -1
  71. package/src/resources/post-training/job.ts +1 -0
  72. package/src/resources/post-training/post-training.ts +32 -32
  73. package/src/resources/shared.ts +18 -1
  74. package/src/version.ts +1 -1
  75. package/version.d.ts +1 -1
  76. package/version.js +1 -1
  77. package/version.mjs +1 -1
package/src/resources/chat/completions.ts
@@ -0,0 +1,890 @@
+ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+ import { APIResource } from '../../resource';
+ import { APIPromise } from '../../core';
+ import * as Core from '../../core';
+ import * as CompletionsAPI from './completions';
+ import * as ChatAPI from './chat';
+ import { Stream } from '../../streaming';
+
+ export class Completions extends APIResource {
+   /**
+    * Generate an OpenAI-compatible chat completion for the given messages using the
+    * specified model.
+    */
+   create(
+     body: CompletionCreateParamsNonStreaming,
+     options?: Core.RequestOptions,
+   ): APIPromise<CompletionCreateResponse>;
+   create(
+     body: CompletionCreateParamsStreaming,
+     options?: Core.RequestOptions,
+   ): APIPromise<Stream<ChatAPI.ChatCompletionChunk>>;
+   create(
+     body: CompletionCreateParamsBase,
+     options?: Core.RequestOptions,
+   ): APIPromise<Stream<ChatAPI.ChatCompletionChunk> | CompletionCreateResponse>;
+   create(
+     body: CompletionCreateParams,
+     options?: Core.RequestOptions,
+   ): APIPromise<CompletionCreateResponse> | APIPromise<Stream<ChatAPI.ChatCompletionChunk>> {
+     return this._client.post('/v1/openai/v1/chat/completions', {
+       body,
+       ...options,
+       stream: body.stream ?? false,
+     }) as APIPromise<CompletionCreateResponse> | APIPromise<Stream<ChatAPI.ChatCompletionChunk>>;
+   }
+ }
+
+ /**
+  * Response from an OpenAI-compatible chat completion request.
+  */
+ export type CompletionCreateResponse =
+   | CompletionCreateResponse.OpenAIChatCompletion
+   | ChatAPI.ChatCompletionChunk;
+
+ export namespace CompletionCreateResponse {
+   /**
+    * Response from an OpenAI-compatible chat completion request.
+    */
+   export interface OpenAIChatCompletion {
+     /**
+      * The ID of the chat completion
+      */
+     id: string;
+
+     /**
+      * List of choices
+      */
+     choices: Array<OpenAIChatCompletion.Choice>;
+
+     /**
+      * The Unix timestamp in seconds when the chat completion was created
+      */
+     created: number;
+
+     /**
+      * The model that was used to generate the chat completion
+      */
+     model: string;
+
+     /**
+      * The object type, which will be "chat.completion"
+      */
+     object: 'chat.completion';
+   }
+
+   export namespace OpenAIChatCompletion {
+     /**
+      * A choice from an OpenAI-compatible chat completion response.
+      */
+     export interface Choice {
+       /**
+        * The reason the model stopped generating
+        */
+       finish_reason: string;
+
+       /**
+        * The index of the choice
+        */
+       index: number;
+
+       /**
+        * The message from the model
+        */
+       message:
+         | Choice.OpenAIUserMessageParam
+         | Choice.OpenAISystemMessageParam
+         | Choice.OpenAIAssistantMessageParam
+         | Choice.OpenAIToolMessageParam
+         | Choice.OpenAIDeveloperMessageParam;
+
+       /**
+        * (Optional) The log probabilities for the tokens in the message
+        */
+       logprobs?: Choice.Logprobs;
+     }
+
+     export namespace Choice {
+       /**
+        * A message from the user in an OpenAI-compatible chat completion request.
+        */
+       export interface OpenAIUserMessageParam {
+         /**
+          * The content of the message, which can include text and other media
+          */
+         content:
+           | string
+           | Array<
+               | OpenAIUserMessageParam.OpenAIChatCompletionContentPartTextParam
+               | OpenAIUserMessageParam.OpenAIChatCompletionContentPartImageParam
+             >;
+
+         /**
+          * Must be "user" to identify this as a user message
+          */
+         role: 'user';
+
+         /**
+          * (Optional) The name of the user message participant.
+          */
+         name?: string;
+       }
+
+       export namespace OpenAIUserMessageParam {
+         export interface OpenAIChatCompletionContentPartTextParam {
+           text: string;
+
+           type: 'text';
+         }
+
+         export interface OpenAIChatCompletionContentPartImageParam {
+           image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
+
+           type: 'image_url';
+         }
+
+         export namespace OpenAIChatCompletionContentPartImageParam {
+           export interface ImageURL {
+             url: string;
+
+             detail?: string;
+           }
+         }
+       }
+
+       /**
+        * A system message providing instructions or context to the model.
+        */
+       export interface OpenAISystemMessageParam {
+         /**
+          * The content of the "system prompt". If multiple system messages are provided,
+          * they are concatenated. The underlying Llama Stack code may also add other system
+          * messages (for example, for formatting tool definitions).
+          */
+         content:
+           | string
+           | Array<
+               | OpenAISystemMessageParam.OpenAIChatCompletionContentPartTextParam
+               | OpenAISystemMessageParam.OpenAIChatCompletionContentPartImageParam
+             >;
+
+         /**
+          * Must be "system" to identify this as a system message
+          */
+         role: 'system';
+
+         /**
+          * (Optional) The name of the system message participant.
+          */
+         name?: string;
+       }
+
+       export namespace OpenAISystemMessageParam {
+         export interface OpenAIChatCompletionContentPartTextParam {
+           text: string;
+
+           type: 'text';
+         }
+
+         export interface OpenAIChatCompletionContentPartImageParam {
+           image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
+
+           type: 'image_url';
+         }
+
+         export namespace OpenAIChatCompletionContentPartImageParam {
+           export interface ImageURL {
+             url: string;
+
+             detail?: string;
+           }
+         }
+       }
+
+       /**
+        * A message containing the model's (assistant) response in an OpenAI-compatible
+        * chat completion request.
+        */
+       export interface OpenAIAssistantMessageParam {
+         /**
+          * Must be "assistant" to identify this as the model's response
+          */
+         role: 'assistant';
+
+         /**
+          * The content of the model's response
+          */
+         content?:
+           | string
+           | Array<
+               | OpenAIAssistantMessageParam.OpenAIChatCompletionContentPartTextParam
+               | OpenAIAssistantMessageParam.OpenAIChatCompletionContentPartImageParam
+             >;
+
+         /**
+          * (Optional) The name of the assistant message participant.
+          */
+         name?: string;
+
+         /**
+          * List of tool calls. Each tool call is an OpenAIChatCompletionToolCall object.
+          */
+         tool_calls?: Array<OpenAIAssistantMessageParam.ToolCall>;
+       }
+
+       export namespace OpenAIAssistantMessageParam {
+         export interface OpenAIChatCompletionContentPartTextParam {
+           text: string;
+
+           type: 'text';
+         }
+
+         export interface OpenAIChatCompletionContentPartImageParam {
+           image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
+
+           type: 'image_url';
+         }
+
+         export namespace OpenAIChatCompletionContentPartImageParam {
+           export interface ImageURL {
+             url: string;
+
+             detail?: string;
+           }
+         }
+
+         export interface ToolCall {
+           type: 'function';
+
+           id?: string;
+
+           function?: ToolCall.Function;
+
+           index?: number;
+         }
+
+         export namespace ToolCall {
+           export interface Function {
+             arguments?: string;
+
+             name?: string;
+           }
+         }
+       }
+
+       /**
+        * A message representing the result of a tool invocation in an OpenAI-compatible
+        * chat completion request.
+        */
+       export interface OpenAIToolMessageParam {
+         /**
+          * The response content from the tool
+          */
+         content:
+           | string
+           | Array<
+               | OpenAIToolMessageParam.OpenAIChatCompletionContentPartTextParam
+               | OpenAIToolMessageParam.OpenAIChatCompletionContentPartImageParam
+             >;
+
+         /**
+          * Must be "tool" to identify this as a tool response
+          */
+         role: 'tool';
+
+         /**
+          * Unique identifier for the tool call this response is for
+          */
+         tool_call_id: string;
+       }
+
+       export namespace OpenAIToolMessageParam {
+         export interface OpenAIChatCompletionContentPartTextParam {
+           text: string;
+
+           type: 'text';
+         }
+
+         export interface OpenAIChatCompletionContentPartImageParam {
+           image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
+
+           type: 'image_url';
+         }
+
+         export namespace OpenAIChatCompletionContentPartImageParam {
+           export interface ImageURL {
+             url: string;
+
+             detail?: string;
+           }
+         }
+       }
+
+       /**
+        * A message from the developer in an OpenAI-compatible chat completion request.
+        */
+       export interface OpenAIDeveloperMessageParam {
+         /**
+          * The content of the developer message
+          */
+         content:
+           | string
+           | Array<
+               | OpenAIDeveloperMessageParam.OpenAIChatCompletionContentPartTextParam
+               | OpenAIDeveloperMessageParam.OpenAIChatCompletionContentPartImageParam
+             >;
+
+         /**
+          * Must be "developer" to identify this as a developer message
+          */
+         role: 'developer';
+
+         /**
+          * (Optional) The name of the developer message participant.
+          */
+         name?: string;
+       }
+
+       export namespace OpenAIDeveloperMessageParam {
+         export interface OpenAIChatCompletionContentPartTextParam {
+           text: string;
+
+           type: 'text';
+         }
+
+         export interface OpenAIChatCompletionContentPartImageParam {
+           image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
+
+           type: 'image_url';
+         }
+
+         export namespace OpenAIChatCompletionContentPartImageParam {
+           export interface ImageURL {
+             url: string;
+
+             detail?: string;
+           }
+         }
+       }
+
+       /**
+        * (Optional) The log probabilities for the tokens in the message
+        */
+       export interface Logprobs {
+         /**
+          * (Optional) The log probabilities for the tokens in the message
+          */
+         content?: Array<Logprobs.Content>;
+
+         /**
+          * (Optional) The log probabilities for the tokens in the message
+          */
+         refusal?: Array<Logprobs.Refusal>;
+       }
+
+       export namespace Logprobs {
+         /**
+          * The log probability for a token from an OpenAI-compatible chat completion
+          * response.
+          */
+         export interface Content {
+           token: string;
+
+           logprob: number;
+
+           top_logprobs: Array<Content.TopLogprob>;
+
+           bytes?: Array<number>;
+         }
+
+         export namespace Content {
+           /**
+            * The top log probability for a token from an OpenAI-compatible chat completion
+            * response.
+            */
+           export interface TopLogprob {
+             token: string;
+
+             logprob: number;
+
+             bytes?: Array<number>;
+           }
+         }
+
+         /**
+          * The log probability for a token from an OpenAI-compatible chat completion
+          * response.
+          */
+         export interface Refusal {
+           token: string;
+
+           logprob: number;
+
+           top_logprobs: Array<Refusal.TopLogprob>;
+
+           bytes?: Array<number>;
+         }
+
+         export namespace Refusal {
+           /**
+            * The top log probability for a token from an OpenAI-compatible chat completion
+            * response.
+            */
+           export interface TopLogprob {
+             token: string;
+
+             logprob: number;
+
+             bytes?: Array<number>;
+           }
+         }
+       }
+     }
+   }
+ }
+
+ export type CompletionCreateParams = CompletionCreateParamsNonStreaming | CompletionCreateParamsStreaming;
+
+ export interface CompletionCreateParamsBase {
+   /**
+    * List of messages in the conversation
+    */
+   messages: Array<
+     | CompletionCreateParams.OpenAIUserMessageParam
+     | CompletionCreateParams.OpenAISystemMessageParam
+     | CompletionCreateParams.OpenAIAssistantMessageParam
+     | CompletionCreateParams.OpenAIToolMessageParam
+     | CompletionCreateParams.OpenAIDeveloperMessageParam
+   >;
+
+   /**
+    * The identifier of the model to use. The model must be registered with Llama
+    * Stack and available via the /models endpoint.
+    */
+   model: string;
+
+   /**
+    * (Optional) The penalty for repeated tokens
+    */
+   frequency_penalty?: number;
+
+   /**
+    * (Optional) The function call to use
+    */
+   function_call?: string | Record<string, boolean | number | string | Array<unknown> | unknown | null>;
+
+   /**
+    * (Optional) List of functions to use
+    */
+   functions?: Array<Record<string, boolean | number | string | Array<unknown> | unknown | null>>;
+
+   /**
+    * (Optional) The logit bias to use
+    */
+   logit_bias?: Record<string, number>;
+
+   /**
+    * (Optional) The log probabilities to use
+    */
+   logprobs?: boolean;
+
+   /**
+    * (Optional) The maximum number of tokens to generate
+    */
+   max_completion_tokens?: number;
+
+   /**
+    * (Optional) The maximum number of tokens to generate
+    */
+   max_tokens?: number;
+
+   /**
+    * (Optional) The number of completions to generate
+    */
+   n?: number;
+
+   /**
+    * (Optional) Whether to parallelize tool calls
+    */
+   parallel_tool_calls?: boolean;
+
+   /**
+    * (Optional) The penalty for repeated tokens
+    */
+   presence_penalty?: number;
+
+   /**
+    * (Optional) The response format to use
+    */
+   response_format?:
+     | CompletionCreateParams.OpenAIResponseFormatText
+     | CompletionCreateParams.OpenAIResponseFormatJsonSchema
+     | CompletionCreateParams.OpenAIResponseFormatJsonObject;
+
+   /**
+    * (Optional) The seed to use
+    */
+   seed?: number;
+
+   /**
+    * (Optional) The stop tokens to use
+    */
+   stop?: string | Array<string>;
+
+   /**
+    * (Optional) Whether to stream the response
+    */
+   stream?: boolean;
+
+   /**
+    * (Optional) The stream options to use
+    */
+   stream_options?: Record<string, boolean | number | string | Array<unknown> | unknown | null>;
+
+   /**
+    * (Optional) The temperature to use
+    */
+   temperature?: number;
+
+   /**
+    * (Optional) The tool choice to use
+    */
+   tool_choice?: string | Record<string, boolean | number | string | Array<unknown> | unknown | null>;
+
+   /**
+    * (Optional) The tools to use
+    */
+   tools?: Array<Record<string, boolean | number | string | Array<unknown> | unknown | null>>;
+
+   /**
+    * (Optional) The top log probabilities to use
+    */
+   top_logprobs?: number;
+
+   /**
+    * (Optional) The top p to use
+    */
+   top_p?: number;
+
+   /**
+    * (Optional) The user to use
+    */
+   user?: string;
+ }
+
+ export namespace CompletionCreateParams {
+   /**
+    * A message from the user in an OpenAI-compatible chat completion request.
+    */
+   export interface OpenAIUserMessageParam {
+     /**
+      * The content of the message, which can include text and other media
+      */
+     content:
+       | string
+       | Array<
+           | OpenAIUserMessageParam.OpenAIChatCompletionContentPartTextParam
+           | OpenAIUserMessageParam.OpenAIChatCompletionContentPartImageParam
+         >;
+
+     /**
+      * Must be "user" to identify this as a user message
+      */
+     role: 'user';
+
+     /**
+      * (Optional) The name of the user message participant.
+      */
+     name?: string;
+   }
+
+   export namespace OpenAIUserMessageParam {
+     export interface OpenAIChatCompletionContentPartTextParam {
+       text: string;
+
+       type: 'text';
+     }
+
+     export interface OpenAIChatCompletionContentPartImageParam {
+       image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
+
+       type: 'image_url';
+     }
+
+     export namespace OpenAIChatCompletionContentPartImageParam {
+       export interface ImageURL {
+         url: string;
+
+         detail?: string;
+       }
+     }
+   }
+
+   /**
+    * A system message providing instructions or context to the model.
+    */
+   export interface OpenAISystemMessageParam {
+     /**
+      * The content of the "system prompt". If multiple system messages are provided,
+      * they are concatenated. The underlying Llama Stack code may also add other system
+      * messages (for example, for formatting tool definitions).
+      */
+     content:
+       | string
+       | Array<
+           | OpenAISystemMessageParam.OpenAIChatCompletionContentPartTextParam
+           | OpenAISystemMessageParam.OpenAIChatCompletionContentPartImageParam
+         >;
+
+     /**
+      * Must be "system" to identify this as a system message
+      */
+     role: 'system';
+
+     /**
+      * (Optional) The name of the system message participant.
+      */
+     name?: string;
+   }
+
+   export namespace OpenAISystemMessageParam {
+     export interface OpenAIChatCompletionContentPartTextParam {
+       text: string;
+
+       type: 'text';
+     }
+
+     export interface OpenAIChatCompletionContentPartImageParam {
+       image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
+
+       type: 'image_url';
+     }
+
+     export namespace OpenAIChatCompletionContentPartImageParam {
+       export interface ImageURL {
+         url: string;
+
+         detail?: string;
+       }
+     }
+   }
+
+   /**
+    * A message containing the model's (assistant) response in an OpenAI-compatible
+    * chat completion request.
+    */
+   export interface OpenAIAssistantMessageParam {
+     /**
+      * Must be "assistant" to identify this as the model's response
+      */
+     role: 'assistant';
+
+     /**
+      * The content of the model's response
+      */
+     content?:
+       | string
+       | Array<
+           | OpenAIAssistantMessageParam.OpenAIChatCompletionContentPartTextParam
+           | OpenAIAssistantMessageParam.OpenAIChatCompletionContentPartImageParam
+         >;
+
+     /**
+      * (Optional) The name of the assistant message participant.
+      */
+     name?: string;
+
+     /**
+      * List of tool calls. Each tool call is an OpenAIChatCompletionToolCall object.
+      */
+     tool_calls?: Array<OpenAIAssistantMessageParam.ToolCall>;
+   }
+
+   export namespace OpenAIAssistantMessageParam {
+     export interface OpenAIChatCompletionContentPartTextParam {
+       text: string;
+
+       type: 'text';
+     }
+
+     export interface OpenAIChatCompletionContentPartImageParam {
+       image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
+
+       type: 'image_url';
+     }
+
+     export namespace OpenAIChatCompletionContentPartImageParam {
+       export interface ImageURL {
+         url: string;
+
+         detail?: string;
+       }
+     }
+
+     export interface ToolCall {
+       type: 'function';
+
+       id?: string;
+
+       function?: ToolCall.Function;
+
+       index?: number;
+     }
+
+     export namespace ToolCall {
+       export interface Function {
+         arguments?: string;
+
+         name?: string;
+       }
+     }
+   }
+
+   /**
+    * A message representing the result of a tool invocation in an OpenAI-compatible
+    * chat completion request.
+    */
+   export interface OpenAIToolMessageParam {
+     /**
+      * The response content from the tool
+      */
+     content:
+       | string
+       | Array<
+           | OpenAIToolMessageParam.OpenAIChatCompletionContentPartTextParam
+           | OpenAIToolMessageParam.OpenAIChatCompletionContentPartImageParam
+         >;
+
+     /**
+      * Must be "tool" to identify this as a tool response
+      */
+     role: 'tool';
+
+     /**
+      * Unique identifier for the tool call this response is for
+      */
+     tool_call_id: string;
+   }
+
+   export namespace OpenAIToolMessageParam {
+     export interface OpenAIChatCompletionContentPartTextParam {
+       text: string;
+
+       type: 'text';
+     }
+
+     export interface OpenAIChatCompletionContentPartImageParam {
+       image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
+
+       type: 'image_url';
+     }
+
+     export namespace OpenAIChatCompletionContentPartImageParam {
+       export interface ImageURL {
+         url: string;
+
+         detail?: string;
+       }
+     }
+   }
+
+   /**
+    * A message from the developer in an OpenAI-compatible chat completion request.
+    */
+   export interface OpenAIDeveloperMessageParam {
+     /**
+      * The content of the developer message
+      */
+     content:
+       | string
+       | Array<
+           | OpenAIDeveloperMessageParam.OpenAIChatCompletionContentPartTextParam
+           | OpenAIDeveloperMessageParam.OpenAIChatCompletionContentPartImageParam
+         >;
+
+     /**
+      * Must be "developer" to identify this as a developer message
+      */
+     role: 'developer';
+
+     /**
+      * (Optional) The name of the developer message participant.
+      */
+     name?: string;
+   }
+
+   export namespace OpenAIDeveloperMessageParam {
+     export interface OpenAIChatCompletionContentPartTextParam {
+       text: string;
+
+       type: 'text';
+     }
+
+     export interface OpenAIChatCompletionContentPartImageParam {
+       image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
+
+       type: 'image_url';
+     }
+
+     export namespace OpenAIChatCompletionContentPartImageParam {
+       export interface ImageURL {
+         url: string;
+
+         detail?: string;
+       }
+     }
+   }
+
+   export interface OpenAIResponseFormatText {
+     type: 'text';
+   }
+
+   export interface OpenAIResponseFormatJsonSchema {
+     json_schema: OpenAIResponseFormatJsonSchema.JsonSchema;
+
+     type: 'json_schema';
+   }
+
+   export namespace OpenAIResponseFormatJsonSchema {
+     export interface JsonSchema {
+       name: string;
+
+       description?: string;
+
+       schema?: Record<string, boolean | number | string | Array<unknown> | unknown | null>;
+
+       strict?: boolean;
+     }
+   }
+
+   export interface OpenAIResponseFormatJsonObject {
+     type: 'json_object';
+   }
+
+   export type CompletionCreateParamsNonStreaming = CompletionsAPI.CompletionCreateParamsNonStreaming;
+   export type CompletionCreateParamsStreaming = CompletionsAPI.CompletionCreateParamsStreaming;
+ }
+
+ export interface CompletionCreateParamsNonStreaming extends CompletionCreateParamsBase {
+   /**
+    * (Optional) Whether to stream the response
+    */
+   stream?: false;
+ }
+
+ export interface CompletionCreateParamsStreaming extends CompletionCreateParamsBase {
+   /**
+    * (Optional) Whether to stream the response
+    */
+   stream: true;
+ }
+
+ export declare namespace Completions {
+   export {
+     type CompletionCreateResponse as CompletionCreateResponse,
+     type CompletionCreateParams as CompletionCreateParams,
+     type CompletionCreateParamsNonStreaming as CompletionCreateParamsNonStreaming,
+     type CompletionCreateParamsStreaming as CompletionCreateParamsStreaming,
+   };
+ }
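
For context, the sketch below shows how the endpoint added above could be called from application code. It is illustrative only and not part of the published diff: it assumes the package's default export is the LlamaStackClient class and that the new resource is mounted at client.chat.completions (as the accompanying chat.ts and index.ts changes suggest); the base URL and model ID are placeholders.

// Illustrative usage sketch (not part of the diff). Assumed names: LlamaStackClient,
// client.chat.completions; the base URL and model ID below are placeholders.
import LlamaStackClient from 'llama-stack-client';

const client = new LlamaStackClient({ baseURL: 'http://localhost:8321' });

async function main() {
  // Without `stream: true`, the non-streaming overload resolves to CompletionCreateResponse.
  const completion = await client.chat.completions.create({
    model: 'meta-llama/Llama-3.1-8B-Instruct', // placeholder model ID
    messages: [{ role: 'user', content: 'Say hello in one sentence.' }],
  });
  // The response type is a union (OpenAIChatCompletion | ChatCompletionChunk); in the
  // non-streaming case it is the full chat.completion object.
  console.log(completion);

  // With `stream: true`, the streaming overload returns Stream<ChatCompletionChunk>.
  const stream = await client.chat.completions.create({
    model: 'meta-llama/Llama-3.1-8B-Instruct', // placeholder model ID
    messages: [{ role: 'user', content: 'Write a haiku about llamas.' }],
    stream: true,
  });
  for await (const chunk of stream) {
    // Chunk shape is assumed to follow the OpenAI-compatible delta format defined in chat.ts.
    process.stdout.write(chunk.choices?.[0]?.delta?.content ?? '');
  }
}

main();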