openai 4.77.3 → 4.78.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. package/CHANGELOG.md +16 -0
  2. package/README.md +1 -1
  3. package/package.json +1 -1
  4. package/resources/beta/beta.d.ts +4 -0
  5. package/resources/beta/beta.d.ts.map +1 -1
  6. package/resources/beta/beta.js +4 -0
  7. package/resources/beta/beta.js.map +1 -1
  8. package/resources/beta/beta.mjs +4 -0
  9. package/resources/beta/beta.mjs.map +1 -1
  10. package/resources/beta/index.d.ts +1 -0
  11. package/resources/beta/index.d.ts.map +1 -1
  12. package/resources/beta/index.js +10 -8
  13. package/resources/beta/index.js.map +1 -1
  14. package/resources/beta/index.mjs +1 -0
  15. package/resources/beta/index.mjs.map +1 -1
  16. package/resources/beta/realtime/index.d.ts +3 -0
  17. package/resources/beta/realtime/index.d.ts.map +1 -0
  18. package/resources/beta/realtime/index.js +9 -0
  19. package/resources/beta/realtime/index.js.map +1 -0
  20. package/resources/beta/realtime/index.mjs +4 -0
  21. package/resources/beta/realtime/index.mjs.map +1 -0
  22. package/resources/beta/realtime/realtime.d.ts +1584 -0
  23. package/resources/beta/realtime/realtime.d.ts.map +1 -0
  24. package/resources/beta/realtime/realtime.js +39 -0
  25. package/resources/beta/realtime/realtime.js.map +1 -0
  26. package/resources/beta/realtime/realtime.mjs +12 -0
  27. package/resources/beta/realtime/realtime.mjs.map +1 -0
  28. package/resources/beta/realtime/sessions.d.ts +455 -0
  29. package/resources/beta/realtime/sessions.d.ts.map +1 -0
  30. package/resources/beta/realtime/sessions.js +25 -0
  31. package/resources/beta/realtime/sessions.js.map +1 -0
  32. package/resources/beta/realtime/sessions.mjs +21 -0
  33. package/resources/beta/realtime/sessions.mjs.map +1 -0
  34. package/src/resources/beta/beta.ts +6 -0
  35. package/src/resources/beta/index.ts +1 -0
  36. package/src/resources/beta/realtime/index.ts +4 -0
  37. package/src/resources/beta/realtime/realtime.ts +1904 -0
  38. package/src/resources/beta/realtime/sessions.ts +546 -0
  39. package/src/version.ts +1 -1
  40. package/version.d.ts +1 -1
  41. package/version.js +1 -1
  42. package/version.mjs +1 -1
package/src/resources/beta/realtime/sessions.ts ADDED
@@ -0,0 +1,546 @@
+ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+ import { APIResource } from '../../../resource';
+ import * as Core from '../../../core';
+
+ export class Sessions extends APIResource {
+   /**
+    * Create an ephemeral API token for use in client-side applications with the
+    * Realtime API. Can be configured with the same session parameters as the
+    * `session.update` client event.
+    *
+    * It responds with a session object, plus a `client_secret` key which contains a
+    * usable ephemeral API token that can be used to authenticate browser clients for
+    * the Realtime API.
+    */
+   create(body: SessionCreateParams, options?: Core.RequestOptions): Core.APIPromise<SessionCreateResponse> {
+     return this._client.post('/realtime/sessions', {
+       body,
+       ...options,
+       headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
+     });
+   }
+ }
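For orientation, the new endpoint can be exercised from a server-side script once this version is installed. A minimal sketch, assuming `OPENAI_API_KEY` is set in the environment (the accessor path follows the resource layout added in this release; `model` is the only required field per `SessionCreateParams` below):

import OpenAI from 'openai';

const client = new OpenAI(); // picks up OPENAI_API_KEY from the environment

async function main() {
  // Mint an ephemeral Realtime session token (POST /realtime/sessions).
  const session = await client.beta.realtime.sessions.create({
    model: 'gpt-4o-realtime-preview-2024-12-17',
  });
  // The short-lived token for browser clients lives in client_secret.
  console.log(session.client_secret?.value);
}

main();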
+
+ /**
+  * Realtime session object configuration.
+  */
+ export interface Session {
+   /**
+    * Unique identifier for the session object.
+    */
+   id?: string;
+
+   /**
+    * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
+    */
+   input_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw';
+
+   /**
+    * Configuration for input audio transcription; defaults to off and can be set to
+    * `null` to turn off once on. Input audio transcription is not native to the
+    * model, since the model consumes audio directly. Transcription runs
+    * asynchronously through Whisper and should be treated as rough guidance rather
+    * than the representation understood by the model.
+    */
+   input_audio_transcription?: Session.InputAudioTranscription;
+
+   /**
+    * The default system instructions (i.e. system message) prepended to model calls.
+    * This field allows the client to guide the model on desired responses. The model
+    * can be instructed on response content and format (e.g. "be extremely succinct",
+    * "act friendly", "here are examples of good responses") and on audio behavior
+    * (e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The
+    * instructions are not guaranteed to be followed by the model, but they provide
+    * guidance to the model on the desired behavior.
+    *
+    * Note that the server sets default instructions which will be used if this field
+    * is not set and are visible in the `session.created` event at the start of the
+    * session.
+    */
+   instructions?: string;
+
+   /**
+    * Maximum number of output tokens for a single assistant response, inclusive of
+    * tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
+    * `inf` for the maximum available tokens for a given model. Defaults to `inf`.
+    */
+   max_response_output_tokens?: number | 'inf';
+
+   /**
+    * The set of modalities the model can respond with. To disable audio, set this to
+    * ["text"].
+    */
+   modalities?: Array<'text' | 'audio'>;
+
+   /**
+    * The Realtime model used for this session.
+    */
+   model?:
+     | (string & {})
+     | 'gpt-4o-realtime-preview'
+     | 'gpt-4o-realtime-preview-2024-10-01'
+     | 'gpt-4o-realtime-preview-2024-12-17'
+     | 'gpt-4o-mini-realtime-preview'
+     | 'gpt-4o-mini-realtime-preview-2024-12-17';
+
+   /**
+    * The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
+    */
+   output_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw';
+
+   /**
+    * Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8.
+    */
+   temperature?: number;
+
+   /**
+    * How the model chooses tools. Options are `auto`, `none`, `required`, or specify
+    * a function.
+    */
+   tool_choice?: string;
+
+   /**
+    * Tools (functions) available to the model.
+    */
+   tools?: Array<Session.Tool>;
+
+   /**
+    * Configuration for turn detection. Can be set to `null` to turn off. Server VAD
+    * means that the model will detect the start and end of speech based on audio
+    * volume and respond at the end of user speech.
+    */
+   turn_detection?: Session.TurnDetection | null;
+
+   /**
+    * The voice the model uses to respond. Voice cannot be changed during the session
+    * once the model has responded with audio at least once. Current voice options are
+    * `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer`, and `verse`.
+    */
+   voice?: 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse';
+ }
+
+ export namespace Session {
+   /**
+    * Configuration for input audio transcription; defaults to off and can be set to
+    * `null` to turn off once on. Input audio transcription is not native to the
+    * model, since the model consumes audio directly. Transcription runs
+    * asynchronously through Whisper and should be treated as rough guidance rather
+    * than the representation understood by the model.
+    */
+   export interface InputAudioTranscription {
+     /**
+      * The model to use for transcription; `whisper-1` is the only currently supported
+      * model.
+      */
+     model?: string;
+   }
+
+   export interface Tool {
+     /**
+      * The description of the function, including guidance on when and how to call it,
+      * and guidance about what to tell the user when calling (if anything).
+      */
+     description?: string;
+
+     /**
+      * The name of the function.
+      */
+     name?: string;
+
+     /**
+      * Parameters of the function in JSON Schema.
+      */
+     parameters?: unknown;
+
+     /**
+      * The type of the tool, i.e. `function`.
+      */
+     type?: 'function';
+   }
+
+   /**
+    * Configuration for turn detection. Can be set to `null` to turn off. Server VAD
+    * means that the model will detect the start and end of speech based on audio
+    * volume and respond at the end of user speech.
+    */
+   export interface TurnDetection {
+     /**
+      * Amount of audio to include before the VAD detected speech (in milliseconds).
+      * Defaults to 300ms.
+      */
+     prefix_padding_ms?: number;
+
+     /**
+      * Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms.
+      * With shorter values the model will respond more quickly, but may jump in on
+      * short pauses from the user.
+      */
+     silence_duration_ms?: number;
+
+     /**
+      * Activation threshold for VAD (0.0 to 1.0); defaults to 0.5. A higher
+      * threshold will require louder audio to activate the model, and thus might
+      * perform better in noisy environments.
+      */
+     threshold?: number;
+
+     /**
+      * Type of turn detection; only `server_vad` is currently supported.
+      */
+     type?: 'server_vad';
+   }
+ }
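The `TurnDetection` fields trade responsiveness against false triggers: a higher `threshold` and a longer `silence_duration_ms` make the model slower to jump in but more robust to background noise. A hedged sketch of a noisy-room tuning (values are illustrative, not recommendations; the subpath import is assumed from the file layout in this diff):

import type { Session } from 'openai/resources/beta/realtime/sessions';

// Illustrative server-VAD tuning for a noisy environment.
const turnDetection: Session.TurnDetection = {
  type: 'server_vad',
  threshold: 0.7, // default 0.5; higher requires louder audio to activate
  prefix_padding_ms: 300, // audio retained from before speech was detected
  silence_duration_ms: 800, // default 500; longer avoids cutting off slow speakers
};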
+
+ /**
+  * A new Realtime session configuration, with an ephemeral key. Default TTL for
+  * keys is one minute.
+  */
+ export interface SessionCreateResponse {
+   /**
+    * Ephemeral key returned by the API.
+    */
+   client_secret?: SessionCreateResponse.ClientSecret;
+
+   /**
+    * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
+    */
+   input_audio_format?: string;
+
+   /**
+    * Configuration for input audio transcription; defaults to off and can be set to
+    * `null` to turn off once on. Input audio transcription is not native to the
+    * model, since the model consumes audio directly. Transcription runs
+    * asynchronously through Whisper and should be treated as rough guidance rather
+    * than the representation understood by the model.
+    */
+   input_audio_transcription?: SessionCreateResponse.InputAudioTranscription;
+
+   /**
+    * The default system instructions (i.e. system message) prepended to model calls.
+    * This field allows the client to guide the model on desired responses. The model
+    * can be instructed on response content and format (e.g. "be extremely succinct",
+    * "act friendly", "here are examples of good responses") and on audio behavior
+    * (e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The
+    * instructions are not guaranteed to be followed by the model, but they provide
+    * guidance to the model on the desired behavior.
+    *
+    * Note that the server sets default instructions which will be used if this field
+    * is not set and are visible in the `session.created` event at the start of the
+    * session.
+    */
+   instructions?: string;
+
+   /**
+    * Maximum number of output tokens for a single assistant response, inclusive of
+    * tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
+    * `inf` for the maximum available tokens for a given model. Defaults to `inf`.
+    */
+   max_response_output_tokens?: number | 'inf';
+
+   /**
+    * The set of modalities the model can respond with. To disable audio, set this to
+    * ["text"].
+    */
+   modalities?: Array<'text' | 'audio'>;
+
+   /**
+    * The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
+    */
+   output_audio_format?: string;
+
+   /**
+    * Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8.
+    */
+   temperature?: number;
+
+   /**
+    * How the model chooses tools. Options are `auto`, `none`, `required`, or specify
+    * a function.
+    */
+   tool_choice?: string;
+
+   /**
+    * Tools (functions) available to the model.
+    */
+   tools?: Array<SessionCreateResponse.Tool>;
+
+   /**
+    * Configuration for turn detection. Can be set to `null` to turn off. Server VAD
+    * means that the model will detect the start and end of speech based on audio
+    * volume and respond at the end of user speech.
+    */
+   turn_detection?: SessionCreateResponse.TurnDetection;
+
+   /**
+    * The voice the model uses to respond. Voice cannot be changed during the session
+    * once the model has responded with audio at least once. Current voice options are
+    * `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer`, and `verse`.
+    */
+   voice?: 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse';
+ }
+
+ export namespace SessionCreateResponse {
+   /**
+    * Ephemeral key returned by the API.
+    */
+   export interface ClientSecret {
+     /**
+      * Timestamp for when the token expires. Currently, all tokens expire after one
+      * minute.
+      */
+     expires_at?: number;
+
+     /**
+      * Ephemeral key usable in client environments to authenticate connections to the
+      * Realtime API. Use this in client-side environments rather than a standard API
+      * token, which should only be used server-side.
+      */
+     value?: string;
+   }
+
+   /**
+    * Configuration for input audio transcription; defaults to off and can be set to
+    * `null` to turn off once on. Input audio transcription is not native to the
+    * model, since the model consumes audio directly. Transcription runs
+    * asynchronously through Whisper and should be treated as rough guidance rather
+    * than the representation understood by the model.
+    */
+   export interface InputAudioTranscription {
+     /**
+      * The model to use for transcription; `whisper-1` is the only currently supported
+      * model.
+      */
+     model?: string;
+   }
+
+   export interface Tool {
+     /**
+      * The description of the function, including guidance on when and how to call it,
+      * and guidance about what to tell the user when calling (if anything).
+      */
+     description?: string;
+
+     /**
+      * The name of the function.
+      */
+     name?: string;
+
+     /**
+      * Parameters of the function in JSON Schema.
+      */
+     parameters?: unknown;
+
+     /**
+      * The type of the tool, i.e. `function`.
+      */
+     type?: 'function';
+   }
+
+   /**
+    * Configuration for turn detection. Can be set to `null` to turn off. Server VAD
+    * means that the model will detect the start and end of speech based on audio
+    * volume and respond at the end of user speech.
+    */
+   export interface TurnDetection {
+     /**
+      * Amount of audio to include before the VAD detected speech (in milliseconds).
+      * Defaults to 300ms.
+      */
+     prefix_padding_ms?: number;
+
+     /**
+      * Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms.
+      * With shorter values the model will respond more quickly, but may jump in on
+      * short pauses from the user.
+      */
+     silence_duration_ms?: number;
+
+     /**
+      * Activation threshold for VAD (0.0 to 1.0); defaults to 0.5. A higher
+      * threshold will require louder audio to activate the model, and thus might
+      * perform better in noisy environments.
+      */
+     threshold?: number;
+
+     /**
+      * Type of turn detection; only `server_vad` is currently supported.
+      */
+     type?: string;
+   }
+ }
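Since `value` is meant for browsers while a standard API key must stay server-side, the natural pattern is a small endpoint that mints one token per client. A minimal sketch using Node's built-in `http` module (the `/token` route, port, and response shape are assumptions for illustration):

import http from 'node:http';
import OpenAI from 'openai';

const client = new OpenAI(); // the standard API key stays on the server

http
  .createServer(async (req, res) => {
    if (req.url !== '/token') {
      res.writeHead(404).end();
      return;
    }
    // Each browser session gets its own ephemeral key (TTL is about one minute).
    const session = await client.beta.realtime.sessions.create({
      model: 'gpt-4o-realtime-preview-2024-12-17',
    });
    res.writeHead(200, { 'content-type': 'application/json' });
    res.end(
      JSON.stringify({
        value: session.client_secret?.value, // hand this to the browser
        expires_at: session.client_secret?.expires_at, // Unix timestamp
      }),
    );
  })
  .listen(3000);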
+
+ export interface SessionCreateParams {
+   /**
+    * The Realtime model used for this session.
+    */
+   model:
+     | 'gpt-4o-realtime-preview'
+     | 'gpt-4o-realtime-preview-2024-10-01'
+     | 'gpt-4o-realtime-preview-2024-12-17'
+     | 'gpt-4o-mini-realtime-preview'
+     | 'gpt-4o-mini-realtime-preview-2024-12-17';
+
+   /**
+    * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
+    */
+   input_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw';
+
+   /**
+    * Configuration for input audio transcription; defaults to off and can be set to
+    * `null` to turn off once on. Input audio transcription is not native to the
+    * model, since the model consumes audio directly. Transcription runs
+    * asynchronously through Whisper and should be treated as rough guidance rather
+    * than the representation understood by the model.
+    */
+   input_audio_transcription?: SessionCreateParams.InputAudioTranscription;
+
+   /**
+    * The default system instructions (i.e. system message) prepended to model calls.
+    * This field allows the client to guide the model on desired responses. The model
+    * can be instructed on response content and format (e.g. "be extremely succinct",
+    * "act friendly", "here are examples of good responses") and on audio behavior
+    * (e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The
+    * instructions are not guaranteed to be followed by the model, but they provide
+    * guidance to the model on the desired behavior.
+    *
+    * Note that the server sets default instructions which will be used if this field
+    * is not set and are visible in the `session.created` event at the start of the
+    * session.
+    */
+   instructions?: string;
+
+   /**
+    * Maximum number of output tokens for a single assistant response, inclusive of
+    * tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
+    * `inf` for the maximum available tokens for a given model. Defaults to `inf`.
+    */
+   max_response_output_tokens?: number | 'inf';
+
+   /**
+    * The set of modalities the model can respond with. To disable audio, set this to
+    * ["text"].
+    */
+   modalities?: Array<'text' | 'audio'>;
+
+   /**
+    * The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
+    */
+   output_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw';
+
+   /**
+    * Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8.
+    */
+   temperature?: number;
+
+   /**
+    * How the model chooses tools. Options are `auto`, `none`, `required`, or specify
+    * a function.
+    */
+   tool_choice?: string;
+
+   /**
+    * Tools (functions) available to the model.
+    */
+   tools?: Array<SessionCreateParams.Tool>;
+
+   /**
+    * Configuration for turn detection. Can be set to `null` to turn off. Server VAD
+    * means that the model will detect the start and end of speech based on audio
+    * volume and respond at the end of user speech.
+    */
+   turn_detection?: SessionCreateParams.TurnDetection;
+
+   /**
+    * The voice the model uses to respond. Voice cannot be changed during the session
+    * once the model has responded with audio at least once. Current voice options are
+    * `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer`, and `verse`.
+    */
+   voice?: 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse';
+ }
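Putting the parameters together, a hedged example of a fully specified request body; every value is illustrative and `get_weather` is a hypothetical function (the subpath import is assumed from the file layout in this diff):

import type { SessionCreateParams } from 'openai/resources/beta/realtime/sessions';

const params: SessionCreateParams = {
  model: 'gpt-4o-mini-realtime-preview', // required; all other fields are optional
  modalities: ['text', 'audio'],
  voice: 'verse',
  instructions: 'Be extremely succinct.',
  input_audio_format: 'pcm16',
  output_audio_format: 'pcm16',
  temperature: 0.8, // allowed range is [0.6, 1.2]
  max_response_output_tokens: 'inf',
  tool_choice: 'auto',
  tools: [
    {
      type: 'function',
      name: 'get_weather', // hypothetical function for illustration
      description: 'Look up the current weather for a city.',
      parameters: {
        type: 'object',
        properties: { city: { type: 'string' } },
        required: ['city'],
      },
    },
  ],
  turn_detection: {
    type: 'server_vad',
    create_response: true, // auto-generate a response when the user stops speaking
  },
};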
+
+ export namespace SessionCreateParams {
+   /**
+    * Configuration for input audio transcription; defaults to off and can be set to
+    * `null` to turn off once on. Input audio transcription is not native to the
+    * model, since the model consumes audio directly. Transcription runs
+    * asynchronously through Whisper and should be treated as rough guidance rather
+    * than the representation understood by the model.
+    */
+   export interface InputAudioTranscription {
+     /**
+      * The model to use for transcription; `whisper-1` is the only currently supported
+      * model.
+      */
+     model?: string;
+   }
+
+   export interface Tool {
+     /**
+      * The description of the function, including guidance on when and how to call it,
+      * and guidance about what to tell the user when calling (if anything).
+      */
+     description?: string;
+
+     /**
+      * The name of the function.
+      */
+     name?: string;
+
+     /**
+      * Parameters of the function in JSON Schema.
+      */
+     parameters?: unknown;
+
+     /**
+      * The type of the tool, i.e. `function`.
+      */
+     type?: 'function';
+   }
+
+   /**
+    * Configuration for turn detection. Can be set to `null` to turn off. Server VAD
+    * means that the model will detect the start and end of speech based on audio
+    * volume and respond at the end of user speech.
+    */
+   export interface TurnDetection {
+     /**
+      * Whether or not to automatically generate a response when VAD is enabled. `true`
+      * by default.
+      */
+     create_response?: boolean;
+
+     /**
+      * Amount of audio to include before the VAD detected speech (in milliseconds).
+      * Defaults to 300ms.
+      */
+     prefix_padding_ms?: number;
+
+     /**
+      * Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms.
+      * With shorter values the model will respond more quickly, but may jump in on
+      * short pauses from the user.
+      */
+     silence_duration_ms?: number;
+
+     /**
+      * Activation threshold for VAD (0.0 to 1.0); defaults to 0.5. A higher
+      * threshold will require louder audio to activate the model, and thus might
+      * perform better in noisy environments.
+      */
+     threshold?: number;
+
+     /**
+      * Type of turn detection; only `server_vad` is currently supported.
+      */
+     type?: string;
+   }
+ }
+
+ export declare namespace Sessions {
+   export {
+     type Session as Session,
+     type SessionCreateResponse as SessionCreateResponse,
+     type SessionCreateParams as SessionCreateParams,
+   };
+ }
package/src/version.ts CHANGED
@@ -1 +1 @@
- export const VERSION = '4.77.3'; // x-release-please-version
+ export const VERSION = '4.78.0'; // x-release-please-version
package/version.d.ts CHANGED
@@ -1,2 +1,2 @@
- export declare const VERSION = "4.77.3";
+ export declare const VERSION = "4.78.0";
  //# sourceMappingURL=version.d.ts.map
package/version.js CHANGED
@@ -1,5 +1,5 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.VERSION = void 0;
- exports.VERSION = '4.77.3'; // x-release-please-version
+ exports.VERSION = '4.78.0'; // x-release-please-version
  //# sourceMappingURL=version.js.map
package/version.mjs CHANGED
@@ -1,2 +1,2 @@
- export const VERSION = '4.77.3'; // x-release-please-version
+ export const VERSION = '4.78.0'; // x-release-please-version
  //# sourceMappingURL=version.mjs.map