@livekit/agents-plugin-openai 1.0.31 → 1.0.33
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/realtime/api_proto.cjs.map +1 -1
- package/dist/realtime/api_proto.d.cts +50 -12
- package/dist/realtime/api_proto.d.ts +50 -12
- package/dist/realtime/api_proto.d.ts.map +1 -1
- package/dist/realtime/api_proto.js.map +1 -1
- package/dist/realtime/index.cjs +19 -0
- package/dist/realtime/index.cjs.map +1 -1
- package/dist/realtime/index.d.cts +1 -0
- package/dist/realtime/index.d.ts +1 -0
- package/dist/realtime/index.d.ts.map +1 -1
- package/dist/realtime/index.js +4 -0
- package/dist/realtime/index.js.map +1 -1
- package/dist/realtime/realtime_model.cjs +69 -33
- package/dist/realtime/realtime_model.cjs.map +1 -1
- package/dist/realtime/realtime_model.d.cts +14 -6
- package/dist/realtime/realtime_model.d.ts +14 -6
- package/dist/realtime/realtime_model.d.ts.map +1 -1
- package/dist/realtime/realtime_model.js +69 -33
- package/dist/realtime/realtime_model.js.map +1 -1
- package/dist/realtime/realtime_model_beta.cjs +1300 -0
- package/dist/realtime/realtime_model_beta.cjs.map +1 -0
- package/dist/realtime/realtime_model_beta.d.cts +165 -0
- package/dist/realtime/realtime_model_beta.d.ts +165 -0
- package/dist/realtime/realtime_model_beta.d.ts.map +1 -0
- package/dist/realtime/realtime_model_beta.js +1280 -0
- package/dist/realtime/realtime_model_beta.js.map +1 -0
- package/package.json +5 -5
- package/src/realtime/api_proto.ts +76 -17
- package/src/realtime/index.ts +1 -0
- package/src/realtime/realtime_model.ts +86 -49
- package/src/realtime/realtime_model_beta.ts +1665 -0
package/dist/realtime/realtime_model_beta.js
@@ -0,0 +1,1280 @@
import {
  APIConnectionError,
  APIError,
  AudioByteStream,
  DEFAULT_API_CONNECT_OPTIONS,
  Future,
  Queue,
  Task,
  cancelAndWait,
  delay,
  isAPIError,
  llm,
  log,
  shortuuid,
  stream
} from "@livekit/agents";
import { Mutex } from "@livekit/mutex";
import { AudioFrame, combineAudioFrames } from "@livekit/rtc-node";
import { WebSocket } from "ws";
import * as api_proto from "./api_proto.js";
const lkOaiDebug = process.env.LK_OPENAI_DEBUG ? Number(process.env.LK_OPENAI_DEBUG) : 0;
const SAMPLE_RATE = 24e3;
const NUM_CHANNELS = 1;
const BASE_URL = "https://api.openai.com/v1";
const MOCK_AUDIO_ID_PREFIX = "lk_mock_audio_item_";
class CreateResponseHandle {
  instructions;
  doneFut;
  // TODO(shubhra): add timeout
  constructor({ instructions }) {
    this.instructions = instructions;
    this.doneFut = new Future();
  }
}
const DEFAULT_FIRST_RETRY_INTERVAL_MS = 100;
const DEFAULT_TEMPERATURE = 0.8;
const DEFAULT_TURN_DETECTION = {
  type: "semantic_vad",
  eagerness: "medium",
  create_response: true,
  interrupt_response: true
};
const DEFAULT_INPUT_AUDIO_TRANSCRIPTION = {
  model: "gpt-4o-mini-transcribe"
};
const DEFAULT_TOOL_CHOICE = "auto";
const DEFAULT_MAX_RESPONSE_OUTPUT_TOKENS = "inf";
const AZURE_DEFAULT_INPUT_AUDIO_TRANSCRIPTION = {
  model: "whisper-1"
};
const AZURE_DEFAULT_TURN_DETECTION = {
  type: "server_vad",
  threshold: 0.5,
  prefix_padding_ms: 300,
  silence_duration_ms: 200,
  create_response: true
};
const DEFAULT_MAX_SESSION_DURATION = 20 * 60 * 1e3;
const DEFAULT_REALTIME_MODEL_OPTIONS = {
  model: "gpt-realtime",
  voice: "marin",
  temperature: DEFAULT_TEMPERATURE,
  inputAudioTranscription: DEFAULT_INPUT_AUDIO_TRANSCRIPTION,
  turnDetection: DEFAULT_TURN_DETECTION,
  toolChoice: DEFAULT_TOOL_CHOICE,
  maxResponseOutputTokens: DEFAULT_MAX_RESPONSE_OUTPUT_TOKENS,
  maxSessionDuration: DEFAULT_MAX_SESSION_DURATION,
  connOptions: DEFAULT_API_CONNECT_OPTIONS,
  modalities: ["text", "audio"]
};
class RealtimeModel extends llm.RealtimeModel {
  sampleRate = api_proto.SAMPLE_RATE;
  numChannels = api_proto.NUM_CHANNELS;
  inFrameSize = api_proto.IN_FRAME_SIZE;
  outFrameSize = api_proto.OUT_FRAME_SIZE;
  /* @internal */
  _options;
  get model() {
    return this._options.model;
  }
  constructor(options = {}) {
    const modalities = options.modalities || DEFAULT_REALTIME_MODEL_OPTIONS.modalities;
    super({
      messageTruncation: true,
      turnDetection: options.turnDetection !== null,
      userTranscription: options.inputAudioTranscription !== null,
      autoToolReplyGeneration: false,
      audioOutput: modalities.includes("audio")
    });
    const isAzure = !!(options.apiVersion || options.entraToken || options.azureDeployment);
    if (options.apiKey === "" && !isAzure) {
      throw new Error(
        "OpenAI API key is required, either using the argument or by setting the OPENAI_API_KEY environment variable"
      );
    }
    const apiKey = options.apiKey || process.env.OPENAI_API_KEY;
    if (!apiKey && !isAzure) {
      throw new Error(
        "OpenAI API key is required, either using the argument or by setting the OPENAI_API_KEY environment variable"
      );
    }
    if (!options.baseURL && isAzure) {
      const azureEndpoint = process.env.AZURE_OPENAI_ENDPOINT;
      if (!azureEndpoint) {
        throw new Error(
          "Missing Azure endpoint. Please pass base_url or set AZURE_OPENAI_ENDPOINT environment variable."
        );
      }
      options.baseURL = `${azureEndpoint.replace(/\/$/, "")}/openai`;
    }
    const { modalities: _, ...optionsWithoutModalities } = options;
    this._options = {
      ...DEFAULT_REALTIME_MODEL_OPTIONS,
      ...optionsWithoutModalities,
      baseURL: options.baseURL || BASE_URL,
      apiKey,
      isAzure,
      model: options.model || DEFAULT_REALTIME_MODEL_OPTIONS.model,
      modalities
    };
  }
  /**
   * Create a RealtimeModel instance configured for Azure OpenAI Service.
   *
   * @param azureDeployment - The name of your Azure OpenAI deployment.
   * @param azureEndpoint - The endpoint URL for your Azure OpenAI resource. If undefined, will attempt to read from the environment variable AZURE_OPENAI_ENDPOINT.
   * @param apiVersion - API version to use with Azure OpenAI Service. If undefined, will attempt to read from the environment variable OPENAI_API_VERSION.
   * @param apiKey - Azure OpenAI API key. If undefined, will attempt to read from the environment variable AZURE_OPENAI_API_KEY.
   * @param entraToken - Azure Entra authentication token. Required if not using API key authentication.
   * @param baseURL - Base URL for the API endpoint. If undefined, constructed from the azure_endpoint.
   * @param voice - Voice setting for audio outputs. Defaults to "alloy".
   * @param inputAudioTranscription - Options for transcribing input audio. Defaults to @see DEFAULT_INPUT_AUDIO_TRANSCRIPTION.
   * @param turnDetection - Options for server-based voice activity detection (VAD). Defaults to @see DEFAULT_SERVER_VAD_OPTIONS.
   * @param temperature - Sampling temperature for response generation. Defaults to @see DEFAULT_TEMPERATURE.
   * @param speed - Speed of the audio output. Defaults to 1.0.
   * @param maxResponseOutputTokens - Maximum number of tokens in the response. Defaults to @see DEFAULT_MAX_RESPONSE_OUTPUT_TOKENS.
   * @param maxSessionDuration - Maximum duration of the session in milliseconds. Defaults to @see DEFAULT_MAX_SESSION_DURATION.
   *
   * @returns A RealtimeModel instance configured for Azure OpenAI Service.
   *
   * @throws Error if required Azure parameters are missing or invalid.
   */
  static withAzure({
    azureDeployment,
    azureEndpoint,
    apiVersion,
    apiKey,
    entraToken,
    baseURL,
    voice = "alloy",
    inputAudioTranscription = AZURE_DEFAULT_INPUT_AUDIO_TRANSCRIPTION,
    turnDetection = AZURE_DEFAULT_TURN_DETECTION,
    temperature = 0.8,
    speed
  }) {
    apiKey = apiKey || process.env.AZURE_OPENAI_API_KEY;
    if (!apiKey && !entraToken) {
      throw new Error(
        "Missing credentials. Please pass one of `apiKey`, `entraToken`, or the `AZURE_OPENAI_API_KEY` environment variable."
      );
    }
    apiVersion = apiVersion || process.env.OPENAI_API_VERSION;
    if (!apiVersion) {
      throw new Error(
        "Must provide either the `apiVersion` argument or the `OPENAI_API_VERSION` environment variable"
      );
    }
    if (!baseURL) {
      azureEndpoint = azureEndpoint || process.env.AZURE_OPENAI_ENDPOINT;
      if (!azureEndpoint) {
        throw new Error(
          "Missing Azure endpoint. Please pass the `azure_endpoint` parameter or set the `AZURE_OPENAI_ENDPOINT` environment variable."
        );
      }
      baseURL = `${azureEndpoint.replace(/\/$/, "")}/openai`;
    }
    return new RealtimeModel({
      voice,
      inputAudioTranscription,
      turnDetection,
      temperature,
      speed,
      apiKey,
      azureDeployment,
      apiVersion,
      entraToken,
      baseURL
    });
  }
  session() {
    return new RealtimeSession(this);
  }
  async close() {
    return;
  }
}
function processBaseURL({
  baseURL,
  model,
  isAzure = false,
  azureDeployment,
  apiVersion
}) {
  const url = new URL([baseURL, "realtime"].join("/"));
  if (url.protocol === "https:") {
    url.protocol = "wss:";
  }
  if (!url.pathname || ["", "/v1", "/openai"].includes(url.pathname.replace(/\/$/, ""))) {
    url.pathname = url.pathname.replace(/\/$/, "") + "/realtime";
  } else {
    url.pathname = url.pathname.replace(/\/$/, "");
  }
  const queryParams = {};
  if (isAzure) {
    if (apiVersion) {
      queryParams["api-version"] = apiVersion;
    }
    if (azureDeployment) {
      queryParams["deployment"] = azureDeployment;
    }
  } else {
    queryParams["model"] = model;
  }
  for (const [key, value] of Object.entries(queryParams)) {
    url.searchParams.set(key, value);
  }
  return url.toString();
}
class RealtimeSession extends llm.RealtimeSession {
  _tools = {};
  remoteChatCtx = new llm.RemoteChatContext();
  messageChannel = new Queue();
  inputResampler;
  instructions;
  oaiRealtimeModel;
  currentGeneration;
  responseCreatedFutures = {};
  textModeRecoveryRetries = 0;
  itemCreateFutures = {};
  itemDeleteFutures = {};
  updateChatCtxLock = new Mutex();
  updateFuncCtxLock = new Mutex();
  // 100ms chunks
  bstream = new AudioByteStream(SAMPLE_RATE, NUM_CHANNELS, SAMPLE_RATE / 10);
  pushedDurationMs = 0;
  #logger = log();
  #task;
  #closed = false;
  constructor(realtimeModel) {
    super(realtimeModel);
    this.oaiRealtimeModel = realtimeModel;
    this.#task = Task.from(({ signal }) => this.#mainTask(signal));
    this.sendEvent(this.createSessionUpdateEvent());
  }
  sendEvent(command) {
    this.messageChannel.put(command);
  }
  createSessionUpdateEvent() {
    const modalities = this.oaiRealtimeModel._options.modalities.includes("audio") ? ["text", "audio"] : ["text"];
    return {
      type: "session.update",
      session: {
        model: this.oaiRealtimeModel._options.model,
        voice: this.oaiRealtimeModel._options.voice,
        input_audio_format: "pcm16",
        output_audio_format: "pcm16",
        modalities,
        turn_detection: this.oaiRealtimeModel._options.turnDetection,
        input_audio_transcription: this.oaiRealtimeModel._options.inputAudioTranscription,
        // TODO(shubhra): add inputAudioNoiseReduction
        temperature: this.oaiRealtimeModel._options.temperature,
        tool_choice: toOaiToolChoice(this.oaiRealtimeModel._options.toolChoice),
        max_response_output_tokens: this.oaiRealtimeModel._options.maxResponseOutputTokens === Infinity ? "inf" : this.oaiRealtimeModel._options.maxResponseOutputTokens,
        // TODO(shubhra): add tracing options
        instructions: this.instructions,
        speed: this.oaiRealtimeModel._options.speed
      }
    };
  }
  get chatCtx() {
    return this.remoteChatCtx.toChatCtx();
  }
  get tools() {
    return { ...this._tools };
  }
  async updateChatCtx(_chatCtx) {
    const unlock = await this.updateChatCtxLock.lock();
    const events = this.createChatCtxUpdateEvents(_chatCtx);
    const futures = [];
    for (const event of events) {
      const future = new Future();
      futures.push(future);
      if (event.type === "conversation.item.create") {
        this.itemCreateFutures[event.item.id] = future;
      } else if (event.type == "conversation.item.delete") {
        this.itemDeleteFutures[event.item_id] = future;
      }
      this.sendEvent(event);
    }
    if (futures.length === 0) {
      unlock();
      return;
    }
    try {
      await Promise.race([
        Promise.all(futures),
        delay(5e3).then(() => {
          throw new Error("Chat ctx update events timed out");
        })
      ]);
    } catch (e) {
      this.#logger.error(e.message);
      throw e;
    } finally {
      unlock();
    }
  }
  createChatCtxUpdateEvents(chatCtx, addMockAudio = false) {
    const newChatCtx = chatCtx.copy();
    if (addMockAudio) {
      newChatCtx.items.push(createMockAudioItem());
    } else {
      newChatCtx.items = newChatCtx.items.filter(
        (item) => !item.id.startsWith(MOCK_AUDIO_ID_PREFIX)
      );
    }
    const events = [];
    const diffOps = llm.computeChatCtxDiff(this.chatCtx, newChatCtx);
    for (const op of diffOps.toRemove) {
      events.push({
        type: "conversation.item.delete",
        item_id: op,
        event_id: shortuuid("chat_ctx_delete_")
      });
    }
    for (const [previousId, id] of diffOps.toCreate) {
      const chatItem = newChatCtx.getById(id);
      if (!chatItem) {
        throw new Error(`Chat item ${id} not found`);
      }
      events.push({
        type: "conversation.item.create",
        item: livekitItemToOpenAIItem(chatItem),
        previous_item_id: previousId ?? void 0,
        event_id: shortuuid("chat_ctx_create_")
      });
    }
    return events;
  }
  async updateTools(_tools) {
    const unlock = await this.updateFuncCtxLock.lock();
    const ev = this.createToolsUpdateEvent(_tools);
    this.sendEvent(ev);
    if (!ev.session.tools) {
      throw new Error("Tools are missing in the session update event");
    }
    const retainedToolNames = new Set(ev.session.tools.map((tool) => tool.name));
    const retainedTools = Object.fromEntries(
      Object.entries(_tools).filter(
        ([name, tool]) => llm.isFunctionTool(tool) && retainedToolNames.has(name)
      )
    );
    this._tools = retainedTools;
    unlock();
  }
  createToolsUpdateEvent(_tools) {
    const oaiTools = [];
    for (const [name, tool] of Object.entries(_tools)) {
      if (!llm.isFunctionTool(tool)) {
        this.#logger.error({ name, tool }, "OpenAI Realtime API doesn't support this tool type");
        continue;
      }
      const { parameters: toolParameters, description } = tool;
      try {
        const parameters = llm.toJsonSchema(
          toolParameters
        );
        oaiTools.push({
          name,
          description,
          parameters,
          type: "function"
        });
      } catch (e) {
        this.#logger.error({ name, tool }, "OpenAI Realtime API doesn't support this tool type");
        continue;
      }
    }
    return {
      type: "session.update",
      session: {
        model: this.oaiRealtimeModel._options.model,
        tools: oaiTools
      },
      event_id: shortuuid("tools_update_")
    };
  }
  async updateInstructions(_instructions) {
    const eventId = shortuuid("instructions_update_");
    this.sendEvent({
      type: "session.update",
      session: {
        instructions: _instructions
      },
      event_id: eventId
    });
    this.instructions = _instructions;
  }
  updateOptions({ toolChoice }) {
    const options = {};
    this.oaiRealtimeModel._options.toolChoice = toolChoice;
    options.tool_choice = toOaiToolChoice(toolChoice);
    this.sendEvent({
      type: "session.update",
      session: options,
      event_id: shortuuid("options_update_")
    });
  }
  pushAudio(frame) {
    for (const f of this.resampleAudio(frame)) {
      for (const nf of this.bstream.write(f.data.buffer)) {
        this.sendEvent({
          type: "input_audio_buffer.append",
          audio: Buffer.from(nf.data.buffer).toString("base64")
        });
        this.pushedDurationMs += nf.samplesPerChannel / nf.sampleRate * 1e3;
      }
    }
  }
  async commitAudio() {
    if (this.pushedDurationMs > 100) {
      this.sendEvent({
        type: "input_audio_buffer.commit"
      });
      this.pushedDurationMs = 0;
    }
  }
  async clearAudio() {
    this.sendEvent({
      type: "input_audio_buffer.clear"
    });
    this.pushedDurationMs = 0;
  }
  async generateReply(instructions) {
    const handle = this.createResponse({ instructions, userInitiated: true });
    this.textModeRecoveryRetries = 0;
    return handle.doneFut.await;
  }
  async interrupt() {
    this.sendEvent({
      type: "response.cancel"
    });
  }
  async truncate(_options) {
    if (!_options.modalities || _options.modalities.includes("audio")) {
      this.sendEvent({
        type: "conversation.item.truncate",
        content_index: 0,
        item_id: _options.messageId,
        audio_end_ms: _options.audioEndMs
      });
    } else if (_options.audioTranscript !== void 0) {
      const chatCtx = this.chatCtx.copy();
      const idx = chatCtx.indexById(_options.messageId);
      if (idx !== void 0) {
        const item = chatCtx.items[idx];
        if (item && item.type === "message") {
          const newItem = llm.ChatMessage.create({
            ...item,
            content: [_options.audioTranscript]
          });
          chatCtx.items[idx] = newItem;
          const events = this.createChatCtxUpdateEvents(chatCtx);
          for (const ev of events) {
            this.sendEvent(ev);
          }
        }
      }
    }
  }
  loggableEvent(event) {
    const untypedEvent = {};
    for (const [key, value] of Object.entries(event)) {
      if (value !== void 0) {
        untypedEvent[key] = value;
      }
    }
    if (untypedEvent.audio && typeof untypedEvent.audio === "string") {
      return { ...untypedEvent, audio: "..." };
    }
    if (untypedEvent.delta && typeof untypedEvent.delta === "string" && event.type === "response.audio.delta") {
      return { ...untypedEvent, delta: "..." };
    }
    return untypedEvent;
  }
  async createWsConn() {
    const headers = {
      "User-Agent": "LiveKit-Agents-JS"
    };
    if (this.oaiRealtimeModel._options.isAzure) {
      if (this.oaiRealtimeModel._options.entraToken) {
        headers.Authorization = `Bearer ${this.oaiRealtimeModel._options.entraToken}`;
      } else if (this.oaiRealtimeModel._options.apiKey) {
        headers["api-key"] = this.oaiRealtimeModel._options.apiKey;
      } else {
        throw new Error("Microsoft API key or entraToken is required");
      }
    } else {
      headers.Authorization = `Bearer ${this.oaiRealtimeModel._options.apiKey}`;
      headers["OpenAI-Beta"] = "realtime=v1";
    }
    const url = processBaseURL({
      baseURL: this.oaiRealtimeModel._options.baseURL,
      model: this.oaiRealtimeModel._options.model,
      isAzure: this.oaiRealtimeModel._options.isAzure,
      apiVersion: this.oaiRealtimeModel._options.apiVersion,
      azureDeployment: this.oaiRealtimeModel._options.azureDeployment
    });
    if (lkOaiDebug) {
      this.#logger.debug(`Connecting to OpenAI Realtime API at ${url}`);
    }
    return new Promise((resolve, reject) => {
      const ws = new WebSocket(url, { headers });
      let waiting = true;
      const timeout = setTimeout(() => {
        ws.close();
        reject(new Error("WebSocket connection timeout"));
      }, this.oaiRealtimeModel._options.connOptions.timeoutMs);
      ws.once("open", () => {
        if (!waiting) return;
        waiting = false;
        clearTimeout(timeout);
        resolve(ws);
      });
      ws.once("close", () => {
        if (!waiting) return;
        waiting = false;
        clearTimeout(timeout);
        reject(new Error("OpenAI Realtime API connection closed"));
      });
    });
  }
  async #mainTask(signal) {
    let reconnecting = false;
    let numRetries = 0;
    let wsConn = null;
    const maxRetries = this.oaiRealtimeModel._options.connOptions.maxRetry;
    const reconnect = async () => {
      this.#logger.debug(
        {
          maxSessionDuration: this.oaiRealtimeModel._options.maxSessionDuration
        },
        "Reconnecting to OpenAI Realtime API"
      );
      const events = [];
      events.push(this.createSessionUpdateEvent());
      if (Object.keys(this._tools).length > 0) {
        events.push(this.createToolsUpdateEvent(this._tools));
      }
      const chatCtx = this.chatCtx.copy({
        excludeFunctionCall: true,
        excludeInstructions: true,
        excludeEmptyMessage: true
      });
      const oldChatCtx = this.remoteChatCtx;
      this.remoteChatCtx = new llm.RemoteChatContext();
      events.push(...this.createChatCtxUpdateEvents(chatCtx));
      try {
        for (const ev of events) {
          this.emit("openai_client_event_queued", ev);
          wsConn.send(JSON.stringify(ev));
        }
      } catch (error) {
        this.remoteChatCtx = oldChatCtx;
        throw new APIConnectionError({
          message: "Failed to send message to OpenAI Realtime API during session re-connection"
        });
      }
      this.#logger.debug("Reconnected to OpenAI Realtime API");
      this.emit("session_reconnected", {});
    };
    reconnecting = false;
    while (!this.#closed && !signal.aborted) {
      this.#logger.debug("Creating WebSocket connection to OpenAI Realtime API");
      wsConn = await this.createWsConn();
      if (signal.aborted) break;
      try {
        if (reconnecting) {
          await reconnect();
          if (signal.aborted) break;
          numRetries = 0;
        }
        await this.runWs(wsConn);
        if (signal.aborted) break;
      } catch (error) {
        if (!isAPIError(error)) {
          this.emitError({ error, recoverable: false });
          throw error;
        }
        if (maxRetries === 0 || !error.retryable) {
          this.emitError({ error, recoverable: false });
          throw error;
        }
        if (numRetries === maxRetries) {
          this.emitError({ error, recoverable: false });
          throw new APIConnectionError({
            message: `OpenAI Realtime API connection failed after ${numRetries} attempts`,
            options: {
              body: error,
              retryable: false
            }
          });
        }
        this.emitError({ error, recoverable: true });
        const retryInterval = numRetries === 0 ? DEFAULT_FIRST_RETRY_INTERVAL_MS : this.oaiRealtimeModel._options.connOptions.retryIntervalMs;
        this.#logger.warn(
          {
            attempt: numRetries,
            maxRetries,
            error
          },
          `OpenAI Realtime API connection failed, retrying in ${retryInterval / 1e3}s`
        );
        await delay(retryInterval);
        numRetries++;
      }
      reconnecting = true;
    }
  }
  async runWs(wsConn) {
    const forwardEvents = async (signal) => {
      const abortFuture = new Future();
      signal.addEventListener("abort", () => abortFuture.resolve());
      while (!this.#closed && wsConn.readyState === WebSocket.OPEN && !signal.aborted) {
        try {
          const event = await Promise.race([this.messageChannel.get(), abortFuture.await]);
          if (signal.aborted || abortFuture.done || event === void 0) {
            break;
          }
          if (lkOaiDebug) {
            this.#logger.debug(this.loggableEvent(event), `(client) -> ${event.type}`);
          }
          this.emit("openai_client_event_queued", event);
          wsConn.send(JSON.stringify(event));
        } catch (error) {
          break;
        }
      }
      wsConn.close();
    };
    const wsCloseFuture = new Future();
    wsConn.onerror = (error) => {
      wsCloseFuture.resolve(new APIConnectionError({ message: error.message }));
    };
    wsConn.onclose = () => {
      wsCloseFuture.resolve();
    };
    wsConn.onmessage = (message) => {
      const event = JSON.parse(message.data);
      this.emit("openai_server_event_received", event);
      if (lkOaiDebug) {
        this.#logger.debug(this.loggableEvent(event), `(server) <- ${event.type}`);
      }
      switch (event.type) {
        case "input_audio_buffer.speech_started":
          this.handleInputAudioBufferSpeechStarted(event);
          break;
        case "input_audio_buffer.speech_stopped":
          this.handleInputAudioBufferSpeechStopped(event);
          break;
        case "response.created":
          this.handleResponseCreated(event);
          break;
        case "response.output_item.added":
          this.handleResponseOutputItemAdded(event);
          break;
        case "conversation.item.created":
          this.handleConversationItemCreated(event);
          break;
        case "conversation.item.deleted":
          this.handleConversationItemDeleted(event);
          break;
        case "conversation.item.input_audio_transcription.completed":
          this.handleConversationItemInputAudioTranscriptionCompleted(event);
          break;
        case "conversation.item.input_audio_transcription.failed":
          this.handleConversationItemInputAudioTranscriptionFailed(event);
          break;
        case "response.content_part.added":
          this.handleResponseContentPartAdded(event);
          break;
        case "response.content_part.done":
          this.handleResponseContentPartDone(event);
          break;
        case "response.text.delta":
          this.handleResponseTextDelta(event);
          break;
        case "response.text.done":
          this.handleResponseTextDone(event);
          break;
        case "response.audio_transcript.delta":
          this.handleResponseAudioTranscriptDelta(event);
          break;
        case "response.audio.delta":
          this.handleResponseAudioDelta(event);
          break;
        case "response.audio_transcript.done":
          this.handleResponseAudioTranscriptDone(event);
          break;
        case "response.audio.done":
          this.handleResponseAudioDone(event);
          break;
        case "response.output_item.done":
          this.handleResponseOutputItemDone(event);
          break;
        case "response.done":
          this.handleResponseDone(event);
          break;
        case "error":
          this.handleError(event);
          break;
        default:
          if (lkOaiDebug) {
            this.#logger.debug(`unhandled event: ${event.type}`);
          }
          break;
      }
    };
    const sendTask = Task.from(({ signal }) => forwardEvents(signal));
    const wsTask = Task.from(({ signal }) => {
      const abortPromise = new Promise((resolve) => {
        signal.addEventListener("abort", () => {
          resolve();
        });
      });
      return Promise.race([wsCloseFuture.await, abortPromise]);
    });
    const waitReconnectTask = Task.from(async ({ signal }) => {
      await delay(this.oaiRealtimeModel._options.maxSessionDuration, { signal });
      return new APIConnectionError({
        message: "OpenAI Realtime API connection timeout"
      });
    });
    try {
      const result = await Promise.race([wsTask.result, sendTask.result, waitReconnectTask.result]);
      if (waitReconnectTask.done && this.currentGeneration) {
        await this.currentGeneration._doneFut.await;
      }
      if (result instanceof Error) {
        throw result;
      }
    } finally {
      await cancelAndWait([wsTask, sendTask, waitReconnectTask], 2e3);
      wsConn.close();
    }
  }
  async close() {
    super.close();
    this.#closed = true;
    await this.#task;
  }
  handleInputAudioBufferSpeechStarted(_event) {
    this.emit("input_speech_started", {});
  }
  handleInputAudioBufferSpeechStopped(_event) {
    this.emit("input_speech_stopped", {
      userTranscriptionEnabled: this.oaiRealtimeModel._options.inputAudioTranscription !== null
    });
  }
  handleResponseCreated(event) {
    var _a;
    if (!event.response.id) {
      throw new Error("response.id is missing");
    }
    this.currentGeneration = {
      messageChannel: stream.createStreamChannel(),
      functionChannel: stream.createStreamChannel(),
      messages: /* @__PURE__ */ new Map(),
      _doneFut: new Future(),
      _createdTimestamp: Date.now()
    };
    const generationEv = {
      messageStream: this.currentGeneration.messageChannel.stream(),
      functionStream: this.currentGeneration.functionChannel.stream(),
      userInitiated: false,
      responseId: event.response.id
    };
    const clientEventId = (_a = event.response.metadata) == null ? void 0 : _a.client_event_id;
    if (clientEventId) {
      const handle = this.responseCreatedFutures[clientEventId];
      if (handle) {
        delete this.responseCreatedFutures[clientEventId];
        generationEv.userInitiated = true;
        if (!handle.doneFut.done) {
          handle.doneFut.resolve(generationEv);
        }
      }
    }
    this.emit("generation_created", generationEv);
  }
  handleResponseOutputItemAdded(event) {
    if (!this.currentGeneration) {
      throw new Error("currentGeneration is not set");
    }
    if (!event.item.type) {
      throw new Error("item.type is not set");
    }
    if (!event.response_id) {
      throw new Error("response_id is not set");
    }
    const itemType = event.item.type;
    const responseId = event.response_id;
    if (itemType !== "message") {
      this.resolveGeneration(responseId);
      this.textModeRecoveryRetries = 0;
      return;
    }
    const itemId = event.item.id;
    if (!itemId) {
      throw new Error("item.id is not set");
    }
    const modalitiesFut = new Future();
    const itemGeneration = {
      messageId: itemId,
      textChannel: stream.createStreamChannel(),
      audioChannel: stream.createStreamChannel(),
      audioTranscript: "",
      modalities: modalitiesFut
    };
    if (!this.oaiRealtimeModel.capabilities.audioOutput) {
      itemGeneration.audioChannel.close();
      modalitiesFut.resolve(["text"]);
    }
    this.currentGeneration.messageChannel.write({
      messageId: itemId,
      textStream: itemGeneration.textChannel.stream(),
      audioStream: itemGeneration.audioChannel.stream(),
      modalities: modalitiesFut.await
    });
    this.currentGeneration.messages.set(itemId, itemGeneration);
  }
  handleConversationItemCreated(event) {
    if (!event.item.id) {
      throw new Error("item.id is not set");
    }
    try {
      this.remoteChatCtx.insert(event.previous_item_id, openAIItemToLivekitItem(event.item));
    } catch (error) {
      this.#logger.error({ error, itemId: event.item.id }, "failed to insert conversation item");
    }
    const fut = this.itemCreateFutures[event.item.id];
    if (fut) {
      fut.resolve();
      delete this.itemCreateFutures[event.item.id];
    }
  }
  handleConversationItemDeleted(event) {
    if (!event.item_id) {
      throw new Error("item_id is not set");
    }
    try {
      this.remoteChatCtx.delete(event.item_id);
    } catch (error) {
      this.#logger.error({ error, itemId: event.item_id }, "failed to delete conversation item");
    }
    const fut = this.itemDeleteFutures[event.item_id];
    if (fut) {
      fut.resolve();
      delete this.itemDeleteFutures[event.item_id];
    }
  }
  handleConversationItemInputAudioTranscriptionCompleted(event) {
    const remoteItem = this.remoteChatCtx.get(event.item_id);
    if (!remoteItem) {
      return;
    }
    const item = remoteItem.item;
    if (item instanceof llm.ChatMessage) {
      item.content.push(event.transcript);
    } else {
      throw new Error("item is not a chat message");
    }
    this.emit("input_audio_transcription_completed", {
      itemId: event.item_id,
      transcript: event.transcript,
      isFinal: true
    });
  }
  handleConversationItemInputAudioTranscriptionFailed(event) {
    this.#logger.error(
      { error: event.error },
      "OpenAI Realtime API failed to transcribe input audio"
    );
  }
  handleResponseContentPartAdded(event) {
    if (!this.currentGeneration) {
      throw new Error("currentGeneration is not set");
    }
    const itemId = event.item_id;
    const itemType = event.part.type;
    const itemGeneration = this.currentGeneration.messages.get(itemId);
    if (!itemGeneration) {
      this.#logger.warn(`itemGeneration not found for itemId=${itemId}`);
      return;
    }
    if (itemType === "text" && this.oaiRealtimeModel.capabilities.audioOutput) {
      this.#logger.warn("Text response received from OpenAI Realtime API in audio modality.");
    }
    if (!itemGeneration.modalities.done) {
      const modalityResult = itemType === "text" ? ["text"] : ["audio", "text"];
      itemGeneration.modalities.resolve(modalityResult);
    }
    if (this.currentGeneration._firstTokenTimestamp === void 0) {
      this.currentGeneration._firstTokenTimestamp = Date.now();
    }
  }
  handleResponseContentPartDone(event) {
    if (!event.part) {
      return;
    }
    if (event.part.type !== "text") {
      return;
    }
    if (!this.currentGeneration) {
      throw new Error("currentGeneration is not set");
    }
  }
  handleResponseTextDelta(event) {
    if (!this.currentGeneration) {
      throw new Error("currentGeneration is not set");
    }
    const itemGeneration = this.currentGeneration.messages.get(event.item_id);
    if (!itemGeneration) {
      throw new Error("itemGeneration is not set");
    }
    if (!this.oaiRealtimeModel.capabilities.audioOutput && !this.currentGeneration._firstTokenTimestamp) {
      this.currentGeneration._firstTokenTimestamp = Date.now();
    }
    itemGeneration.textChannel.write(event.delta);
    itemGeneration.audioTranscript += event.delta;
  }
  handleResponseTextDone(_event) {
    if (!this.currentGeneration) {
      throw new Error("currentGeneration is not set");
    }
  }
  handleResponseAudioTranscriptDelta(event) {
    if (!this.currentGeneration) {
      throw new Error("currentGeneration is not set");
    }
    const itemId = event.item_id;
    const delta = event.delta;
    const itemGeneration = this.currentGeneration.messages.get(itemId);
    if (!itemGeneration) {
      throw new Error("itemGeneration is not set");
    } else {
      itemGeneration.textChannel.write(delta);
      itemGeneration.audioTranscript += delta;
    }
  }
  handleResponseAudioDelta(event) {
    if (!this.currentGeneration) {
      throw new Error("currentGeneration is not set");
    }
    const itemGeneration = this.currentGeneration.messages.get(event.item_id);
    if (!itemGeneration) {
      throw new Error("itemGeneration is not set");
    }
    if (this.currentGeneration._firstTokenTimestamp === void 0) {
      this.currentGeneration._firstTokenTimestamp = Date.now();
    }
    if (!itemGeneration.modalities.done) {
      itemGeneration.modalities.resolve(["audio", "text"]);
    }
    const binaryString = atob(event.delta);
    const len = binaryString.length;
    const bytes = new Uint8Array(len);
    for (let i = 0; i < len; i++) {
      bytes[i] = binaryString.charCodeAt(i);
    }
    itemGeneration.audioChannel.write(
      new AudioFrame(
        new Int16Array(bytes.buffer),
        api_proto.SAMPLE_RATE,
        api_proto.NUM_CHANNELS,
        bytes.length / 2
      )
    );
  }
  handleResponseAudioTranscriptDone(_event) {
    if (!this.currentGeneration) {
      throw new Error("currentGeneration is not set");
    }
  }
  handleResponseAudioDone(_event) {
    if (!this.currentGeneration) {
      throw new Error("currentGeneration is not set");
    }
  }
  handleResponseOutputItemDone(event) {
    if (!this.currentGeneration) {
      throw new Error("currentGeneration is not set");
    }
    const itemId = event.item.id;
    const itemType = event.item.type;
    if (itemType === "function_call") {
      const item = event.item;
      if (!item.call_id || !item.name || !item.arguments) {
        throw new Error("item is not a function call");
      }
      this.currentGeneration.functionChannel.write(
        llm.FunctionCall.create({
          callId: item.call_id,
          name: item.name,
          args: item.arguments
        })
      );
    } else if (itemType === "message") {
      const itemGeneration = this.currentGeneration.messages.get(itemId);
      if (!itemGeneration) {
        return;
      }
      itemGeneration.textChannel.close();
      itemGeneration.audioChannel.close();
      if (!itemGeneration.modalities.done) {
        itemGeneration.modalities.resolve(this.oaiRealtimeModel._options.modalities);
      }
    }
  }
  handleResponseDone(_event) {
    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
    if (!this.currentGeneration) {
      return;
    }
    const createdTimestamp = this.currentGeneration._createdTimestamp;
    const firstTokenTimestamp = this.currentGeneration._firstTokenTimestamp;
    this.#logger.debug(
      {
        messageCount: this.currentGeneration.messages.size
      },
      "Closing generation channels in handleResponseDone"
    );
    for (const generation of this.currentGeneration.messages.values()) {
      generation.textChannel.close();
      generation.audioChannel.close();
      if (!generation.modalities.done) {
        generation.modalities.resolve(this.oaiRealtimeModel._options.modalities);
      }
    }
    this.currentGeneration.functionChannel.close();
    this.currentGeneration.messageChannel.close();
    for (const itemId of this.currentGeneration.messages.keys()) {
      const remoteItem = this.remoteChatCtx.get(itemId);
      if (remoteItem && remoteItem.item instanceof llm.ChatMessage) {
        remoteItem.item.content.push(this.currentGeneration.messages.get(itemId).audioTranscript);
      }
    }
    this.currentGeneration._doneFut.resolve();
    this.currentGeneration = void 0;
    const usage = _event.response.usage;
    const ttftMs = firstTokenTimestamp ? firstTokenTimestamp - createdTimestamp : -1;
    const durationMs = Date.now() - createdTimestamp;
    const realtimeMetrics = {
      type: "realtime_model_metrics",
      timestamp: createdTimestamp,
      requestId: _event.response.id || "",
      ttftMs,
      durationMs,
      cancelled: _event.response.status === "cancelled",
      label: "openai_realtime",
      inputTokens: (usage == null ? void 0 : usage.input_tokens) ?? 0,
      outputTokens: (usage == null ? void 0 : usage.output_tokens) ?? 0,
      totalTokens: (usage == null ? void 0 : usage.total_tokens) ?? 0,
      tokensPerSecond: durationMs > 0 ? ((usage == null ? void 0 : usage.output_tokens) ?? 0) / (durationMs / 1e3) : 0,
      inputTokenDetails: {
        audioTokens: ((_a = usage == null ? void 0 : usage.input_token_details) == null ? void 0 : _a.audio_tokens) ?? 0,
        textTokens: ((_b = usage == null ? void 0 : usage.input_token_details) == null ? void 0 : _b.text_tokens) ?? 0,
        imageTokens: 0,
        // Not supported yet
        cachedTokens: ((_c = usage == null ? void 0 : usage.input_token_details) == null ? void 0 : _c.cached_tokens) ?? 0,
        cachedTokensDetails: ((_d = usage == null ? void 0 : usage.input_token_details) == null ? void 0 : _d.cached_tokens_details) ? {
          audioTokens: ((_f = (_e = usage == null ? void 0 : usage.input_token_details) == null ? void 0 : _e.cached_tokens_details) == null ? void 0 : _f.audio_tokens) ?? 0,
          textTokens: ((_h = (_g = usage == null ? void 0 : usage.input_token_details) == null ? void 0 : _g.cached_tokens_details) == null ? void 0 : _h.text_tokens) ?? 0,
          imageTokens: ((_j = (_i = usage == null ? void 0 : usage.input_token_details) == null ? void 0 : _i.cached_tokens_details) == null ? void 0 : _j.image_tokens) ?? 0
        } : void 0
      },
      outputTokenDetails: {
        textTokens: ((_k = usage == null ? void 0 : usage.output_token_details) == null ? void 0 : _k.text_tokens) ?? 0,
        audioTokens: ((_l = usage == null ? void 0 : usage.output_token_details) == null ? void 0 : _l.audio_tokens) ?? 0,
        imageTokens: 0
      }
    };
    this.emit("metrics_collected", realtimeMetrics);
  }
  handleError(event) {
    if (event.error.message.startsWith("Cancellation failed")) {
      return;
    }
    this.#logger.error({ error: event.error }, "OpenAI Realtime API returned an error");
    this.emitError({
      error: new APIError(event.error.message, {
        body: event.error,
        retryable: true
      }),
      recoverable: true
    });
  }
  emitError({ error, recoverable }) {
    this.emit("error", {
      timestamp: Date.now(),
      // TODO(brian): add label
      label: "",
      error,
      recoverable
    });
  }
  *resampleAudio(frame) {
    yield frame;
  }
  createResponse({
    userInitiated,
    instructions,
    oldHandle
  }) {
    const handle = oldHandle || new CreateResponseHandle({ instructions });
    if (oldHandle && instructions) {
      handle.instructions = instructions;
    }
    const eventId = shortuuid("response_create_");
    if (userInitiated) {
      this.responseCreatedFutures[eventId] = handle;
    }
    const response = {};
    if (instructions) response.instructions = instructions;
    if (userInitiated) response.metadata = { client_event_id: eventId };
    this.sendEvent({
      type: "response.create",
      event_id: eventId,
      response: Object.keys(response).length > 0 ? response : void 0
    });
    return handle;
  }
  resolveGeneration(responseId) {
    if (!this.currentGeneration) {
      throw new Error("currentGeneration is not set");
    }
    const generation_ev = {
      messageStream: this.currentGeneration.messageChannel.stream(),
      functionStream: this.currentGeneration.functionChannel.stream(),
      userInitiated: false,
      responseId
    };
    const handle = this.responseCreatedFutures[responseId];
    if (handle) {
      delete this.responseCreatedFutures[responseId];
      generation_ev.userInitiated = true;
      if (handle.doneFut.done) {
        this.#logger.warn({ responseId }, "response received after timeout");
      } else {
        handle.doneFut.resolve(generation_ev);
      }
    }
  }
}
function livekitItemToOpenAIItem(item) {
  switch (item.type) {
    case "function_call":
      return {
        id: item.id,
        type: "function_call",
        call_id: item.callId,
        name: item.name,
        arguments: item.args
      };
    case "function_call_output":
      return {
        id: item.id,
        type: "function_call_output",
        call_id: item.callId,
        output: item.output
      };
    case "message":
      const role = item.role === "developer" ? "system" : item.role;
      const contentList = [];
      for (const c of item.content) {
        if (typeof c === "string") {
          contentList.push({
            type: role === "assistant" ? "text" : "input_text",
            text: c
          });
        } else if (c.type === "image_content") {
          continue;
        } else if (c.type === "audio_content") {
          if (role === "user") {
            const encodedAudio = Buffer.from(combineAudioFrames(c.frame).data).toString("base64");
            contentList.push({
              type: "input_audio",
              audio: encodedAudio
            });
          }
        }
      }
      return {
        id: item.id,
        type: "message",
        role,
        content: contentList
      };
    default:
      throw new Error(`Unsupported item type: ${item.type}`);
  }
}
function openAIItemToLivekitItem(item) {
  if (!item.id) {
    throw new Error("item.id is not set");
  }
  switch (item.type) {
    case "function_call":
      return llm.FunctionCall.create({
        id: item.id,
        callId: item.call_id,
        name: item.name,
        args: item.arguments
      });
    case "function_call_output":
      return llm.FunctionCallOutput.create({
        id: item.id,
        callId: item.call_id,
        output: item.output,
        isError: false
      });
    case "message":
      const content = [];
      const contents = Array.isArray(item.content) ? item.content : [item.content];
      for (const c of contents) {
        if (c.type === "text" || c.type === "input_text") {
          content.push(c.text);
        }
      }
      return llm.ChatMessage.create({
        id: item.id,
        role: item.role,
        content
      });
  }
}
function createMockAudioItem(durationSeconds = 2) {
  const audioData = Buffer.alloc(durationSeconds * SAMPLE_RATE);
  return llm.ChatMessage.create({
    id: shortuuid(MOCK_AUDIO_ID_PREFIX),
    role: "user",
    content: [
      {
        type: "audio_content",
        frame: [
          new AudioFrame(
            new Int16Array(audioData.buffer),
            SAMPLE_RATE,
            NUM_CHANNELS,
            audioData.length / 2
          )
        ]
      }
    ]
  });
}
function toOaiToolChoice(toolChoice) {
  if (typeof toolChoice === "string") {
    return toolChoice;
  }
  if ((toolChoice == null ? void 0 : toolChoice.type) === "function") {
    return toolChoice.function.name;
  }
  return "auto";
}
export {
  RealtimeModel,
  RealtimeSession
};
//# sourceMappingURL=realtime_model_beta.js.map
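
For orientation, here is a minimal usage sketch of the beta realtime model added in this release, based on the constructor defaults, `withAzure()`, `session()`, and `generateReply()` shown in the file above. The import path of the beta export, the hypothetical deployment name, and the surrounding agent wiring are assumptions, not part of this diff.

// Sketch only: assumes the beta RealtimeModel is re-exported from the plugin's
// realtime entry point and that OPENAI_API_KEY is set in the environment.
import { realtime } from '@livekit/agents-plugin-openai';

// Defaults mirror DEFAULT_REALTIME_MODEL_OPTIONS above: model "gpt-realtime",
// voice "marin", semantic_vad turn detection, 20-minute max session duration.
const model = new realtime.RealtimeModel({
  voice: 'marin',
  temperature: 0.8,
  modalities: ['text', 'audio'],
});

// Azure OpenAI variant; falls back to AZURE_OPENAI_API_KEY, AZURE_OPENAI_ENDPOINT,
// and OPENAI_API_VERSION environment variables when arguments are omitted.
const azureModel = realtime.RealtimeModel.withAzure({
  azureDeployment: 'my-realtime-deployment', // hypothetical deployment name
});

// session() starts the background task that opens the WebSocket connection;
// generateReply() resolves once the server acknowledges the response.create event.
const session = model.session();
await session.generateReply('Greet the user and ask how you can help.');
await session.close();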