@livekit/agents-plugin-openai 1.0.45 → 1.0.47

This diff compares the contents of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their respective public registries.
Files changed (46)
  1. package/README.md +1 -1
  2. package/dist/index.cjs +5 -2
  3. package/dist/index.cjs.map +1 -1
  4. package/dist/index.d.cts +1 -0
  5. package/dist/index.d.ts +1 -0
  6. package/dist/index.d.ts.map +1 -1
  7. package/dist/index.js +4 -2
  8. package/dist/index.js.map +1 -1
  9. package/dist/realtime/realtime_model.cjs +2 -1
  10. package/dist/realtime/realtime_model.cjs.map +1 -1
  11. package/dist/realtime/realtime_model.d.ts.map +1 -1
  12. package/dist/realtime/realtime_model.js +2 -1
  13. package/dist/realtime/realtime_model.js.map +1 -1
  14. package/dist/realtime/realtime_model_beta.cjs +2 -1
  15. package/dist/realtime/realtime_model_beta.cjs.map +1 -1
  16. package/dist/realtime/realtime_model_beta.d.ts.map +1 -1
  17. package/dist/realtime/realtime_model_beta.js +2 -1
  18. package/dist/realtime/realtime_model_beta.js.map +1 -1
  19. package/dist/responses/index.cjs +23 -0
  20. package/dist/responses/index.cjs.map +1 -0
  21. package/dist/responses/index.d.cts +2 -0
  22. package/dist/responses/index.d.ts +2 -0
  23. package/dist/responses/index.d.ts.map +1 -0
  24. package/dist/responses/index.js +2 -0
  25. package/dist/responses/index.js.map +1 -0
  26. package/dist/responses/llm.cjs +274 -0
  27. package/dist/responses/llm.cjs.map +1 -0
  28. package/dist/responses/llm.d.cts +61 -0
  29. package/dist/responses/llm.d.ts +61 -0
  30. package/dist/responses/llm.d.ts.map +1 -0
  31. package/dist/responses/llm.js +246 -0
  32. package/dist/responses/llm.js.map +1 -0
  33. package/dist/responses/llm.test.cjs +22 -0
  34. package/dist/responses/llm.test.cjs.map +1 -0
  35. package/dist/responses/llm.test.d.cts +2 -0
  36. package/dist/responses/llm.test.d.ts +2 -0
  37. package/dist/responses/llm.test.d.ts.map +1 -0
  38. package/dist/responses/llm.test.js +21 -0
  39. package/dist/responses/llm.test.js.map +1 -0
  40. package/package.json +5 -5
  41. package/src/index.ts +3 -2
  42. package/src/realtime/realtime_model.ts +1 -0
  43. package/src/realtime/realtime_model_beta.ts +1 -0
  44. package/src/responses/index.ts +4 -0
  45. package/src/responses/llm.test.ts +25 -0
  46. package/src/responses/llm.ts +327 -0
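
The headline change in this release is a new OpenAI Responses API LLM implementation, published as dist/responses/llm.{cjs,js} with matching type declarations and compiled from the new src/responses/llm.ts. The hunks for those generated files follow. As a quick orientation, construction looks roughly like the sketch below; the `responses` namespace import is an assumption inferred from the new src/responses/index.ts barrel and the `label()` string "openai.responses.LLM", so verify the package's actual re-exports before relying on it.

```ts
// Sketch only: constructing the Responses API LLM added in 1.0.47.
// The `responses` namespace is assumed, not confirmed by this diff.
import * as openai from '@livekit/agents-plugin-openai';

const model = new openai.responses.LLM({
  model: 'gpt-4.1', // the default in defaultLLMOptions
  // apiKey falls back to process.env.OPENAI_API_KEY; the constructor
  // throws if neither the option nor the env var is set
  temperature: 0.4,
  strictToolSchema: true, // default: tools get strict JSON schemas
});

console.log(model.label()); // "openai.responses.LLM"
```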
package/dist/responses/llm.cjs
@@ -0,0 +1,274 @@
+ "use strict";
+ var __create = Object.create;
+ var __defProp = Object.defineProperty;
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
+ var __getOwnPropNames = Object.getOwnPropertyNames;
+ var __getProtoOf = Object.getPrototypeOf;
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
+ var __export = (target, all) => {
+   for (var name in all)
+     __defProp(target, name, { get: all[name], enumerable: true });
+ };
+ var __copyProps = (to, from, except, desc) => {
+   if (from && typeof from === "object" || typeof from === "function") {
+     for (let key of __getOwnPropNames(from))
+       if (!__hasOwnProp.call(to, key) && key !== except)
+         __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
+   }
+   return to;
+ };
+ var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
+   // If the importer is in node compatibility mode or this is not an ESM
+   // file that has been converted to a CommonJS file using a Babel-
+   // compatible transform (i.e. "__esModule" has not been set), then set
+   // "default" to the CommonJS "module.exports" for node compatibility.
+   isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
+   mod
+ ));
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
+ var llm_exports = {};
+ __export(llm_exports, {
+   LLM: () => LLM,
+   LLMStream: () => LLMStream
+ });
+ module.exports = __toCommonJS(llm_exports);
+ var import_agents = require("@livekit/agents");
+ var import_openai = __toESM(require("openai"), 1);
+ const defaultLLMOptions = {
+   model: "gpt-4.1",
+   apiKey: process.env.OPENAI_API_KEY,
+   strictToolSchema: true
+ };
+ class LLM extends import_agents.llm.LLM {
+   #client;
+   #opts;
+   /**
+    * Create a new instance of OpenAI Responses LLM.
+    *
+    * @remarks
+    * `apiKey` must be set to your OpenAI API key, either using the argument or by setting the
+    * `OPENAI_API_KEY` environment variable.
+    */
+   constructor(opts = defaultLLMOptions) {
+     super();
+     this.#opts = { ...defaultLLMOptions, ...opts };
+     if (this.#opts.apiKey === void 0) {
+       throw new Error("OpenAI API key is required, whether as an argument or as $OPENAI_API_KEY");
+     }
+     this.#client = this.#opts.client || new import_openai.default({
+       baseURL: this.#opts.baseURL,
+       apiKey: this.#opts.apiKey
+     });
+   }
+   label() {
+     return "openai.responses.LLM";
+   }
+   get model() {
+     return this.#opts.model;
+   }
+   chat({
+     chatCtx,
+     toolCtx,
+     connOptions = import_agents.DEFAULT_API_CONNECT_OPTIONS,
+     parallelToolCalls,
+     toolChoice,
+     extraKwargs
+   }) {
+     const modelOptions = { ...extraKwargs || {} };
+     parallelToolCalls = parallelToolCalls !== void 0 ? parallelToolCalls : this.#opts.parallelToolCalls;
+     if (toolCtx && Object.keys(toolCtx).length > 0 && parallelToolCalls !== void 0) {
+       modelOptions.parallel_tool_calls = parallelToolCalls;
+     }
+     toolChoice = toolChoice !== void 0 ? toolChoice : this.#opts.toolChoice;
+     if (toolChoice) {
+       modelOptions.tool_choice = toolChoice;
+     }
+     if (this.#opts.temperature !== void 0) {
+       modelOptions.temperature = this.#opts.temperature;
+     }
+     if (this.#opts.store !== void 0) {
+       modelOptions.store = this.#opts.store;
+     }
+     if (this.#opts.metadata) {
+       modelOptions.metadata = this.#opts.metadata;
+     }
+     return new LLMStream(this, {
+       model: this.#opts.model,
+       client: this.#client,
+       chatCtx,
+       toolCtx,
+       connOptions,
+       modelOptions,
+       strictToolSchema: this.#opts.strictToolSchema ?? true
+     });
+   }
+ }
+ class LLMStream extends import_agents.llm.LLMStream {
+   model;
+   client;
+   modelOptions;
+   strictToolSchema;
+   responseId;
+   constructor(llm2, {
+     model,
+     client,
+     chatCtx,
+     toolCtx,
+     connOptions,
+     modelOptions,
+     strictToolSchema
+   }) {
+     super(llm2, { chatCtx, toolCtx, connOptions });
+     this.model = model;
+     this.client = client;
+     this.modelOptions = modelOptions;
+     this.strictToolSchema = strictToolSchema;
+     this.responseId = "";
+   }
+   async run() {
+     let retryable = true;
+     try {
+       const messages = await this.chatCtx.toProviderFormat(
+         "openai.responses"
+       );
+       const tools = this.toolCtx ? Object.entries(this.toolCtx).map(([name, func]) => {
+         const oaiParams = {
+           type: "function",
+           name,
+           description: func.description,
+           parameters: import_agents.llm.toJsonSchema(
+             func.parameters,
+             true,
+             this.strictToolSchema
+           )
+         };
+         if (this.strictToolSchema) {
+           oaiParams.strict = true;
+         }
+         return oaiParams;
+       }) : void 0;
+       const requestOptions = { ...this.modelOptions };
+       if (!tools) {
+         delete requestOptions.tool_choice;
+       }
+       const stream = await this.client.responses.create(
+         {
+           model: this.model,
+           input: messages,
+           tools,
+           stream: true,
+           ...requestOptions
+         },
+         {
+           timeout: this.connOptions.timeoutMs
+         }
+       );
+       for await (const event of stream) {
+         retryable = false;
+         let chunk;
+         switch (event.type) {
+           case "error":
+             this.handleError(event);
+             break;
+           case "response.created":
+             this.handleResponseCreated(event);
+             break;
+           case "response.output_item.done":
+             chunk = this.handleResponseOutputItemDone(event);
+             break;
+           case "response.output_text.delta":
+             chunk = this.handleResponseOutputTextDelta(event);
+             break;
+           case "response.completed":
+             chunk = this.handleResponseCompleted(event);
+             break;
+         }
+         if (chunk) {
+           this.queue.put(chunk);
+         }
+       }
+     } catch (error) {
+       if (error instanceof import_agents.APIStatusError || error instanceof import_agents.APITimeoutError || error instanceof import_agents.APIConnectionError) {
+         throw error;
+       } else if (error instanceof import_openai.default.APIConnectionTimeoutError) {
+         throw new import_agents.APITimeoutError({ options: { retryable } });
+       } else if (error instanceof import_openai.default.APIError) {
+         throw new import_agents.APIStatusError({
+           message: error.message,
+           options: {
+             statusCode: error.status,
+             body: error.error,
+             requestId: error.requestID,
+             retryable
+           }
+         });
+       } else {
+         throw new import_agents.APIConnectionError({
+           message: (0, import_agents.toError)(error).message,
+           options: { retryable }
+         });
+       }
+     }
+   }
+   handleError(event) {
+     throw new import_agents.APIStatusError({
+       message: event.message,
+       options: {
+         statusCode: -1,
+         retryable: false
+       }
+     });
+   }
+   handleResponseCreated(event) {
+     this.responseId = event.response.id;
+   }
+   handleResponseOutputItemDone(event) {
+     let chunk;
+     if (event.item.type === "function_call") {
+       chunk = {
+         id: this.responseId,
+         delta: {
+           role: "assistant",
+           content: void 0,
+           toolCalls: [
+             import_agents.llm.FunctionCall.create({
+               callId: event.item.call_id || "",
+               name: event.item.name,
+               args: event.item.arguments
+             })
+           ]
+         }
+       };
+     }
+     return chunk;
+   }
+   handleResponseOutputTextDelta(event) {
+     return {
+       id: this.responseId,
+       delta: {
+         role: "assistant",
+         content: event.delta
+       }
+     };
+   }
+   handleResponseCompleted(event) {
+     if (event.response.usage) {
+       return {
+         id: this.responseId,
+         usage: {
+           completionTokens: event.response.usage.output_tokens,
+           promptTokens: event.response.usage.input_tokens,
+           promptCachedTokens: event.response.usage.input_tokens_details.cached_tokens,
+           totalTokens: event.response.usage.total_tokens
+         }
+       };
+     }
+     return void 0;
+   }
+ }
+ // Annotate the CommonJS export names for ESM import in node:
+ 0 && (module.exports = {
+   LLM,
+   LLMStream
+ });
+ //# sourceMappingURL=llm.cjs.map
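
As the compiled source above shows, run() converts the chat context with chatCtx.toProviderFormat('openai.responses'), opens a streaming client.responses.create() call, and translates stream events into ChatChunks; retryable flips to false once the first event arrives. Driving the stream directly, reusing the `model` instance from the earlier sketch, might look like the following; that the base llm.LLMStream is async-iterable and that ChatContext exposes addMessage() are assumptions about @livekit/agents, not facts shown in this diff.

```ts
// Sketch, assuming llm.LLMStream is async-iterable and ChatContext
// has addMessage() — neither is confirmed by this diff.
import { llm } from '@livekit/agents';

const chatCtx = new llm.ChatContext();
chatCtx.addMessage({ role: 'user', content: 'Say hello in one sentence.' });

const stream = model.chat({ chatCtx });
for await (const chunk of stream) {
  // text arrives via "response.output_text.delta" events (see the switch above)
  if (chunk.delta?.content) process.stdout.write(chunk.delta.content);
}
```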
package/dist/responses/llm.cjs.map
@@ -0,0 +1 @@
+ {"version":3,"sources":["../../src/responses/llm.ts"],"sourcesContent":["// SPDX-FileCopyrightText: 2025 LiveKit, Inc.\n//\n// SPDX-License-Identifier: Apache-2.0\nimport type { APIConnectOptions } from '@livekit/agents';\nimport {\n APIConnectionError,\n APIStatusError,\n APITimeoutError,\n DEFAULT_API_CONNECT_OPTIONS,\n llm,\n toError,\n} from '@livekit/agents';\nimport OpenAI from 'openai';\nimport type { ChatModels } from '../models.js';\n\ninterface LLMOptions {\n model: ChatModels;\n apiKey?: string;\n baseURL?: string;\n client?: OpenAI;\n temperature?: number;\n parallelToolCalls?: boolean;\n toolChoice?: llm.ToolChoice;\n store?: boolean;\n metadata?: Record<string, string>;\n strictToolSchema?: boolean;\n}\n\nconst defaultLLMOptions: LLMOptions = {\n model: 'gpt-4.1',\n apiKey: process.env.OPENAI_API_KEY,\n strictToolSchema: true,\n};\n\nexport class LLM extends llm.LLM {\n #client: OpenAI;\n #opts: LLMOptions;\n\n /**\n * Create a new instance of OpenAI Responses LLM.\n *\n * @remarks\n * `apiKey` must be set to your OpenAI API key, either using the argument or by setting the\n * `OPENAI_API_KEY` environment variable.\n */\n constructor(opts: Partial<LLMOptions> = defaultLLMOptions) {\n super();\n\n this.#opts = { ...defaultLLMOptions, ...opts };\n if (this.#opts.apiKey === undefined) {\n throw new Error('OpenAI API key is required, whether as an argument or as $OPENAI_API_KEY');\n }\n\n this.#client =\n this.#opts.client ||\n new OpenAI({\n baseURL: this.#opts.baseURL,\n apiKey: this.#opts.apiKey,\n });\n }\n\n label(): string {\n return 'openai.responses.LLM';\n }\n\n get model(): string {\n return this.#opts.model;\n }\n\n chat({\n chatCtx,\n toolCtx,\n connOptions = DEFAULT_API_CONNECT_OPTIONS,\n parallelToolCalls,\n toolChoice,\n extraKwargs,\n }: {\n chatCtx: llm.ChatContext;\n toolCtx?: llm.ToolContext;\n connOptions?: APIConnectOptions;\n parallelToolCalls?: boolean;\n toolChoice?: llm.ToolChoice;\n extraKwargs?: Record<string, unknown>;\n }): LLMStream {\n const modelOptions: Record<string, unknown> = { ...(extraKwargs || {}) };\n\n parallelToolCalls =\n parallelToolCalls !== undefined ? parallelToolCalls : this.#opts.parallelToolCalls;\n\n if (toolCtx && Object.keys(toolCtx).length > 0 && parallelToolCalls !== undefined) {\n modelOptions.parallel_tool_calls = parallelToolCalls;\n }\n\n toolChoice =\n toolChoice !== undefined ? toolChoice : (this.#opts.toolChoice as llm.ToolChoice | undefined);\n\n if (toolChoice) {\n modelOptions.tool_choice = toolChoice;\n }\n\n if (this.#opts.temperature !== undefined) {\n modelOptions.temperature = this.#opts.temperature;\n }\n\n if (this.#opts.store !== undefined) {\n modelOptions.store = this.#opts.store;\n }\n\n if (this.#opts.metadata) {\n modelOptions.metadata = this.#opts.metadata;\n }\n\n return new LLMStream(this, {\n model: this.#opts.model,\n client: this.#client,\n chatCtx,\n toolCtx,\n connOptions,\n modelOptions,\n strictToolSchema: this.#opts.strictToolSchema ?? true,\n });\n }\n}\n\nexport class LLMStream extends llm.LLMStream {\n private model: string | ChatModels;\n private client: OpenAI;\n private modelOptions: Record<string, unknown>;\n private strictToolSchema: boolean;\n private responseId: string;\n\n constructor(\n llm: LLM,\n {\n model,\n client,\n chatCtx,\n toolCtx,\n connOptions,\n modelOptions,\n strictToolSchema,\n }: {\n model: ChatModels;\n client: OpenAI;\n chatCtx: llm.ChatContext;\n toolCtx?: llm.ToolContext;\n connOptions: APIConnectOptions;\n modelOptions: Record<string, unknown>;\n strictToolSchema: boolean;\n },\n ) {\n super(llm, { chatCtx, toolCtx, connOptions });\n this.model = model;\n this.client = client;\n this.modelOptions = modelOptions;\n this.strictToolSchema = strictToolSchema;\n this.responseId = '';\n }\n\n protected async run(): Promise<void> {\n let retryable = true;\n\n try {\n const messages = (await this.chatCtx.toProviderFormat(\n 'openai.responses',\n )) as OpenAI.Responses.ResponseInputItem[];\n\n const tools = this.toolCtx\n ? Object.entries(this.toolCtx).map(([name, func]) => {\n const oaiParams = {\n type: 'function' as const,\n name: name,\n description: func.description,\n parameters: llm.toJsonSchema(\n func.parameters,\n true,\n this.strictToolSchema,\n ) as unknown as OpenAI.Responses.FunctionTool['parameters'],\n } as OpenAI.Responses.FunctionTool;\n\n if (this.strictToolSchema) {\n oaiParams.strict = true;\n }\n\n return oaiParams;\n })\n : undefined;\n\n const requestOptions: Record<string, unknown> = { ...this.modelOptions };\n if (!tools) {\n delete requestOptions.tool_choice;\n }\n\n const stream = await this.client.responses.create(\n {\n model: this.model,\n input: messages,\n tools: tools,\n stream: true,\n ...requestOptions,\n },\n {\n timeout: this.connOptions.timeoutMs,\n },\n );\n\n for await (const event of stream) {\n retryable = false;\n let chunk: llm.ChatChunk | undefined;\n\n switch (event.type) {\n case 'error':\n this.handleError(event);\n break;\n case 'response.created':\n this.handleResponseCreated(event);\n break;\n case 'response.output_item.done':\n chunk = this.handleResponseOutputItemDone(event);\n break;\n case 'response.output_text.delta':\n chunk = this.handleResponseOutputTextDelta(event);\n break;\n case 'response.completed':\n chunk = this.handleResponseCompleted(event);\n break;\n }\n\n if (chunk) {\n this.queue.put(chunk);\n }\n }\n } catch (error) {\n if (\n error instanceof APIStatusError ||\n error instanceof APITimeoutError ||\n error instanceof APIConnectionError\n ) {\n throw error;\n } else if (error instanceof OpenAI.APIConnectionTimeoutError) {\n throw new APITimeoutError({ options: { retryable } });\n } else if (error instanceof OpenAI.APIError) {\n throw new APIStatusError({\n message: error.message,\n options: {\n statusCode: error.status,\n body: error.error,\n requestId: error.requestID,\n retryable,\n },\n });\n } else {\n throw new APIConnectionError({\n message: toError(error).message,\n options: { retryable },\n });\n }\n }\n }\n\n private handleError(event: OpenAI.Responses.ResponseErrorEvent): void {\n throw new APIStatusError({\n message: event.message,\n options: {\n statusCode: -1,\n retryable: false,\n },\n });\n }\n\n private handleResponseCreated(event: OpenAI.Responses.ResponseCreatedEvent): void {\n this.responseId = event.response.id;\n }\n\n private handleResponseOutputItemDone(\n event: OpenAI.Responses.ResponseOutputItemDoneEvent,\n ): llm.ChatChunk | undefined {\n let chunk: llm.ChatChunk | undefined;\n\n if (event.item.type === 'function_call') {\n chunk = {\n id: this.responseId,\n delta: {\n role: 'assistant',\n content: undefined,\n toolCalls: [\n llm.FunctionCall.create({\n callId: event.item.call_id || '',\n name: event.item.name,\n args: event.item.arguments,\n }),\n ],\n },\n };\n }\n return chunk;\n }\n\n private handleResponseOutputTextDelta(\n event: OpenAI.Responses.ResponseTextDeltaEvent,\n ): llm.ChatChunk {\n return {\n id: this.responseId,\n delta: {\n role: 'assistant',\n content: event.delta,\n },\n };\n }\n\n private handleResponseCompleted(\n event: OpenAI.Responses.ResponseCompletedEvent,\n ): llm.ChatChunk | undefined {\n if (event.response.usage) {\n return {\n id: this.responseId,\n usage: {\n completionTokens: event.response.usage.output_tokens,\n promptTokens: event.response.usage.input_tokens,\n promptCachedTokens: event.response.usage.input_tokens_details.cached_tokens,\n totalTokens: event.response.usage.total_tokens,\n },\n };\n }\n return undefined;\n }\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAIA,oBAOO;AACP,oBAAmB;AAgBnB,MAAM,oBAAgC;AAAA,EACpC,OAAO;AAAA,EACP,QAAQ,QAAQ,IAAI;AAAA,EACpB,kBAAkB;AACpB;AAEO,MAAM,YAAY,kBAAI,IAAI;AAAA,EAC/B;AAAA,EACA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,YAAY,OAA4B,mBAAmB;AACzD,UAAM;AAEN,SAAK,QAAQ,EAAE,GAAG,mBAAmB,GAAG,KAAK;AAC7C,QAAI,KAAK,MAAM,WAAW,QAAW;AACnC,YAAM,IAAI,MAAM,0EAA0E;AAAA,IAC5F;AAEA,SAAK,UACH,KAAK,MAAM,UACX,IAAI,cAAAA,QAAO;AAAA,MACT,SAAS,KAAK,MAAM;AAAA,MACpB,QAAQ,KAAK,MAAM;AAAA,IACrB,CAAC;AAAA,EACL;AAAA,EAEA,QAAgB;AACd,WAAO;AAAA,EACT;AAAA,EAEA,IAAI,QAAgB;AAClB,WAAO,KAAK,MAAM;AAAA,EACpB;AAAA,EAEA,KAAK;AAAA,IACH;AAAA,IACA;AAAA,IACA,cAAc;AAAA,IACd;AAAA,IACA;AAAA,IACA;AAAA,EACF,GAOc;AACZ,UAAM,eAAwC,EAAE,GAAI,eAAe,CAAC,EAAG;AAEvE,wBACE,sBAAsB,SAAY,oBAAoB,KAAK,MAAM;AAEnE,QAAI,WAAW,OAAO,KAAK,OAAO,EAAE,SAAS,KAAK,sBAAsB,QAAW;AACjF,mBAAa,sBAAsB;AAAA,IACrC;AAEA,iBACE,eAAe,SAAY,aAAc,KAAK,MAAM;AAEtD,QAAI,YAAY;AACd,mBAAa,cAAc;AAAA,IAC7B;AAEA,QAAI,KAAK,MAAM,gBAAgB,QAAW;AACxC,mBAAa,cAAc,KAAK,MAAM;AAAA,IACxC;AAEA,QAAI,KAAK,MAAM,UAAU,QAAW;AAClC,mBAAa,QAAQ,KAAK,MAAM;AAAA,IAClC;AAEA,QAAI,KAAK,MAAM,UAAU;AACvB,mBAAa,WAAW,KAAK,MAAM;AAAA,IACrC;AAEA,WAAO,IAAI,UAAU,MAAM;AAAA,MACzB,OAAO,KAAK,MAAM;AAAA,MAClB,QAAQ,KAAK;AAAA,MACb;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA,kBAAkB,KAAK,MAAM,oBAAoB;AAAA,IACnD,CAAC;AAAA,EACH;AACF;AAEO,MAAM,kBAAkB,kBAAI,UAAU;AAAA,EACnC;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EAER,YACEC,MACA;AAAA,IACE;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF,GASA;AACA,UAAMA,MAAK,EAAE,SAAS,SAAS,YAAY,CAAC;AAC5C,SAAK,QAAQ;AACb,SAAK,SAAS;AACd,SAAK,eAAe;AACpB,SAAK,mBAAmB;AACxB,SAAK,aAAa;AAAA,EACpB;AAAA,EAEA,MAAgB,MAAqB;AACnC,QAAI,YAAY;AAEhB,QAAI;AACF,YAAM,WAAY,MAAM,KAAK,QAAQ;AAAA,QACnC;AAAA,MACF;AAEA,YAAM,QAAQ,KAAK,UACf,OAAO,QAAQ,KAAK,OAAO,EAAE,IAAI,CAAC,CAAC,MAAM,IAAI,MAAM;AACjD,cAAM,YAAY;AAAA,UAChB,MAAM;AAAA,UACN;AAAA,UACA,aAAa,KAAK;AAAA,UAClB,YAAY,kBAAI;AAAA,YACd,KAAK;AAAA,YACL;AAAA,YACA,KAAK;AAAA,UACP;AAAA,QACF;AAEA,YAAI,KAAK,kBAAkB;AACzB,oBAAU,SAAS;AAAA,QACrB;AAEA,eAAO;AAAA,MACT,CAAC,IACD;AAEJ,YAAM,iBAA0C,EAAE,GAAG,KAAK,aAAa;AACvE,UAAI,CAAC,OAAO;AACV,eAAO,eAAe;AAAA,MACxB;AAEA,YAAM,SAAS,MAAM,KAAK,OAAO,UAAU;AAAA,QACzC;AAAA,UACE,OAAO,KAAK;AAAA,UACZ,OAAO;AAAA,UACP;AAAA,UACA,QAAQ;AAAA,UACR,GAAG;AAAA,QACL;AAAA,QACA;AAAA,UACE,SAAS,KAAK,YAAY;AAAA,QAC5B;AAAA,MACF;AAEA,uBAAiB,SAAS,QAAQ;AAChC,oBAAY;AACZ,YAAI;AAEJ,gBAAQ,MAAM,MAAM;AAAA,UAClB,KAAK;AACH,iBAAK,YAAY,KAAK;AACtB;AAAA,UACF,KAAK;AACH,iBAAK,sBAAsB,KAAK;AAChC;AAAA,UACF,KAAK;AACH,oBAAQ,KAAK,6BAA6B,KAAK;AAC/C;AAAA,UACF,KAAK;AACH,oBAAQ,KAAK,8BAA8B,KAAK;AAChD;AAAA,UACF,KAAK;AACH,oBAAQ,KAAK,wBAAwB,KAAK;AAC1C;AAAA,QACJ;AAEA,YAAI,OAAO;AACT,eAAK,MAAM,IAAI,KAAK;AAAA,QACtB;AAAA,MACF;AAAA,IACF,SAAS,OAAO;AACd,UACE,iBAAiB,gCACjB,iBAAiB,iCACjB,iBAAiB,kCACjB;AACA,cAAM;AAAA,MACR,WAAW,iBAAiB,cAAAD,QAAO,2BAA2B;AAC5D,cAAM,IAAI,8BAAgB,EAAE,SAAS,EAAE,UAAU,EAAE,CAAC;AAAA,MACtD,WAAW,iBAAiB,cAAAA,QAAO,UAAU;AAC3C,cAAM,IAAI,6BAAe;AAAA,UACvB,SAAS,MAAM;AAAA,UACf,SAAS;AAAA,YACP,YAAY,MAAM;AAAA,YAClB,MAAM,MAAM;AAAA,YACZ,WAAW,MAAM;AAAA,YACjB;AAAA,UACF;AAAA,QACF,CAAC;AAAA,MACH,OAAO;AACL,cAAM,IAAI,iCAAmB;AAAA,UAC3B,aAAS,uBAAQ,KAAK,EAAE;AAAA,UACxB,SAAS,EAAE,UAAU;AAAA,QACvB,CAAC;AAAA,MACH;AAAA,IACF;AAAA,EACF;AAAA,EAEQ,YAAY,OAAkD;AACpE,UAAM,IAAI,6BAAe;AAAA,MACvB,SAAS,MAAM;AAAA,MACf,SAAS;AAAA,QACP,YAAY;AAAA,QACZ,WAAW;AAAA,MACb;AAAA,IACF,CAAC;AAAA,EACH;AAAA,EAEQ,sBAAsB,OAAoD;AAChF,SAAK,aAAa,MAAM,SAAS;AAAA,EACnC;AAAA,EAEQ,6BACN,OAC2B;AAC3B,QAAI;AAEJ,QAAI,MAAM,KAAK,SAAS,iBAAiB;AACvC,cAAQ;AAAA,QACN,IAAI,KAAK;AAAA,QACT,OAAO;AAAA,UACL,MAAM;AAAA,UACN,SAAS;AAAA,UACT,WAAW;AAAA,YACT,kBAAI,aAAa,OAAO;AAAA,cACtB,QAAQ,MAAM,KAAK,WAAW;AAAA,cAC9B,MAAM,MAAM,KAAK;AAAA,cACjB,MAAM,MAAM,KAAK;AAAA,YACnB,CAAC;AAAA,UACH;AAAA,QACF;AAAA,MACF;AAAA,IACF;AACA,WAAO;AAAA,EACT;AAAA,EAEQ,8BACN,OACe;AACf,WAAO;AAAA,MACL,IAAI,KAAK;AAAA,MACT,OAAO;AAAA,QACL,MAAM;AAAA,QACN,SAAS,MAAM;AAAA,MACjB;AAAA,IACF;AAAA,EACF;AAAA,EAEQ,wBACN,OAC2B;AAC3B,QAAI,MAAM,SAAS,OAAO;AACxB,aAAO;AAAA,QACL,IAAI,KAAK;AAAA,QACT,OAAO;AAAA,UACL,kBAAkB,MAAM,SAAS,MAAM;AAAA,UACvC,cAAc,MAAM,SAAS,MAAM;AAAA,UACnC,oBAAoB,MAAM,SAAS,MAAM,qBAAqB;AAAA,UAC9D,aAAa,MAAM,SAAS,MAAM;AAAA,QACpC;AAAA,MACF;AAAA,IACF;AACA,WAAO;AAAA,EACT;AACF;","names":["OpenAI","llm"]}
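
The embedded sourcesContent above is the original TypeScript for llm.ts and shows how tools are serialized: each ToolContext entry becomes an OpenAI.Responses.FunctionTool whose parameters come from llm.toJsonSchema(func.parameters, true, strictToolSchema), with `strict: true` added when strict schemas are enabled. Continuing the earlier sketches, a tool context might be wired up as below; the llm.tool() helper and zod-based parameters are assumptions about the @livekit/agents API, and only the ToolContext shape is confirmed by this diff.

```ts
// Sketch: defining a tool context. llm.tool() and zod parameters are
// assumed conventions from @livekit/agents, not confirmed by this diff.
import { llm } from '@livekit/agents';
import { z } from 'zod';

const toolCtx: llm.ToolContext = {
  getWeather: llm.tool({
    description: 'Look up the current weather for a city',
    parameters: z.object({ city: z.string() }),
    execute: async ({ city }) => `It is sunny in ${city}.`,
  }),
};

// With strictToolSchema left at its default, each tool's JSON schema is
// sent with `strict: true`, as the compiled code above shows.
const stream = model.chat({ chatCtx, toolCtx });
```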
package/dist/responses/llm.d.cts
@@ -0,0 +1,61 @@
+ import type { APIConnectOptions } from '@livekit/agents';
+ import { llm } from '@livekit/agents';
+ import OpenAI from 'openai';
+ import type { ChatModels } from '../models.js';
+ interface LLMOptions {
+     model: ChatModels;
+     apiKey?: string;
+     baseURL?: string;
+     client?: OpenAI;
+     temperature?: number;
+     parallelToolCalls?: boolean;
+     toolChoice?: llm.ToolChoice;
+     store?: boolean;
+     metadata?: Record<string, string>;
+     strictToolSchema?: boolean;
+ }
+ export declare class LLM extends llm.LLM {
+     #private;
+     /**
+      * Create a new instance of OpenAI Responses LLM.
+      *
+      * @remarks
+      * `apiKey` must be set to your OpenAI API key, either using the argument or by setting the
+      * `OPENAI_API_KEY` environment variable.
+      */
+     constructor(opts?: Partial<LLMOptions>);
+     label(): string;
+     get model(): string;
+     chat({ chatCtx, toolCtx, connOptions, parallelToolCalls, toolChoice, extraKwargs, }: {
+         chatCtx: llm.ChatContext;
+         toolCtx?: llm.ToolContext;
+         connOptions?: APIConnectOptions;
+         parallelToolCalls?: boolean;
+         toolChoice?: llm.ToolChoice;
+         extraKwargs?: Record<string, unknown>;
+     }): LLMStream;
+ }
+ export declare class LLMStream extends llm.LLMStream {
+     private model;
+     private client;
+     private modelOptions;
+     private strictToolSchema;
+     private responseId;
+     constructor(llm: LLM, { model, client, chatCtx, toolCtx, connOptions, modelOptions, strictToolSchema, }: {
+         model: ChatModels;
+         client: OpenAI;
+         chatCtx: llm.ChatContext;
+         toolCtx?: llm.ToolContext;
+         connOptions: APIConnectOptions;
+         modelOptions: Record<string, unknown>;
+         strictToolSchema: boolean;
+     });
+     protected run(): Promise<void>;
+     private handleError;
+     private handleResponseCreated;
+     private handleResponseOutputItemDone;
+     private handleResponseOutputTextDelta;
+     private handleResponseCompleted;
+ }
+ export {};
+ //# sourceMappingURL=llm.d.ts.map
package/dist/responses/llm.d.ts
@@ -0,0 +1,61 @@
+ import type { APIConnectOptions } from '@livekit/agents';
+ import { llm } from '@livekit/agents';
+ import OpenAI from 'openai';
+ import type { ChatModels } from '../models.js';
+ interface LLMOptions {
+     model: ChatModels;
+     apiKey?: string;
+     baseURL?: string;
+     client?: OpenAI;
+     temperature?: number;
+     parallelToolCalls?: boolean;
+     toolChoice?: llm.ToolChoice;
+     store?: boolean;
+     metadata?: Record<string, string>;
+     strictToolSchema?: boolean;
+ }
+ export declare class LLM extends llm.LLM {
+     #private;
+     /**
+      * Create a new instance of OpenAI Responses LLM.
+      *
+      * @remarks
+      * `apiKey` must be set to your OpenAI API key, either using the argument or by setting the
+      * `OPENAI_API_KEY` environment variable.
+      */
+     constructor(opts?: Partial<LLMOptions>);
+     label(): string;
+     get model(): string;
+     chat({ chatCtx, toolCtx, connOptions, parallelToolCalls, toolChoice, extraKwargs, }: {
+         chatCtx: llm.ChatContext;
+         toolCtx?: llm.ToolContext;
+         connOptions?: APIConnectOptions;
+         parallelToolCalls?: boolean;
+         toolChoice?: llm.ToolChoice;
+         extraKwargs?: Record<string, unknown>;
+     }): LLMStream;
+ }
+ export declare class LLMStream extends llm.LLMStream {
+     private model;
+     private client;
+     private modelOptions;
+     private strictToolSchema;
+     private responseId;
+     constructor(llm: LLM, { model, client, chatCtx, toolCtx, connOptions, modelOptions, strictToolSchema, }: {
+         model: ChatModels;
+         client: OpenAI;
+         chatCtx: llm.ChatContext;
+         toolCtx?: llm.ToolContext;
+         connOptions: APIConnectOptions;
+         modelOptions: Record<string, unknown>;
+         strictToolSchema: boolean;
+     });
+     protected run(): Promise<void>;
+     private handleError;
+     private handleResponseCreated;
+     private handleResponseOutputItemDone;
+     private handleResponseOutputTextDelta;
+     private handleResponseCompleted;
+ }
+ export {};
+ //# sourceMappingURL=llm.d.ts.map
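
The declarations show the knobs chat() accepts beyond the chat context: parallelToolCalls, toolChoice, and an extraKwargs escape hatch. Per the compiled code, extraKwargs seeds modelOptions and is spread into the request body, so snake_case Responses API parameters pass through verbatim, as in this sketch (max_output_tokens is a real Responses API field; the surrounding values are illustrative):

```ts
// extraKwargs entries are forwarded verbatim into the
// client.responses.create() request body.
const stream = model.chat({
  chatCtx,
  toolCtx,
  toolChoice: 'auto',
  parallelToolCalls: false, // only sent when toolCtx is non-empty
  extraKwargs: { max_output_tokens: 512 },
});
```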
package/dist/responses/llm.d.ts.map
@@ -0,0 +1 @@
+ {"version":3,"file":"llm.d.ts","sourceRoot":"","sources":["../../src/responses/llm.ts"],"names":[],"mappings":"AAGA,OAAO,KAAK,EAAE,iBAAiB,EAAE,MAAM,iBAAiB,CAAC;AACzD,OAAO,EAKL,GAAG,EAEJ,MAAM,iBAAiB,CAAC;AACzB,OAAO,MAAM,MAAM,QAAQ,CAAC;AAC5B,OAAO,KAAK,EAAE,UAAU,EAAE,MAAM,cAAc,CAAC;AAE/C,UAAU,UAAU;IAClB,KAAK,EAAE,UAAU,CAAC;IAClB,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,iBAAiB,CAAC,EAAE,OAAO,CAAC;IAC5B,UAAU,CAAC,EAAE,GAAG,CAAC,UAAU,CAAC;IAC5B,KAAK,CAAC,EAAE,OAAO,CAAC;IAChB,QAAQ,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;IAClC,gBAAgB,CAAC,EAAE,OAAO,CAAC;CAC5B;AAQD,qBAAa,GAAI,SAAQ,GAAG,CAAC,GAAG;;IAI9B;;;;;;OAMG;gBACS,IAAI,GAAE,OAAO,CAAC,UAAU,CAAqB;IAgBzD,KAAK,IAAI,MAAM;IAIf,IAAI,KAAK,IAAI,MAAM,CAElB;IAED,IAAI,CAAC,EACH,OAAO,EACP,OAAO,EACP,WAAyC,EACzC,iBAAiB,EACjB,UAAU,EACV,WAAW,GACZ,EAAE;QACD,OAAO,EAAE,GAAG,CAAC,WAAW,CAAC;QACzB,OAAO,CAAC,EAAE,GAAG,CAAC,WAAW,CAAC;QAC1B,WAAW,CAAC,EAAE,iBAAiB,CAAC;QAChC,iBAAiB,CAAC,EAAE,OAAO,CAAC;QAC5B,UAAU,CAAC,EAAE,GAAG,CAAC,UAAU,CAAC;QAC5B,WAAW,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;KACvC,GAAG,SAAS;CAuCd;AAED,qBAAa,SAAU,SAAQ,GAAG,CAAC,SAAS;IAC1C,OAAO,CAAC,KAAK,CAAsB;IACnC,OAAO,CAAC,MAAM,CAAS;IACvB,OAAO,CAAC,YAAY,CAA0B;IAC9C,OAAO,CAAC,gBAAgB,CAAU;IAClC,OAAO,CAAC,UAAU,CAAS;gBAGzB,GAAG,EAAE,GAAG,EACR,EACE,KAAK,EACL,MAAM,EACN,OAAO,EACP,OAAO,EACP,WAAW,EACX,YAAY,EACZ,gBAAgB,GACjB,EAAE;QACD,KAAK,EAAE,UAAU,CAAC;QAClB,MAAM,EAAE,MAAM,CAAC;QACf,OAAO,EAAE,GAAG,CAAC,WAAW,CAAC;QACzB,OAAO,CAAC,EAAE,GAAG,CAAC,WAAW,CAAC;QAC1B,WAAW,EAAE,iBAAiB,CAAC;QAC/B,YAAY,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;QACtC,gBAAgB,EAAE,OAAO,CAAC;KAC3B;cAUa,GAAG,IAAI,OAAO,CAAC,IAAI,CAAC;IAqGpC,OAAO,CAAC,WAAW;IAUnB,OAAO,CAAC,qBAAqB;IAI7B,OAAO,CAAC,4BAA4B;IAwBpC,OAAO,CAAC,6BAA6B;IAYrC,OAAO,CAAC,uBAAuB;CAgBhC"}
package/dist/responses/llm.js
@@ -0,0 +1,246 @@
+ import {
+   APIConnectionError,
+   APIStatusError,
+   APITimeoutError,
+   DEFAULT_API_CONNECT_OPTIONS,
+   llm,
+   toError
+ } from "@livekit/agents";
+ import OpenAI from "openai";
+ const defaultLLMOptions = {
+   model: "gpt-4.1",
+   apiKey: process.env.OPENAI_API_KEY,
+   strictToolSchema: true
+ };
+ class LLM extends llm.LLM {
+   #client;
+   #opts;
+   /**
+    * Create a new instance of OpenAI Responses LLM.
+    *
+    * @remarks
+    * `apiKey` must be set to your OpenAI API key, either using the argument or by setting the
+    * `OPENAI_API_KEY` environment variable.
+    */
+   constructor(opts = defaultLLMOptions) {
+     super();
+     this.#opts = { ...defaultLLMOptions, ...opts };
+     if (this.#opts.apiKey === void 0) {
+       throw new Error("OpenAI API key is required, whether as an argument or as $OPENAI_API_KEY");
+     }
+     this.#client = this.#opts.client || new OpenAI({
+       baseURL: this.#opts.baseURL,
+       apiKey: this.#opts.apiKey
+     });
+   }
+   label() {
+     return "openai.responses.LLM";
+   }
+   get model() {
+     return this.#opts.model;
+   }
+   chat({
+     chatCtx,
+     toolCtx,
+     connOptions = DEFAULT_API_CONNECT_OPTIONS,
+     parallelToolCalls,
+     toolChoice,
+     extraKwargs
+   }) {
+     const modelOptions = { ...extraKwargs || {} };
+     parallelToolCalls = parallelToolCalls !== void 0 ? parallelToolCalls : this.#opts.parallelToolCalls;
+     if (toolCtx && Object.keys(toolCtx).length > 0 && parallelToolCalls !== void 0) {
+       modelOptions.parallel_tool_calls = parallelToolCalls;
+     }
+     toolChoice = toolChoice !== void 0 ? toolChoice : this.#opts.toolChoice;
+     if (toolChoice) {
+       modelOptions.tool_choice = toolChoice;
+     }
+     if (this.#opts.temperature !== void 0) {
+       modelOptions.temperature = this.#opts.temperature;
+     }
+     if (this.#opts.store !== void 0) {
+       modelOptions.store = this.#opts.store;
+     }
+     if (this.#opts.metadata) {
+       modelOptions.metadata = this.#opts.metadata;
+     }
+     return new LLMStream(this, {
+       model: this.#opts.model,
+       client: this.#client,
+       chatCtx,
+       toolCtx,
+       connOptions,
+       modelOptions,
+       strictToolSchema: this.#opts.strictToolSchema ?? true
+     });
+   }
+ }
+ class LLMStream extends llm.LLMStream {
+   model;
+   client;
+   modelOptions;
+   strictToolSchema;
+   responseId;
+   constructor(llm2, {
+     model,
+     client,
+     chatCtx,
+     toolCtx,
+     connOptions,
+     modelOptions,
+     strictToolSchema
+   }) {
+     super(llm2, { chatCtx, toolCtx, connOptions });
+     this.model = model;
+     this.client = client;
+     this.modelOptions = modelOptions;
+     this.strictToolSchema = strictToolSchema;
+     this.responseId = "";
+   }
+   async run() {
+     let retryable = true;
+     try {
+       const messages = await this.chatCtx.toProviderFormat(
+         "openai.responses"
+       );
+       const tools = this.toolCtx ? Object.entries(this.toolCtx).map(([name, func]) => {
+         const oaiParams = {
+           type: "function",
+           name,
+           description: func.description,
+           parameters: llm.toJsonSchema(
+             func.parameters,
+             true,
+             this.strictToolSchema
+           )
+         };
+         if (this.strictToolSchema) {
+           oaiParams.strict = true;
+         }
+         return oaiParams;
+       }) : void 0;
+       const requestOptions = { ...this.modelOptions };
+       if (!tools) {
+         delete requestOptions.tool_choice;
+       }
+       const stream = await this.client.responses.create(
+         {
+           model: this.model,
+           input: messages,
+           tools,
+           stream: true,
+           ...requestOptions
+         },
+         {
+           timeout: this.connOptions.timeoutMs
+         }
+       );
+       for await (const event of stream) {
+         retryable = false;
+         let chunk;
+         switch (event.type) {
+           case "error":
+             this.handleError(event);
+             break;
+           case "response.created":
+             this.handleResponseCreated(event);
+             break;
+           case "response.output_item.done":
+             chunk = this.handleResponseOutputItemDone(event);
+             break;
+           case "response.output_text.delta":
+             chunk = this.handleResponseOutputTextDelta(event);
+             break;
+           case "response.completed":
+             chunk = this.handleResponseCompleted(event);
+             break;
+         }
+         if (chunk) {
+           this.queue.put(chunk);
+         }
+       }
+     } catch (error) {
+       if (error instanceof APIStatusError || error instanceof APITimeoutError || error instanceof APIConnectionError) {
+         throw error;
+       } else if (error instanceof OpenAI.APIConnectionTimeoutError) {
+         throw new APITimeoutError({ options: { retryable } });
+       } else if (error instanceof OpenAI.APIError) {
+         throw new APIStatusError({
+           message: error.message,
+           options: {
+             statusCode: error.status,
+             body: error.error,
+             requestId: error.requestID,
+             retryable
+           }
+         });
+       } else {
+         throw new APIConnectionError({
+           message: toError(error).message,
+           options: { retryable }
+         });
+       }
+     }
+   }
+   handleError(event) {
+     throw new APIStatusError({
+       message: event.message,
+       options: {
+         statusCode: -1,
+         retryable: false
+       }
+     });
+   }
+   handleResponseCreated(event) {
+     this.responseId = event.response.id;
+   }
+   handleResponseOutputItemDone(event) {
+     let chunk;
+     if (event.item.type === "function_call") {
+       chunk = {
+         id: this.responseId,
+         delta: {
+           role: "assistant",
+           content: void 0,
+           toolCalls: [
+             llm.FunctionCall.create({
+               callId: event.item.call_id || "",
+               name: event.item.name,
+               args: event.item.arguments
+             })
+           ]
+         }
+       };
+     }
+     return chunk;
+   }
+   handleResponseOutputTextDelta(event) {
+     return {
+       id: this.responseId,
+       delta: {
+         role: "assistant",
+         content: event.delta
+       }
+     };
+   }
+   handleResponseCompleted(event) {
+     if (event.response.usage) {
+       return {
+         id: this.responseId,
+         usage: {
+           completionTokens: event.response.usage.output_tokens,
+           promptTokens: event.response.usage.input_tokens,
+           promptCachedTokens: event.response.usage.input_tokens_details.cached_tokens,
+           totalTokens: event.response.usage.total_tokens
+         }
+       };
+     }
+     return void 0;
+   }
+ }
+ export {
+   LLM,
+   LLMStream
+ };
+ //# sourceMappingURL=llm.js.map
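
The catch block in run() normalizes errors: OpenAI.APIConnectionTimeoutError becomes APITimeoutError, other OpenAI.APIError instances become APIStatusError (carrying status, body, and request ID), and anything else becomes APIConnectionError; framework errors are rethrown as-is, and retryable is true only if the failure happened before the first stream event. A caller can branch on those types, as in this sketch; reading err.statusCode is an assumption based on the constructor options shown above.

```ts
// Sketch: handling the normalized error types thrown by LLMStream.run().
import { APIConnectionError, APIStatusError, APITimeoutError } from '@livekit/agents';

try {
  for await (const chunk of model.chat({ chatCtx })) {
    // consume chunks
  }
} catch (err) {
  if (err instanceof APITimeoutError) {
    // timed out before any event arrived, so the request may be retried
  } else if (err instanceof APIStatusError) {
    console.error('OpenAI returned an error status:', err.statusCode); // assumed property
  } else if (err instanceof APIConnectionError) {
    console.error('connection failed:', err.message);
  }
}
```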