@livekit/agents-plugin-openai 1.0.45 → 1.0.47
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -1
- package/dist/index.cjs +5 -2
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +1 -0
- package/dist/index.d.ts +1 -0
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +4 -2
- package/dist/index.js.map +1 -1
- package/dist/realtime/realtime_model.cjs +2 -1
- package/dist/realtime/realtime_model.cjs.map +1 -1
- package/dist/realtime/realtime_model.d.ts.map +1 -1
- package/dist/realtime/realtime_model.js +2 -1
- package/dist/realtime/realtime_model.js.map +1 -1
- package/dist/realtime/realtime_model_beta.cjs +2 -1
- package/dist/realtime/realtime_model_beta.cjs.map +1 -1
- package/dist/realtime/realtime_model_beta.d.ts.map +1 -1
- package/dist/realtime/realtime_model_beta.js +2 -1
- package/dist/realtime/realtime_model_beta.js.map +1 -1
- package/dist/responses/index.cjs +23 -0
- package/dist/responses/index.cjs.map +1 -0
- package/dist/responses/index.d.cts +2 -0
- package/dist/responses/index.d.ts +2 -0
- package/dist/responses/index.d.ts.map +1 -0
- package/dist/responses/index.js +2 -0
- package/dist/responses/index.js.map +1 -0
- package/dist/responses/llm.cjs +274 -0
- package/dist/responses/llm.cjs.map +1 -0
- package/dist/responses/llm.d.cts +61 -0
- package/dist/responses/llm.d.ts +61 -0
- package/dist/responses/llm.d.ts.map +1 -0
- package/dist/responses/llm.js +246 -0
- package/dist/responses/llm.js.map +1 -0
- package/dist/responses/llm.test.cjs +22 -0
- package/dist/responses/llm.test.cjs.map +1 -0
- package/dist/responses/llm.test.d.cts +2 -0
- package/dist/responses/llm.test.d.ts +2 -0
- package/dist/responses/llm.test.d.ts.map +1 -0
- package/dist/responses/llm.test.js +21 -0
- package/dist/responses/llm.test.js.map +1 -0
- package/package.json +5 -5
- package/src/index.ts +3 -2
- package/src/realtime/realtime_model.ts +1 -0
- package/src/realtime/realtime_model_beta.ts +1 -0
- package/src/responses/index.ts +4 -0
- package/src/responses/llm.test.ts +25 -0
- package/src/responses/llm.ts +327 -0
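
The headline change is a new responses module: src/responses/llm.ts adds an LLM/LLMStream pair backed by OpenAI's Responses API (streamed via client.responses.create), re-exported from the package index as `responses`, together with the compiled dist/responses/* artifacts and a vitest suite. The two realtime model variants additionally gain a `manualFunctionCalls` capability flag, and all @livekit/agents* versions move to 1.0.47.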
package/dist/responses/llm.js.map
ADDED
@@ -0,0 +1 @@
+ [generated source map for ../../src/responses/llm.ts; mappings and embedded source content omitted]
package/dist/responses/llm.test.cjs
ADDED
@@ -0,0 +1,22 @@
+ "use strict";
+ var import_agents_plugins_test = require("@livekit/agents-plugins-test");
+ var import_vitest = require("vitest");
+ var import_llm = require("./llm.cjs");
+ (0, import_vitest.describe)("OpenAI Responses", async () => {
+   await (0, import_agents_plugins_test.llm)(
+     new import_llm.LLM({
+       temperature: 0,
+       strictToolSchema: false
+     }),
+     true
+   );
+ });
+ (0, import_vitest.describe)("OpenAI Responses strict tool schema", async () => {
+   await (0, import_agents_plugins_test.llmStrict)(
+     new import_llm.LLM({
+       temperature: 0,
+       strictToolSchema: true
+     })
+   );
+ });
+ //# sourceMappingURL=llm.test.cjs.map
package/dist/responses/llm.test.cjs.map
ADDED
@@ -0,0 +1 @@
+ [generated source map for ../../src/responses/llm.test.ts; mappings and embedded source content omitted]
package/dist/responses/llm.test.d.ts.map
ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"llm.test.d.ts","sourceRoot":"","sources":["../../src/responses/llm.test.ts"],"names":[],"mappings":""}
package/dist/responses/llm.test.js
ADDED
@@ -0,0 +1,21 @@
+ import { llm, llmStrict } from "@livekit/agents-plugins-test";
+ import { describe } from "vitest";
+ import { LLM } from "./llm.js";
+ describe("OpenAI Responses", async () => {
+   await llm(
+     new LLM({
+       temperature: 0,
+       strictToolSchema: false
+     }),
+     true
+   );
+ });
+ describe("OpenAI Responses strict tool schema", async () => {
+   await llmStrict(
+     new LLM({
+       temperature: 0,
+       strictToolSchema: true
+     })
+   );
+ });
+ //# sourceMappingURL=llm.test.js.map
package/dist/responses/llm.test.js.map
ADDED
@@ -0,0 +1 @@
+ [generated source map for ../../src/responses/llm.test.ts; mappings and embedded source content omitted]
package/package.json
CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@livekit/agents-plugin-openai",
-   "version": "1.0.45",
+   "version": "1.0.47",
    "description": "OpenAI plugin for LiveKit Node Agents",
    "main": "dist/index.js",
    "require": "dist/index.cjs",
@@ -30,9 +30,9 @@
    "@types/ws": "^8.5.10",
    "tsup": "^8.3.5",
    "typescript": "^5.0.0",
-   "@livekit/agents": "1.0.45",
-   "@livekit/agents-plugin-silero": "1.0.45",
-   "@livekit/agents-plugins-test": "1.0.45"
+   "@livekit/agents": "1.0.47",
+   "@livekit/agents-plugin-silero": "1.0.47",
+   "@livekit/agents-plugins-test": "1.0.47"
  },
  "dependencies": {
    "@livekit/mutex": "^1.1.1",
@@ -41,7 +41,7 @@
  },
  "peerDependencies": {
    "@livekit/rtc-node": "^0.13.24",
-   "@livekit/agents": "1.0.45"
+   "@livekit/agents": "1.0.47"
  },
  "scripts": {
    "build": "tsup --onSuccess \"pnpm build:types\"",
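
All @livekit/agents* references (the package itself, its devDependencies, and the peer dependency) are versioned in lockstep, moving together from 1.0.45 to 1.0.47.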
package/src/index.ts
CHANGED
@@ -6,6 +6,7 @@ import { Plugin } from '@livekit/agents';
  export { LLM, LLMStream, type LLMOptions } from './llm.js';
  export * from './models.js';
  export * as realtime from './realtime/index.js';
+ export * as responses from './responses/index.js';
  export { STT, type STTOptions } from './stt.js';
  export { ChunkedStream, TTS, type TTSOptions } from './tts.js';

@@ -13,8 +14,8 @@ class OpenAIPlugin extends Plugin {
  constructor() {
    super({
      title: 'openai',
-     version:
-     package:
+     version: __PACKAGE_VERSION__,
+     package: __PACKAGE_NAME__,
    });
  }
}
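
With the new `export * as responses` line, the Responses-backed LLM becomes reachable from the package root. A minimal consumer-side sketch (assuming src/responses/index.ts re-exports LLM, which the generated dist/responses/index.* files in this diff suggest):

import * as openai from '@livekit/agents-plugin-openai';

// Existing Chat Completions-backed LLM, unchanged:
const chatLLM = new openai.LLM();

// New Responses API-backed LLM added in this release range; it reads
// OPENAI_API_KEY from the environment when no apiKey option is passed.
const responsesLLM = new openai.responses.LLM({
  model: 'gpt-4.1',
  temperature: 0,
});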
package/src/realtime/realtime_model.ts
CHANGED
@@ -175,6 +175,7 @@ export class RealtimeModel extends llm.RealtimeModel {
      userTranscription: options.inputAudioTranscription !== null,
      autoToolReplyGeneration: false,
      audioOutput: modalities.includes('audio'),
+     manualFunctionCalls: true,
    });

    const isAzure = !!(options.apiVersion || options.entraToken || options.azureDeployment);

package/src/realtime/realtime_model_beta.ts
CHANGED
@@ -176,6 +176,7 @@ export class RealtimeModel extends llm.RealtimeModel {
      userTranscription: options.inputAudioTranscription !== null,
      autoToolReplyGeneration: false,
      audioOutput: modalities.includes('audio'),
+     manualFunctionCalls: true,
    });

    const isAzure = !!(options.apiVersion || options.entraToken || options.azureDeployment);
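
Both realtime model variants now advertise `manualFunctionCalls: true` in their capabilities; presumably this signals to the agents framework that callers may inject function calls manually against these models, rather than relying solely on model-initiated tool calls.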
package/src/responses/llm.test.ts
ADDED
@@ -0,0 +1,25 @@
+ // SPDX-FileCopyrightText: 2024 LiveKit, Inc.
+ //
+ // SPDX-License-Identifier: Apache-2.0
+ import { llm, llmStrict } from '@livekit/agents-plugins-test';
+ import { describe } from 'vitest';
+ import { LLM } from './llm.js';
+
+ describe('OpenAI Responses', async () => {
+   await llm(
+     new LLM({
+       temperature: 0,
+       strictToolSchema: false,
+     }),
+     true,
+   );
+ });
+
+ describe('OpenAI Responses strict tool schema', async () => {
+   await llmStrict(
+     new LLM({
+       temperature: 0,
+       strictToolSchema: true,
+     }),
+   );
+ });
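
The new test file drives the shared @livekit/agents-plugins-test suite twice at temperature 0: once through `llm` with `strictToolSchema: false` (passing `true` as the suite's second argument), and once through `llmStrict` with `strictToolSchema: true`, covering both tool-schema modes.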
package/src/responses/llm.ts
ADDED
@@ -0,0 +1,327 @@
+ // SPDX-FileCopyrightText: 2025 LiveKit, Inc.
+ //
+ // SPDX-License-Identifier: Apache-2.0
+ import type { APIConnectOptions } from '@livekit/agents';
+ import {
+   APIConnectionError,
+   APIStatusError,
+   APITimeoutError,
+   DEFAULT_API_CONNECT_OPTIONS,
+   llm,
+   toError,
+ } from '@livekit/agents';
+ import OpenAI from 'openai';
+ import type { ChatModels } from '../models.js';
+
+ interface LLMOptions {
+   model: ChatModels;
+   apiKey?: string;
+   baseURL?: string;
+   client?: OpenAI;
+   temperature?: number;
+   parallelToolCalls?: boolean;
+   toolChoice?: llm.ToolChoice;
+   store?: boolean;
+   metadata?: Record<string, string>;
+   strictToolSchema?: boolean;
+ }
+
+ const defaultLLMOptions: LLMOptions = {
+   model: 'gpt-4.1',
+   apiKey: process.env.OPENAI_API_KEY,
+   strictToolSchema: true,
+ };
+
+ export class LLM extends llm.LLM {
+   #client: OpenAI;
+   #opts: LLMOptions;
+
+   /**
+    * Create a new instance of OpenAI Responses LLM.
+    *
+    * @remarks
+    * `apiKey` must be set to your OpenAI API key, either using the argument or by setting the
+    * `OPENAI_API_KEY` environment variable.
+    */
+   constructor(opts: Partial<LLMOptions> = defaultLLMOptions) {
+     super();
+
+     this.#opts = { ...defaultLLMOptions, ...opts };
+     if (this.#opts.apiKey === undefined) {
+       throw new Error('OpenAI API key is required, whether as an argument or as $OPENAI_API_KEY');
+     }
+
+     this.#client =
+       this.#opts.client ||
+       new OpenAI({
+         baseURL: this.#opts.baseURL,
+         apiKey: this.#opts.apiKey,
+       });
+   }
+
+   label(): string {
+     return 'openai.responses.LLM';
+   }
+
+   get model(): string {
+     return this.#opts.model;
+   }
+
+   chat({
+     chatCtx,
+     toolCtx,
+     connOptions = DEFAULT_API_CONNECT_OPTIONS,
+     parallelToolCalls,
+     toolChoice,
+     extraKwargs,
+   }: {
+     chatCtx: llm.ChatContext;
+     toolCtx?: llm.ToolContext;
+     connOptions?: APIConnectOptions;
+     parallelToolCalls?: boolean;
+     toolChoice?: llm.ToolChoice;
+     extraKwargs?: Record<string, unknown>;
+   }): LLMStream {
+     const modelOptions: Record<string, unknown> = { ...(extraKwargs || {}) };
+
+     parallelToolCalls =
+       parallelToolCalls !== undefined ? parallelToolCalls : this.#opts.parallelToolCalls;
+
+     if (toolCtx && Object.keys(toolCtx).length > 0 && parallelToolCalls !== undefined) {
+       modelOptions.parallel_tool_calls = parallelToolCalls;
+     }
+
+     toolChoice =
+       toolChoice !== undefined ? toolChoice : (this.#opts.toolChoice as llm.ToolChoice | undefined);
+
+     if (toolChoice) {
+       modelOptions.tool_choice = toolChoice;
+     }
+
+     if (this.#opts.temperature !== undefined) {
+       modelOptions.temperature = this.#opts.temperature;
+     }
+
+     if (this.#opts.store !== undefined) {
+       modelOptions.store = this.#opts.store;
+     }
+
+     if (this.#opts.metadata) {
+       modelOptions.metadata = this.#opts.metadata;
+     }
+
+     return new LLMStream(this, {
+       model: this.#opts.model,
+       client: this.#client,
+       chatCtx,
+       toolCtx,
+       connOptions,
+       modelOptions,
+       strictToolSchema: this.#opts.strictToolSchema ?? true,
+     });
+   }
+ }
+
+ export class LLMStream extends llm.LLMStream {
+   private model: string | ChatModels;
+   private client: OpenAI;
+   private modelOptions: Record<string, unknown>;
+   private strictToolSchema: boolean;
+   private responseId: string;
+
+   constructor(
+     llm: LLM,
+     {
+       model,
+       client,
+       chatCtx,
+       toolCtx,
+       connOptions,
+       modelOptions,
+       strictToolSchema,
+     }: {
+       model: ChatModels;
+       client: OpenAI;
+       chatCtx: llm.ChatContext;
+       toolCtx?: llm.ToolContext;
+       connOptions: APIConnectOptions;
+       modelOptions: Record<string, unknown>;
+       strictToolSchema: boolean;
+     },
+   ) {
+     super(llm, { chatCtx, toolCtx, connOptions });
+     this.model = model;
+     this.client = client;
+     this.modelOptions = modelOptions;
+     this.strictToolSchema = strictToolSchema;
+     this.responseId = '';
+   }
+
+   protected async run(): Promise<void> {
+     let retryable = true;
+
+     try {
+       const messages = (await this.chatCtx.toProviderFormat(
+         'openai.responses',
+       )) as OpenAI.Responses.ResponseInputItem[];
+
+       const tools = this.toolCtx
+         ? Object.entries(this.toolCtx).map(([name, func]) => {
+             const oaiParams = {
+               type: 'function' as const,
+               name: name,
+               description: func.description,
+               parameters: llm.toJsonSchema(
+                 func.parameters,
+                 true,
+                 this.strictToolSchema,
+               ) as unknown as OpenAI.Responses.FunctionTool['parameters'],
+             } as OpenAI.Responses.FunctionTool;
+
+             if (this.strictToolSchema) {
+               oaiParams.strict = true;
+             }
+
+             return oaiParams;
+           })
+         : undefined;
+
+       const requestOptions: Record<string, unknown> = { ...this.modelOptions };
+       if (!tools) {
+         delete requestOptions.tool_choice;
+       }
+
+       const stream = await this.client.responses.create(
+         {
+           model: this.model,
+           input: messages,
+           tools: tools,
+           stream: true,
+           ...requestOptions,
+         },
+         {
+           timeout: this.connOptions.timeoutMs,
+         },
+       );
+
+       for await (const event of stream) {
+         retryable = false;
+         let chunk: llm.ChatChunk | undefined;
+
+         switch (event.type) {
+           case 'error':
+             this.handleError(event);
+             break;
+           case 'response.created':
+             this.handleResponseCreated(event);
+             break;
+           case 'response.output_item.done':
+             chunk = this.handleResponseOutputItemDone(event);
+             break;
+           case 'response.output_text.delta':
+             chunk = this.handleResponseOutputTextDelta(event);
+             break;
+           case 'response.completed':
+             chunk = this.handleResponseCompleted(event);
+             break;
+         }
+
+         if (chunk) {
+           this.queue.put(chunk);
+         }
+       }
+     } catch (error) {
+       if (
+         error instanceof APIStatusError ||
+         error instanceof APITimeoutError ||
+         error instanceof APIConnectionError
+       ) {
+         throw error;
+       } else if (error instanceof OpenAI.APIConnectionTimeoutError) {
+         throw new APITimeoutError({ options: { retryable } });
+       } else if (error instanceof OpenAI.APIError) {
+         throw new APIStatusError({
+           message: error.message,
+           options: {
+             statusCode: error.status,
+             body: error.error,
+             requestId: error.requestID,
+             retryable,
+           },
+         });
+       } else {
+         throw new APIConnectionError({
+           message: toError(error).message,
+           options: { retryable },
+         });
+       }
+     }
+   }
+
+   private handleError(event: OpenAI.Responses.ResponseErrorEvent): void {
+     throw new APIStatusError({
+       message: event.message,
+       options: {
+         statusCode: -1,
+         retryable: false,
+       },
+     });
+   }
+
+   private handleResponseCreated(event: OpenAI.Responses.ResponseCreatedEvent): void {
+     this.responseId = event.response.id;
+   }
+
+   private handleResponseOutputItemDone(
+     event: OpenAI.Responses.ResponseOutputItemDoneEvent,
+   ): llm.ChatChunk | undefined {
+     let chunk: llm.ChatChunk | undefined;
+
+     if (event.item.type === 'function_call') {
+       chunk = {
+         id: this.responseId,
+         delta: {
+           role: 'assistant',
+           content: undefined,
+           toolCalls: [
+             llm.FunctionCall.create({
+               callId: event.item.call_id || '',
+               name: event.item.name,
+               args: event.item.arguments,
+             }),
+           ],
+         },
+       };
+     }
+     return chunk;
+   }
+
+   private handleResponseOutputTextDelta(
+     event: OpenAI.Responses.ResponseTextDeltaEvent,
+   ): llm.ChatChunk {
+     return {
+       id: this.responseId,
+       delta: {
+         role: 'assistant',
+         content: event.delta,
+       },
+     };
+   }
+
+   private handleResponseCompleted(
+     event: OpenAI.Responses.ResponseCompletedEvent,
+   ): llm.ChatChunk | undefined {
+     if (event.response.usage) {
+       return {
+         id: this.responseId,
+         usage: {
+           completionTokens: event.response.usage.output_tokens,
+           promptTokens: event.response.usage.input_tokens,
+           promptCachedTokens: event.response.usage.input_tokens_details.cached_tokens,
+           totalTokens: event.response.usage.total_tokens,
+         },
+       };
+     }
+     return undefined;
+   }
+ }
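
For orientation, a minimal usage sketch of the new class. It is not part of the package: the `ChatContext.empty()` factory and `addMessage()` helper are assumed to exist as in LiveKit's other agents SDKs, and `LLMStream` is assumed to be async-iterable like the framework's other LLM streams; only the `responses.LLM` constructor options and the chunk shape are taken from the diff above.

import { llm } from '@livekit/agents';
import { responses } from '@livekit/agents-plugin-openai';

async function main() {
  // Constructor options come straight from LLMOptions above; with no apiKey,
  // the class falls back to process.env.OPENAI_API_KEY and throws if unset.
  const model = new responses.LLM({ model: 'gpt-4.1', temperature: 0 });

  // Assumed ChatContext API; adjust to the actual @livekit/agents surface.
  const chatCtx = llm.ChatContext.empty();
  chatCtx.addMessage({ role: 'user', content: 'Say hello in one sentence.' });

  const stream = model.chat({ chatCtx });
  for await (const chunk of stream) {
    // Text deltas are mapped from 'response.output_text.delta' events.
    if (chunk.delta?.content) process.stdout.write(chunk.delta.content);
    // A single usage chunk is emitted on 'response.completed'.
    if (chunk.usage) console.log(`\ntotal tokens: ${chunk.usage.totalTokens}`);
  }
}

main();

Note the strict-schema behavior visible in run(): when `strictToolSchema` is enabled (the default), each function tool gets `strict: true` and its parameters pass through `llm.toJsonSchema(..., true, this.strictToolSchema)`; the test file above exercises both settings.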