@livekit/agents-plugin-openai 0.9.2 → 1.0.0-next.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. package/dist/index.cjs +16 -5
  2. package/dist/index.cjs.map +1 -1
  3. package/dist/index.d.cts +4 -4
  4. package/dist/index.d.ts +4 -4
  5. package/dist/index.d.ts.map +1 -1
  6. package/dist/index.js +14 -3
  7. package/dist/index.js.map +1 -1
  8. package/dist/llm.cjs +156 -188
  9. package/dist/llm.cjs.map +1 -1
  10. package/dist/llm.d.cts +27 -8
  11. package/dist/llm.d.ts +27 -8
  12. package/dist/llm.d.ts.map +1 -1
  13. package/dist/llm.js +164 -179
  14. package/dist/llm.js.map +1 -1
  15. package/dist/models.cjs +14 -0
  16. package/dist/models.cjs.map +1 -1
  17. package/dist/models.d.cts +11 -6
  18. package/dist/models.d.ts +11 -6
  19. package/dist/models.d.ts.map +1 -1
  20. package/dist/models.js +6 -0
  21. package/dist/models.js.map +1 -1
  22. package/dist/realtime/api_proto.cjs.map +1 -1
  23. package/dist/realtime/api_proto.d.cts +15 -0
  24. package/dist/realtime/api_proto.d.ts +15 -0
  25. package/dist/realtime/api_proto.d.ts.map +1 -1
  26. package/dist/realtime/api_proto.js.map +1 -1
  27. package/dist/realtime/realtime_model.cjs +1057 -820
  28. package/dist/realtime/realtime_model.cjs.map +1 -1
  29. package/dist/realtime/realtime_model.d.cts +126 -160
  30. package/dist/realtime/realtime_model.d.ts +126 -160
  31. package/dist/realtime/realtime_model.d.ts.map +1 -1
  32. package/dist/realtime/realtime_model.js +1067 -825
  33. package/dist/realtime/realtime_model.js.map +1 -1
  34. package/dist/tts.cjs +5 -5
  35. package/dist/tts.cjs.map +1 -1
  36. package/dist/tts.d.cts +2 -1
  37. package/dist/tts.d.ts +2 -1
  38. package/dist/tts.d.ts.map +1 -1
  39. package/dist/tts.js +6 -6
  40. package/dist/tts.js.map +1 -1
  41. package/package.json +9 -7
  42. package/src/index.ts +19 -5
  43. package/src/llm.ts +227 -218
  44. package/src/models.ts +83 -5
  45. package/src/realtime/api_proto.ts +15 -1
  46. package/src/realtime/realtime_model.ts +1305 -996
  47. package/src/tts.ts +6 -6
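The dist/llm.js diff below carries the headline API changes for the LLM class: the default model moves from "gpt-4o" to "gpt-4.1", parallel tool calls are enabled by default, the constructor gains a second provider-format argument, and the instance now exposes a label() method and a model getter. As a rough orientation before reading the diff, a minimal sketch of that surface, assuming the class is still re-exported from the package root as in 0.9.x:

```ts
// Sketch inferred from the compiled dist/llm.js diff below, not from package docs;
// the root re-export of LLM is assumed to match 0.9.x.
import { LLM } from '@livekit/agents-plugin-openai';

// 1.0.0-next.0 defaults: model "gpt-4.1" and parallelToolCalls enabled.
// The new second constructor argument selects the provider message format.
const oai = new LLM({ apiKey: process.env.OPENAI_API_KEY }, 'openai');

console.log(oai.label()); // "openai.LLM"
console.log(oai.model); // "gpt-4.1"
```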
package/dist/llm.js CHANGED
@@ -1,18 +1,25 @@
- import { llm, log } from "@livekit/agents";
- import { randomUUID } from "node:crypto";
+ import {
+   APIConnectionError,
+   APIStatusError,
+   APITimeoutError,
+   DEFAULT_API_CONNECT_OPTIONS,
+   llm,
+   toError
+ } from "@livekit/agents";
  import { AzureOpenAI, OpenAI } from "openai";
- import sharp from "sharp";
  const defaultLLMOptions = {
-   model: "gpt-4o",
-   apiKey: process.env.OPENAI_API_KEY
+   model: "gpt-4.1",
+   apiKey: process.env.OPENAI_API_KEY,
+   parallelToolCalls: true
  };
  const defaultAzureLLMOptions = {
-   model: "gpt-4o",
+   model: "gpt-4.1",
    apiKey: process.env.AZURE_API_KEY
  };
  class LLM extends llm.LLM {
    #opts;
    #client;
+   #providerFmt;
    /**
     * Create a new instance of OpenAI LLM.
     *
@@ -20,9 +27,10 @@ class LLM extends llm.LLM {
     * `apiKey` must be set to your OpenAI API key, either using the argument or by setting the
     * `OPENAI_API_KEY` environmental variable.
     */
-   constructor(opts = defaultLLMOptions) {
+   constructor(opts = defaultLLMOptions, providerFmt = "openai") {
      super();
      this.#opts = { ...defaultLLMOptions, ...opts };
+     this.#providerFmt = providerFmt;
      if (this.#opts.apiKey === void 0) {
        throw new Error("OpenAI API key is required, whether as an argument or as $OPENAI_API_KEY");
      }
@@ -31,6 +39,12 @@ class LLM extends llm.LLM {
        apiKey: opts.apiKey
      });
    }
+   label() {
+     return "openai.LLM";
+   }
+   get model() {
+     return this.#opts.model;
+   }
    /**
     * Create a new instance of OpenAI LLM with Azure.
     *
@@ -255,80 +269,142 @@ class LLM extends llm.LLM {
    }
    chat({
      chatCtx,
-     fncCtx,
-     temperature,
-     n,
-     parallelToolCalls
+     toolCtx,
+     connOptions = DEFAULT_API_CONNECT_OPTIONS,
+     parallelToolCalls,
+     toolChoice,
+     extraKwargs
    }) {
-     temperature = temperature || this.#opts.temperature;
-     return new LLMStream(
-       this,
-       this.#client,
+     const extras = { ...extraKwargs };
+     if (this.#opts.metadata) {
+       extras.metadata = this.#opts.metadata;
+     }
+     if (this.#opts.user) {
+       extras.user = this.#opts.user;
+     }
+     if (this.#opts.maxCompletionTokens) {
+       extras.max_completion_tokens = this.#opts.maxCompletionTokens;
+     }
+     if (this.#opts.temperature) {
+       extras.temperature = this.#opts.temperature;
+     }
+     if (this.#opts.serviceTier) {
+       extras.service_tier = this.#opts.serviceTier;
+     }
+     if (this.#opts.store !== void 0) {
+       extras.store = this.#opts.store;
+     }
+     parallelToolCalls = parallelToolCalls !== void 0 ? parallelToolCalls : this.#opts.parallelToolCalls;
+     if (toolCtx && Object.keys(toolCtx).length > 0 && parallelToolCalls !== void 0) {
+       extras.parallel_tool_calls = parallelToolCalls;
+     }
+     toolChoice = toolChoice !== void 0 ? toolChoice : this.#opts.toolChoice;
+     if (toolChoice) {
+       extras.tool_choice = toolChoice;
+     }
+     return new LLMStream(this, {
+       model: this.#opts.model,
+       providerFmt: this.#providerFmt,
+       client: this.#client,
        chatCtx,
-       fncCtx,
-       this.#opts,
-       parallelToolCalls,
-       temperature,
-       n
-     );
+       toolCtx,
+       connOptions,
+       extraKwargs: extras
+     });
    }
  }
  class LLMStream extends llm.LLMStream {
    #toolCallId;
    #fncName;
    #fncRawArguments;
+   #toolIndex;
    #client;
-   #logger = log();
-   #id = randomUUID();
-   label = "openai.LLMStream";
-   constructor(llm2, client, chatCtx, fncCtx, opts, parallelToolCalls, temperature, n) {
-     super(llm2, chatCtx, fncCtx);
+   #providerFmt;
+   #extraKwargs;
+   model;
+   constructor(llm2, {
+     model,
+     providerFmt,
+     client,
+     chatCtx,
+     toolCtx,
+     connOptions,
+     extraKwargs
+   }) {
+     super(llm2, { chatCtx, toolCtx, connOptions });
      this.#client = client;
-     this.#run(opts, n, parallelToolCalls, temperature);
+     this.#providerFmt = providerFmt;
+     this.#extraKwargs = extraKwargs;
+     this.model = model;
    }
-   async #run(opts, n, parallelToolCalls, temperature) {
-     const tools = this.fncCtx ? Object.entries(this.fncCtx).map(([name, func]) => ({
-       type: "function",
-       function: {
-         name,
-         description: func.description,
-         // don't format parameters if they are raw openai params
-         parameters: func.parameters.type == "object" ? func.parameters : llm.oaiParams(func.parameters)
-       }
-     })) : void 0;
+   async run() {
+     var _a;
+     let retryable = true;
      try {
+       const messages = await this.chatCtx.toProviderFormat(
+         this.#providerFmt
+       );
+       const tools = this.toolCtx ? Object.entries(this.toolCtx).map(([name, func]) => ({
+         type: "function",
+         function: {
+           name,
+           description: func.description,
+           parameters: llm.toJsonSchema(
+             func.parameters
+           )
+         }
+       })) : void 0;
        const stream = await this.#client.chat.completions.create({
-         model: opts.model,
-         user: opts.user,
-         n,
-         messages: await Promise.all(
-           this.chatCtx.messages.map(async (m) => await buildMessage(m, this.#id))
-         ),
-         temperature: temperature || opts.temperature,
-         stream_options: { include_usage: true },
-         stream: true,
+         model: this.model,
+         messages,
          tools,
-         parallel_tool_calls: this.fncCtx && parallelToolCalls
+         stream: true,
+         stream_options: { include_usage: true },
+         ...this.#extraKwargs
        });
        for await (const chunk of stream) {
          for (const choice of chunk.choices) {
+           if (this.abortController.signal.aborted) {
+             break;
+           }
            const chatChunk = this.#parseChoice(chunk.id, choice);
            if (chatChunk) {
+             retryable = false;
              this.queue.put(chatChunk);
            }
-           if (chunk.usage) {
-             const usage = chunk.usage;
-             this.queue.put({
-               requestId: chunk.id,
-               choices: [],
-               usage: {
-                 completionTokens: usage.completion_tokens,
-                 promptTokens: usage.prompt_tokens,
-                 totalTokens: usage.total_tokens
-               }
-             });
-           }
          }
+         if (chunk.usage) {
+           const usage = chunk.usage;
+           retryable = false;
+           this.queue.put({
+             id: chunk.id,
+             usage: {
+               completionTokens: usage.completion_tokens,
+               promptTokens: usage.prompt_tokens,
+               promptCachedTokens: ((_a = usage.prompt_tokens_details) == null ? void 0 : _a.cached_tokens) || 0,
+               totalTokens: usage.total_tokens
+             }
+           });
+         }
+       }
+     } catch (error) {
+       if (error instanceof OpenAI.APIConnectionTimeoutError) {
+         throw new APITimeoutError({ options: { retryable } });
+       } else if (error instanceof OpenAI.APIError) {
+         throw new APIStatusError({
+           message: error.message,
+           options: {
+             statusCode: error.status,
+             body: error.error,
+             requestId: error.request_id,
+             retryable
+           }
+         });
+       } else {
+         throw new APIConnectionError({
+           message: toError(error).message,
+           options: { retryable }
+         });
+       }
      } finally {
        this.queue.close();
@@ -336,154 +412,63 @@ class LLMStream extends llm.LLMStream {
    }
    #parseChoice(id, choice) {
      const delta = choice.delta;
+     if (delta === void 0) return void 0;
      if (delta.tool_calls) {
        for (const tool of delta.tool_calls) {
          if (!tool.function) {
            continue;
          }
          let callChunk;
-         if (this.#toolCallId && tool.id && tool.id !== this.#toolCallId) {
-           callChunk = this.#tryBuildFunction(id, choice);
+         if (this.#toolCallId && tool.id && tool.index !== this.#toolIndex) {
+           callChunk = this.#createRunningToolCallChunk(id, delta);
+           this.#toolCallId = this.#fncName = this.#fncRawArguments = void 0;
          }
          if (tool.function.name) {
+           this.#toolIndex = tool.index;
            this.#toolCallId = tool.id;
            this.#fncName = tool.function.name;
            this.#fncRawArguments = tool.function.arguments || "";
          } else if (tool.function.arguments) {
-           this.#fncRawArguments += tool.function.arguments;
+           this.#fncRawArguments = (this.#fncRawArguments || "") + tool.function.arguments;
          }
          if (callChunk) {
            return callChunk;
          }
        }
      }
-     if (choice.finish_reason && ["tool_calls", "stop"].includes(choice.finish_reason) && this.#toolCallId) {
-       return this.#tryBuildFunction(id, choice);
+     if (choice.finish_reason && ["tool_calls", "stop"].includes(choice.finish_reason) && this.#toolCallId !== void 0) {
+       const callChunk = this.#createRunningToolCallChunk(id, delta);
+       this.#toolCallId = this.#fncName = this.#fncRawArguments = void 0;
+       return callChunk;
      }
-     return {
-       requestId: id,
-       choices: [
-         {
-           delta: { content: delta.content || void 0, role: llm.ChatRole.ASSISTANT },
-           index: choice.index
-         }
-       ]
-     };
-   }
-   #tryBuildFunction(id, choice) {
-     if (!this.fncCtx) {
-       this.#logger.warn("oai stream tried to run function without function context");
+     if (!delta.content) {
        return void 0;
      }
-     if (!this.#toolCallId) {
-       this.#logger.warn("oai stream tried to run function but toolCallId is not set");
-       return void 0;
-     }
-     if (!this.#fncRawArguments || !this.#fncName) {
-       this.#logger.warn("oai stream tried to run function but rawArguments or fncName are not set");
-       return void 0;
-     }
-     const functionInfo = llm.oaiBuildFunctionInfo(
-       this.fncCtx,
-       this.#toolCallId,
-       this.#fncName,
-       this.#fncRawArguments
-     );
-     this.#toolCallId = this.#fncName = this.#fncRawArguments = void 0;
-     this._functionCalls.push(functionInfo);
      return {
-       requestId: id,
-       choices: [
-         {
-           delta: {
-             content: choice.delta.content || void 0,
-             role: llm.ChatRole.ASSISTANT,
-             toolCalls: this._functionCalls
-           },
-           index: choice.index
-         }
-       ]
-     };
-   }
- }
- const buildMessage = async (msg, cacheKey) => {
-   const oaiMsg = {};
-   switch (msg.role) {
-     case llm.ChatRole.SYSTEM:
-       oaiMsg.role = "system";
-       break;
-     case llm.ChatRole.USER:
-       oaiMsg.role = "user";
-       break;
-     case llm.ChatRole.ASSISTANT:
-       oaiMsg.role = "assistant";
-       break;
-     case llm.ChatRole.TOOL:
-       oaiMsg.role = "tool";
-       if (oaiMsg.role === "tool") {
-         oaiMsg.tool_call_id = msg.toolCallId;
-       }
-       break;
-   }
-   if (typeof msg.content === "string") {
-     oaiMsg.content = msg.content;
-   } else if (Array.isArray(msg.content)) {
-     oaiMsg.content = await Promise.all(
-       msg.content.map(async (c) => {
-         if (typeof c === "string") {
-           return { type: "text", text: c };
-         } else if (
-           // typescript type guard for determining ChatAudio vs ChatImage
-           ((c2) => {
-             return c2.image !== void 0;
-           })(c)
-         ) {
-           return await buildImageContent(c, cacheKey);
-         } else {
-           throw new Error("ChatAudio is not supported");
-         }
-       })
-     );
-   } else if (msg.content === void 0) {
-     oaiMsg.content = "";
-   }
-   if (msg.toolCalls && oaiMsg.role === "assistant") {
-     oaiMsg.tool_calls = Object.entries(msg.toolCalls).map(([name, func]) => ({
-       id: func.toolCallId,
-       type: "function",
-       function: {
-         name,
-         arguments: func.rawParams
-       }
-     }));
-   }
-   return oaiMsg;
- };
- const buildImageContent = async (image, cacheKey) => {
-   if (typeof image.image === "string") {
-     return {
-       type: "image_url",
-       image_url: {
-         url: image.image,
-         detail: "auto"
+       id,
+       delta: {
+         role: "assistant",
+         content: delta.content
        }
      };
-   } else {
-     if (!image.cache[cacheKey]) {
-       let encoded = sharp(image.image.data);
-       if (image.inferenceHeight && image.inferenceHeight) {
-         encoded = encoded.resize(image.inferenceWidth, image.inferenceHeight);
-       }
-       image.cache[cacheKey] = await encoded.jpeg().toBuffer().then((buffer) => buffer.toString("utf-8"));
-     }
+   }
+   #createRunningToolCallChunk(id, delta) {
      return {
-       type: "image_url",
-       image_url: {
-         url: `data:image/jpeg;base64,${image.cache[cacheKey]}`
+       id,
+       delta: {
+         role: "assistant",
+         content: delta.content || void 0,
+         toolCalls: [
+           llm.FunctionCall.create({
+             callId: this.#toolCallId,
+             name: this.#fncName || "",
+             args: this.#fncRawArguments || ""
+           })
+         ]
        }
      };
    }
- };
+ }
  export {
    LLM,
    LLMStream
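Taken together, the hunks above reshape how a completion stream is requested and consumed: chat() now takes an options object (toolCtx, connOptions, toolChoice, extraKwargs replace the positional fncCtx/temperature/n arguments), chunks carry an id plus a single delta or usage payload, and run() rethrows OpenAI SDK failures as the agents error classes. A hedged consumption sketch; the ChatContext value is passed in rather than constructed here, and async iteration of the stream is assumed to behave as in earlier releases:

```ts
// Sketch based on the chat()/run() hunks above, not on package documentation.
import { APIConnectionError, APIStatusError, APITimeoutError, llm } from '@livekit/agents';
import type { LLM } from '@livekit/agents-plugin-openai';

async function printCompletion(oai: LLM, chatCtx: llm.ChatContext, toolCtx?: llm.ToolContext) {
  // temperature/n are no longer chat() arguments; per-request extras travel in
  // extraKwargs and are spread into the underlying OpenAI request.
  const stream = oai.chat({ chatCtx, toolCtx, parallelToolCalls: true });
  try {
    for await (const chunk of stream) {
      if (chunk.delta?.content) process.stdout.write(chunk.delta.content);
      if (chunk.usage) {
        // usage chunks now report cached prompt tokens as well
        console.log(`\ncached prompt tokens: ${chunk.usage.promptCachedTokens}`);
      }
    }
  } catch (err) {
    // run() maps OpenAI SDK failures onto the agents error hierarchy
    if (err instanceof APITimeoutError || err instanceof APIConnectionError) {
      console.warn('connection problem:', err.message);
    } else if (err instanceof APIStatusError) {
      console.warn('OpenAI rejected the request:', err.message);
    } else {
      throw err;
    }
  }
}
```

Whether these errors reach the consumer directly or are first retried according to connOptions is governed by the agents core, so treat the catch block as illustrative rather than prescriptive.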
package/dist/llm.js.map CHANGED
@@ -1 +1 @@
- {"version":3,"sources":["../src/llm.ts"],"sourcesContent":["// SPDX-FileCopyrightText: 2024 LiveKit, Inc.\n//\n// SPDX-License-Identifier: Apache-2.0\nimport { llm, log } from '@livekit/agents';\nimport { randomUUID } from 'node:crypto';\nimport { AzureOpenAI, OpenAI } from 'openai';\nimport sharp from 'sharp';\nimport type {\n CerebrasChatModels,\n ChatModels,\n DeepSeekChatModels,\n GroqChatModels,\n MetaChatModels,\n OctoChatModels,\n PerplexityChatModels,\n TelnyxChatModels,\n TogetherChatModels,\n XAIChatModels,\n} from './models.js';\n\nexport interface LLMOptions {\n model: string | ChatModels;\n apiKey?: string;\n baseURL?: string;\n user?: string;\n temperature?: number;\n client?: OpenAI;\n}\n\nconst defaultLLMOptions: LLMOptions = {\n model: 'gpt-4o',\n apiKey: process.env.OPENAI_API_KEY,\n};\n\nconst defaultAzureLLMOptions: LLMOptions = {\n model: 'gpt-4o',\n apiKey: process.env.AZURE_API_KEY,\n};\n\nexport class LLM extends llm.LLM {\n #opts: LLMOptions;\n #client: OpenAI;\n\n /**\n * Create a new instance of OpenAI LLM.\n *\n * @remarks\n * `apiKey` must be set to your OpenAI API key, either using the argument or by setting the\n * `OPENAI_API_KEY` environmental variable.\n */\n constructor(opts: Partial<LLMOptions> = defaultLLMOptions) {\n super();\n\n this.#opts = { ...defaultLLMOptions, ...opts };\n if (this.#opts.apiKey === undefined) {\n throw new Error('OpenAI API key is required, whether as an argument or as $OPENAI_API_KEY');\n }\n\n this.#client =\n this.#opts.client ||\n new OpenAI({\n baseURL: opts.baseURL,\n apiKey: opts.apiKey,\n });\n }\n\n /**\n * Create a new instance of OpenAI LLM with Azure.\n *\n * @remarks\n * This automatically infers the following arguments from their corresponding environment variables if they are not provided:\n * - `apiKey` from `AZURE_OPENAI_API_KEY`\n * - `organization` from `OPENAI_ORG_ID`\n * - `project` from `OPENAI_PROJECT_ID`\n * - `azureAdToken` from `AZURE_OPENAI_AD_TOKEN`\n * - `apiVersion` from `OPENAI_API_VERSION`\n * - `azureEndpoint` from `AZURE_OPENAI_ENDPOINT`\n */\n static withAzure(\n opts: {\n model: string | ChatModels;\n azureEndpoint?: string;\n azureDeployment?: string;\n apiVersion?: string;\n apiKey?: string;\n azureAdToken?: string;\n azureAdTokenProvider?: () => Promise<string>;\n organization?: string;\n project?: string;\n baseURL?: string;\n user?: string;\n temperature?: number;\n } = defaultAzureLLMOptions,\n ): LLM {\n opts = { ...defaultLLMOptions, ...opts };\n if (opts.apiKey === undefined) {\n throw new Error('Azure API key is required, whether as an argument or as $AZURE_API_KEY');\n }\n\n return new LLM({\n temperature: opts.temperature,\n user: opts.user,\n client: new AzureOpenAI(opts),\n });\n }\n\n /**\n * Create a new instance of Cerebras LLM.\n *\n * @remarks\n * `apiKey` must be set to your Cerebras API key, either using the argument or by setting the\n * `CEREBRAS_API_KEY` environmental variable.\n */\n static withCerebras(\n opts: Partial<{\n model: string | CerebrasChatModels;\n apiKey?: string;\n baseURL?: string;\n user?: string;\n temperature?: number;\n client: OpenAI;\n }> = {},\n ): LLM {\n opts.apiKey = opts.apiKey || process.env.CEREBRAS_API_KEY;\n if (opts.apiKey === undefined) {\n throw new Error(\n 'Cerebras API key is required, whether as an argument or as $CEREBRAS_API_KEY',\n );\n }\n\n return new LLM({\n model: 'llama3.1-8b',\n baseURL: 'https://api.cerebras.ai/v1',\n ...opts,\n });\n }\n\n /**\n * Create a new instance of Fireworks LLM.\n *\n * @remarks\n * `apiKey` 
must be set to your Fireworks API key, either using the argument or by setting the\n * `FIREWORKS_API_KEY` environmental variable.\n */\n static withFireworks(opts: Partial<LLMOptions> = {}): LLM {\n opts.apiKey = opts.apiKey || process.env.FIREWORKS_API_KEY;\n if (opts.apiKey === undefined) {\n throw new Error(\n 'Fireworks API key is required, whether as an argument or as $FIREWORKS_API_KEY',\n );\n }\n\n return new LLM({\n model: 'accounts/fireworks/models/llama-v3p1-70b-instruct',\n baseURL: 'https://api.fireworks.ai/inference/v1',\n ...opts,\n });\n }\n\n /**\n * Create a new instance of xAI LLM.\n *\n * @remarks\n * `apiKey` must be set to your xAI API key, either using the argument or by setting the\n * `XAI_API_KEY` environmental variable.\n */\n static withXAI(\n opts: Partial<{\n model: string | XAIChatModels;\n apiKey?: string;\n baseURL?: string;\n user?: string;\n temperature?: number;\n client: OpenAI;\n }> = {},\n ): LLM {\n opts.apiKey = opts.apiKey || process.env.XAI_API_KEY;\n if (opts.apiKey === undefined) {\n throw new Error('xAI API key is required, whether as an argument or as $XAI_API_KEY');\n }\n\n return new LLM({\n model: 'grok-2-public',\n baseURL: 'https://api.x.ai/v1',\n ...opts,\n });\n }\n\n /**\n * Create a new instance of Groq LLM.\n *\n * @remarks\n * `apiKey` must be set to your Groq API key, either using the argument or by setting the\n * `GROQ_API_KEY` environmental variable.\n */\n static withGroq(\n opts: Partial<{\n model: string | GroqChatModels;\n apiKey?: string;\n baseURL?: string;\n user?: string;\n temperature?: number;\n client: OpenAI;\n }> = {},\n ): LLM {\n opts.apiKey = opts.apiKey || process.env.GROQ_API_KEY;\n if (opts.apiKey === undefined) {\n throw new Error('Groq API key is required, whether as an argument or as $GROQ_API_KEY');\n }\n\n return new LLM({\n model: 'llama3-8b-8192',\n baseURL: 'https://api.groq.com/openai/v1',\n ...opts,\n });\n }\n\n /**\n * Create a new instance of DeepSeek LLM.\n *\n * @remarks\n * `apiKey` must be set to your DeepSeek API key, either using the argument or by setting the\n * `DEEPSEEK_API_KEY` environmental variable.\n */\n static withDeepSeek(\n opts: Partial<{\n model: string | DeepSeekChatModels;\n apiKey?: string;\n baseURL?: string;\n user?: string;\n temperature?: number;\n client: OpenAI;\n }> = {},\n ): LLM {\n opts.apiKey = opts.apiKey || process.env.DEEPSEEK_API_KEY;\n if (opts.apiKey === undefined) {\n throw new Error(\n 'DeepSeek API key is required, whether as an argument or as $DEEPSEEK_API_KEY',\n );\n }\n\n return new LLM({\n model: 'deepseek-chat',\n baseURL: 'https://api.deepseek.com/v1',\n ...opts,\n });\n }\n\n /**\n * Create a new instance of OctoAI LLM.\n *\n * @remarks\n * `apiKey` must be set to your OctoAI API key, either using the argument or by setting the\n * `OCTOAI_TOKEN` environmental variable.\n */\n static withOcto(\n opts: Partial<{\n model: string | OctoChatModels;\n apiKey?: string;\n baseURL?: string;\n user?: string;\n temperature?: number;\n client: OpenAI;\n }> = {},\n ): LLM {\n opts.apiKey = opts.apiKey || process.env.OCTOAI_TOKEN;\n if (opts.apiKey === undefined) {\n throw new Error('OctoAI API key is required, whether as an argument or as $OCTOAI_TOKEN');\n }\n\n return new LLM({\n model: 'llama-2-13b-chat',\n baseURL: 'https://text.octoai.run/v1',\n ...opts,\n });\n }\n\n /** Create a new instance of Ollama LLM. 
*/\n static withOllama(\n opts: Partial<{\n model: string;\n baseURL?: string;\n temperature?: number;\n client: OpenAI;\n }> = {},\n ): LLM {\n return new LLM({\n model: 'llama-2-13b-chat',\n baseURL: 'https://text.octoai.run/v1',\n apiKey: 'ollama',\n ...opts,\n });\n }\n\n /**\n * Create a new instance of PerplexityAI LLM.\n *\n * @remarks\n * `apiKey` must be set to your PerplexityAI API key, either using the argument or by setting the\n * `PERPLEXITY_API_KEY` environmental variable.\n */\n static withPerplexity(\n opts: Partial<{\n model: string | PerplexityChatModels;\n apiKey?: string;\n baseURL?: string;\n user?: string;\n temperature?: number;\n client: OpenAI;\n }> = {},\n ): LLM {\n opts.apiKey = opts.apiKey || process.env.PERPLEXITY_API_KEY;\n if (opts.apiKey === undefined) {\n throw new Error(\n 'PerplexityAI API key is required, whether as an argument or as $PERPLEXITY_API_KEY',\n );\n }\n\n return new LLM({\n model: 'llama-3.1-sonar-small-128k-chat',\n baseURL: 'https://api.perplexity.ai',\n ...opts,\n });\n }\n\n /**\n * Create a new instance of TogetherAI LLM.\n *\n * @remarks\n * `apiKey` must be set to your TogetherAI API key, either using the argument or by setting the\n * `TOGETHER_API_KEY` environmental variable.\n */\n static withTogether(\n opts: Partial<{\n model: string | TogetherChatModels;\n apiKey?: string;\n baseURL?: string;\n user?: string;\n temperature?: number;\n client: OpenAI;\n }> = {},\n ): LLM {\n opts.apiKey = opts.apiKey || process.env.TOGETHER_API_KEY;\n if (opts.apiKey === undefined) {\n throw new Error(\n 'TogetherAI API key is required, whether as an argument or as $TOGETHER_API_KEY',\n );\n }\n\n return new LLM({\n model: 'meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo',\n baseURL: 'https://api.together.xyz/v1',\n ...opts,\n });\n }\n\n /**\n * Create a new instance of Telnyx LLM.\n *\n * @remarks\n * `apiKey` must be set to your Telnyx API key, either using the argument or by setting the\n * `TELNYX_API_KEY` environmental variable.\n */\n static withTelnyx(\n opts: Partial<{\n model: string | TelnyxChatModels;\n apiKey?: string;\n baseURL?: string;\n user?: string;\n temperature?: number;\n client: OpenAI;\n }> = {},\n ): LLM {\n opts.apiKey = opts.apiKey || process.env.TELNYX_API_KEY;\n if (opts.apiKey === undefined) {\n throw new Error('Telnyx API key is required, whether as an argument or as $TELNYX_API_KEY');\n }\n\n return new LLM({\n model: 'meta-llama/Meta-Llama-3.1-70B-Instruct',\n baseURL: 'https://api.telnyx.com/v2/ai',\n ...opts,\n });\n }\n\n /**\n * Create a new instance of Meta Llama LLM.\n *\n * @remarks\n * `apiKey` must be set to your Meta Llama API key, either using the argument or by setting the\n * `LLAMA_API_KEY` environmental variable.\n */\n static withMeta(\n opts: Partial<{\n apiKey?: string;\n baseURL?: string;\n client?: OpenAI;\n model?: string | MetaChatModels;\n temperature?: number;\n user?: string;\n }> = {},\n ): LLM {\n opts.apiKey = opts.apiKey || process.env.LLAMA_API_KEY;\n opts.baseURL = opts.baseURL || 'https://api.llama.com/compat/v1/';\n opts.model = opts.model || 'Llama-4-Maverick-17B-128E-Instruct-FP8';\n\n if (opts.apiKey === undefined) {\n throw new Error(\n 'Meta Llama API key is required, either as argument or set LLAMA_API_KEY environmental variable',\n );\n }\n\n return new LLM(opts);\n }\n\n chat({\n chatCtx,\n fncCtx,\n temperature,\n n,\n parallelToolCalls,\n }: {\n chatCtx: llm.ChatContext;\n fncCtx?: llm.FunctionContext | undefined;\n temperature?: number | undefined;\n n?: number | 
undefined;\n parallelToolCalls?: boolean | undefined;\n }): LLMStream {\n temperature = temperature || this.#opts.temperature;\n\n return new LLMStream(\n this,\n this.#client,\n chatCtx,\n fncCtx,\n this.#opts,\n parallelToolCalls,\n temperature,\n n,\n );\n }\n}\n\nexport class LLMStream extends llm.LLMStream {\n #toolCallId?: string;\n #fncName?: string;\n #fncRawArguments?: string;\n #client: OpenAI;\n #logger = log();\n #id = randomUUID();\n label = 'openai.LLMStream';\n\n constructor(\n llm: LLM,\n client: OpenAI,\n chatCtx: llm.ChatContext,\n fncCtx: llm.FunctionContext | undefined,\n opts: LLMOptions,\n parallelToolCalls?: boolean,\n temperature?: number,\n n?: number,\n ) {\n super(llm, chatCtx, fncCtx);\n this.#client = client;\n this.#run(opts, n, parallelToolCalls, temperature);\n }\n\n async #run(opts: LLMOptions, n?: number, parallelToolCalls?: boolean, temperature?: number) {\n const tools = this.fncCtx\n ? Object.entries(this.fncCtx).map(([name, func]) => ({\n type: 'function' as const,\n function: {\n name,\n description: func.description,\n // don't format parameters if they are raw openai params\n parameters:\n func.parameters.type == ('object' as const)\n ? func.parameters\n : llm.oaiParams(func.parameters),\n },\n }))\n : undefined;\n\n try {\n const stream = await this.#client.chat.completions.create({\n model: opts.model,\n user: opts.user,\n n,\n messages: await Promise.all(\n this.chatCtx.messages.map(async (m) => await buildMessage(m, this.#id)),\n ),\n temperature: temperature || opts.temperature,\n stream_options: { include_usage: true },\n stream: true,\n tools,\n parallel_tool_calls: this.fncCtx && parallelToolCalls,\n });\n\n for await (const chunk of stream) {\n for (const choice of chunk.choices) {\n const chatChunk = this.#parseChoice(chunk.id, choice);\n if (chatChunk) {\n this.queue.put(chatChunk);\n }\n\n if (chunk.usage) {\n const usage = chunk.usage;\n this.queue.put({\n requestId: chunk.id,\n choices: [],\n usage: {\n completionTokens: usage.completion_tokens,\n promptTokens: usage.prompt_tokens,\n totalTokens: usage.total_tokens,\n },\n });\n }\n }\n }\n } finally {\n this.queue.close();\n }\n }\n\n #parseChoice(id: string, choice: OpenAI.ChatCompletionChunk.Choice): llm.ChatChunk | undefined {\n const delta = choice.delta;\n\n if (delta.tool_calls) {\n // check if we have functions to calls\n for (const tool of delta.tool_calls) {\n if (!tool.function) {\n continue; // oai may add other tools in the future\n }\n\n let callChunk: llm.ChatChunk | undefined;\n if (this.#toolCallId && tool.id && tool.id !== this.#toolCallId) {\n callChunk = this.#tryBuildFunction(id, choice);\n }\n\n if (tool.function.name) {\n this.#toolCallId = tool.id;\n this.#fncName = tool.function.name;\n this.#fncRawArguments = tool.function.arguments || '';\n } else if (tool.function.arguments) {\n this.#fncRawArguments += tool.function.arguments;\n }\n\n if (callChunk) {\n return callChunk;\n }\n }\n }\n\n if (\n choice.finish_reason &&\n ['tool_calls', 'stop'].includes(choice.finish_reason) &&\n this.#toolCallId\n ) {\n // we're done with the tool calls, run the last one\n return this.#tryBuildFunction(id, choice);\n }\n\n return {\n requestId: id,\n choices: [\n {\n delta: { content: delta.content || undefined, role: llm.ChatRole.ASSISTANT },\n index: choice.index,\n },\n ],\n };\n }\n\n #tryBuildFunction(\n id: string,\n choice: OpenAI.ChatCompletionChunk.Choice,\n ): llm.ChatChunk | undefined {\n if (!this.fncCtx) {\n this.#logger.warn('oai stream tried to run function 
without function context');\n return undefined;\n }\n\n if (!this.#toolCallId) {\n this.#logger.warn('oai stream tried to run function but toolCallId is not set');\n return undefined;\n }\n\n if (!this.#fncRawArguments || !this.#fncName) {\n this.#logger.warn('oai stream tried to run function but rawArguments or fncName are not set');\n return undefined;\n }\n\n const functionInfo = llm.oaiBuildFunctionInfo(\n this.fncCtx,\n this.#toolCallId,\n this.#fncName,\n this.#fncRawArguments,\n );\n this.#toolCallId = this.#fncName = this.#fncRawArguments = undefined;\n this._functionCalls.push(functionInfo);\n\n return {\n requestId: id,\n choices: [\n {\n delta: {\n content: choice.delta.content || undefined,\n role: llm.ChatRole.ASSISTANT,\n toolCalls: this._functionCalls,\n },\n index: choice.index,\n },\n ],\n };\n }\n}\n\nconst buildMessage = async (msg: llm.ChatMessage, cacheKey: any) => {\n const oaiMsg: Partial<OpenAI.ChatCompletionMessageParam> = {};\n\n switch (msg.role) {\n case llm.ChatRole.SYSTEM:\n oaiMsg.role = 'system';\n break;\n case llm.ChatRole.USER:\n oaiMsg.role = 'user';\n break;\n case llm.ChatRole.ASSISTANT:\n oaiMsg.role = 'assistant';\n break;\n case llm.ChatRole.TOOL:\n oaiMsg.role = 'tool';\n if (oaiMsg.role === 'tool') {\n oaiMsg.tool_call_id = msg.toolCallId;\n }\n break;\n }\n\n if (typeof msg.content === 'string') {\n oaiMsg.content = msg.content;\n } else if (Array.isArray(msg.content)) {\n oaiMsg.content = (await Promise.all(\n msg.content.map(async (c) => {\n if (typeof c === 'string') {\n return { type: 'text', text: c };\n } else if (\n // typescript type guard for determining ChatAudio vs ChatImage\n ((c: llm.ChatAudio | llm.ChatImage): c is llm.ChatImage => {\n return (c as llm.ChatImage).image !== undefined;\n })(c)\n ) {\n return await buildImageContent(c, cacheKey);\n } else {\n throw new Error('ChatAudio is not supported');\n }\n }),\n )) as OpenAI.ChatCompletionContentPart[];\n } else if (msg.content === undefined) {\n oaiMsg.content = '';\n }\n\n // make sure to provide when function has been called inside the context\n // (+ raw_arguments)\n if (msg.toolCalls && oaiMsg.role === 'assistant') {\n oaiMsg.tool_calls = Object.entries(msg.toolCalls).map(([name, func]) => ({\n id: func.toolCallId,\n type: 'function' as const,\n function: {\n name: name,\n arguments: func.rawParams,\n },\n }));\n }\n\n return oaiMsg as OpenAI.ChatCompletionMessageParam;\n};\n\nconst buildImageContent = async (image: llm.ChatImage, cacheKey: any) => {\n if (typeof image.image === 'string') {\n // image url\n return {\n type: 'image_url',\n image_url: {\n url: image.image,\n detail: 'auto',\n },\n };\n } else {\n if (!image.cache[cacheKey]) {\n // inside our internal implementation, we allow to put extra metadata to\n // each ChatImage (avoid to reencode each time we do a chatcompletion request)\n let encoded = sharp(image.image.data);\n\n if (image.inferenceHeight && image.inferenceHeight) {\n encoded = encoded.resize(image.inferenceWidth, image.inferenceHeight);\n }\n\n image.cache[cacheKey] = await encoded\n .jpeg()\n .toBuffer()\n .then((buffer) => buffer.toString('utf-8'));\n }\n\n return {\n type: 'image_url',\n image_url: {\n url: `data:image/jpeg;base64,${image.cache[cacheKey]}`,\n },\n };\n 
}\n};\n"],"mappings":"AAGA,SAAS,KAAK,WAAW;AACzB,SAAS,kBAAkB;AAC3B,SAAS,aAAa,cAAc;AACpC,OAAO,WAAW;AAuBlB,MAAM,oBAAgC;AAAA,EACpC,OAAO;AAAA,EACP,QAAQ,QAAQ,IAAI;AACtB;AAEA,MAAM,yBAAqC;AAAA,EACzC,OAAO;AAAA,EACP,QAAQ,QAAQ,IAAI;AACtB;AAEO,MAAM,YAAY,IAAI,IAAI;AAAA,EAC/B;AAAA,EACA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,YAAY,OAA4B,mBAAmB;AACzD,UAAM;AAEN,SAAK,QAAQ,EAAE,GAAG,mBAAmB,GAAG,KAAK;AAC7C,QAAI,KAAK,MAAM,WAAW,QAAW;AACnC,YAAM,IAAI,MAAM,0EAA0E;AAAA,IAC5F;AAEA,SAAK,UACH,KAAK,MAAM,UACX,IAAI,OAAO;AAAA,MACT,SAAS,KAAK;AAAA,MACd,QAAQ,KAAK;AAAA,IACf,CAAC;AAAA,EACL;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAcA,OAAO,UACL,OAaI,wBACC;AACL,WAAO,EAAE,GAAG,mBAAmB,GAAG,KAAK;AACvC,QAAI,KAAK,WAAW,QAAW;AAC7B,YAAM,IAAI,MAAM,wEAAwE;AAAA,IAC1F;AAEA,WAAO,IAAI,IAAI;AAAA,MACb,aAAa,KAAK;AAAA,MAClB,MAAM,KAAK;AAAA,MACX,QAAQ,IAAI,YAAY,IAAI;AAAA,IAC9B,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,OAAO,aACL,OAOK,CAAC,GACD;AACL,SAAK,SAAS,KAAK,UAAU,QAAQ,IAAI;AACzC,QAAI,KAAK,WAAW,QAAW;AAC7B,YAAM,IAAI;AAAA,QACR;AAAA,MACF;AAAA,IACF;AAEA,WAAO,IAAI,IAAI;AAAA,MACb,OAAO;AAAA,MACP,SAAS;AAAA,MACT,GAAG;AAAA,IACL,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,OAAO,cAAc,OAA4B,CAAC,GAAQ;AACxD,SAAK,SAAS,KAAK,UAAU,QAAQ,IAAI;AACzC,QAAI,KAAK,WAAW,QAAW;AAC7B,YAAM,IAAI;AAAA,QACR;AAAA,MACF;AAAA,IACF;AAEA,WAAO,IAAI,IAAI;AAAA,MACb,OAAO;AAAA,MACP,SAAS;AAAA,MACT,GAAG;AAAA,IACL,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,OAAO,QACL,OAOK,CAAC,GACD;AACL,SAAK,SAAS,KAAK,UAAU,QAAQ,IAAI;AACzC,QAAI,KAAK,WAAW,QAAW;AAC7B,YAAM,IAAI,MAAM,oEAAoE;AAAA,IACtF;AAEA,WAAO,IAAI,IAAI;AAAA,MACb,OAAO;AAAA,MACP,SAAS;AAAA,MACT,GAAG;AAAA,IACL,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,OAAO,SACL,OAOK,CAAC,GACD;AACL,SAAK,SAAS,KAAK,UAAU,QAAQ,IAAI;AACzC,QAAI,KAAK,WAAW,QAAW;AAC7B,YAAM,IAAI,MAAM,sEAAsE;AAAA,IACxF;AAEA,WAAO,IAAI,IAAI;AAAA,MACb,OAAO;AAAA,MACP,SAAS;AAAA,MACT,GAAG;AAAA,IACL,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,OAAO,aACL,OAOK,CAAC,GACD;AACL,SAAK,SAAS,KAAK,UAAU,QAAQ,IAAI;AACzC,QAAI,KAAK,WAAW,QAAW;AAC7B,YAAM,IAAI;AAAA,QACR;AAAA,MACF;AAAA,IACF;AAEA,WAAO,IAAI,IAAI;AAAA,MACb,OAAO;AAAA,MACP,SAAS;AAAA,MACT,GAAG;AAAA,IACL,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,OAAO,SACL,OAOK,CAAC,GACD;AACL,SAAK,SAAS,KAAK,UAAU,QAAQ,IAAI;AACzC,QAAI,KAAK,WAAW,QAAW;AAC7B,YAAM,IAAI,MAAM,wEAAwE;AAAA,IAC1F;AAEA,WAAO,IAAI,IAAI;AAAA,MACb,OAAO;AAAA,MACP,SAAS;AAAA,MACT,GAAG;AAAA,IACL,CAAC;AAAA,EACH;AAAA;AAAA,EAGA,OAAO,WACL,OAKK,CAAC,GACD;AACL,WAAO,IAAI,IAAI;AAAA,MACb,OAAO;AAAA,MACP,SAAS;AAAA,MACT,QAAQ;AAAA,MACR,GAAG;AAAA,IACL,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,OAAO,eACL,OAOK,CAAC,GACD;AACL,SAAK,SAAS,KAAK,UAAU,QAAQ,IAAI;AACzC,QAAI,KAAK,WAAW,QAAW;AAC7B,YAAM,IAAI;AAAA,QACR;AAAA,MACF;AAAA,IACF;AAEA,WAAO,IAAI,IAAI;AAAA,MACb,OAAO;AAAA,MACP,SAAS;AAAA,MACT,GAAG;AAAA,IACL,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,OAAO,aACL,OAOK,CAAC,GACD;AACL,SAAK,SAAS,KAAK,UAAU,QAAQ,IAAI;AACzC,QAAI,KAAK,WAAW,QAAW;AAC7B,YAAM,IAAI;AAAA,QACR;AAAA,MACF;AAAA,IACF;AAEA,WAAO,IAAI,IAAI;AAAA,MACb,OAAO;AAAA,MACP,SAAS;AAAA,MACT,GAAG;AAAA,IACL,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,OAAO,WACL,OAOK,CAAC,GACD;AACL,SAAK,SAAS,KAAK,UAAU,QAAQ,IAAI;AACzC,QAAI,KAAK,WAAW,QAAW;AAC7B,YAAM,IAAI,MAAM,0EAA0E;AAAA,IAC5F;AAEA,WAAO,IAAI,IAAI;AAAA,MACb,OAAO;AAAA,MACP,SAAS;AAAA,MACT,GAAG;AAAA,IACL,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,OAAO,SACL,OAOK,CAAC,GACD;AACL,SAAK,SAAS,KAAK,UAAU,QAAQ,IAAI;AACzC,SAAK,UAAU,KAAK,WAAW;AAC/B,SAAK,QAAQ,KAAK,SAAS;AA
E3B,QAAI,KAAK,WAAW,QAAW;AAC7B,YAAM,IAAI;AAAA,QACR;AAAA,MACF;AAAA,IACF;AAEA,WAAO,IAAI,IAAI,IAAI;AAAA,EACrB;AAAA,EAEA,KAAK;AAAA,IACH;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF,GAMc;AACZ,kBAAc,eAAe,KAAK,MAAM;AAExC,WAAO,IAAI;AAAA,MACT;AAAA,MACA,KAAK;AAAA,MACL;AAAA,MACA;AAAA,MACA,KAAK;AAAA,MACL;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,EACF;AACF;AAEO,MAAM,kBAAkB,IAAI,UAAU;AAAA,EAC3C;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA,UAAU,IAAI;AAAA,EACd,MAAM,WAAW;AAAA,EACjB,QAAQ;AAAA,EAER,YACEA,MACA,QACA,SACA,QACA,MACA,mBACA,aACA,GACA;AACA,UAAMA,MAAK,SAAS,MAAM;AAC1B,SAAK,UAAU;AACf,SAAK,KAAK,MAAM,GAAG,mBAAmB,WAAW;AAAA,EACnD;AAAA,EAEA,MAAM,KAAK,MAAkB,GAAY,mBAA6B,aAAsB;AAC1F,UAAM,QAAQ,KAAK,SACf,OAAO,QAAQ,KAAK,MAAM,EAAE,IAAI,CAAC,CAAC,MAAM,IAAI,OAAO;AAAA,MACjD,MAAM;AAAA,MACN,UAAU;AAAA,QACR;AAAA,QACA,aAAa,KAAK;AAAA;AAAA,QAElB,YACE,KAAK,WAAW,QAAS,WACrB,KAAK,aACL,IAAI,UAAU,KAAK,UAAU;AAAA,MACrC;AAAA,IACF,EAAE,IACF;AAEJ,QAAI;AACF,YAAM,SAAS,MAAM,KAAK,QAAQ,KAAK,YAAY,OAAO;AAAA,QACxD,OAAO,KAAK;AAAA,QACZ,MAAM,KAAK;AAAA,QACX;AAAA,QACA,UAAU,MAAM,QAAQ;AAAA,UACtB,KAAK,QAAQ,SAAS,IAAI,OAAO,MAAM,MAAM,aAAa,GAAG,KAAK,GAAG,CAAC;AAAA,QACxE;AAAA,QACA,aAAa,eAAe,KAAK;AAAA,QACjC,gBAAgB,EAAE,eAAe,KAAK;AAAA,QACtC,QAAQ;AAAA,QACR;AAAA,QACA,qBAAqB,KAAK,UAAU;AAAA,MACtC,CAAC;AAED,uBAAiB,SAAS,QAAQ;AAChC,mBAAW,UAAU,MAAM,SAAS;AAClC,gBAAM,YAAY,KAAK,aAAa,MAAM,IAAI,MAAM;AACpD,cAAI,WAAW;AACb,iBAAK,MAAM,IAAI,SAAS;AAAA,UAC1B;AAEA,cAAI,MAAM,OAAO;AACf,kBAAM,QAAQ,MAAM;AACpB,iBAAK,MAAM,IAAI;AAAA,cACb,WAAW,MAAM;AAAA,cACjB,SAAS,CAAC;AAAA,cACV,OAAO;AAAA,gBACL,kBAAkB,MAAM;AAAA,gBACxB,cAAc,MAAM;AAAA,gBACpB,aAAa,MAAM;AAAA,cACrB;AAAA,YACF,CAAC;AAAA,UACH;AAAA,QACF;AAAA,MACF;AAAA,IACF,UAAE;AACA,WAAK,MAAM,MAAM;AAAA,IACnB;AAAA,EACF;AAAA,EAEA,aAAa,IAAY,QAAsE;AAC7F,UAAM,QAAQ,OAAO;AAErB,QAAI,MAAM,YAAY;AAEpB,iBAAW,QAAQ,MAAM,YAAY;AACnC,YAAI,CAAC,KAAK,UAAU;AAClB;AAAA,QACF;AAEA,YAAI;AACJ,YAAI,KAAK,eAAe,KAAK,MAAM,KAAK,OAAO,KAAK,aAAa;AAC/D,sBAAY,KAAK,kBAAkB,IAAI,MAAM;AAAA,QAC/C;AAEA,YAAI,KAAK,SAAS,MAAM;AACtB,eAAK,cAAc,KAAK;AACxB,eAAK,WAAW,KAAK,SAAS;AAC9B,eAAK,mBAAmB,KAAK,SAAS,aAAa;AAAA,QACrD,WAAW,KAAK,SAAS,WAAW;AAClC,eAAK,oBAAoB,KAAK,SAAS;AAAA,QACzC;AAEA,YAAI,WAAW;AACb,iBAAO;AAAA,QACT;AAAA,MACF;AAAA,IACF;AAEA,QACE,OAAO,iBACP,CAAC,cAAc,MAAM,EAAE,SAAS,OAAO,aAAa,KACpD,KAAK,aACL;AAEA,aAAO,KAAK,kBAAkB,IAAI,MAAM;AAAA,IAC1C;AAEA,WAAO;AAAA,MACL,WAAW;AAAA,MACX,SAAS;AAAA,QACP;AAAA,UACE,OAAO,EAAE,SAAS,MAAM,WAAW,QAAW,MAAM,IAAI,SAAS,UAAU;AAAA,UAC3E,OAAO,OAAO;AAAA,QAChB;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAAA,EAEA,kBACE,IACA,QAC2B;AAC3B,QAAI,CAAC,KAAK,QAAQ;AAChB,WAAK,QAAQ,KAAK,2DAA2D;AAC7E,aAAO;AAAA,IACT;AAEA,QAAI,CAAC,KAAK,aAAa;AACrB,WAAK,QAAQ,KAAK,4DAA4D;AAC9E,aAAO;AAAA,IACT;AAEA,QAAI,CAAC,KAAK,oBAAoB,CAAC,KAAK,UAAU;AAC5C,WAAK,QAAQ,KAAK,0EAA0E;AAC5F,aAAO;AAAA,IACT;AAEA,UAAM,eAAe,IAAI;AAAA,MACvB,KAAK;AAAA,MACL,KAAK;AAAA,MACL,KAAK;AAAA,MACL,KAAK;AAAA,IACP;AACA,SAAK,cAAc,KAAK,WAAW,KAAK,mBAAmB;AAC3D,SAAK,eAAe,KAAK,YAAY;AAErC,WAAO;AAAA,MACL,WAAW;AAAA,MACX,SAAS;AAAA,QACP;AAAA,UACE,OAAO;AAAA,YACL,SAAS,OAAO,MAAM,WAAW;AAAA,YACjC,MAAM,IAAI,SAAS;AAAA,YACnB,WAAW,KAAK;AAAA,UAClB;AAAA,UACA,OAAO,OAAO;AAAA,QAChB;AAAA,MACF;AAAA,IACF;AAAA,EACF;AACF;AAEA,MAAM,eAAe,OAAO,KAAsB,aAAkB;AAClE,QAAM,SAAqD,CAAC;AAE5D,UAAQ,IAAI,MAAM;AAAA,IAChB,KAAK,IAAI,SAAS;AAChB,aAAO,OAAO;AACd;AAAA,IACF,KAAK,IAAI,SAAS;AAChB,aAAO,OAAO;AACd;AAAA,IACF,KAAK,IAAI,SAAS;AAChB,aAAO,OAAO;AACd;AAAA,IACF,KAAK,IAAI,SAAS;AAChB,aAAO,OAAO;AACd,UAAI,OAAO,SAAS,QAAQ;AAC1B,eAAO,eAAe,IAAI;AAAA,MAC5B;AACA;AAAA,EACJ;AAEA,MAAI,OAAO,IAAI,YAAY,UAAU;AACnC,WAAO,UAAU,IAAI;AAAA,EACvB,WAAW,MAAM,QAAQ,IAAI,OAAO,GAAG;AACrC,WAAO,UAAW,MAAM,QAAQ;AAAA,MAC9B,IAAI,QAAQ,IAA
I,OAAO,MAAM;AAC3B,YAAI,OAAO,MAAM,UAAU;AACzB,iBAAO,EAAE,MAAM,QAAQ,MAAM,EAAE;AAAA,QACjC;AAAA;AAAA,WAEG,CAACC,OAAyD;AACzD,mBAAQA,GAAoB,UAAU;AAAA,UACxC,GAAG,CAAC;AAAA,UACJ;AACA,iBAAO,MAAM,kBAAkB,GAAG,QAAQ;AAAA,QAC5C,OAAO;AACL,gBAAM,IAAI,MAAM,4BAA4B;AAAA,QAC9C;AAAA,MACF,CAAC;AAAA,IACH;AAAA,EACF,WAAW,IAAI,YAAY,QAAW;AACpC,WAAO,UAAU;AAAA,EACnB;AAIA,MAAI,IAAI,aAAa,OAAO,SAAS,aAAa;AAChD,WAAO,aAAa,OAAO,QAAQ,IAAI,SAAS,EAAE,IAAI,CAAC,CAAC,MAAM,IAAI,OAAO;AAAA,MACvE,IAAI,KAAK;AAAA,MACT,MAAM;AAAA,MACN,UAAU;AAAA,QACR;AAAA,QACA,WAAW,KAAK;AAAA,MAClB;AAAA,IACF,EAAE;AAAA,EACJ;AAEA,SAAO;AACT;AAEA,MAAM,oBAAoB,OAAO,OAAsB,aAAkB;AACvE,MAAI,OAAO,MAAM,UAAU,UAAU;AAEnC,WAAO;AAAA,MACL,MAAM;AAAA,MACN,WAAW;AAAA,QACT,KAAK,MAAM;AAAA,QACX,QAAQ;AAAA,MACV;AAAA,IACF;AAAA,EACF,OAAO;AACL,QAAI,CAAC,MAAM,MAAM,QAAQ,GAAG;AAG1B,UAAI,UAAU,MAAM,MAAM,MAAM,IAAI;AAEpC,UAAI,MAAM,mBAAmB,MAAM,iBAAiB;AAClD,kBAAU,QAAQ,OAAO,MAAM,gBAAgB,MAAM,eAAe;AAAA,MACtE;AAEA,YAAM,MAAM,QAAQ,IAAI,MAAM,QAC3B,KAAK,EACL,SAAS,EACT,KAAK,CAAC,WAAW,OAAO,SAAS,OAAO,CAAC;AAAA,IAC9C;AAEA,WAAO;AAAA,MACL,MAAM;AAAA,MACN,WAAW;AAAA,QACT,KAAK,0BAA0B,MAAM,MAAM,QAAQ,CAAC;AAAA,MACtD;AAAA,IACF;AAAA,EACF;AACF;","names":["llm","c"]}
+ {"version":3,"sources":["../src/llm.ts"],"sourcesContent":["// SPDX-FileCopyrightText: 2025 LiveKit, Inc.\n//\n// SPDX-License-Identifier: Apache-2.0\nimport type { APIConnectOptions } from '@livekit/agents';\nimport {\n APIConnectionError,\n APIStatusError,\n APITimeoutError,\n DEFAULT_API_CONNECT_OPTIONS,\n llm,\n toError,\n} from '@livekit/agents';\nimport { AzureOpenAI, OpenAI } from 'openai';\nimport type {\n CerebrasChatModels,\n ChatModels,\n DeepSeekChatModels,\n GroqChatModels,\n MetaChatModels,\n OctoChatModels,\n PerplexityChatModels,\n TelnyxChatModels,\n TogetherChatModels,\n XAIChatModels,\n} from './models.js';\n\nexport interface LLMOptions {\n model: string | ChatModels;\n apiKey?: string;\n baseURL?: string;\n user?: string;\n temperature?: number;\n client?: OpenAI;\n toolChoice?: llm.ToolChoice;\n parallelToolCalls?: boolean;\n metadata?: Record<string, string>;\n maxCompletionTokens?: number;\n serviceTier?: string;\n store?: boolean;\n}\n\nconst defaultLLMOptions: LLMOptions = {\n model: 'gpt-4.1',\n apiKey: process.env.OPENAI_API_KEY,\n parallelToolCalls: true,\n};\n\nconst defaultAzureLLMOptions: LLMOptions = {\n model: 'gpt-4.1',\n apiKey: process.env.AZURE_API_KEY,\n};\n\nexport class LLM extends llm.LLM {\n #opts: LLMOptions;\n #client: OpenAI;\n #providerFmt: llm.ProviderFormat;\n\n /**\n * Create a new instance of OpenAI LLM.\n *\n * @remarks\n * `apiKey` must be set to your OpenAI API key, either using the argument or by setting the\n * `OPENAI_API_KEY` environmental variable.\n */\n constructor(\n opts: Partial<LLMOptions> = defaultLLMOptions,\n providerFmt: llm.ProviderFormat = 'openai',\n ) {\n super();\n\n this.#opts = { ...defaultLLMOptions, ...opts };\n this.#providerFmt = providerFmt;\n if (this.#opts.apiKey === undefined) {\n throw new Error('OpenAI API key is required, whether as an argument or as $OPENAI_API_KEY');\n }\n\n this.#client =\n this.#opts.client ||\n new OpenAI({\n baseURL: opts.baseURL,\n apiKey: opts.apiKey,\n });\n }\n\n label(): string {\n return 'openai.LLM';\n }\n\n get model(): string {\n return this.#opts.model;\n }\n\n /**\n * Create a new instance of OpenAI LLM with Azure.\n *\n * @remarks\n * This automatically infers the following arguments from their corresponding environment variables if they are not provided:\n * - `apiKey` from `AZURE_OPENAI_API_KEY`\n * - `organization` from `OPENAI_ORG_ID`\n * - `project` from `OPENAI_PROJECT_ID`\n * - `azureAdToken` from `AZURE_OPENAI_AD_TOKEN`\n * - `apiVersion` from `OPENAI_API_VERSION`\n * - `azureEndpoint` from `AZURE_OPENAI_ENDPOINT`\n */\n static withAzure(\n opts: {\n model: string | ChatModels;\n azureEndpoint?: string;\n azureDeployment?: string;\n apiVersion?: string;\n apiKey?: string;\n azureAdToken?: string;\n azureAdTokenProvider?: () => Promise<string>;\n organization?: string;\n project?: string;\n baseURL?: string;\n user?: string;\n temperature?: number;\n } = defaultAzureLLMOptions,\n ): LLM {\n opts = { ...defaultLLMOptions, ...opts };\n if (opts.apiKey === undefined) {\n throw new Error('Azure API key is required, whether as an argument or as $AZURE_API_KEY');\n }\n\n return new LLM({\n temperature: opts.temperature,\n user: opts.user,\n client: new AzureOpenAI(opts),\n });\n }\n\n /**\n * Create a new instance of Cerebras LLM.\n *\n * @remarks\n * `apiKey` must be set to your Cerebras API key, either using the argument or by setting the\n * `CEREBRAS_API_KEY` environmental variable.\n */\n static withCerebras(\n opts: Partial<{\n model: string | 
CerebrasChatModels;\n apiKey?: string;\n baseURL?: string;\n user?: string;\n temperature?: number;\n client: OpenAI;\n }> = {},\n ): LLM {\n opts.apiKey = opts.apiKey || process.env.CEREBRAS_API_KEY;\n if (opts.apiKey === undefined) {\n throw new Error(\n 'Cerebras API key is required, whether as an argument or as $CEREBRAS_API_KEY',\n );\n }\n\n return new LLM({\n model: 'llama3.1-8b',\n baseURL: 'https://api.cerebras.ai/v1',\n ...opts,\n });\n }\n\n /**\n * Create a new instance of Fireworks LLM.\n *\n * @remarks\n * `apiKey` must be set to your Fireworks API key, either using the argument or by setting the\n * `FIREWORKS_API_KEY` environmental variable.\n */\n static withFireworks(opts: Partial<LLMOptions> = {}): LLM {\n opts.apiKey = opts.apiKey || process.env.FIREWORKS_API_KEY;\n if (opts.apiKey === undefined) {\n throw new Error(\n 'Fireworks API key is required, whether as an argument or as $FIREWORKS_API_KEY',\n );\n }\n\n return new LLM({\n model: 'accounts/fireworks/models/llama-v3p1-70b-instruct',\n baseURL: 'https://api.fireworks.ai/inference/v1',\n ...opts,\n });\n }\n\n /**\n * Create a new instance of xAI LLM.\n *\n * @remarks\n * `apiKey` must be set to your xAI API key, either using the argument or by setting the\n * `XAI_API_KEY` environmental variable.\n */\n static withXAI(\n opts: Partial<{\n model: string | XAIChatModels;\n apiKey?: string;\n baseURL?: string;\n user?: string;\n temperature?: number;\n client: OpenAI;\n }> = {},\n ): LLM {\n opts.apiKey = opts.apiKey || process.env.XAI_API_KEY;\n if (opts.apiKey === undefined) {\n throw new Error('xAI API key is required, whether as an argument or as $XAI_API_KEY');\n }\n\n return new LLM({\n model: 'grok-2-public',\n baseURL: 'https://api.x.ai/v1',\n ...opts,\n });\n }\n\n /**\n * Create a new instance of Groq LLM.\n *\n * @remarks\n * `apiKey` must be set to your Groq API key, either using the argument or by setting the\n * `GROQ_API_KEY` environmental variable.\n */\n static withGroq(\n opts: Partial<{\n model: string | GroqChatModels;\n apiKey?: string;\n baseURL?: string;\n user?: string;\n temperature?: number;\n client: OpenAI;\n }> = {},\n ): LLM {\n opts.apiKey = opts.apiKey || process.env.GROQ_API_KEY;\n if (opts.apiKey === undefined) {\n throw new Error('Groq API key is required, whether as an argument or as $GROQ_API_KEY');\n }\n\n return new LLM({\n model: 'llama3-8b-8192',\n baseURL: 'https://api.groq.com/openai/v1',\n ...opts,\n });\n }\n\n /**\n * Create a new instance of DeepSeek LLM.\n *\n * @remarks\n * `apiKey` must be set to your DeepSeek API key, either using the argument or by setting the\n * `DEEPSEEK_API_KEY` environmental variable.\n */\n static withDeepSeek(\n opts: Partial<{\n model: string | DeepSeekChatModels;\n apiKey?: string;\n baseURL?: string;\n user?: string;\n temperature?: number;\n client: OpenAI;\n }> = {},\n ): LLM {\n opts.apiKey = opts.apiKey || process.env.DEEPSEEK_API_KEY;\n if (opts.apiKey === undefined) {\n throw new Error(\n 'DeepSeek API key is required, whether as an argument or as $DEEPSEEK_API_KEY',\n );\n }\n\n return new LLM({\n model: 'deepseek-chat',\n baseURL: 'https://api.deepseek.com/v1',\n ...opts,\n });\n }\n\n /**\n * Create a new instance of OctoAI LLM.\n *\n * @remarks\n * `apiKey` must be set to your OctoAI API key, either using the argument or by setting the\n * `OCTOAI_TOKEN` environmental variable.\n */\n static withOcto(\n opts: Partial<{\n model: string | OctoChatModels;\n apiKey?: string;\n baseURL?: string;\n user?: string;\n temperature?: 
number;\n client: OpenAI;\n }> = {},\n ): LLM {\n opts.apiKey = opts.apiKey || process.env.OCTOAI_TOKEN;\n if (opts.apiKey === undefined) {\n throw new Error('OctoAI API key is required, whether as an argument or as $OCTOAI_TOKEN');\n }\n\n return new LLM({\n model: 'llama-2-13b-chat',\n baseURL: 'https://text.octoai.run/v1',\n ...opts,\n });\n }\n\n /** Create a new instance of Ollama LLM. */\n static withOllama(\n opts: Partial<{\n model: string;\n baseURL?: string;\n temperature?: number;\n client: OpenAI;\n }> = {},\n ): LLM {\n return new LLM({\n model: 'llama-2-13b-chat',\n baseURL: 'https://text.octoai.run/v1',\n apiKey: 'ollama',\n ...opts,\n });\n }\n\n /**\n * Create a new instance of PerplexityAI LLM.\n *\n * @remarks\n * `apiKey` must be set to your PerplexityAI API key, either using the argument or by setting the\n * `PERPLEXITY_API_KEY` environmental variable.\n */\n static withPerplexity(\n opts: Partial<{\n model: string | PerplexityChatModels;\n apiKey?: string;\n baseURL?: string;\n user?: string;\n temperature?: number;\n client: OpenAI;\n }> = {},\n ): LLM {\n opts.apiKey = opts.apiKey || process.env.PERPLEXITY_API_KEY;\n if (opts.apiKey === undefined) {\n throw new Error(\n 'PerplexityAI API key is required, whether as an argument or as $PERPLEXITY_API_KEY',\n );\n }\n\n return new LLM({\n model: 'llama-3.1-sonar-small-128k-chat',\n baseURL: 'https://api.perplexity.ai',\n ...opts,\n });\n }\n\n /**\n * Create a new instance of TogetherAI LLM.\n *\n * @remarks\n * `apiKey` must be set to your TogetherAI API key, either using the argument or by setting the\n * `TOGETHER_API_KEY` environmental variable.\n */\n static withTogether(\n opts: Partial<{\n model: string | TogetherChatModels;\n apiKey?: string;\n baseURL?: string;\n user?: string;\n temperature?: number;\n client: OpenAI;\n }> = {},\n ): LLM {\n opts.apiKey = opts.apiKey || process.env.TOGETHER_API_KEY;\n if (opts.apiKey === undefined) {\n throw new Error(\n 'TogetherAI API key is required, whether as an argument or as $TOGETHER_API_KEY',\n );\n }\n\n return new LLM({\n model: 'meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo',\n baseURL: 'https://api.together.xyz/v1',\n ...opts,\n });\n }\n\n /**\n * Create a new instance of Telnyx LLM.\n *\n * @remarks\n * `apiKey` must be set to your Telnyx API key, either using the argument or by setting the\n * `TELNYX_API_KEY` environmental variable.\n */\n static withTelnyx(\n opts: Partial<{\n model: string | TelnyxChatModels;\n apiKey?: string;\n baseURL?: string;\n user?: string;\n temperature?: number;\n client: OpenAI;\n }> = {},\n ): LLM {\n opts.apiKey = opts.apiKey || process.env.TELNYX_API_KEY;\n if (opts.apiKey === undefined) {\n throw new Error('Telnyx API key is required, whether as an argument or as $TELNYX_API_KEY');\n }\n\n return new LLM({\n model: 'meta-llama/Meta-Llama-3.1-70B-Instruct',\n baseURL: 'https://api.telnyx.com/v2/ai',\n ...opts,\n });\n }\n\n /**\n * Create a new instance of Meta Llama LLM.\n *\n * @remarks\n * `apiKey` must be set to your Meta Llama API key, either using the argument or by setting the\n * `LLAMA_API_KEY` environmental variable.\n */\n static withMeta(\n opts: Partial<{\n apiKey?: string;\n baseURL?: string;\n client?: OpenAI;\n model?: string | MetaChatModels;\n temperature?: number;\n user?: string;\n }> = {},\n ): LLM {\n opts.apiKey = opts.apiKey || process.env.LLAMA_API_KEY;\n opts.baseURL = opts.baseURL || 'https://api.llama.com/compat/v1/';\n opts.model = opts.model || 'Llama-4-Maverick-17B-128E-Instruct-FP8';\n\n if 
(opts.apiKey === undefined) {\n throw new Error(\n 'Meta Llama API key is required, either as argument or set LLAMA_API_KEY environmental variable',\n );\n }\n\n return new LLM(opts);\n }\n\n chat({\n chatCtx,\n toolCtx,\n connOptions = DEFAULT_API_CONNECT_OPTIONS,\n parallelToolCalls,\n toolChoice,\n extraKwargs,\n }: {\n chatCtx: llm.ChatContext;\n toolCtx?: llm.ToolContext;\n connOptions?: APIConnectOptions;\n parallelToolCalls?: boolean;\n toolChoice?: llm.ToolChoice;\n extraKwargs?: Record<string, any>;\n }): LLMStream {\n const extras: Record<string, any> = { ...extraKwargs }; // eslint-disable-line @typescript-eslint/no-explicit-any\n\n if (this.#opts.metadata) {\n extras.metadata = this.#opts.metadata;\n }\n\n if (this.#opts.user) {\n extras.user = this.#opts.user;\n }\n\n if (this.#opts.maxCompletionTokens) {\n extras.max_completion_tokens = this.#opts.maxCompletionTokens;\n }\n\n if (this.#opts.temperature) {\n extras.temperature = this.#opts.temperature;\n }\n\n if (this.#opts.serviceTier) {\n extras.service_tier = this.#opts.serviceTier;\n }\n\n if (this.#opts.store !== undefined) {\n extras.store = this.#opts.store;\n }\n\n parallelToolCalls =\n parallelToolCalls !== undefined ? parallelToolCalls : this.#opts.parallelToolCalls;\n if (toolCtx && Object.keys(toolCtx).length > 0 && parallelToolCalls !== undefined) {\n extras.parallel_tool_calls = parallelToolCalls;\n }\n\n toolChoice = toolChoice !== undefined ? toolChoice : this.#opts.toolChoice;\n if (toolChoice) {\n extras.tool_choice = toolChoice;\n }\n\n return new LLMStream(this, {\n model: this.#opts.model,\n providerFmt: this.#providerFmt,\n client: this.#client,\n chatCtx,\n toolCtx,\n connOptions,\n extraKwargs: extras,\n });\n }\n}\n\nexport class LLMStream extends llm.LLMStream {\n #toolCallId?: string;\n #fncName?: string;\n #fncRawArguments?: string;\n #toolIndex?: number;\n #client: OpenAI;\n #providerFmt: llm.ProviderFormat;\n #extraKwargs: Record<string, any>;\n private model: string | ChatModels;\n\n constructor(\n llm: LLM,\n {\n model,\n providerFmt,\n client,\n chatCtx,\n toolCtx,\n connOptions,\n extraKwargs,\n }: {\n model: string | ChatModels;\n providerFmt: llm.ProviderFormat;\n client: OpenAI;\n chatCtx: llm.ChatContext;\n toolCtx?: llm.ToolContext;\n connOptions: APIConnectOptions;\n extraKwargs: Record<string, any>;\n },\n ) {\n super(llm, { chatCtx, toolCtx, connOptions });\n this.#client = client;\n this.#providerFmt = providerFmt;\n this.#extraKwargs = extraKwargs;\n this.model = model;\n }\n\n protected async run(): Promise<void> {\n let retryable = true;\n try {\n const messages = (await this.chatCtx.toProviderFormat(\n this.#providerFmt,\n )) as OpenAI.ChatCompletionMessageParam[];\n\n const tools = this.toolCtx\n ? 
Object.entries(this.toolCtx).map(([name, func]) => ({\n type: 'function' as const,\n function: {\n name,\n description: func.description,\n parameters: llm.toJsonSchema(\n func.parameters,\n ) as unknown as OpenAI.Chat.Completions.ChatCompletionTool['function']['parameters'],\n },\n }))\n : undefined;\n\n const stream = await this.#client.chat.completions.create({\n model: this.model,\n messages,\n tools,\n stream: true,\n stream_options: { include_usage: true },\n ...this.#extraKwargs,\n });\n\n for await (const chunk of stream) {\n for (const choice of chunk.choices) {\n if (this.abortController.signal.aborted) {\n break;\n }\n const chatChunk = this.#parseChoice(chunk.id, choice);\n if (chatChunk) {\n retryable = false;\n this.queue.put(chatChunk);\n }\n }\n\n if (chunk.usage) {\n const usage = chunk.usage;\n retryable = false;\n this.queue.put({\n id: chunk.id,\n usage: {\n completionTokens: usage.completion_tokens,\n promptTokens: usage.prompt_tokens,\n promptCachedTokens: usage.prompt_tokens_details?.cached_tokens || 0,\n totalTokens: usage.total_tokens,\n },\n });\n }\n }\n } catch (error) {\n if (error instanceof OpenAI.APIConnectionTimeoutError) {\n throw new APITimeoutError({ options: { retryable } });\n } else if (error instanceof OpenAI.APIError) {\n throw new APIStatusError({\n message: error.message,\n options: {\n statusCode: error.status,\n body: error.error,\n requestId: error.request_id,\n retryable,\n },\n });\n } else {\n throw new APIConnectionError({\n message: toError(error).message,\n options: { retryable },\n });\n }\n } finally {\n this.queue.close();\n }\n }\n\n #parseChoice(id: string, choice: OpenAI.ChatCompletionChunk.Choice): llm.ChatChunk | undefined {\n const delta = choice.delta;\n\n // https://github.com/livekit/agents/issues/688\n // the delta can be None when using Azure OpenAI (content filtering)\n if (delta === undefined) return undefined;\n\n if (delta.tool_calls) {\n // check if we have functions to calls\n for (const tool of delta.tool_calls) {\n if (!tool.function) {\n continue; // oai may add other tools in the future\n }\n\n /**\n * The way OpenAI streams tool calls is a bit tricky.\n *\n * For any new tool call, it first emits a delta tool call with id, and function name,\n * the rest of the delta chunks will only stream the remaining arguments string,\n * until a new tool call is started or the tool call is finished.\n * See below for an example.\n *\n * Choice(delta=ChoiceDelta(content=None, function_call=None, refusal=None, role='assistant', tool_calls=None), finish_reason=None, index=0, logprobs=None)\n * [ChoiceDeltaToolCall(index=0, id='call_LaVeHWUHpef9K1sd5UO8TtLg', function=ChoiceDeltaToolCallFunction(arguments='', name='get_weather'), type='function')]\n * [ChoiceDeltaToolCall(index=0, id=None, function=ChoiceDeltaToolCallFunction(arguments='{\"location\": \"P', name=None), type=None)]\n * [ChoiceDeltaToolCall(index=0, id=None, function=ChoiceDeltaToolCallFunction(arguments='aris}', name=None), type=None)]\n * [ChoiceDeltaToolCall(index=1, id='call_ThU4OmMdQXnnVmpXGOCknXIB', function=ChoiceDeltaToolCallFunction(arguments='', name='get_weather'), type='function')]\n * [ChoiceDeltaToolCall(index=1, id=None, function=ChoiceDeltaToolCallFunction(arguments='{\"location\": \"T', name=None), type=None)]\n * [ChoiceDeltaToolCall(index=1, id=None, function=ChoiceDeltaToolCallFunction(arguments='okyo', name=None), type=None)]\n * Choice(delta=ChoiceDelta(content=None, function_call=None, refusal=None, role=None, tool_calls=None), 
finish_reason='tool_calls', index=0, logprobs=None)\n */\n let callChunk: llm.ChatChunk | undefined;\n // If we have a previous tool call and this is a new one, emit the previous\n if (this.#toolCallId && tool.id && tool.index !== this.#toolIndex) {\n callChunk = this.#createRunningToolCallChunk(id, delta);\n this.#toolCallId = this.#fncName = this.#fncRawArguments = undefined;\n }\n\n // Start or continue building the current tool call\n if (tool.function.name) {\n this.#toolIndex = tool.index;\n this.#toolCallId = tool.id;\n this.#fncName = tool.function.name;\n this.#fncRawArguments = tool.function.arguments || '';\n } else if (tool.function.arguments) {\n this.#fncRawArguments = (this.#fncRawArguments || '') + tool.function.arguments;\n }\n\n if (callChunk) {\n return callChunk;\n }\n }\n }\n\n // If we're done with tool calls, emit the final one\n if (\n choice.finish_reason &&\n ['tool_calls', 'stop'].includes(choice.finish_reason) &&\n this.#toolCallId !== undefined\n ) {\n const callChunk = this.#createRunningToolCallChunk(id, delta);\n this.#toolCallId = this.#fncName = this.#fncRawArguments = undefined;\n return callChunk;\n }\n\n // Regular content message\n if (!delta.content) {\n return undefined;\n }\n\n return {\n id,\n delta: {\n role: 'assistant',\n content: delta.content,\n },\n };\n }\n\n #createRunningToolCallChunk(\n id: string,\n delta: OpenAI.Chat.Completions.ChatCompletionChunk.Choice.Delta,\n ): llm.ChatChunk {\n return {\n id,\n delta: {\n role: 'assistant',\n content: delta.content || undefined,\n toolCalls: [\n llm.FunctionCall.create({\n callId: this.#toolCallId!,\n name: this.#fncName || '',\n args: this.#fncRawArguments || '',\n }),\n ],\n },\n };\n }\n}\n"],"mappings":"AAIA;AAAA,EACE;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,OACK;AACP,SAAS,aAAa,cAAc;AA6BpC,MAAM,oBAAgC;AAAA,EACpC,OAAO;AAAA,EACP,QAAQ,QAAQ,IAAI;AAAA,EACpB,mBAAmB;AACrB;AAEA,MAAM,yBAAqC;AAAA,EACzC,OAAO;AAAA,EACP,QAAQ,QAAQ,IAAI;AACtB;AAEO,MAAM,YAAY,IAAI,IAAI;AAAA,EAC/B;AAAA,EACA;AAAA,EACA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,YACE,OAA4B,mBAC5B,cAAkC,UAClC;AACA,UAAM;AAEN,SAAK,QAAQ,EAAE,GAAG,mBAAmB,GAAG,KAAK;AAC7C,SAAK,eAAe;AACpB,QAAI,KAAK,MAAM,WAAW,QAAW;AACnC,YAAM,IAAI,MAAM,0EAA0E;AAAA,IAC5F;AAEA,SAAK,UACH,KAAK,MAAM,UACX,IAAI,OAAO;AAAA,MACT,SAAS,KAAK;AAAA,MACd,QAAQ,KAAK;AAAA,IACf,CAAC;AAAA,EACL;AAAA,EAEA,QAAgB;AACd,WAAO;AAAA,EACT;AAAA,EAEA,IAAI,QAAgB;AAClB,WAAO,KAAK,MAAM;AAAA,EACpB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAcA,OAAO,UACL,OAaI,wBACC;AACL,WAAO,EAAE,GAAG,mBAAmB,GAAG,KAAK;AACvC,QAAI,KAAK,WAAW,QAAW;AAC7B,YAAM,IAAI,MAAM,wEAAwE;AAAA,IAC1F;AAEA,WAAO,IAAI,IAAI;AAAA,MACb,aAAa,KAAK;AAAA,MAClB,MAAM,KAAK;AAAA,MACX,QAAQ,IAAI,YAAY,IAAI;AAAA,IAC9B,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,OAAO,aACL,OAOK,CAAC,GACD;AACL,SAAK,SAAS,KAAK,UAAU,QAAQ,IAAI;AACzC,QAAI,KAAK,WAAW,QAAW;AAC7B,YAAM,IAAI;AAAA,QACR;AAAA,MACF;AAAA,IACF;AAEA,WAAO,IAAI,IAAI;AAAA,MACb,OAAO;AAAA,MACP,SAAS;AAAA,MACT,GAAG;AAAA,IACL,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,OAAO,cAAc,OAA4B,CAAC,GAAQ;AACxD,SAAK,SAAS,KAAK,UAAU,QAAQ,IAAI;AACzC,QAAI,KAAK,WAAW,QAAW;AAC7B,YAAM,IAAI;AAAA,QACR;AAAA,MACF;AAAA,IACF;AAEA,WAAO,IAAI,IAAI;AAAA,MACb,OAAO;AAAA,MACP,SAAS;AAAA,MACT,GAAG;AAAA,IACL,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,OAAO,QACL,OAOK,CAAC,GACD;AACL,SAAK,SAAS,KAAK,UAAU,QAAQ,IAAI;AACzC,QAAI,KAAK,WAAW,QAAW;AAC7B,YAAM,IAAI,MAAM,oEAAoE;AAAA,IACtF;AAEA,WAAO,IAAI,IAAI;AAAA,MACb,OAAO;AAAA,MACP,SAAS;AAAA,MACT,GAAG;AAAA,IACL,CAAC;AAAA,EACH;AAAA;AAAA;AAA
A;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,OAAO,SACL,OAOK,CAAC,GACD;AACL,SAAK,SAAS,KAAK,UAAU,QAAQ,IAAI;AACzC,QAAI,KAAK,WAAW,QAAW;AAC7B,YAAM,IAAI,MAAM,sEAAsE;AAAA,IACxF;AAEA,WAAO,IAAI,IAAI;AAAA,MACb,OAAO;AAAA,MACP,SAAS;AAAA,MACT,GAAG;AAAA,IACL,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,OAAO,aACL,OAOK,CAAC,GACD;AACL,SAAK,SAAS,KAAK,UAAU,QAAQ,IAAI;AACzC,QAAI,KAAK,WAAW,QAAW;AAC7B,YAAM,IAAI;AAAA,QACR;AAAA,MACF;AAAA,IACF;AAEA,WAAO,IAAI,IAAI;AAAA,MACb,OAAO;AAAA,MACP,SAAS;AAAA,MACT,GAAG;AAAA,IACL,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,OAAO,SACL,OAOK,CAAC,GACD;AACL,SAAK,SAAS,KAAK,UAAU,QAAQ,IAAI;AACzC,QAAI,KAAK,WAAW,QAAW;AAC7B,YAAM,IAAI,MAAM,wEAAwE;AAAA,IAC1F;AAEA,WAAO,IAAI,IAAI;AAAA,MACb,OAAO;AAAA,MACP,SAAS;AAAA,MACT,GAAG;AAAA,IACL,CAAC;AAAA,EACH;AAAA;AAAA,EAGA,OAAO,WACL,OAKK,CAAC,GACD;AACL,WAAO,IAAI,IAAI;AAAA,MACb,OAAO;AAAA,MACP,SAAS;AAAA,MACT,QAAQ;AAAA,MACR,GAAG;AAAA,IACL,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,OAAO,eACL,OAOK,CAAC,GACD;AACL,SAAK,SAAS,KAAK,UAAU,QAAQ,IAAI;AACzC,QAAI,KAAK,WAAW,QAAW;AAC7B,YAAM,IAAI;AAAA,QACR;AAAA,MACF;AAAA,IACF;AAEA,WAAO,IAAI,IAAI;AAAA,MACb,OAAO;AAAA,MACP,SAAS;AAAA,MACT,GAAG;AAAA,IACL,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,OAAO,aACL,OAOK,CAAC,GACD;AACL,SAAK,SAAS,KAAK,UAAU,QAAQ,IAAI;AACzC,QAAI,KAAK,WAAW,QAAW;AAC7B,YAAM,IAAI;AAAA,QACR;AAAA,MACF;AAAA,IACF;AAEA,WAAO,IAAI,IAAI;AAAA,MACb,OAAO;AAAA,MACP,SAAS;AAAA,MACT,GAAG;AAAA,IACL,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,OAAO,WACL,OAOK,CAAC,GACD;AACL,SAAK,SAAS,KAAK,UAAU,QAAQ,IAAI;AACzC,QAAI,KAAK,WAAW,QAAW;AAC7B,YAAM,IAAI,MAAM,0EAA0E;AAAA,IAC5F;AAEA,WAAO,IAAI,IAAI;AAAA,MACb,OAAO;AAAA,MACP,SAAS;AAAA,MACT,GAAG;AAAA,IACL,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,OAAO,SACL,OAOK,CAAC,GACD;AACL,SAAK,SAAS,KAAK,UAAU,QAAQ,IAAI;AACzC,SAAK,UAAU,KAAK,WAAW;AAC/B,SAAK,QAAQ,KAAK,SAAS;AAE3B,QAAI,KAAK,WAAW,QAAW;AAC7B,YAAM,IAAI;AAAA,QACR;AAAA,MACF;AAAA,IACF;AAEA,WAAO,IAAI,IAAI,IAAI;AAAA,EACrB;AAAA,EAEA,KAAK;AAAA,IACH;AAAA,IACA;AAAA,IACA,cAAc;AAAA,IACd;AAAA,IACA;AAAA,IACA;AAAA,EACF,GAOc;AACZ,UAAM,SAA8B,EAAE,GAAG,YAAY;AAErD,QAAI,KAAK,MAAM,UAAU;AACvB,aAAO,WAAW,KAAK,MAAM;AAAA,IAC/B;AAEA,QAAI,KAAK,MAAM,MAAM;AACnB,aAAO,OAAO,KAAK,MAAM;AAAA,IAC3B;AAEA,QAAI,KAAK,MAAM,qBAAqB;AAClC,aAAO,wBAAwB,KAAK,MAAM;AAAA,IAC5C;AAEA,QAAI,KAAK,MAAM,aAAa;AAC1B,aAAO,cAAc,KAAK,MAAM;AAAA,IAClC;AAEA,QAAI,KAAK,MAAM,aAAa;AAC1B,aAAO,eAAe,KAAK,MAAM;AAAA,IACnC;AAEA,QAAI,KAAK,MAAM,UAAU,QAAW;AAClC,aAAO,QAAQ,KAAK,MAAM;AAAA,IAC5B;AAEA,wBACE,sBAAsB,SAAY,oBAAoB,KAAK,MAAM;AACnE,QAAI,WAAW,OAAO,KAAK,OAAO,EAAE,SAAS,KAAK,sBAAsB,QAAW;AACjF,aAAO,sBAAsB;AAAA,IAC/B;AAEA,iBAAa,eAAe,SAAY,aAAa,KAAK,MAAM;AAChE,QAAI,YAAY;AACd,aAAO,cAAc;AAAA,IACvB;AAEA,WAAO,IAAI,UAAU,MAAM;AAAA,MACzB,OAAO,KAAK,MAAM;AAAA,MAClB,aAAa,KAAK;AAAA,MAClB,QAAQ,KAAK;AAAA,MACb;AAAA,MACA;AAAA,MACA;AAAA,MACA,aAAa;AAAA,IACf,CAAC;AAAA,EACH;AACF;AAEO,MAAM,kBAAkB,IAAI,UAAU;AAAA,EAC3C;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACQ;AAAA,EAER,YACEA,MACA;AAAA,IACE;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF,GASA;AACA,UAAMA,MAAK,EAAE,SAAS,SAAS,YAAY,CAAC;AAC5C,SAAK,UAAU;AACf,SAAK,eAAe;AACpB,SAAK,eAAe;AACpB,SAAK,QAAQ;AAAA,EACf;AAAA,EAEA,MAAgB,MAAqB;AA9hBvC;AA+hBI,QAAI,YAAY;AAChB,QAAI;AACF,YAAM,WAAY,MAAM,KAAK,QAAQ;AAAA,QACnC,KAAK;AAAA,MACP;AAEA,YAAM,QAAQ,KAAK,UACf,OAAO,QAAQ,KAAK,OAAO,EAAE,IAAI,CAAC,CAAC,MAAM,IAAI,OAAO;AAAA,QAClD,MAAM;AAAA,QACN,UAAU;AAAA,UACR;AAAA,UACA,aAAa,KAAK;AAAA,UAClB,YAAY,IAAI;AAAA,YACd,KAAK;AAAA,UACP;AAAA,QACF;AAAA,MACF,EAAE,IACF;AAEJ,YAAM,SAAS,MAAM,KAAK,QAAQ,KAAK,YA
AY,OAAO;AAAA,QACxD,OAAO,KAAK;AAAA,QACZ;AAAA,QACA;AAAA,QACA,QAAQ;AAAA,QACR,gBAAgB,EAAE,eAAe,KAAK;AAAA,QACtC,GAAG,KAAK;AAAA,MACV,CAAC;AAED,uBAAiB,SAAS,QAAQ;AAChC,mBAAW,UAAU,MAAM,SAAS;AAClC,cAAI,KAAK,gBAAgB,OAAO,SAAS;AACvC;AAAA,UACF;AACA,gBAAM,YAAY,KAAK,aAAa,MAAM,IAAI,MAAM;AACpD,cAAI,WAAW;AACb,wBAAY;AACZ,iBAAK,MAAM,IAAI,SAAS;AAAA,UAC1B;AAAA,QACF;AAEA,YAAI,MAAM,OAAO;AACf,gBAAM,QAAQ,MAAM;AACpB,sBAAY;AACZ,eAAK,MAAM,IAAI;AAAA,YACb,IAAI,MAAM;AAAA,YACV,OAAO;AAAA,cACL,kBAAkB,MAAM;AAAA,cACxB,cAAc,MAAM;AAAA,cACpB,sBAAoB,WAAM,0BAAN,mBAA6B,kBAAiB;AAAA,cAClE,aAAa,MAAM;AAAA,YACrB;AAAA,UACF,CAAC;AAAA,QACH;AAAA,MACF;AAAA,IACF,SAAS,OAAO;AACd,UAAI,iBAAiB,OAAO,2BAA2B;AACrD,cAAM,IAAI,gBAAgB,EAAE,SAAS,EAAE,UAAU,EAAE,CAAC;AAAA,MACtD,WAAW,iBAAiB,OAAO,UAAU;AAC3C,cAAM,IAAI,eAAe;AAAA,UACvB,SAAS,MAAM;AAAA,UACf,SAAS;AAAA,YACP,YAAY,MAAM;AAAA,YAClB,MAAM,MAAM;AAAA,YACZ,WAAW,MAAM;AAAA,YACjB;AAAA,UACF;AAAA,QACF,CAAC;AAAA,MACH,OAAO;AACL,cAAM,IAAI,mBAAmB;AAAA,UAC3B,SAAS,QAAQ,KAAK,EAAE;AAAA,UACxB,SAAS,EAAE,UAAU;AAAA,QACvB,CAAC;AAAA,MACH;AAAA,IACF,UAAE;AACA,WAAK,MAAM,MAAM;AAAA,IACnB;AAAA,EACF;AAAA,EAEA,aAAa,IAAY,QAAsE;AAC7F,UAAM,QAAQ,OAAO;AAIrB,QAAI,UAAU,OAAW,QAAO;AAEhC,QAAI,MAAM,YAAY;AAEpB,iBAAW,QAAQ,MAAM,YAAY;AACnC,YAAI,CAAC,KAAK,UAAU;AAClB;AAAA,QACF;AAmBA,YAAI;AAEJ,YAAI,KAAK,eAAe,KAAK,MAAM,KAAK,UAAU,KAAK,YAAY;AACjE,sBAAY,KAAK,4BAA4B,IAAI,KAAK;AACtD,eAAK,cAAc,KAAK,WAAW,KAAK,mBAAmB;AAAA,QAC7D;AAGA,YAAI,KAAK,SAAS,MAAM;AACtB,eAAK,aAAa,KAAK;AACvB,eAAK,cAAc,KAAK;AACxB,eAAK,WAAW,KAAK,SAAS;AAC9B,eAAK,mBAAmB,KAAK,SAAS,aAAa;AAAA,QACrD,WAAW,KAAK,SAAS,WAAW;AAClC,eAAK,oBAAoB,KAAK,oBAAoB,MAAM,KAAK,SAAS;AAAA,QACxE;AAEA,YAAI,WAAW;AACb,iBAAO;AAAA,QACT;AAAA,MACF;AAAA,IACF;AAGA,QACE,OAAO,iBACP,CAAC,cAAc,MAAM,EAAE,SAAS,OAAO,aAAa,KACpD,KAAK,gBAAgB,QACrB;AACA,YAAM,YAAY,KAAK,4BAA4B,IAAI,KAAK;AAC5D,WAAK,cAAc,KAAK,WAAW,KAAK,mBAAmB;AAC3D,aAAO;AAAA,IACT;AAGA,QAAI,CAAC,MAAM,SAAS;AAClB,aAAO;AAAA,IACT;AAEA,WAAO;AAAA,MACL;AAAA,MACA,OAAO;AAAA,QACL,MAAM;AAAA,QACN,SAAS,MAAM;AAAA,MACjB;AAAA,IACF;AAAA,EACF;AAAA,EAEA,4BACE,IACA,OACe;AACf,WAAO;AAAA,MACL;AAAA,MACA,OAAO;AAAA,QACL,MAAM;AAAA,QACN,SAAS,MAAM,WAAW;AAAA,QAC1B,WAAW;AAAA,UACT,IAAI,aAAa,OAAO;AAAA,YACtB,QAAQ,KAAK;AAAA,YACb,MAAM,KAAK,YAAY;AAAA,YACvB,MAAM,KAAK,oBAAoB;AAAA,UACjC,CAAC;AAAA,QACH;AAAA,MACF;AAAA,IACF;AAAA,EACF;AACF;","names":["llm"]}
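The llm.ts source embedded in the map above shows the reworked chat() signature: per-call `parallelToolCalls`, `toolChoice`, and `extraKwargs` now merge with the constructor options before being spread into the OpenAI request. The sketch below is illustrative only, not taken from the package; the exported class name and the exact LLMOptions shape are assumed from this diff, and `seed` is just a stand-in for any extra OpenAI request field.

```ts
// Sketch only: exercising the reworked chat() options shown in the embedded
// llm.ts source. Option names (model, temperature, user) follow the #opts
// references above; verify against the published typings before relying on them.
import { llm } from '@livekit/agents';
import { LLM } from '@livekit/agents-plugin-openai';

declare const chatCtx: llm.ChatContext; // built elsewhere by the agent session

const model = new LLM({ model: 'gpt-4.1', temperature: 0.4, user: 'room-123' });

// Per-call options take precedence over the constructor defaults; everything in
// extraKwargs is spread into chat.completions.create() as-is, alongside the
// snake_case keys (max_completion_tokens, service_tier, parallel_tool_calls, ...)
// that the stream builds from the constructor options.
const stream = model.chat({
  chatCtx,
  parallelToolCalls: false,
  extraKwargs: { seed: 42 }, // hypothetical extra OpenAI request field
});
```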
package/dist/models.cjs CHANGED
@@ -3,6 +3,10 @@ var __defProp = Object.defineProperty;
  var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
  var __getOwnPropNames = Object.getOwnPropertyNames;
  var __hasOwnProp = Object.prototype.hasOwnProperty;
+ var __export = (target, all) => {
+   for (var name in all)
+     __defProp(target, name, { get: all[name], enumerable: true });
+ };
  var __copyProps = (to, from, except, desc) => {
    if (from && typeof from === "object" || typeof from === "function") {
      for (let key of __getOwnPropNames(from))
@@ -13,5 +17,15 @@ var __copyProps = (to, from, except, desc) => {
  };
  var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
  var models_exports = {};
+ __export(models_exports, {
+   supportsReasoningEffort: () => supportsReasoningEffort
+ });
  module.exports = __toCommonJS(models_exports);
+ function supportsReasoningEffort(model) {
+   return model === "gpt-5" || model === "gpt-5-mini" || model === "gpt-5-nano";
+ }
+ // Annotate the CommonJS export names for ESM import in node:
+ 0 && (module.exports = {
+   supportsReasoningEffort
+ });
  //# sourceMappingURL=models.cjs.map
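The hunk above introduces `supportsReasoningEffort`, a runtime guard that is true only for the gpt-5 family. A minimal usage sketch, assuming the helper is re-exported from the package root (the CJS output above only shows it exported from the models module) and using `reasoning_effort` as a hypothetical request field to gate:

```ts
// Sketch: conditionally attaching a reasoning-effort setting based on the new helper.
import { supportsReasoningEffort } from '@livekit/agents-plugin-openai';

const model = 'gpt-5-mini';
const extras: Record<string, unknown> = {};

if (supportsReasoningEffort(model)) {
  // hypothetical option name; per the helper, only gpt-5 / gpt-5-mini / gpt-5-nano qualify
  extras.reasoning_effort = 'low';
}
```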
package/dist/models.cjs.map CHANGED
@@ -1 +1 @@
- {"version":3,"sources":["../src/models.ts"],"sourcesContent":["// SPDX-FileCopyrightText: 2024 LiveKit, Inc.\n//\n// SPDX-License-Identifier: Apache-2.0\n\nexport type ChatModels =\n | 'gpt-4o'\n | 'gpt-4o-2024-05-13'\n | 'gpt-4o-mini'\n | 'gpt-4o-mini-2024-07-18'\n | 'gpt-4-turbo'\n | 'gpt-4-turbo-2024-04-09'\n | 'gpt-4-turbo-preview'\n | 'gpt-4-0125-preview'\n | 'gpt-4-1106-preview'\n | 'gpt-4-vision-preview'\n | 'gpt-4-1106-vision-preview'\n | 'gpt-4'\n | 'gpt-4-0314'\n | 'gpt-4-0613'\n | 'gpt-4-32k'\n | 'gpt-4-32k-0314'\n | 'gpt-4-32k-0613'\n | 'gpt-3.5-turbo'\n | 'gpt-3.5-turbo-16k'\n | 'gpt-3.5-turbo-0301'\n | 'gpt-3.5-turbo-0613'\n | 'gpt-3.5-turbo-1106'\n | 'gpt-3.5-turbo-16k-0613';\n\nexport type WhisperModels = 'whisper-1';\n\nexport type TTSModels = 'tts-1' | 'tts-1-hd' | 'gpt-4o-mini-tts';\n\nexport type TTSVoices =\n | 'alloy'\n | 'ash'\n | 'ballad'\n | 'coral'\n | 'echo'\n | 'fable'\n | 'nova'\n | 'onyx'\n | 'sage'\n | 'shimmer'\n | 'verse';\n\n// adapters for OpenAI-compatible LLMs, TTSs, STTs\n\nexport type TelnyxChatModels =\n | 'meta-llama/Meta-Llama-3.1-8B-Instruct'\n | 'meta-llama/Meta-Llama-3.1-70B-Instruct';\n\nexport type CerebrasChatModels = 'llama3.1-8b' | 'llama3.1-70b';\n\nexport type PerplexityChatModels =\n | 'llama-3.1-sonar-small-128k-online'\n | 'llama-3.1-sonar-small-128k-chat'\n | 'llama-3.1-sonar-large-128k-online'\n | 'llama-3.1-sonar-large-128k-chat'\n | 'llama-3.1-8b-instruct'\n | 'llama-3.1-70b-instruct';\n\nexport type GroqChatModels =\n | 'llama-3.1-405b-reasoning'\n | 'llama-3.1-70b-versatile'\n | 'llama-3.1-8b-instant'\n | 'llama-3.3-70b-versatile'\n | 'llama3-groq-70b-8192-tool-use-preview'\n | 'llama3-groq-8b-8192-tool-use-preview'\n | 'llama-guard-3-8b'\n | 'llama3-70b-8192'\n | 'llama3-8b-8192'\n | 'mixtral-8x7b-32768'\n | 'gemma-7b-it'\n | 'gemma2-9b-it';\n\nexport type GroqAudioModels =\n | 'whisper-large-v3'\n | 'distil-whisper-large-v3-en'\n | 'whisper-large-v3-turbo';\n\nexport type DeepSeekChatModels = 'deepseek-coder' | 'deepseek-chat';\n\nexport type TogetherChatModels =\n | 'garage-bAInd/Platypus2-70B-instruct'\n | 'google/gemma-2-27b-it'\n | 'google/gemma-2-9b-it'\n | 'google/gemma-2b-it'\n | 'google/gemma-7b-it'\n | 'lmsys/vicuna-13b-v1.5'\n | 'lmsys/vicuna-7b-v1.5'\n | 'meta-llama/Llama-2-13b-chat-hf'\n | 'meta-llama/Llama-2-70b-chat-hf'\n | 'meta-llama/Llama-2-7b-chat-hf'\n | 'meta-llama/Llama-3-70b-chat-hf'\n | 'meta-llama/Llama-3-8b-chat-hf'\n | 'meta-llama/Meta-Llama-3-70B-Instruct-Lite'\n | 'meta-llama/Meta-Llama-3-70B-Instruct-Turbo'\n | 'meta-llama/Meta-Llama-3-8B-Instruct-Lite'\n | 'meta-llama/Meta-Llama-3-8B-Instruct-Turbo'\n | 'meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo'\n | 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo'\n | 'meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo'\n | 'mistralai/Mistral-7B-Instruct-v0.1'\n | 'mistralai/Mistral-7B-Instruct-v0.2'\n | 'mistralai/Mistral-7B-Instruct-v0.3'\n | 'mistralai/Mixtral-8x22B-Instruct-v0.1'\n | 'mistralai/Mixtral-8x7B-Instruct-v0.1'\n | 'openchat/openchat-3.5-1210'\n | 'snorkelai/Snorkel-Mistral-PairRM-DPO'\n | 'teknium/OpenHermes-2-Mistral-7B'\n | 'teknium/OpenHermes-2p5-Mistral-7B'\n | 'togethercomputer/Llama-2-7B-32K-Instruct'\n | 'togethercomputer/RedPajama-INCITE-7B-Chat'\n | 'togethercomputer/RedPajama-INCITE-Chat-3B-v1'\n | 'togethercomputer/StripedHyena-Nous-7B'\n | 'togethercomputer/alpaca-7b'\n | 'upstage/SOLAR-10.7B-Instruct-v1.0'\n | 'zero-one-ai/Yi-34B-Chat';\n\nexport type OctoChatModels =\n | 'meta-llama-3-70b-instruct'\n | 'meta-llama-3.1-405b-instruct'\n | 
'meta-llama-3.1-70b-instruct'\n | 'meta-llama-3.1-8b-instruct'\n | 'mistral-7b-instruct'\n | 'mixtral-8x7b-instruct'\n | 'wizardlm-2-8x22bllamaguard-2-7b';\n\nexport type XAIChatModels = 'grok-2' | 'grok-2-mini' | 'grok-2-mini-public' | 'grok-2-public';\n\nexport type MetaChatModels =\n | 'Llama-4-Scout-17B-16E-Instruct-FP8'\n | 'Llama-4-Maverick-17B-128E-Instruct-FP8'\n | 'Llama-3.3-70B-Instruct'\n | 'Llama-3.3-8B-Instruct';\n"],"mappings":";;;;;;;;;;;;;;AAAA;AAAA;","names":[]}
+ {"version":3,"sources":["../src/models.ts"],"sourcesContent":["// SPDX-FileCopyrightText: 2024 LiveKit, Inc.\n//\n// SPDX-License-Identifier: Apache-2.0\n\nexport type ChatModels =\n | 'gpt-5'\n | 'gpt-5-mini'\n | 'gpt-5-nano'\n | 'gpt-4.1'\n | 'gpt-4.1-mini'\n | 'gpt-4.1-nano'\n | 'gpt-4o'\n | 'gpt-4o-2024-05-13'\n | 'gpt-4o-mini'\n | 'gpt-4o-mini-2024-07-18'\n | 'gpt-4-turbo'\n | 'gpt-4-turbo-2024-04-09'\n | 'gpt-4-turbo-preview'\n | 'gpt-4-0125-preview'\n | 'gpt-4-1106-preview'\n | 'gpt-4-vision-preview'\n | 'gpt-4-1106-vision-preview'\n | 'gpt-4'\n | 'gpt-4-0314'\n | 'gpt-4-0613'\n | 'gpt-4-32k'\n | 'gpt-4-32k-0314'\n | 'gpt-4-32k-0613'\n | 'gpt-3.5-turbo'\n | 'gpt-3.5-turbo-16k'\n | 'gpt-3.5-turbo-0301'\n | 'gpt-3.5-turbo-0613'\n | 'gpt-3.5-turbo-1106'\n | 'gpt-3.5-turbo-16k-0613';\n\nexport type WhisperModels = 'whisper-1';\n\nexport type TTSModels = 'tts-1' | 'tts-1-hd' | 'gpt-4o-mini-tts';\n\nexport type TTSVoices =\n | 'alloy'\n | 'ash'\n | 'ballad'\n | 'coral'\n | 'echo'\n | 'fable'\n | 'nova'\n | 'onyx'\n | 'sage'\n | 'shimmer';\n\nexport type DalleModels = 'dall-e-2' | 'dall-e-3';\n\nexport type EmbeddingModels =\n | 'text-embedding-ada-002'\n | 'text-embedding-3-small'\n | 'text-embedding-3-large';\n\nexport type AssistantTools = 'code_interpreter' | 'file_search' | 'function';\n\nexport type VertexModels =\n | 'google/gemini-2.0-flash-exp'\n | 'google/gemini-1.5-flash'\n | 'google/gemini-1.5-pro'\n | 'google/gemini-1.0-pro-vision'\n | 'google/gemini-1.0-pro-vision-001'\n | 'google/gemini-1.0-pro-002'\n | 'google/gemini-1.0-pro-001'\n | 'google/gemini-1.0-pro';\n\n// adapters for OpenAI-compatible LLMs, TTSs, STTs\n\nexport type TelnyxChatModels =\n | 'meta-llama/Meta-Llama-3.1-8B-Instruct'\n | 'meta-llama/Meta-Llama-3.1-70B-Instruct';\n\nexport type CerebrasChatModels =\n | 'llama3.1-8b'\n | 'llama-3.3-70b'\n | 'llama-4-scout-17b-16e-instruct'\n | 'llama-4-maverick-17b-128e-instruct'\n | 'qwen-3-32b'\n | 'qwen-3-235b-a22b-instruct-2507'\n | 'qwen-3-235b-a22b-thinking-2507'\n | 'qwen-3-coder-480b'\n | 'gpt-oss-120b';\n\nexport type PerplexityChatModels =\n | 'llama-3.1-sonar-small-128k-online'\n | 'llama-3.1-sonar-small-128k-chat'\n | 'llama-3.1-sonar-large-128k-online'\n | 'llama-3.1-sonar-large-128k-chat'\n | 'llama-3.1-8b-instruct'\n | 'llama-3.1-70b-instruct';\n\nexport type GroqChatModels =\n | 'llama-3.1-405b-reasoning'\n | 'llama-3.1-8b-instant'\n | 'llama-3.3-70b-versatile'\n | 'llama3-groq-70b-8192-tool-use-preview'\n | 'llama3-groq-8b-8192-tool-use-preview'\n | 'llama-guard-3-8b'\n | 'llama3-70b-8192'\n | 'llama3-8b-8192'\n | 'mixtral-8x7b-32768'\n | 'gemma-7b-it'\n | 'gemma2-9b-it';\n\nexport type GroqAudioModels =\n | 'whisper-large-v3'\n | 'distil-whisper-large-v3-en'\n | 'whisper-large-v3-turbo';\n\nexport type DeepSeekChatModels = 'deepseek-coder' | 'deepseek-chat';\n\nexport type TogetherChatModels =\n | 'Austism/chronos-hermes-13b'\n | 'Gryphe/MythoMax-L2-13b'\n | 'NousResearch/Nous-Capybara-7B-V1p9'\n | 'NousResearch/Nous-Hermes-2-Mistral-7B-DPO'\n | 'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO'\n | 'NousResearch/Nous-Hermes-2-Mixtral-8x7B-SFT'\n | 'NousResearch/Nous-Hermes-2-Yi-34B'\n | 'NousResearch/Nous-Hermes-Llama2-13b'\n | 'NousResearch/Nous-Hermes-llama-2-7b'\n | 'Open-Orca/Mistral-7B-OpenOrca'\n | 'Qwen/Qwen1.5-0.5B-Chat'\n | 'Qwen/Qwen1.5-1.8B-Chat'\n | 'Qwen/Qwen1.5-110B-Chat'\n | 'Qwen/Qwen1.5-14B-Chat'\n | 'Qwen/Qwen1.5-32B-Chat'\n | 'Qwen/Qwen1.5-4B-Chat'\n | 'Qwen/Qwen1.5-72B-Chat'\n | 'Qwen/Qwen1.5-7B-Chat'\n | 'Qwen/Qwen2-72B-Instruct'\n | 
'Snowflake/snowflake-arctic-instruct'\n | 'Undi95/ReMM-SLERP-L2-13B'\n | 'Undi95/Toppy-M-7B'\n | 'WizardLM/WizardLM-13B-V1.2'\n | 'allenai/OLMo-7B'\n | 'allenai/OLMo-7B-Instruct'\n | 'allenai/OLMo-7B-Twin-2T'\n | 'codellama/CodeLlama-13b-Instruct-hf'\n | 'codellama/CodeLlama-34b-Instruct-hf'\n | 'codellama/CodeLlama-70b-Instruct-hf'\n | 'codellama/CodeLlama-7b-Instruct-hf'\n | 'cognitivecomputations/dolphin-2.5-mixtral-8x7b'\n | 'databricks/dbrx-instruct'\n | 'deepseek-ai/deepseek-coder-33b-instruct'\n | 'deepseek-ai/deepseek-llm-67b-chat'\n | 'garage-bAInd/Platypus2-70B-instruct'\n | 'google/gemma-2-27b-it'\n | 'google/gemma-2-9b-it'\n | 'google/gemma-2b-it'\n | 'google/gemma-7b-it'\n | 'lmsys/vicuna-13b-v1.5'\n | 'lmsys/vicuna-7b-v1.5'\n | 'meta-llama/Llama-2-13b-chat-hf'\n | 'meta-llama/Llama-2-70b-chat-hf'\n | 'meta-llama/Llama-2-7b-chat-hf'\n | 'meta-llama/Llama-3-70b-chat-hf'\n | 'meta-llama/Llama-3-8b-chat-hf'\n | 'meta-llama/Meta-Llama-3-70B-Instruct-Lite'\n | 'meta-llama/Meta-Llama-3-70B-Instruct-Turbo'\n | 'meta-llama/Meta-Llama-3-8B-Instruct-Lite'\n | 'meta-llama/Meta-Llama-3-8B-Instruct-Turbo'\n | 'meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo'\n | 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo'\n | 'meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo'\n | 'meta-llama/Llama-3.3-70B-Instruct-Turbo'\n | 'mistralai/Mistral-7B-Instruct-v0.1'\n | 'mistralai/Mistral-7B-Instruct-v0.2'\n | 'mistralai/Mistral-7B-Instruct-v0.3'\n | 'mistralai/Mixtral-8x22B-Instruct-v0.1'\n | 'mistralai/Mixtral-8x7B-Instruct-v0.1'\n | 'openchat/openchat-3.5-1210'\n | 'snorkelai/Snorkel-Mistral-PairRM-DPO'\n | 'teknium/OpenHermes-2-Mistral-7B'\n | 'teknium/OpenHermes-2p5-Mistral-7B'\n | 'togethercomputer/Llama-2-7B-32K-Instruct'\n | 'togethercomputer/RedPajama-INCITE-7B-Chat'\n | 'togethercomputer/RedPajama-INCITE-Chat-3B-v1'\n | 'togethercomputer/StripedHyena-Nous-7B'\n | 'togethercomputer/alpaca-7b'\n | 'upstage/SOLAR-10.7B-Instruct-v1.0'\n | 'zero-one-ai/Yi-34B-Chat';\n\nexport type OctoChatModels =\n | 'meta-llama-3-70b-instruct'\n | 'meta-llama-3.1-405b-instruct'\n | 'meta-llama-3.1-70b-instruct'\n | 'meta-llama-3.1-8b-instruct'\n | 'mistral-7b-instruct'\n | 'mixtral-8x7b-instruct'\n | 'wizardlm-2-8x22bllamaguard-2-7b';\n\nexport type XAIChatModels =\n | 'grok-3'\n | 'grok-3-fast'\n | 'grok-3-mini'\n | 'grok-3-mini-fast'\n | 'grok-2-vision-1212'\n | 'grok-2-image-1212'\n | 'grok-2-1212';\n\nexport type MetaChatModels =\n | 'Llama-4-Scout-17B-16E-Instruct-FP8'\n | 'Llama-4-Maverick-17B-128E-Instruct-FP8'\n | 'Llama-3.3-70B-Instruct'\n | 'Llama-3.3-8B-Instruct';\n\nexport function supportsReasoningEffort(model: ChatModels | string): boolean {\n return model === 'gpt-5' || model === 'gpt-5-mini' || model === 'gpt-5-nano';\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAmNO,SAAS,wBAAwB,OAAqC;AAC3E,SAAO,UAAU,WAAW,UAAU,gBAAgB,UAAU;AAClE;","names":[]}