@livekit/agents-plugin-openai 0.9.3 → 1.0.0-next.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. package/dist/index.cjs +16 -5
  2. package/dist/index.cjs.map +1 -1
  3. package/dist/index.d.cts +4 -4
  4. package/dist/index.d.ts +4 -4
  5. package/dist/index.d.ts.map +1 -1
  6. package/dist/index.js +14 -3
  7. package/dist/index.js.map +1 -1
  8. package/dist/llm.cjs +156 -197
  9. package/dist/llm.cjs.map +1 -1
  10. package/dist/llm.d.cts +27 -8
  11. package/dist/llm.d.ts +27 -8
  12. package/dist/llm.d.ts.map +1 -1
  13. package/dist/llm.js +164 -188
  14. package/dist/llm.js.map +1 -1
  15. package/dist/models.cjs +14 -0
  16. package/dist/models.cjs.map +1 -1
  17. package/dist/models.d.cts +11 -6
  18. package/dist/models.d.ts +11 -6
  19. package/dist/models.d.ts.map +1 -1
  20. package/dist/models.js +6 -0
  21. package/dist/models.js.map +1 -1
  22. package/dist/realtime/api_proto.cjs.map +1 -1
  23. package/dist/realtime/api_proto.d.cts +15 -0
  24. package/dist/realtime/api_proto.d.ts +15 -0
  25. package/dist/realtime/api_proto.d.ts.map +1 -1
  26. package/dist/realtime/api_proto.js.map +1 -1
  27. package/dist/realtime/realtime_model.cjs +1057 -820
  28. package/dist/realtime/realtime_model.cjs.map +1 -1
  29. package/dist/realtime/realtime_model.d.cts +126 -160
  30. package/dist/realtime/realtime_model.d.ts +126 -160
  31. package/dist/realtime/realtime_model.d.ts.map +1 -1
  32. package/dist/realtime/realtime_model.js +1067 -825
  33. package/dist/realtime/realtime_model.js.map +1 -1
  34. package/dist/tts.cjs +5 -5
  35. package/dist/tts.cjs.map +1 -1
  36. package/dist/tts.d.cts +2 -1
  37. package/dist/tts.d.ts +2 -1
  38. package/dist/tts.d.ts.map +1 -1
  39. package/dist/tts.js +6 -6
  40. package/dist/tts.js.map +1 -1
  41. package/package.json +9 -7
  42. package/src/index.ts +19 -5
  43. package/src/llm.ts +227 -228
  44. package/src/models.ts +83 -5
  45. package/src/realtime/api_proto.ts +15 -1
  46. package/src/realtime/realtime_model.ts +1305 -996
  47. package/src/tts.ts +6 -6
package/src/llm.ts CHANGED
@@ -1,10 +1,16 @@
1
- // SPDX-FileCopyrightText: 2024 LiveKit, Inc.
1
+ // SPDX-FileCopyrightText: 2025 LiveKit, Inc.
2
2
  //
3
3
  // SPDX-License-Identifier: Apache-2.0
4
- import { llm, log } from '@livekit/agents';
5
- import { randomUUID } from 'node:crypto';
4
+ import type { APIConnectOptions } from '@livekit/agents';
5
+ import {
6
+ APIConnectionError,
7
+ APIStatusError,
8
+ APITimeoutError,
9
+ DEFAULT_API_CONNECT_OPTIONS,
10
+ llm,
11
+ toError,
12
+ } from '@livekit/agents';
6
13
  import { AzureOpenAI, OpenAI } from 'openai';
7
- import sharp from 'sharp';
8
14
  import type {
9
15
  CerebrasChatModels,
10
16
  ChatModels,
@@ -25,21 +31,29 @@ export interface LLMOptions {
25
31
  user?: string;
26
32
  temperature?: number;
27
33
  client?: OpenAI;
34
+ toolChoice?: llm.ToolChoice;
35
+ parallelToolCalls?: boolean;
36
+ metadata?: Record<string, string>;
37
+ maxCompletionTokens?: number;
38
+ serviceTier?: string;
39
+ store?: boolean;
28
40
  }
29
41
 
30
42
  const defaultLLMOptions: LLMOptions = {
31
- model: 'gpt-4o',
43
+ model: 'gpt-4.1',
32
44
  apiKey: process.env.OPENAI_API_KEY,
45
+ parallelToolCalls: true,
33
46
  };
34
47
 
35
48
  const defaultAzureLLMOptions: LLMOptions = {
36
- model: 'gpt-4o',
49
+ model: 'gpt-4.1',
37
50
  apiKey: process.env.AZURE_API_KEY,
38
51
  };
39
52
 
40
53
  export class LLM extends llm.LLM {
41
54
  #opts: LLMOptions;
42
55
  #client: OpenAI;
56
+ #providerFmt: llm.ProviderFormat;
43
57
 
44
58
  /**
45
59
  * Create a new instance of OpenAI LLM.
@@ -48,10 +62,14 @@ export class LLM extends llm.LLM {
48
62
  * `apiKey` must be set to your OpenAI API key, either using the argument or by setting the
49
63
  * `OPENAI_API_KEY` environmental variable.
50
64
  */
51
- constructor(opts: Partial<LLMOptions> = defaultLLMOptions) {
65
+ constructor(
66
+ opts: Partial<LLMOptions> = defaultLLMOptions,
67
+ providerFmt: llm.ProviderFormat = 'openai',
68
+ ) {
52
69
  super();
53
70
 
54
71
  this.#opts = { ...defaultLLMOptions, ...opts };
72
+ this.#providerFmt = providerFmt;
55
73
  if (this.#opts.apiKey === undefined) {
56
74
  throw new Error('OpenAI API key is required, whether as an argument or as $OPENAI_API_KEY');
57
75
  }
@@ -64,6 +82,14 @@ export class LLM extends llm.LLM {
64
82
  });
65
83
  }
66
84
 
85
+ label(): string {
86
+ return 'openai.LLM';
87
+ }
88
+
89
+ get model(): string {
90
+ return this.#opts.model;
91
+ }
92
+
67
93
  /**
68
94
  * Create a new instance of OpenAI LLM with Azure.
69
95
  *
@@ -415,29 +441,65 @@ export class LLM extends llm.LLM {
415
441
 
416
442
  chat({
417
443
  chatCtx,
418
- fncCtx,
419
- temperature,
420
- n,
444
+ toolCtx,
445
+ connOptions = DEFAULT_API_CONNECT_OPTIONS,
421
446
  parallelToolCalls,
447
+ toolChoice,
448
+ extraKwargs,
422
449
  }: {
423
450
  chatCtx: llm.ChatContext;
424
- fncCtx?: llm.FunctionContext | undefined;
425
- temperature?: number | undefined;
426
- n?: number | undefined;
427
- parallelToolCalls?: boolean | undefined;
451
+ toolCtx?: llm.ToolContext;
452
+ connOptions?: APIConnectOptions;
453
+ parallelToolCalls?: boolean;
454
+ toolChoice?: llm.ToolChoice;
455
+ extraKwargs?: Record<string, any>;
428
456
  }): LLMStream {
429
- temperature = temperature || this.#opts.temperature;
457
+ const extras: Record<string, any> = { ...extraKwargs }; // eslint-disable-line @typescript-eslint/no-explicit-any
458
+
459
+ if (this.#opts.metadata) {
460
+ extras.metadata = this.#opts.metadata;
461
+ }
462
+
463
+ if (this.#opts.user) {
464
+ extras.user = this.#opts.user;
465
+ }
466
+
467
+ if (this.#opts.maxCompletionTokens) {
468
+ extras.max_completion_tokens = this.#opts.maxCompletionTokens;
469
+ }
470
+
471
+ if (this.#opts.temperature) {
472
+ extras.temperature = this.#opts.temperature;
473
+ }
474
+
475
+ if (this.#opts.serviceTier) {
476
+ extras.service_tier = this.#opts.serviceTier;
477
+ }
478
+
479
+ if (this.#opts.store !== undefined) {
480
+ extras.store = this.#opts.store;
481
+ }
482
+
483
+ parallelToolCalls =
484
+ parallelToolCalls !== undefined ? parallelToolCalls : this.#opts.parallelToolCalls;
485
+ if (toolCtx && Object.keys(toolCtx).length > 0 && parallelToolCalls !== undefined) {
486
+ extras.parallel_tool_calls = parallelToolCalls;
487
+ }
430
488
 
431
- return new LLMStream(
432
- this,
433
- this.#client,
489
+ toolChoice = toolChoice !== undefined ? toolChoice : this.#opts.toolChoice;
490
+ if (toolChoice) {
491
+ extras.tool_choice = toolChoice;
492
+ }
493
+
494
+ return new LLMStream(this, {
495
+ model: this.#opts.model,
496
+ providerFmt: this.#providerFmt,
497
+ client: this.#client,
434
498
  chatCtx,
435
- fncCtx,
436
- this.#opts,
437
- parallelToolCalls,
438
- temperature,
439
- n,
440
- );
499
+ toolCtx,
500
+ connOptions,
501
+ extraKwargs: extras,
502
+ });
441
503
  }
442
504
  }
443
505
 
@@ -445,78 +507,113 @@ export class LLMStream extends llm.LLMStream {
445
507
  #toolCallId?: string;
446
508
  #fncName?: string;
447
509
  #fncRawArguments?: string;
510
+ #toolIndex?: number;
448
511
  #client: OpenAI;
449
- #logger = log();
450
- #id = randomUUID();
451
- label = 'openai.LLMStream';
512
+ #providerFmt: llm.ProviderFormat;
513
+ #extraKwargs: Record<string, any>;
514
+ private model: string | ChatModels;
452
515
 
453
516
  constructor(
454
517
  llm: LLM,
455
- client: OpenAI,
456
- chatCtx: llm.ChatContext,
457
- fncCtx: llm.FunctionContext | undefined,
458
- opts: LLMOptions,
459
- parallelToolCalls?: boolean,
460
- temperature?: number,
461
- n?: number,
518
+ {
519
+ model,
520
+ providerFmt,
521
+ client,
522
+ chatCtx,
523
+ toolCtx,
524
+ connOptions,
525
+ extraKwargs,
526
+ }: {
527
+ model: string | ChatModels;
528
+ providerFmt: llm.ProviderFormat;
529
+ client: OpenAI;
530
+ chatCtx: llm.ChatContext;
531
+ toolCtx?: llm.ToolContext;
532
+ connOptions: APIConnectOptions;
533
+ extraKwargs: Record<string, any>;
534
+ },
462
535
  ) {
463
- super(llm, chatCtx, fncCtx);
536
+ super(llm, { chatCtx, toolCtx, connOptions });
464
537
  this.#client = client;
465
- this.#run(opts, n, parallelToolCalls, temperature);
538
+ this.#providerFmt = providerFmt;
539
+ this.#extraKwargs = extraKwargs;
540
+ this.model = model;
466
541
  }
467
542
 
468
- async #run(opts: LLMOptions, n?: number, parallelToolCalls?: boolean, temperature?: number) {
469
- const tools = this.fncCtx
470
- ? Object.entries(this.fncCtx).map(([name, func]) => ({
471
- type: 'function' as const,
472
- function: {
473
- name,
474
- description: func.description,
475
- // don't format parameters if they are raw openai params
476
- parameters:
477
- func.parameters.type == ('object' as const)
478
- ? func.parameters
479
- : llm.oaiParams(func.parameters),
480
- },
481
- }))
482
- : undefined;
483
-
543
+ protected async run(): Promise<void> {
544
+ let retryable = true;
484
545
  try {
546
+ const messages = (await this.chatCtx.toProviderFormat(
547
+ this.#providerFmt,
548
+ )) as OpenAI.ChatCompletionMessageParam[];
549
+
550
+ const tools = this.toolCtx
551
+ ? Object.entries(this.toolCtx).map(([name, func]) => ({
552
+ type: 'function' as const,
553
+ function: {
554
+ name,
555
+ description: func.description,
556
+ parameters: llm.toJsonSchema(
557
+ func.parameters,
558
+ ) as unknown as OpenAI.Chat.Completions.ChatCompletionTool['function']['parameters'],
559
+ },
560
+ }))
561
+ : undefined;
562
+
485
563
  const stream = await this.#client.chat.completions.create({
486
- model: opts.model,
487
- user: opts.user,
488
- n,
489
- messages: await Promise.all(
490
- this.chatCtx.messages.map(async (m) => await buildMessage(m, this.#id)),
491
- ),
492
- temperature: temperature || opts.temperature,
493
- stream_options: { include_usage: true },
494
- stream: true,
564
+ model: this.model,
565
+ messages,
495
566
  tools,
496
- parallel_tool_calls: this.fncCtx && parallelToolCalls,
567
+ stream: true,
568
+ stream_options: { include_usage: true },
569
+ ...this.#extraKwargs,
497
570
  });
498
571
 
499
572
  for await (const chunk of stream) {
500
573
  for (const choice of chunk.choices) {
574
+ if (this.abortController.signal.aborted) {
575
+ break;
576
+ }
501
577
  const chatChunk = this.#parseChoice(chunk.id, choice);
502
578
  if (chatChunk) {
579
+ retryable = false;
503
580
  this.queue.put(chatChunk);
504
581
  }
582
+ }
505
583
 
506
- if (chunk.usage) {
507
- const usage = chunk.usage;
508
- this.queue.put({
509
- requestId: chunk.id,
510
- choices: [],
511
- usage: {
512
- completionTokens: usage.completion_tokens,
513
- promptTokens: usage.prompt_tokens,
514
- totalTokens: usage.total_tokens,
515
- },
516
- });
517
- }
584
+ if (chunk.usage) {
585
+ const usage = chunk.usage;
586
+ retryable = false;
587
+ this.queue.put({
588
+ id: chunk.id,
589
+ usage: {
590
+ completionTokens: usage.completion_tokens,
591
+ promptTokens: usage.prompt_tokens,
592
+ promptCachedTokens: usage.prompt_tokens_details?.cached_tokens || 0,
593
+ totalTokens: usage.total_tokens,
594
+ },
595
+ });
518
596
  }
519
597
  }
598
+ } catch (error) {
599
+ if (error instanceof OpenAI.APIConnectionTimeoutError) {
600
+ throw new APITimeoutError({ options: { retryable } });
601
+ } else if (error instanceof OpenAI.APIError) {
602
+ throw new APIStatusError({
603
+ message: error.message,
604
+ options: {
605
+ statusCode: error.status,
606
+ body: error.error,
607
+ requestId: error.request_id,
608
+ retryable,
609
+ },
610
+ });
611
+ } else {
612
+ throw new APIConnectionError({
613
+ message: toError(error).message,
614
+ options: { retryable },
615
+ });
616
+ }
520
617
  } finally {
521
618
  this.queue.close();
522
619
  }
@@ -525,6 +622,10 @@ export class LLMStream extends llm.LLMStream {
525
622
  #parseChoice(id: string, choice: OpenAI.ChatCompletionChunk.Choice): llm.ChatChunk | undefined {
526
623
  const delta = choice.delta;
527
624
 
625
+ // https://github.com/livekit/agents/issues/688
626
+ // the delta can be None when using Azure OpenAI (content filtering)
627
+ if (delta === undefined) return undefined;
628
+
528
629
  if (delta.tool_calls) {
529
630
  // check if we have functions to calls
530
631
  for (const tool of delta.tool_calls) {
@@ -532,17 +633,38 @@ export class LLMStream extends llm.LLMStream {
532
633
  continue; // oai may add other tools in the future
533
634
  }
534
635
 
636
+ /**
637
+ * The way OpenAI streams tool calls is a bit tricky.
638
+ *
639
+ * For any new tool call, it first emits a delta tool call with id, and function name,
640
+ * the rest of the delta chunks will only stream the remaining arguments string,
641
+ * until a new tool call is started or the tool call is finished.
642
+ * See below for an example.
643
+ *
644
+ * Choice(delta=ChoiceDelta(content=None, function_call=None, refusal=None, role='assistant', tool_calls=None), finish_reason=None, index=0, logprobs=None)
645
+ * [ChoiceDeltaToolCall(index=0, id='call_LaVeHWUHpef9K1sd5UO8TtLg', function=ChoiceDeltaToolCallFunction(arguments='', name='get_weather'), type='function')]
646
+ * [ChoiceDeltaToolCall(index=0, id=None, function=ChoiceDeltaToolCallFunction(arguments='{"location": "P', name=None), type=None)]
647
+ * [ChoiceDeltaToolCall(index=0, id=None, function=ChoiceDeltaToolCallFunction(arguments='aris}', name=None), type=None)]
648
+ * [ChoiceDeltaToolCall(index=1, id='call_ThU4OmMdQXnnVmpXGOCknXIB', function=ChoiceDeltaToolCallFunction(arguments='', name='get_weather'), type='function')]
649
+ * [ChoiceDeltaToolCall(index=1, id=None, function=ChoiceDeltaToolCallFunction(arguments='{"location": "T', name=None), type=None)]
650
+ * [ChoiceDeltaToolCall(index=1, id=None, function=ChoiceDeltaToolCallFunction(arguments='okyo', name=None), type=None)]
651
+ * Choice(delta=ChoiceDelta(content=None, function_call=None, refusal=None, role=None, tool_calls=None), finish_reason='tool_calls', index=0, logprobs=None)
652
+ */
535
653
  let callChunk: llm.ChatChunk | undefined;
536
- if (this.#toolCallId && tool.id && tool.id !== this.#toolCallId) {
537
- callChunk = this.#tryBuildFunction(id, choice);
654
+ // If we have a previous tool call and this is a new one, emit the previous
655
+ if (this.#toolCallId && tool.id && tool.index !== this.#toolIndex) {
656
+ callChunk = this.#createRunningToolCallChunk(id, delta);
657
+ this.#toolCallId = this.#fncName = this.#fncRawArguments = undefined;
538
658
  }
539
659
 
660
+ // Start or continue building the current tool call
540
661
  if (tool.function.name) {
662
+ this.#toolIndex = tool.index;
541
663
  this.#toolCallId = tool.id;
542
664
  this.#fncName = tool.function.name;
543
665
  this.#fncRawArguments = tool.function.arguments || '';
544
666
  } else if (tool.function.arguments) {
545
- this.#fncRawArguments += tool.function.arguments;
667
+ this.#fncRawArguments = (this.#fncRawArguments || '') + tool.function.arguments;
546
668
  }
547
669
 
548
670
  if (callChunk) {
@@ -551,171 +673,48 @@ export class LLMStream extends llm.LLMStream {
551
673
  }
552
674
  }
553
675
 
676
+ // If we're done with tool calls, emit the final one
554
677
  if (
555
678
  choice.finish_reason &&
556
679
  ['tool_calls', 'stop'].includes(choice.finish_reason) &&
557
- this.#toolCallId
680
+ this.#toolCallId !== undefined
558
681
  ) {
559
- // we're done with the tool calls, run the last one
560
- return this.#tryBuildFunction(id, choice);
682
+ const callChunk = this.#createRunningToolCallChunk(id, delta);
683
+ this.#toolCallId = this.#fncName = this.#fncRawArguments = undefined;
684
+ return callChunk;
561
685
  }
562
686
 
563
- return {
564
- requestId: id,
565
- choices: [
566
- {
567
- delta: { content: delta.content || undefined, role: llm.ChatRole.ASSISTANT },
568
- index: choice.index,
569
- },
570
- ],
571
- };
572
- }
573
-
574
- #tryBuildFunction(
575
- id: string,
576
- choice: OpenAI.ChatCompletionChunk.Choice,
577
- ): llm.ChatChunk | undefined {
578
- if (!this.fncCtx) {
579
- this.#logger.warn('oai stream tried to run function without function context');
580
- return undefined;
581
- }
582
-
583
- if (!this.#toolCallId) {
584
- this.#logger.warn('oai stream tried to run function but toolCallId is not set');
585
- return undefined;
586
- }
587
-
588
- if (!this.#fncRawArguments || !this.#fncName) {
589
- this.#logger.warn('oai stream tried to run function but rawArguments or fncName are not set');
687
+ // Regular content message
688
+ if (!delta.content) {
590
689
  return undefined;
591
690
  }
592
691
 
593
- const functionInfo = llm.oaiBuildFunctionInfo(
594
- this.fncCtx,
595
- this.#toolCallId,
596
- this.#fncName,
597
- this.#fncRawArguments,
598
- );
599
- this.#toolCallId = this.#fncName = this.#fncRawArguments = undefined;
600
- this._functionCalls.push(functionInfo);
601
-
602
692
  return {
603
- requestId: id,
604
- choices: [
605
- {
606
- delta: {
607
- content: choice.delta.content || undefined,
608
- role: llm.ChatRole.ASSISTANT,
609
- toolCalls: this._functionCalls,
610
- },
611
- index: choice.index,
612
- },
613
- ],
614
- };
615
- }
616
- }
617
-
618
- const buildMessage = async (msg: llm.ChatMessage, cacheKey: any) => {
619
- const oaiMsg: Partial<OpenAI.ChatCompletionMessageParam> = {};
620
-
621
- switch (msg.role) {
622
- case llm.ChatRole.SYSTEM:
623
- oaiMsg.role = 'system';
624
- break;
625
- case llm.ChatRole.USER:
626
- oaiMsg.role = 'user';
627
- break;
628
- case llm.ChatRole.ASSISTANT:
629
- oaiMsg.role = 'assistant';
630
- break;
631
- case llm.ChatRole.TOOL:
632
- oaiMsg.role = 'tool';
633
- if (oaiMsg.role === 'tool') {
634
- oaiMsg.tool_call_id = msg.toolCallId;
635
- }
636
- break;
637
- }
638
-
639
- if (msg.role === llm.ChatRole.TOOL) {
640
- try {
641
- const serializedContent =
642
- typeof msg.content === 'string' ? msg.content : JSON.stringify(msg.content);
643
- oaiMsg.content = serializedContent;
644
- } catch (e) {
645
- throw Error(`Tool call output is not JSON serializable: ${e}`);
646
- }
647
- } else {
648
- if (typeof msg.content === 'string') {
649
- oaiMsg.content = msg.content;
650
- } else if (Array.isArray(msg.content)) {
651
- oaiMsg.content = (await Promise.all(
652
- msg.content.map(async (c) => {
653
- if (typeof c === 'string') {
654
- return { type: 'text', text: c };
655
- } else if (
656
- // typescript type guard for determining ChatAudio vs ChatImage
657
- ((c: llm.ChatAudio | llm.ChatImage): c is llm.ChatImage => {
658
- return (c as llm.ChatImage).image !== undefined;
659
- })(c)
660
- ) {
661
- return await buildImageContent(c, cacheKey);
662
- } else {
663
- throw new Error('ChatAudio is not supported');
664
- }
665
- }),
666
- )) as OpenAI.ChatCompletionContentPart[];
667
- } else if (msg.content === undefined) {
668
- oaiMsg.content = '';
669
- }
670
- }
671
-
672
- // make sure to provide when function has been called inside the context
673
- // (+ raw_arguments)
674
- if (msg.toolCalls && oaiMsg.role === 'assistant') {
675
- oaiMsg.tool_calls = Object.entries(msg.toolCalls).map(([name, func]) => ({
676
- id: func.toolCallId,
677
- type: 'function' as const,
678
- function: {
679
- name: name,
680
- arguments: func.rawParams,
681
- },
682
- }));
683
- }
684
-
685
- return oaiMsg as OpenAI.ChatCompletionMessageParam;
686
- };
687
-
688
- const buildImageContent = async (image: llm.ChatImage, cacheKey: any) => {
689
- if (typeof image.image === 'string') {
690
- // image url
691
- return {
692
- type: 'image_url',
693
- image_url: {
694
- url: image.image,
695
- detail: 'auto',
693
+ id,
694
+ delta: {
695
+ role: 'assistant',
696
+ content: delta.content,
696
697
  },
697
698
  };
698
- } else {
699
- if (!image.cache[cacheKey]) {
700
- // inside our internal implementation, we allow to put extra metadata to
701
- // each ChatImage (avoid to reencode each time we do a chatcompletion request)
702
- let encoded = sharp(image.image.data);
703
-
704
- if (image.inferenceHeight && image.inferenceHeight) {
705
- encoded = encoded.resize(image.inferenceWidth, image.inferenceHeight);
706
- }
707
-
708
- image.cache[cacheKey] = await encoded
709
- .jpeg()
710
- .toBuffer()
711
- .then((buffer) => buffer.toString('utf-8'));
712
- }
699
+ }
713
700
 
701
+ #createRunningToolCallChunk(
702
+ id: string,
703
+ delta: OpenAI.Chat.Completions.ChatCompletionChunk.Choice.Delta,
704
+ ): llm.ChatChunk {
714
705
  return {
715
- type: 'image_url',
716
- image_url: {
717
- url: `data:image/jpeg;base64,${image.cache[cacheKey]}`,
706
+ id,
707
+ delta: {
708
+ role: 'assistant',
709
+ content: delta.content || undefined,
710
+ toolCalls: [
711
+ llm.FunctionCall.create({
712
+ callId: this.#toolCallId!,
713
+ name: this.#fncName || '',
714
+ args: this.#fncRawArguments || '',
715
+ }),
716
+ ],
718
717
  },
719
718
  };
720
719
  }
721
- };
720
+ }