@livekit/agents 1.0.34 → 1.0.36-dev.0

Files changed (187)
  1. package/dist/cli.cjs.map +1 -1
  2. package/dist/index.cjs +3 -1
  3. package/dist/index.cjs.map +1 -1
  4. package/dist/index.d.cts +1 -0
  5. package/dist/index.d.ts +1 -0
  6. package/dist/index.d.ts.map +1 -1
  7. package/dist/index.js +1 -0
  8. package/dist/index.js.map +1 -1
  9. package/dist/inference/api_protos.d.cts +4 -4
  10. package/dist/inference/api_protos.d.ts +4 -4
  11. package/dist/inference/interruption/AdaptiveInterruptionDetector.cjs +152 -0
  12. package/dist/inference/interruption/AdaptiveInterruptionDetector.cjs.map +1 -0
  13. package/dist/inference/interruption/AdaptiveInterruptionDetector.d.cts +50 -0
  14. package/dist/inference/interruption/AdaptiveInterruptionDetector.d.ts +50 -0
  15. package/dist/inference/interruption/AdaptiveInterruptionDetector.d.ts.map +1 -0
  16. package/dist/inference/interruption/AdaptiveInterruptionDetector.js +125 -0
  17. package/dist/inference/interruption/AdaptiveInterruptionDetector.js.map +1 -0
  18. package/dist/inference/interruption/InterruptionStream.cjs +310 -0
  19. package/dist/inference/interruption/InterruptionStream.cjs.map +1 -0
  20. package/dist/inference/interruption/InterruptionStream.d.cts +57 -0
  21. package/dist/inference/interruption/InterruptionStream.d.ts +57 -0
  22. package/dist/inference/interruption/InterruptionStream.d.ts.map +1 -0
  23. package/dist/inference/interruption/InterruptionStream.js +288 -0
  24. package/dist/inference/interruption/InterruptionStream.js.map +1 -0
  25. package/dist/inference/interruption/defaults.cjs +76 -0
  26. package/dist/inference/interruption/defaults.cjs.map +1 -0
  27. package/dist/inference/interruption/defaults.d.cts +14 -0
  28. package/dist/inference/interruption/defaults.d.ts +14 -0
  29. package/dist/inference/interruption/defaults.d.ts.map +1 -0
  30. package/dist/inference/interruption/defaults.js +42 -0
  31. package/dist/inference/interruption/defaults.js.map +1 -0
  32. package/dist/inference/interruption/errors.cjs +2 -0
  33. package/dist/inference/interruption/errors.cjs.map +1 -0
  34. package/dist/inference/interruption/errors.d.cts +2 -0
  35. package/dist/inference/interruption/errors.d.ts +2 -0
  36. package/dist/inference/interruption/errors.d.ts.map +1 -0
  37. package/dist/inference/interruption/errors.js +1 -0
  38. package/dist/inference/interruption/errors.js.map +1 -0
  39. package/dist/inference/interruption/http_transport.cjs +57 -0
  40. package/dist/inference/interruption/http_transport.cjs.map +1 -0
  41. package/dist/inference/interruption/http_transport.d.cts +23 -0
  42. package/dist/inference/interruption/http_transport.d.ts +23 -0
  43. package/dist/inference/interruption/http_transport.d.ts.map +1 -0
  44. package/dist/inference/interruption/http_transport.js +33 -0
  45. package/dist/inference/interruption/http_transport.js.map +1 -0
  46. package/dist/inference/interruption/index.cjs +34 -0
  47. package/dist/inference/interruption/index.cjs.map +1 -0
  48. package/dist/inference/interruption/index.d.cts +5 -0
  49. package/dist/inference/interruption/index.d.ts +5 -0
  50. package/dist/inference/interruption/index.d.ts.map +1 -0
  51. package/dist/inference/interruption/index.js +7 -0
  52. package/dist/inference/interruption/index.js.map +1 -0
  53. package/dist/inference/interruption/interruption.cjs +85 -0
  54. package/dist/inference/interruption/interruption.cjs.map +1 -0
  55. package/dist/inference/interruption/interruption.d.cts +48 -0
  56. package/dist/inference/interruption/interruption.d.ts +48 -0
  57. package/dist/inference/interruption/interruption.d.ts.map +1 -0
  58. package/dist/inference/interruption/interruption.js +59 -0
  59. package/dist/inference/interruption/interruption.js.map +1 -0
  60. package/dist/inference/llm.cjs +30 -3
  61. package/dist/inference/llm.cjs.map +1 -1
  62. package/dist/inference/llm.d.cts +3 -1
  63. package/dist/inference/llm.d.ts +3 -1
  64. package/dist/inference/llm.d.ts.map +1 -1
  65. package/dist/inference/llm.js +30 -3
  66. package/dist/inference/llm.js.map +1 -1
  67. package/dist/inference/utils.cjs +15 -2
  68. package/dist/inference/utils.cjs.map +1 -1
  69. package/dist/inference/utils.d.cts +1 -0
  70. package/dist/inference/utils.d.ts +1 -0
  71. package/dist/inference/utils.d.ts.map +1 -1
  72. package/dist/inference/utils.js +13 -1
  73. package/dist/inference/utils.js.map +1 -1
  74. package/dist/inference/utils.test.cjs +20 -0
  75. package/dist/inference/utils.test.cjs.map +1 -0
  76. package/dist/inference/utils.test.js +19 -0
  77. package/dist/inference/utils.test.js.map +1 -0
  78. package/dist/ipc/inference_proc_executor.cjs.map +1 -1
  79. package/dist/ipc/job_proc_executor.cjs.map +1 -1
  80. package/dist/ipc/job_proc_lazy_main.cjs +1 -1
  81. package/dist/ipc/job_proc_lazy_main.cjs.map +1 -1
  82. package/dist/ipc/job_proc_lazy_main.js +1 -1
  83. package/dist/ipc/job_proc_lazy_main.js.map +1 -1
  84. package/dist/llm/chat_context.cjs +20 -2
  85. package/dist/llm/chat_context.cjs.map +1 -1
  86. package/dist/llm/chat_context.d.cts +9 -0
  87. package/dist/llm/chat_context.d.ts +9 -0
  88. package/dist/llm/chat_context.d.ts.map +1 -1
  89. package/dist/llm/chat_context.js +20 -2
  90. package/dist/llm/chat_context.js.map +1 -1
  91. package/dist/llm/llm.cjs.map +1 -1
  92. package/dist/llm/llm.d.cts +1 -0
  93. package/dist/llm/llm.d.ts +1 -0
  94. package/dist/llm/llm.d.ts.map +1 -1
  95. package/dist/llm/llm.js.map +1 -1
  96. package/dist/llm/provider_format/openai.cjs +43 -20
  97. package/dist/llm/provider_format/openai.cjs.map +1 -1
  98. package/dist/llm/provider_format/openai.d.ts.map +1 -1
  99. package/dist/llm/provider_format/openai.js +43 -20
  100. package/dist/llm/provider_format/openai.js.map +1 -1
  101. package/dist/llm/provider_format/openai.test.cjs +35 -0
  102. package/dist/llm/provider_format/openai.test.cjs.map +1 -1
  103. package/dist/llm/provider_format/openai.test.js +35 -0
  104. package/dist/llm/provider_format/openai.test.js.map +1 -1
  105. package/dist/llm/provider_format/utils.cjs +1 -1
  106. package/dist/llm/provider_format/utils.cjs.map +1 -1
  107. package/dist/llm/provider_format/utils.d.ts.map +1 -1
  108. package/dist/llm/provider_format/utils.js +1 -1
  109. package/dist/llm/provider_format/utils.js.map +1 -1
  110. package/dist/stream/stream_channel.cjs +3 -0
  111. package/dist/stream/stream_channel.cjs.map +1 -1
  112. package/dist/stream/stream_channel.d.cts +3 -2
  113. package/dist/stream/stream_channel.d.ts +3 -2
  114. package/dist/stream/stream_channel.d.ts.map +1 -1
  115. package/dist/stream/stream_channel.js +3 -0
  116. package/dist/stream/stream_channel.js.map +1 -1
  117. package/dist/telemetry/trace_types.cjs +15 -0
  118. package/dist/telemetry/trace_types.cjs.map +1 -1
  119. package/dist/telemetry/trace_types.d.cts +5 -0
  120. package/dist/telemetry/trace_types.d.ts +5 -0
  121. package/dist/telemetry/trace_types.d.ts.map +1 -1
  122. package/dist/telemetry/trace_types.js +10 -0
  123. package/dist/telemetry/trace_types.js.map +1 -1
  124. package/dist/utils/ws_transport.cjs +51 -0
  125. package/dist/utils/ws_transport.cjs.map +1 -0
  126. package/dist/utils/ws_transport.d.cts +9 -0
  127. package/dist/utils/ws_transport.d.ts +9 -0
  128. package/dist/utils/ws_transport.d.ts.map +1 -0
  129. package/dist/utils/ws_transport.js +17 -0
  130. package/dist/utils/ws_transport.js.map +1 -0
  131. package/dist/utils/ws_transport.test.cjs +212 -0
  132. package/dist/utils/ws_transport.test.cjs.map +1 -0
  133. package/dist/utils/ws_transport.test.js +211 -0
  134. package/dist/utils/ws_transport.test.js.map +1 -0
  135. package/dist/voice/agent_activity.cjs +49 -0
  136. package/dist/voice/agent_activity.cjs.map +1 -1
  137. package/dist/voice/agent_activity.d.cts +14 -0
  138. package/dist/voice/agent_activity.d.ts +14 -0
  139. package/dist/voice/agent_activity.d.ts.map +1 -1
  140. package/dist/voice/agent_activity.js +49 -0
  141. package/dist/voice/agent_activity.js.map +1 -1
  142. package/dist/voice/agent_session.cjs +12 -1
  143. package/dist/voice/agent_session.cjs.map +1 -1
  144. package/dist/voice/agent_session.d.cts +3 -0
  145. package/dist/voice/agent_session.d.ts +3 -0
  146. package/dist/voice/agent_session.d.ts.map +1 -1
  147. package/dist/voice/agent_session.js +12 -1
  148. package/dist/voice/agent_session.js.map +1 -1
  149. package/dist/voice/audio_recognition.cjs +124 -2
  150. package/dist/voice/audio_recognition.cjs.map +1 -1
  151. package/dist/voice/audio_recognition.d.cts +32 -1
  152. package/dist/voice/audio_recognition.d.ts +32 -1
  153. package/dist/voice/audio_recognition.d.ts.map +1 -1
  154. package/dist/voice/audio_recognition.js +127 -2
  155. package/dist/voice/audio_recognition.js.map +1 -1
  156. package/dist/voice/background_audio.cjs.map +1 -1
  157. package/dist/voice/generation.cjs +2 -1
  158. package/dist/voice/generation.cjs.map +1 -1
  159. package/dist/voice/generation.d.ts.map +1 -1
  160. package/dist/voice/generation.js +2 -1
  161. package/dist/voice/generation.js.map +1 -1
  162. package/package.json +2 -1
  163. package/src/index.ts +2 -0
  164. package/src/inference/interruption/AdaptiveInterruptionDetector.ts +166 -0
  165. package/src/inference/interruption/InterruptionStream.ts +397 -0
  166. package/src/inference/interruption/defaults.ts +33 -0
  167. package/src/inference/interruption/errors.ts +0 -0
  168. package/src/inference/interruption/http_transport.ts +61 -0
  169. package/src/inference/interruption/index.ts +4 -0
  170. package/src/inference/interruption/interruption.ts +88 -0
  171. package/src/inference/llm.ts +42 -3
  172. package/src/inference/utils.test.ts +31 -0
  173. package/src/inference/utils.ts +15 -0
  174. package/src/ipc/job_proc_lazy_main.ts +1 -1
  175. package/src/llm/chat_context.ts +32 -2
  176. package/src/llm/llm.ts +1 -0
  177. package/src/llm/provider_format/openai.test.ts +40 -0
  178. package/src/llm/provider_format/openai.ts +46 -19
  179. package/src/llm/provider_format/utils.ts +5 -1
  180. package/src/stream/stream_channel.ts +6 -2
  181. package/src/telemetry/trace_types.ts +7 -0
  182. package/src/utils/ws_transport.test.ts +282 -0
  183. package/src/utils/ws_transport.ts +22 -0
  184. package/src/voice/agent_activity.ts +61 -0
  185. package/src/voice/agent_session.ts +22 -2
  186. package/src/voice/audio_recognition.ts +161 -1
  187. package/src/voice/generation.ts +1 -0
package/src/inference/interruption/interruption.ts ADDED
@@ -0,0 +1,88 @@
+ import { slidingWindowMinMax } from '../utils.js';
+ import { MIN_INTERRUPTION_DURATION } from './defaults.js';
+
+ export enum InterruptionEventType {
+   INTERRUPTION = 'interruption',
+   OVERLAP_SPEECH_ENDED = 'overlap_speech_ended',
+ }
+ export interface InterruptionEvent {
+   type: InterruptionEventType;
+   timestamp: number;
+   isInterruption: boolean;
+   totalDuration: number;
+   predictionDuration: number;
+   detectionDelay: number;
+   overlapSpeechStartedAt?: number;
+   speechInput?: Int16Array;
+   probabilities?: Float32Array;
+   probability: number;
+ }
+
+ export class InterruptionDetectionError extends Error {
+   readonly type = 'InterruptionDetectionError';
+
+   readonly timestamp: number;
+   readonly label: string;
+   readonly recoverable: boolean;
+
+   constructor(message: string, timestamp: number, label: string, recoverable: boolean) {
+     super(message);
+     this.name = 'InterruptionDetectionError';
+     this.timestamp = timestamp;
+     this.label = label;
+     this.recoverable = recoverable;
+   }
+
+   toString(): string {
+     return `${this.name}: ${this.message} (label=${this.label}, timestamp=${this.timestamp}, recoverable=${this.recoverable})`;
+   }
+ }
+
+ function estimateProbability(
+   probabilities: Float32Array,
+   windowSize: number = MIN_INTERRUPTION_DURATION,
+ ): number {
+   const minWindow = Math.ceil(windowSize / 0.025); // 25ms per frame
+   if (probabilities.length < minWindow) {
+     return 0;
+   }
+
+   return slidingWindowMinMax(probabilities, windowSize);
+ }
+
+ /**
+  * Typed cache entry for interruption inference results.
+  */
+ export class InterruptionCacheEntry {
+   readonly createdAt: number;
+   readonly totalDuration: number;
+   readonly predictionDuration: number;
+   readonly detectionDelay: number;
+   readonly speechInput?: Int16Array;
+   readonly probabilities?: Float32Array;
+   readonly isInterruption?: boolean;
+   readonly probability: number;
+
+   constructor(params: {
+     createdAt: number;
+     speechInput?: Int16Array;
+     totalDuration?: number;
+     predictionDuration?: number;
+     detectionDelay?: number;
+     probabilities?: Float32Array;
+     isInterruption?: boolean;
+   }) {
+     this.createdAt = params.createdAt;
+     this.totalDuration = params.totalDuration ?? 0;
+     this.predictionDuration = params.predictionDuration ?? 0;
+     this.detectionDelay = params.detectionDelay ?? 0;
+     this.speechInput = params.speechInput;
+     this.probabilities = params.probabilities;
+     this.isInterruption = params.isInterruption;
+     this.probability = this.probabilities ? estimateProbability(this.probabilities) : 0;
+   }
+
+   static default(): InterruptionCacheEntry {
+     return new InterruptionCacheEntry({ createdAt: 0 });
+   }
+ }
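
For orientation, here is a small illustrative sketch (not part of the package) of how the new InterruptionCacheEntry ties its fields together. It assumes the class above is in scope and that probabilities arrive at roughly one value per 25 ms, as estimateProbability implies; the exact probability value also depends on MIN_INTERRUPTION_DURATION from defaults.ts, which is not shown in this diff.

// Hypothetical usage, not taken from the package:
const probabilities = new Float32Array([0.1, 0.82, 0.88, 0.91, 0.4]); // ~25 ms per frame (assumed)

const entry = new InterruptionCacheEntry({
  createdAt: Date.now(),
  probabilities,
  totalDuration: 0.125, // 5 frames * 25 ms
});

// entry.probability is the "sustained" value produced by estimateProbability:
// the maximum over windows of the per-window minimum, so a single spike does
// not register as a confident interruption.
console.log(entry.probability);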
package/src/inference/llm.ts CHANGED
@@ -27,7 +27,14 @@ export type OpenAIModels =
    | 'openai/gpt-4o-mini'
    | 'openai/gpt-oss-120b';
 
-  export type GoogleModels = 'google/gemini-2.0-flash-lite';
+  export type GoogleModels =
+    | 'google/gemini-3-pro-preview'
+    | 'google/gemini-3-flash-preview'
+    | 'google/gemini-2.5-pro'
+    | 'google/gemini-2.5-flash'
+    | 'google/gemini-2.5-flash-lite'
+    | 'google/gemini-2.0-flash'
+    | 'google/gemini-2.0-flash-lite';
 
   export type QwenModels = 'qwen/qwen3-235b-a22b-instruct';
 
@@ -235,6 +242,7 @@ export class LLMStream extends llm.LLMStream {
   private toolIndex?: number;
   private fncName?: string;
   private fncRawArguments?: string;
+  private toolExtra?: Record<string, unknown>;
 
   constructor(
     llm: LLM,
@@ -277,6 +285,7 @@ export class LLMStream extends llm.LLMStream {
     // (defined inside the run method to make sure the state is reset for each run/attempt)
     let retryable = true;
     this.toolCallId = this.fncName = this.fncRawArguments = this.toolIndex = undefined;
+    this.toolExtra = undefined;
 
     try {
       const messages = (await this.chatCtx.toProviderFormat(
@@ -428,6 +437,7 @@ export class LLMStream extends llm.LLMStream {
       if (this.toolCallId && tool.id && tool.index !== this.toolIndex) {
         callChunk = this.createRunningToolCallChunk(id, delta);
         this.toolCallId = this.fncName = this.fncRawArguments = undefined;
+        this.toolExtra = undefined;
       }
 
       // Start or continue building the current tool call
@@ -436,6 +446,10 @@ export class LLMStream extends llm.LLMStream {
         this.toolCallId = tool.id;
         this.fncName = tool.function.name;
         this.fncRawArguments = tool.function.arguments || '';
+        // Extract extra from tool call (e.g., Google thought signatures)
+        this.toolExtra =
+          // eslint-disable-next-line @typescript-eslint/no-explicit-any
+          ((tool as any).extra_content as Record<string, unknown> | undefined) ?? undefined;
       } else if (tool.function.arguments) {
         this.fncRawArguments = (this.fncRawArguments || '') + tool.function.arguments;
       }
@@ -454,11 +468,17 @@ export class LLMStream extends llm.LLMStream {
     ) {
       const callChunk = this.createRunningToolCallChunk(id, delta);
       this.toolCallId = this.fncName = this.fncRawArguments = undefined;
+      this.toolExtra = undefined;
       return callChunk;
     }
 
+    // Extract extra from delta (e.g., Google thought signatures on text parts)
+    const deltaExtra =
+      // eslint-disable-next-line @typescript-eslint/no-explicit-any
+      ((delta as any).extra_content as Record<string, unknown> | undefined) ?? undefined;
+
     // Regular content message
-    if (!delta.content) {
+    if (!delta.content && !deltaExtra) {
       return undefined;
     }
 
@@ -466,7 +486,8 @@ export class LLMStream extends llm.LLMStream {
       id,
       delta: {
         role: 'assistant',
-        content: delta.content,
+        content: delta.content || undefined,
+        extra: deltaExtra,
       },
     };
   }
@@ -475,19 +496,37 @@ export class LLMStream extends llm.LLMStream {
     id: string,
     delta: OpenAI.Chat.Completions.ChatCompletionChunk.Choice.Delta,
   ): llm.ChatChunk {
+    const toolExtra = this.toolExtra ? { ...this.toolExtra } : {};
+    const thoughtSignature = this.extractThoughtSignature(toolExtra);
+    const deltaExtra =
+      // eslint-disable-next-line @typescript-eslint/no-explicit-any
+      ((delta as any).extra_content as Record<string, unknown> | undefined) ?? undefined;
+
     return {
       id,
       delta: {
         role: 'assistant',
         content: delta.content || undefined,
+        extra: deltaExtra,
         toolCalls: [
           llm.FunctionCall.create({
             callId: this.toolCallId || '',
             name: this.fncName || '',
             args: this.fncRawArguments || '',
+            extra: toolExtra,
+            thoughtSignature,
           }),
         ],
       },
     };
   }
+
+  private extractThoughtSignature(extra?: Record<string, unknown>): string | undefined {
+    const googleExtra = extra?.google;
+    if (googleExtra && typeof googleExtra === 'object') {
+      // eslint-disable-next-line @typescript-eslint/no-explicit-any
+      return (googleExtra as any).thoughtSignature || (googleExtra as any).thought_signature;
+    }
+    return undefined;
+  }
 }
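
To make the new extra_content plumbing concrete, here is a hedged, self-contained sketch of what a streamed tool-call delta carrying Google extra content could look like and what the extraction logic above pulls out of it. The field names are taken from the diff; the payload values are invented.

// Illustrative shape of a streamed tool-call delta (values made up):
const toolDelta = {
  index: 0,
  id: 'call_abc',
  function: { name: 'get_weather', arguments: '{"city":"Oslo"}' },
  // Provider-specific extras travel in `extra_content`; for Gemini this can hold
  // a thought signature that must be echoed back on the next request.
  extra_content: { google: { thoughtSignature: 'sig-123' } },
};

// Mirrors LLMStream.extractThoughtSignature: accepts both camelCase and snake_case keys.
function readThoughtSignature(extra?: Record<string, unknown>): string | undefined {
  const google = extra?.google;
  if (google && typeof google === 'object') {
    const g = google as Record<string, unknown>;
    return (g.thoughtSignature as string | undefined) ?? (g.thought_signature as string | undefined);
  }
  return undefined;
}

console.log(readThoughtSignature(toolDelta.extra_content)); // 'sig-123'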
package/src/inference/utils.test.ts ADDED
@@ -0,0 +1,31 @@
+ // SPDX-FileCopyrightText: 2025 LiveKit, Inc.
+ //
+ // SPDX-License-Identifier: Apache-2.0
+ import { describe, expect, it } from 'vitest';
+ import { slidingWindowMinMax } from './utils.js';
+
+ describe('slidingWindowMinMax', () => {
+   it('returns -Infinity when array is shorter than window size', () => {
+     expect(slidingWindowMinMax([0.5, 0.6], 3)).toBe(-Infinity);
+     expect(slidingWindowMinMax([], 1)).toBe(-Infinity);
+   });
+
+   it('returns the max value when window size is 1', () => {
+     // With window size 1, min of each window is the element itself,
+     // so max of mins is just the max of the array
+     expect(slidingWindowMinMax([0.1, 0.5, 0.3, 0.8, 0.2], 1)).toBe(0.8);
+   });
+
+   it('finds the best sustained probability across windows', () => {
+     // Windows of size 3: [0.2, 0.8, 0.7], [0.8, 0.7, 0.3], [0.7, 0.3, 0.9]
+     // Mins: 0.2, 0.3, 0.3
+     // Max of mins: 0.3
+     expect(slidingWindowMinMax([0.2, 0.8, 0.7, 0.3, 0.9], 3)).toBe(0.3);
+   });
+
+   it('returns the single element when array length equals window size', () => {
+     // Only one window covering the entire array, return min of that window
+     expect(slidingWindowMinMax([0.5, 0.9, 0.7], 3)).toBe(0.5);
+     expect(slidingWindowMinMax([0.8], 1)).toBe(0.8);
+   });
+ });
package/src/inference/utils.ts CHANGED
@@ -64,3 +64,18 @@ export async function connectWs(
     socket.once('close', onClose);
   });
 }
+
+ export function slidingWindowMinMax(probabilities: Float32Array, minWindow: number): number {
+   if (probabilities.length < minWindow) {
+     return -Infinity;
+   }
+
+   let maxOfMins = -Infinity;
+
+   for (let i = 0; i <= probabilities.length - minWindow; i++) {
+     const windowMin = Math.min(...probabilities.slice(i, i + minWindow));
+     maxOfMins = Math.max(maxOfMins, windowMin);
+   }
+
+   return maxOfMins;
+ }
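
An optional aside, not part of the package: the helper above rescans every window, which is O(n·w). The same max-of-window-minima can be computed in O(n) with a monotonic deque if the probability buffers ever get long. A sketch under that assumption:

// Hypothetical O(n) variant using a monotonic deque of candidate-minimum indices.
function slidingWindowMinMaxDeque(probabilities: Float32Array, minWindow: number): number {
  if (probabilities.length < minWindow) return -Infinity;

  const deque: number[] = []; // indices whose values are increasing; front is the window minimum
  let maxOfMins = -Infinity;

  for (let i = 0; i < probabilities.length; i++) {
    // Drop candidates that can no longer be the minimum of any window ending at i.
    while (deque.length > 0 && probabilities[deque[deque.length - 1]!]! >= probabilities[i]!) {
      deque.pop();
    }
    deque.push(i);
    // Drop the front if it has slid out of the current window on the left.
    if (deque[0]! <= i - minWindow) deque.shift();
    // Once the first full window exists, its minimum sits at the deque front.
    if (i >= minWindow - 1) {
      maxOfMins = Math.max(maxOfMins, probabilities[deque[0]!]!);
    }
  }
  return maxOfMins;
}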
package/src/ipc/job_proc_lazy_main.ts CHANGED
@@ -136,7 +136,7 @@ const startJob = (
       shutdownTasks.push(callback());
     }
     await Promise.all(shutdownTasks).catch((error) =>
-      logger.error('error while shutting down the job', error),
+      logger.error({ error }, 'error while shutting down the job'),
     );
 
     process.send!({ case: 'done' });
package/src/llm/chat_context.ts CHANGED
@@ -189,6 +189,12 @@ export class FunctionCall {
 
   createdAt: number;
 
+  extra: Record<string, unknown>;
+  /**
+   * Optional grouping identifier for parallel tool calls.
+   */
+  groupId?: string;
+
   /**
    * Opaque signature for Gemini thinking mode.
    * When using Gemini 3+ models with thinking enabled, this signature must be
@@ -202,6 +208,8 @@ export class FunctionCall {
     args: string;
     id?: string;
     createdAt?: number;
+    extra?: Record<string, unknown>;
+    groupId?: string;
     thoughtSignature?: string;
   }) {
     const {
@@ -210,6 +218,8 @@ export class FunctionCall {
       args,
       id = shortuuid('item_'),
       createdAt = Date.now(),
+      extra = {},
+      groupId,
       thoughtSignature,
     } = params;
     this.id = id;
@@ -217,7 +227,15 @@ export class FunctionCall {
     this.args = args;
     this.name = name;
     this.createdAt = createdAt;
-    this.thoughtSignature = thoughtSignature;
+    this.extra = { ...extra };
+    this.groupId = groupId;
+    this.thoughtSignature =
+      thoughtSignature ??
+      (typeof this.extra.google === 'object' && this.extra.google !== null
+        ? // eslint-disable-next-line @typescript-eslint/no-explicit-any
+          (this.extra.google as any).thoughtSignature ||
+          (this.extra.google as any).thought_signature
+        : undefined);
   }
 
   static create(params: {
@@ -226,6 +244,8 @@ export class FunctionCall {
     args: string;
     id?: string;
     createdAt?: number;
+    extra?: Record<string, unknown>;
+    groupId?: string;
     thoughtSignature?: string;
   }) {
     return new FunctionCall(params);
@@ -241,6 +261,14 @@ export class FunctionCall {
       args: this.args,
     };
 
+    if (Object.keys(this.extra).length > 0) {
+      result.extra = this.extra as JSONValue;
+    }
+
+    if (this.groupId) {
+      result.groupId = this.groupId;
+    }
+
     if (this.thoughtSignature) {
       result.thoughtSignature = this.thoughtSignature;
     }
@@ -627,7 +655,9 @@ export class ChatContext {
         a.name !== b.name ||
         a.callId !== b.callId ||
         a.args !== b.args ||
-        a.thoughtSignature !== b.thoughtSignature
+        a.thoughtSignature !== b.thoughtSignature ||
+        a.groupId !== b.groupId ||
+        JSON.stringify(a.extra) !== JSON.stringify(b.extra)
       ) {
         return false;
       }
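
A hedged usage sketch of the new FunctionCall fields (values invented, FunctionCall assumed to be imported from this module): extra carries provider-specific payloads, groupId groups parallel tool calls, and the thought signature is derived from extra.google when it is not passed explicitly.

// Illustrative only; not taken from the package's own examples.
const call = FunctionCall.create({
  callId: 'call_123',
  name: 'get_weather',
  args: '{"city":"Oslo"}',
  groupId: 'turn_42',
  extra: { google: { thoughtSignature: 'sig-123' } },
});

console.log(call.thoughtSignature); // 'sig-123', pulled out of extra.google by the constructor
console.log(call.groupId); // 'turn_42'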
package/src/llm/llm.ts CHANGED
@@ -17,6 +17,7 @@ export interface ChoiceDelta {
   role: ChatRole;
   content?: string;
   toolCalls?: FunctionCall[];
+  extra?: Record<string, unknown>;
 }
 
 export interface CompletionUsage {
package/src/llm/provider_format/openai.test.ts CHANGED
@@ -258,6 +258,46 @@ describe('toChatCtx', () => {
     ]);
   });
 
+  it('should include provider-specific extra content on tool calls', async () => {
+    const ctx = ChatContext.empty();
+    const msg = ctx.addMessage({ role: 'assistant', content: 'Running tool' });
+
+    const toolCall = FunctionCall.create({
+      id: `${msg.id}/tool_1`,
+      callId: 'call_789',
+      name: 'google_call',
+      args: '{}',
+      extra: { google: { thoughtSignature: 'sig-123' } },
+    });
+    const toolOutput = FunctionCallOutput.create({
+      callId: 'call_789',
+      output: '{"result": "ok"}',
+      isError: false,
+    });
+
+    ctx.insert([toolCall, toolOutput]);
+
+    const result = await toChatCtx(ctx);
+
+    expect(result[0]).toEqual({
+      role: 'assistant',
+      content: 'Running tool',
+      tool_calls: [
+        {
+          type: 'function',
+          id: 'call_789',
+          function: { name: 'google_call', arguments: '{}' },
+          extra_content: { google: { thoughtSignature: 'sig-123' } },
+        },
+      ],
+    });
+    expect(result[1]).toEqual({
+      role: 'tool',
+      tool_call_id: 'call_789',
+      content: '{"result": "ok"}',
+    });
+  });
+
   it('should handle multiple tool calls in one message', async () => {
     const ctx = ChatContext.empty();
 
package/src/llm/provider_format/openai.ts CHANGED
@@ -17,11 +17,20 @@ export async function toChatCtx(chatCtx: ChatContext, injectDummyUserMessage: bo
        ? await toChatItem(group.message)
        : { role: 'assistant' };
 
-     const toolCalls = group.toolCalls.map((toolCall) => ({
-       type: 'function',
-       id: toolCall.callId,
-       function: { name: toolCall.name, arguments: toolCall.args },
-     }));
+     const toolCalls = group.toolCalls.map((toolCall) => {
+       const tc: Record<string, any> = {
+         type: 'function',
+         id: toolCall.callId,
+         function: { name: toolCall.name, arguments: toolCall.args },
+       };
+
+       // Include provider-specific extra content (e.g., Google thought signatures)
+       const googleExtra = getGoogleExtra(toolCall);
+       if (googleExtra) {
+         tc.extra_content = { google: googleExtra };
+       }
+       return tc;
+     });
 
     if (toolCalls.length > 0) {
       message['tool_calls'] = toolCalls;
@@ -53,24 +62,33 @@ async function toChatItem(item: ChatItem) {
       }
     }
 
-     const content =
-       listContent.length == 0
-         ? textContent
-         : textContent.length == 0
-           ? listContent
-           : [...listContent, { type: 'text', text: textContent }];
+     const result: Record<string, any> = { role: item.role };
+     if (listContent.length === 0) {
+       result.content = textContent;
+     } else {
+       if (textContent.length > 0) {
+         listContent.push({ type: 'text', text: textContent });
+       }
+       result.content = listContent;
+     }
 
-     return { role: item.role, content };
+     return result;
   } else if (item.type === 'function_call') {
+     const tc: Record<string, any> = {
+       id: item.callId,
+       type: 'function',
+       function: { name: item.name, arguments: item.args },
+     };
+
+     // Include provider-specific extra content (e.g., Google thought signatures)
+     const googleExtra = getGoogleExtra(item);
+     if (googleExtra) {
+       tc.extra_content = { google: googleExtra };
+     }
+
     return {
       role: 'assistant',
-       tool_calls: [
-         {
-           id: item.callId,
-           type: 'function',
-           function: { name: item.name, arguments: item.args },
-         },
-       ],
+       tool_calls: [tc],
     };
   } else if (item.type === 'function_call_output') {
     return {
@@ -84,6 +102,15 @@ async function toChatItem(item: ChatItem) {
     throw new Error(`Unsupported item type: ${item['type']}`);
 }
 
+ function getGoogleExtra(
+   item: Partial<{ extra?: Record<string, unknown>; thoughtSignature?: string }>,
+ ): Record<string, unknown> | undefined {
+   const googleExtra =
+     (item.extra?.google as Record<string, unknown> | undefined) ||
+     (item.thoughtSignature ? { thoughtSignature: item.thoughtSignature } : undefined);
+   return googleExtra;
+ }
+
 async function toImageContent(content: ImageContent) {
   const cacheKey = 'serialized_image'; // TODO: use hash of encoding options if available
   let serialized: SerializedImage;
package/src/llm/provider_format/utils.ts CHANGED
@@ -133,7 +133,11 @@ export function groupToolCalls(chatCtx: ChatContext) {
 
     if (isAssistantMessage || isFunctionCall) {
       // only assistant messages and function calls can be grouped
-      const groupId = item.id.split('/')[0]!;
+      // For function calls, use group_id if available (for parallel function calls),
+      // otherwise fall back to id-based grouping for backwards compatibility
+      const groupId =
+        item.type === 'function_call' && item.groupId ? item.groupId : item.id.split('/')[0]!;
+
       if (itemGroups[groupId] === undefined) {
         itemGroups[groupId] = ChatItemGroup.create();
 
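
A hedged sketch of what the new grouping enables (usage assumed from the diff above, not verified against the full API): two parallel tool calls that share a groupId should be grouped under one assistant message when the context is converted to provider format, instead of being split by their item ids.

// Illustrative only; ChatContext, FunctionCall assumed in scope as in the tests above.
const ctx = ChatContext.empty();
ctx.insert([
  FunctionCall.create({ callId: 'call_1', name: 'get_weather', args: '{}', groupId: 'grp_1' }),
  FunctionCall.create({ callId: 'call_2', name: 'get_time', args: '{}', groupId: 'grp_1' }),
]);

// After groupToolCalls / toChatCtx, both calls should land in a single assistant
// message's tool_calls array because they resolve to the same group id.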
package/src/stream/stream_channel.ts CHANGED
@@ -4,14 +4,15 @@
 import type { ReadableStream } from 'node:stream/web';
 import { IdentityTransform } from './identity_transform.js';
 
- export interface StreamChannel<T> {
+ export interface StreamChannel<T, E extends Error = Error> {
   write(chunk: T): Promise<void>;
   close(): Promise<void>;
   stream(): ReadableStream<T>;
+  abort(error: E): Promise<void>;
   readonly closed: boolean;
 }
 
- export function createStreamChannel<T>(): StreamChannel<T> {
+ export function createStreamChannel<T, E extends Error = Error>(): StreamChannel<T, E> {
   const transform = new IdentityTransform<T>();
   const writer = transform.writable.getWriter();
   let isClosed = false;
@@ -19,6 +20,9 @@ export function createStreamChannel<T>(): StreamChannel<T> {
   return {
     write: (chunk: T) => writer.write(chunk),
     stream: () => transform.readable,
+    abort: (error: E) => {
+      return writer.abort(error);
+    },
     close: async () => {
       try {
         const result = await writer.close();
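
A hedged usage sketch of the new abort() capability, assuming the factory above: aborting the channel propagates the given error to the readable side rather than closing it cleanly, so pending and subsequent reads reject.

// Illustrative only; error type is supplied as the second type parameter.
async function demo() {
  const channel = createStreamChannel<string, Error>();
  const reader = channel.stream().getReader();

  await channel.write('hello');
  console.log(await reader.read()); // { value: 'hello', done: false }

  // e.g. when an upstream connection drops:
  await channel.abort(new Error('upstream connection lost'));

  // The next read should now reject with that error.
  await reader.read().catch((err) => console.error('stream aborted:', err));
}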
package/src/telemetry/trace_types.ts CHANGED
@@ -51,6 +51,13 @@ export const ATTR_TRANSCRIPT_CONFIDENCE = 'lk.transcript_confidence';
 export const ATTR_TRANSCRIPTION_DELAY = 'lk.transcription_delay';
 export const ATTR_END_OF_TURN_DELAY = 'lk.end_of_turn_delay';
 
+ // Adaptive Interruption attributes
+ export const ATTR_IS_INTERRUPTION = 'lk.is_interruption';
+ export const ATTR_INTERRUPTION_PROBABILITY = 'lk.interruption.probability';
+ export const ATTR_INTERRUPTION_TOTAL_DURATION = 'lk.interruption.total_duration';
+ export const ATTR_INTERRUPTION_PREDICTION_DURATION = 'lk.interruption.prediction_duration';
+ export const ATTR_INTERRUPTION_DETECTION_DELAY = 'lk.interruption.detection_delay';
+
 // metrics
 export const ATTR_LLM_METRICS = 'lk.llm_metrics';
 export const ATTR_TTS_METRICS = 'lk.tts_metrics';
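
A hedged sketch, not how the library wires it internally, of attaching the new attribute keys to an OpenTelemetry span. The constants are the ones from the module above; the numeric values and units (seconds) are assumed for illustration.

import { trace } from '@opentelemetry/api';

const span = trace.getTracer('example').startSpan('interruption_detection');
span.setAttribute(ATTR_IS_INTERRUPTION, true);
span.setAttribute(ATTR_INTERRUPTION_PROBABILITY, 0.87);
span.setAttribute(ATTR_INTERRUPTION_TOTAL_DURATION, 0.45); // seconds, assumed unit
span.setAttribute(ATTR_INTERRUPTION_PREDICTION_DURATION, 0.3);
span.setAttribute(ATTR_INTERRUPTION_DETECTION_DELAY, 0.15);
span.end();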