@providerprotocol/ai 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (67)
  1. package/LICENSE +21 -0
  2. package/README.md +84 -0
  3. package/dist/anthropic/index.d.ts +41 -0
  4. package/dist/anthropic/index.js +500 -0
  5. package/dist/anthropic/index.js.map +1 -0
  6. package/dist/chunk-CUCRF5W6.js +136 -0
  7. package/dist/chunk-CUCRF5W6.js.map +1 -0
  8. package/dist/chunk-FTFX2VET.js +424 -0
  9. package/dist/chunk-FTFX2VET.js.map +1 -0
  10. package/dist/chunk-QUUX4G7U.js +117 -0
  11. package/dist/chunk-QUUX4G7U.js.map +1 -0
  12. package/dist/chunk-Y6Q7JCNP.js +39 -0
  13. package/dist/chunk-Y6Q7JCNP.js.map +1 -0
  14. package/dist/google/index.d.ts +69 -0
  15. package/dist/google/index.js +517 -0
  16. package/dist/google/index.js.map +1 -0
  17. package/dist/http/index.d.ts +61 -0
  18. package/dist/http/index.js +43 -0
  19. package/dist/http/index.js.map +1 -0
  20. package/dist/index.d.ts +792 -0
  21. package/dist/index.js +898 -0
  22. package/dist/index.js.map +1 -0
  23. package/dist/openai/index.d.ts +204 -0
  24. package/dist/openai/index.js +1340 -0
  25. package/dist/openai/index.js.map +1 -0
  26. package/dist/provider-CUJWjgNl.d.ts +192 -0
  27. package/dist/retry-I2661_rv.d.ts +118 -0
  28. package/package.json +88 -0
  29. package/src/anthropic/index.ts +3 -0
  30. package/src/core/image.ts +188 -0
  31. package/src/core/llm.ts +619 -0
  32. package/src/core/provider.ts +92 -0
  33. package/src/google/index.ts +3 -0
  34. package/src/http/errors.ts +112 -0
  35. package/src/http/fetch.ts +210 -0
  36. package/src/http/index.ts +31 -0
  37. package/src/http/keys.ts +136 -0
  38. package/src/http/retry.ts +205 -0
  39. package/src/http/sse.ts +136 -0
  40. package/src/index.ts +32 -0
  41. package/src/openai/index.ts +9 -0
  42. package/src/providers/anthropic/index.ts +17 -0
  43. package/src/providers/anthropic/llm.ts +196 -0
  44. package/src/providers/anthropic/transform.ts +452 -0
  45. package/src/providers/anthropic/types.ts +213 -0
  46. package/src/providers/google/index.ts +17 -0
  47. package/src/providers/google/llm.ts +203 -0
  48. package/src/providers/google/transform.ts +487 -0
  49. package/src/providers/google/types.ts +214 -0
  50. package/src/providers/openai/index.ts +151 -0
  51. package/src/providers/openai/llm.completions.ts +201 -0
  52. package/src/providers/openai/llm.responses.ts +211 -0
  53. package/src/providers/openai/transform.completions.ts +628 -0
  54. package/src/providers/openai/transform.responses.ts +718 -0
  55. package/src/providers/openai/types.ts +711 -0
  56. package/src/types/content.ts +133 -0
  57. package/src/types/errors.ts +85 -0
  58. package/src/types/index.ts +105 -0
  59. package/src/types/llm.ts +211 -0
  60. package/src/types/messages.ts +182 -0
  61. package/src/types/provider.ts +195 -0
  62. package/src/types/schema.ts +58 -0
  63. package/src/types/stream.ts +146 -0
  64. package/src/types/thread.ts +226 -0
  65. package/src/types/tool.ts +88 -0
  66. package/src/types/turn.ts +118 -0
  67. package/src/utils/id.ts +28 -0
package/src/providers/google/llm.ts
@@ -0,0 +1,203 @@
+ import type { LLMHandler, BoundLLMModel, LLMRequest, LLMResponse, LLMStreamResult, LLMCapabilities } from '../../types/llm.ts';
+ import type { StreamEvent } from '../../types/stream.ts';
+ import type { LLMProvider } from '../../types/provider.ts';
+ import { UPPError } from '../../types/errors.ts';
+ import { resolveApiKey } from '../../http/keys.ts';
+ import { doFetch, doStreamFetch } from '../../http/fetch.ts';
+ import { parseSSEStream } from '../../http/sse.ts';
+ import { normalizeHttpError } from '../../http/errors.ts';
+ import type { GoogleLLMParams, GoogleResponse, GoogleStreamChunk } from './types.ts';
+ import {
+   transformRequest,
+   transformResponse,
+   transformStreamChunk,
+   createStreamState,
+   buildResponseFromState,
+ } from './transform.ts';
+
+ const GOOGLE_API_BASE = 'https://generativelanguage.googleapis.com/v1beta';
+
+ /**
+  * Google API capabilities
+  */
+ const GOOGLE_CAPABILITIES: LLMCapabilities = {
+   streaming: true,
+   tools: true,
+   structuredOutput: true,
+   imageInput: true,
+   videoInput: true,
+   audioInput: true,
+ };
+
+ /**
+  * Build Google API URL for a model
+  */
+ function buildUrl(modelId: string, action: 'generateContent' | 'streamGenerateContent', apiKey: string): string {
+   const base = `${GOOGLE_API_BASE}/models/${modelId}:${action}`;
+   return `${base}?key=${apiKey}`;
+ }
+
+ /**
+  * Create Google LLM handler
+  */
+ export function createLLMHandler(): LLMHandler<GoogleLLMParams> {
+   // Provider reference injected by createProvider() after construction
+   let providerRef: LLMProvider<GoogleLLMParams> | null = null;
+
+   return {
+     _setProvider(provider: LLMProvider<GoogleLLMParams>) {
+       providerRef = provider;
+     },
+
+     bind(modelId: string): BoundLLMModel<GoogleLLMParams> {
+       // Use the injected provider reference (set by createProvider)
+       if (!providerRef) {
+         throw new UPPError(
+           'Provider reference not set. Handler must be used with createProvider().',
+           'INVALID_REQUEST',
+           'google',
+           'llm'
+         );
+       }
+
+       const model: BoundLLMModel<GoogleLLMParams> = {
+         modelId,
+         capabilities: GOOGLE_CAPABILITIES,
+
+         get provider(): LLMProvider<GoogleLLMParams> {
+           return providerRef!;
+         },
+
+         async complete(request: LLMRequest<GoogleLLMParams>): Promise<LLMResponse> {
+           const apiKey = await resolveApiKey(
+             request.config,
+             'GOOGLE_API_KEY',
+             'google',
+             'llm'
+           );
+
+           const url = request.config.baseUrl
+             ? `${request.config.baseUrl}/models/${modelId}:generateContent?key=${apiKey}`
+             : buildUrl(modelId, 'generateContent', apiKey);
+
+           const body = transformRequest(request, modelId);
+
+           const response = await doFetch(
+             url,
+             {
+               method: 'POST',
+               headers: {
+                 'Content-Type': 'application/json',
+               },
+               body: JSON.stringify(body),
+               signal: request.signal,
+             },
+             request.config,
+             'google',
+             'llm'
+           );
+
+           const data = (await response.json()) as GoogleResponse;
+           return transformResponse(data);
+         },
+
+         stream(request: LLMRequest<GoogleLLMParams>): LLMStreamResult {
+           const state = createStreamState();
+           let responseResolve: (value: LLMResponse) => void;
+           let responseReject: (error: Error) => void;
+
+           const responsePromise = new Promise<LLMResponse>((resolve, reject) => {
+             responseResolve = resolve;
+             responseReject = reject;
+           });
+
+           async function* generateEvents(): AsyncGenerator<StreamEvent, void, unknown> {
+             try {
+               const apiKey = await resolveApiKey(
+                 request.config,
+                 'GOOGLE_API_KEY',
+                 'google',
+                 'llm'
+               );
+
+               const url = request.config.baseUrl
+                 ? `${request.config.baseUrl}/models/${modelId}:streamGenerateContent?alt=sse&key=${apiKey}`
+                 : `${buildUrl(modelId, 'streamGenerateContent', apiKey)}&alt=sse`;
+
+               const body = transformRequest(request, modelId);
+
+               const response = await doStreamFetch(
+                 url,
+                 {
+                   method: 'POST',
+                   headers: {
+                     'Content-Type': 'application/json',
+                   },
+                   body: JSON.stringify(body),
+                   signal: request.signal,
+                 },
+                 request.config,
+                 'google',
+                 'llm'
+               );
+
+               if (!response.ok) {
+                 const error = await normalizeHttpError(response, 'google', 'llm');
+                 responseReject(error);
+                 throw error;
+               }
+
+               if (!response.body) {
+                 const error = new UPPError(
+                   'No response body for streaming request',
+                   'PROVIDER_ERROR',
+                   'google',
+                   'llm'
+                 );
+                 responseReject(error);
+                 throw error;
+               }
+
+               for await (const data of parseSSEStream(response.body)) {
+                 if (typeof data === 'object' && data !== null) {
+                   const chunk = data as GoogleStreamChunk;
+
+                   // Check for error
+                   if ('error' in chunk) {
+                     const error = new UPPError(
+                       (chunk as any).error.message,
+                       'PROVIDER_ERROR',
+                       'google',
+                       'llm'
+                     );
+                     responseReject(error);
+                     throw error;
+                   }
+
+                   const events = transformStreamChunk(chunk, state);
+                   for (const event of events) {
+                     yield event;
+                   }
+                 }
+               }
+
+               responseResolve(buildResponseFromState(state));
+             } catch (error) {
+               responseReject(error as Error);
+               throw error;
+             }
+           }
+
+           return {
+             [Symbol.asyncIterator]() {
+               return generateEvents();
+             },
+             response: responsePromise,
+           };
+         },
+       };
+
+       return model;
+     },
+   };
+ }
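
For orientation, here is a minimal sketch of how the LLMStreamResult returned by stream() above is meant to be consumed: iterate the async event stream for deltas, then await the response promise for the accumulated LLMResponse. The import specifier, the generic helper signature, and the way model and request are obtained are assumptions (this diff only shows the Google handler), not confirmed package API.

// Sketch only: consuming the LLMStreamResult returned by model.stream().
// Assumes the root entrypoint re-exports these types; constructing `model`
// and `request` is outside the scope of this diff.
import type { BoundLLMModel, LLMRequest, LLMResponse } from '@providerprotocol/ai';

async function runStreaming<TParams>(
  model: BoundLLMModel<TParams>,
  request: LLMRequest<TParams>
): Promise<LLMResponse> {
  const result = model.stream(request);
  let text = '';

  // Events are yielded as chunks are parsed off the SSE stream.
  for await (const event of result) {
    // The exact delta union lives in types/stream.ts (not shown in this diff).
    const delta = event.delta as Partial<{ text: string; toolName: string; argumentsJson: string }>;
    if (event.type === 'text_delta' && delta.text) {
      text += delta.text;
    } else if (event.type === 'tool_call_delta') {
      console.log('tool call delta:', delta.toolName, delta.argumentsJson);
    }
  }

  // Resolved from the accumulated stream state once the SSE stream ends.
  const response = await result.response;
  console.log('streamed text:', text);
  return response;
}
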
package/src/providers/google/transform.ts
@@ -0,0 +1,487 @@
+ import type { LLMRequest, LLMResponse } from '../../types/llm.ts';
+ import type { Message } from '../../types/messages.ts';
+ import type { StreamEvent } from '../../types/stream.ts';
+ import type { Tool, ToolCall } from '../../types/tool.ts';
+ import type { TokenUsage } from '../../types/turn.ts';
+ import type { ContentBlock, TextBlock, ImageBlock } from '../../types/content.ts';
+ import {
+   AssistantMessage,
+   isUserMessage,
+   isAssistantMessage,
+   isToolResultMessage,
+ } from '../../types/messages.ts';
+ import type {
+   GoogleLLMParams,
+   GoogleRequest,
+   GoogleContent,
+   GooglePart,
+   GoogleTool,
+   GoogleResponse,
+   GoogleStreamChunk,
+   GoogleFunctionCallPart,
+ } from './types.ts';
+
+ /**
+  * Transform UPP request to Google format
+  */
+ export function transformRequest<TParams extends GoogleLLMParams>(
+   request: LLMRequest<TParams>,
+   modelId: string
+ ): GoogleRequest {
+   const params = (request.params ?? {}) as GoogleLLMParams;
+
+   const googleRequest: GoogleRequest = {
+     contents: transformMessages(request.messages),
+   };
+
+   // System instruction (separate from contents in Google)
+   if (request.system) {
+     googleRequest.systemInstruction = {
+       parts: [{ text: request.system }],
+     };
+   }
+
+   // Generation config
+   const generationConfig: NonNullable<GoogleRequest['generationConfig']> = {};
+
+   if (params.maxOutputTokens !== undefined) {
+     generationConfig.maxOutputTokens = params.maxOutputTokens;
+   }
+   if (params.temperature !== undefined) {
+     generationConfig.temperature = params.temperature;
+   }
+   if (params.topP !== undefined) {
+     generationConfig.topP = params.topP;
+   }
+   if (params.topK !== undefined) {
+     generationConfig.topK = params.topK;
+   }
+   if (params.stopSequences !== undefined) {
+     generationConfig.stopSequences = params.stopSequences;
+   }
+   if (params.candidateCount !== undefined) {
+     generationConfig.candidateCount = params.candidateCount;
+   }
+   if (params.responseMimeType !== undefined) {
+     generationConfig.responseMimeType = params.responseMimeType;
+   }
+   if (params.responseSchema !== undefined) {
+     generationConfig.responseSchema = params.responseSchema as Record<string, unknown>;
+   }
+   if (params.presencePenalty !== undefined) {
+     generationConfig.presencePenalty = params.presencePenalty;
+   }
+   if (params.frequencyPenalty !== undefined) {
+     generationConfig.frequencyPenalty = params.frequencyPenalty;
+   }
+   if (params.seed !== undefined) {
+     generationConfig.seed = params.seed;
+   }
+   if (params.responseLogprobs !== undefined) {
+     generationConfig.responseLogprobs = params.responseLogprobs;
+   }
+   if (params.logprobs !== undefined) {
+     generationConfig.logprobs = params.logprobs;
+   }
+   if (params.audioTimestamp !== undefined) {
+     generationConfig.audioTimestamp = params.audioTimestamp;
+   }
+   if (params.thinkingConfig !== undefined) {
+     generationConfig.thinkingConfig = params.thinkingConfig;
+   }
+
+   // Protocol-level structured output (overrides provider-specific settings)
+   if (request.structure) {
+     generationConfig.responseMimeType = 'application/json';
+     generationConfig.responseSchema = request.structure as unknown as Record<string, unknown>;
+   }
+
+   if (Object.keys(generationConfig).length > 0) {
+     googleRequest.generationConfig = generationConfig;
+   }
+
+   // Tools
+   if (request.tools && request.tools.length > 0) {
+     googleRequest.tools = [
+       {
+         functionDeclarations: request.tools.map(transformTool),
+       },
+     ];
+   }
+
+   return googleRequest;
+ }
+
+ /**
+  * Filter to only valid content blocks with a type property
+  */
+ function filterValidContent<T extends { type?: string }>(content: T[]): T[] {
+   return content.filter((c) => c && typeof c.type === 'string');
+ }
+
+ /**
+  * Transform UPP Messages to Google contents
+  */
+ function transformMessages(messages: Message[]): GoogleContent[] {
+   const contents: GoogleContent[] = [];
+
+   for (const msg of messages) {
+     if (isUserMessage(msg)) {
+       const validContent = filterValidContent(msg.content);
+       const parts = validContent.map(transformContentBlock);
+       // Google requires at least one part - add placeholder if empty
+       if (parts.length === 0) {
+         parts.push({ text: '' });
+       }
+       contents.push({
+         role: 'user',
+         parts,
+       });
+     } else if (isAssistantMessage(msg)) {
+       const validContent = filterValidContent(msg.content);
+       const parts: GooglePart[] = validContent.map(transformContentBlock);
+
+       // Add function calls - use stored parts with thought signatures if available
+       const googleMeta = msg.metadata?.google as {
+         functionCallParts?: Array<{
+           name: string;
+           args: Record<string, unknown>;
+           thoughtSignature?: string;
+         }>;
+       } | undefined;
+
+       if (googleMeta?.functionCallParts && googleMeta.functionCallParts.length > 0) {
+         // Use stored function call parts with thought signatures
+         for (const fc of googleMeta.functionCallParts) {
+           const part: GoogleFunctionCallPart = {
+             functionCall: {
+               name: fc.name,
+               args: fc.args,
+             },
+           };
+           if (fc.thoughtSignature) {
+             part.thoughtSignature = fc.thoughtSignature;
+           }
+           parts.push(part);
+         }
+       } else if (msg.toolCalls) {
+         // Fallback: reconstruct from tool calls (no thought signatures)
+         for (const call of msg.toolCalls) {
+           parts.push({
+             functionCall: {
+               name: call.toolName,
+               args: call.arguments,
+             },
+           });
+         }
+       }
+
+       // Google requires at least one part - add placeholder if empty
+       if (parts.length === 0) {
+         parts.push({ text: '' });
+       }
+
+       contents.push({
+         role: 'model',
+         parts,
+       });
+     } else if (isToolResultMessage(msg)) {
+       // Function results are sent as user messages in Google
+       contents.push({
+         role: 'user',
+         parts: msg.results.map((result) => ({
+           functionResponse: {
+             name: result.toolCallId, // Google uses the function name, but we store it in toolCallId
+             response:
+               typeof result.result === 'object'
+                 ? (result.result as Record<string, unknown>)
+                 : { result: result.result },
+           },
+         })),
+       });
+     }
+   }
+
+   return contents;
+ }
+
+ /**
+  * Transform a content block to Google format
+  */
+ function transformContentBlock(block: ContentBlock): GooglePart {
+   switch (block.type) {
+     case 'text':
+       return { text: block.text };
+
+     case 'image': {
+       const imageBlock = block as ImageBlock;
+       let data: string;
+
+       if (imageBlock.source.type === 'base64') {
+         data = imageBlock.source.data;
+       } else if (imageBlock.source.type === 'bytes') {
+         data = btoa(
+           Array.from(imageBlock.source.data)
+             .map((b) => String.fromCharCode(b))
+             .join('')
+         );
+       } else {
+         throw new Error('Google API does not support URL image sources directly');
+       }
+
+       return {
+         inlineData: {
+           mimeType: imageBlock.mimeType,
+           data,
+         },
+       };
+     }
+
+     default:
+       throw new Error(`Unsupported content type: ${block.type}`);
+   }
+ }
+
+ /**
+  * Transform a UPP Tool to Google format
+  */
+ function transformTool(tool: Tool): GoogleTool['functionDeclarations'][0] {
+   return {
+     name: tool.name,
+     description: tool.description,
+     parameters: {
+       type: 'object',
+       properties: tool.parameters.properties,
+       required: tool.parameters.required,
+     },
+   };
+ }
+
+ /**
+  * Transform Google response to UPP LLMResponse
+  */
+ export function transformResponse(data: GoogleResponse): LLMResponse {
+   const candidate = data.candidates?.[0];
+   if (!candidate) {
+     throw new Error('No candidates in Google response');
+   }
+
+   const textContent: TextBlock[] = [];
+   const toolCalls: ToolCall[] = [];
+   let structuredData: unknown;
+   // Store original function call parts with thought signatures for echoing back
+   const functionCallParts: Array<{
+     name: string;
+     args: Record<string, unknown>;
+     thoughtSignature?: string;
+   }> = [];
+
+   for (const part of candidate.content.parts) {
+     if ('text' in part) {
+       textContent.push({ type: 'text', text: part.text });
+       // Try to parse as JSON for structured output (native JSON mode)
+       if (structuredData === undefined) {
+         try {
+           structuredData = JSON.parse(part.text);
+         } catch {
+           // Not valid JSON - that's fine, might not be structured output
+         }
+       }
+     } else if ('functionCall' in part) {
+       const fc = part as GoogleFunctionCallPart;
+       toolCalls.push({
+         toolCallId: fc.functionCall.name, // Google doesn't have call IDs, use name
+         toolName: fc.functionCall.name,
+         arguments: fc.functionCall.args,
+       });
+       // Store the full part including thought signature
+       functionCallParts.push({
+         name: fc.functionCall.name,
+         args: fc.functionCall.args,
+         thoughtSignature: fc.thoughtSignature,
+       });
+     }
+   }
+
+   const message = new AssistantMessage(
+     textContent,
+     toolCalls.length > 0 ? toolCalls : undefined,
+     {
+       metadata: {
+         google: {
+           finishReason: candidate.finishReason,
+           safetyRatings: candidate.safetyRatings,
+           // Store function call parts with thought signatures for multi-turn
+           functionCallParts: functionCallParts.length > 0 ? functionCallParts : undefined,
+         },
+       },
+     }
+   );
+
+   const usage: TokenUsage = {
+     inputTokens: data.usageMetadata?.promptTokenCount ?? 0,
+     outputTokens: data.usageMetadata?.candidatesTokenCount ?? 0,
+     totalTokens: data.usageMetadata?.totalTokenCount ?? 0,
+   };
+
+   return {
+     message,
+     usage,
+     stopReason: candidate.finishReason ?? 'STOP',
+     data: structuredData,
+   };
+ }
+
+ /**
+  * State for accumulating streaming response
+  */
+ export interface StreamState {
+   content: string;
+   toolCalls: Array<{ name: string; args: Record<string, unknown>; thoughtSignature?: string }>;
+   finishReason: string | null;
+   inputTokens: number;
+   outputTokens: number;
+   isFirstChunk: boolean;
+ }
+
+ /**
+  * Create initial stream state
+  */
+ export function createStreamState(): StreamState {
+   return {
+     content: '',
+     toolCalls: [],
+     finishReason: null,
+     inputTokens: 0,
+     outputTokens: 0,
+     isFirstChunk: true,
+   };
+ }
+
+ /**
+  * Transform Google stream chunk to UPP StreamEvents
+  */
+ export function transformStreamChunk(
+   chunk: GoogleStreamChunk,
+   state: StreamState
+ ): StreamEvent[] {
+   const events: StreamEvent[] = [];
+
+   // First chunk - emit message start
+   if (state.isFirstChunk) {
+     events.push({ type: 'message_start', index: 0, delta: {} });
+     state.isFirstChunk = false;
+   }
+
+   // Usage metadata
+   if (chunk.usageMetadata) {
+     state.inputTokens = chunk.usageMetadata.promptTokenCount;
+     state.outputTokens = chunk.usageMetadata.candidatesTokenCount;
+   }
+
+   const candidate = chunk.candidates?.[0];
+   if (!candidate) {
+     return events;
+   }
+
+   // Process parts
+   for (const part of candidate.content?.parts ?? []) {
+     if ('text' in part) {
+       state.content += part.text;
+       events.push({
+         type: 'text_delta',
+         index: 0,
+         delta: { text: part.text },
+       });
+     } else if ('functionCall' in part) {
+       const fc = part as GoogleFunctionCallPart;
+       // Store with thought signature for echoing back
+       state.toolCalls.push({
+         name: fc.functionCall.name,
+         args: fc.functionCall.args,
+         thoughtSignature: fc.thoughtSignature,
+       });
+       events.push({
+         type: 'tool_call_delta',
+         index: state.toolCalls.length - 1,
+         delta: {
+           toolCallId: fc.functionCall.name,
+           toolName: fc.functionCall.name,
+           argumentsJson: JSON.stringify(fc.functionCall.args),
+         },
+       });
+     }
+   }
+
+   // Finish reason
+   if (candidate.finishReason) {
+     state.finishReason = candidate.finishReason;
+     events.push({ type: 'message_stop', index: 0, delta: {} });
+   }
+
+   return events;
+ }
+
+ /**
+  * Build LLMResponse from accumulated stream state
+  */
+ export function buildResponseFromState(state: StreamState): LLMResponse {
+   const textContent: TextBlock[] = [];
+   const toolCalls: ToolCall[] = [];
+   let structuredData: unknown;
+   const functionCallParts: Array<{
+     name: string;
+     args: Record<string, unknown>;
+     thoughtSignature?: string;
+   }> = [];
+
+   if (state.content) {
+     textContent.push({ type: 'text', text: state.content });
+     // Try to parse as JSON for structured output (native JSON mode)
+     try {
+       structuredData = JSON.parse(state.content);
+     } catch {
+       // Not valid JSON - that's fine, might not be structured output
+     }
+   }
+
+   for (const tc of state.toolCalls) {
+     toolCalls.push({
+       toolCallId: tc.name,
+       toolName: tc.name,
+       arguments: tc.args,
+     });
+     functionCallParts.push({
+       name: tc.name,
+       args: tc.args,
+       thoughtSignature: tc.thoughtSignature,
+     });
+   }
+
+   const message = new AssistantMessage(
+     textContent,
+     toolCalls.length > 0 ? toolCalls : undefined,
+     {
+       metadata: {
+         google: {
+           finishReason: state.finishReason,
+           // Store function call parts with thought signatures for multi-turn
+           functionCallParts: functionCallParts.length > 0 ? functionCallParts : undefined,
+         },
+       },
+     }
+   );
+
+   const usage: TokenUsage = {
+     inputTokens: state.inputTokens,
+     outputTokens: state.outputTokens,
+     totalTokens: state.inputTokens + state.outputTokens,
+   };
+
+   return {
+     message,
+     usage,
+     stopReason: state.finishReason ?? 'STOP',
+     data: structuredData,
+   };
+ }
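
To show how the streaming accumulator above fits together, here is a small test-style sketch written as if it sat next to this file: it feeds two synthetic chunks through createStreamState/transformStreamChunk and then builds the final response with buildResponseFromState. The chunk literals mirror only the fields this file reads; the real GoogleStreamChunk type lives in ./types.ts and is not shown in this diff, so the cast is an assumption.

// Sketch only: exercising the stream-state helpers with synthetic chunks.
import {
  createStreamState,
  transformStreamChunk,
  buildResponseFromState,
} from './transform.ts';
import type { GoogleStreamChunk } from './types.ts';

const state = createStreamState();

const chunks = [
  { candidates: [{ content: { parts: [{ text: '{"answer":' }] } }] },
  {
    candidates: [{ content: { parts: [{ text: ' 42}' }] }, finishReason: 'STOP' }],
    usageMetadata: { promptTokenCount: 12, candidatesTokenCount: 5 },
  },
] as GoogleStreamChunk[];

for (const chunk of chunks) {
  // The first call emits message_start plus a text_delta; the second call emits
  // another text_delta and a message_stop because finishReason is present.
  for (const event of transformStreamChunk(chunk, state)) {
    console.log(event.type);
  }
}

// The accumulated state becomes the final LLMResponse; since the concatenated
// text parses as JSON, it is also surfaced on `data` (structured output path).
const response = buildResponseFromState(state);
console.log(response.usage.totalTokens); // 17
console.log(response.data);              // { answer: 42 }
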