@providerprotocol/ai 0.0.23 → 0.0.25
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/anthropic/index.d.ts +1 -1
- package/dist/anthropic/index.js +66 -12
- package/dist/anthropic/index.js.map +1 -1
- package/dist/{chunk-MF5ETY5O.js → chunk-6AZVUI6H.js} +8 -1
- package/dist/chunk-6AZVUI6H.js.map +1 -0
- package/dist/{chunk-NWS5IKNR.js → chunk-TOJCZMVU.js} +3 -12
- package/dist/chunk-TOJCZMVU.js.map +1 -0
- package/dist/google/index.d.ts +34 -3
- package/dist/google/index.js +62 -22
- package/dist/google/index.js.map +1 -1
- package/dist/http/index.d.ts +2 -2
- package/dist/http/index.js +1 -1
- package/dist/index.d.ts +5 -5
- package/dist/index.js +12 -2
- package/dist/index.js.map +1 -1
- package/dist/ollama/index.d.ts +1 -1
- package/dist/ollama/index.js +14 -8
- package/dist/ollama/index.js.map +1 -1
- package/dist/openai/index.d.ts +1 -1
- package/dist/openai/index.js +60 -4
- package/dist/openai/index.js.map +1 -1
- package/dist/openrouter/index.d.ts +60 -1
- package/dist/openrouter/index.js +116 -21
- package/dist/openrouter/index.js.map +1 -1
- package/dist/{provider-DR1yins0.d.ts → provider-x4RocsnK.d.ts} +52 -3
- package/dist/proxy/index.d.ts +2 -2
- package/dist/proxy/index.js +1 -1
- package/dist/{retry-DJiqAslw.d.ts → retry-DTfjXXPh.d.ts} +1 -1
- package/dist/{stream-BuTrqt_j.d.ts → stream-ITNFNnO4.d.ts} +6 -1
- package/dist/xai/index.d.ts +1 -1
- package/dist/xai/index.js +151 -32
- package/dist/xai/index.js.map +1 -1
- package/package.json +1 -1
- package/dist/chunk-MF5ETY5O.js.map +0 -1
- package/dist/chunk-NWS5IKNR.js.map +0 -1
@@ -1,24 +1,15 @@
 // src/http/sse.ts
-var MAX_SSE_BUFFER_CHARS = 1024 * 1024;
 async function* parseSSEStream(body) {
   const reader = body.getReader();
   const decoder = new TextDecoder();
   let buffer = "";
-  const appendToBuffer = (chunk) => {
-    if (buffer.length + chunk.length > MAX_SSE_BUFFER_CHARS) {
-      throw new Error(
-        `SSE buffer exceeded maximum size (${MAX_SSE_BUFFER_CHARS} chars)`
-      );
-    }
-    buffer += chunk;
-  };
   try {
     while (true) {
       const { done, value } = await reader.read();
       if (done) {
         const tail = decoder.decode();
         if (tail) {
-          appendToBuffer(tail);
+          buffer += tail;
         }
         if (buffer.trim()) {
           const event = parseSSEEvent(buffer);
@@ -30,7 +21,7 @@ async function* parseSSEStream(body) {
       }
       const chunk = decoder.decode(value, { stream: true });
       if (chunk) {
-        appendToBuffer(chunk);
+        buffer += chunk;
       }
       const events = buffer.split(/\r?\n\r?\n/);
       buffer = events.pop() ?? "";
@@ -113,4 +104,4 @@ export {
   parseSSEStream,
   parseSimpleTextStream
 };
-//# sourceMappingURL=chunk-NWS5IKNR.js.map
+//# sourceMappingURL=chunk-TOJCZMVU.js.map
@@ -0,0 +1 @@
{"version":3,"sources":["../src/http/sse.ts"],"sourcesContent":["/**\n * Server-Sent Events (SSE) stream parsing utilities.\n * @module http/sse\n */\n\n/**\n * Parses a Server-Sent Events stream into JSON objects.\n *\n * This async generator handles the standard SSE wire format:\n * - Lines prefixed with \"data:\" contain event data\n * - Lines prefixed with \"event:\" specify event types\n * - Lines prefixed with \":\" are comments (used for keep-alive)\n * - Events are separated by double newlines\n * - Stream terminates on \"[DONE]\" message (OpenAI convention)\n *\n * Also handles non-standard formats used by some providers:\n * - Raw JSON without \"data:\" prefix (Google)\n * - Multi-line data fields\n *\n * @param body - ReadableStream from fetch response body\n * @yields Parsed JSON objects from each SSE event\n *\n * @example\n * ```typescript\n * const response = await doStreamFetch(url, init, config, 'openai', 'llm');\n *\n * for await (const event of parseSSEStream(response.body!)) {\n * // event is parsed JSON from each SSE data field\n * const chunk = event as OpenAIStreamChunk;\n * const delta = chunk.choices[0]?.delta?.content;\n * if (delta) {\n * process.stdout.write(delta);\n * }\n * }\n * ```\n */\nexport async function* parseSSEStream(\n body: ReadableStream<Uint8Array>\n): AsyncGenerator<unknown, void, unknown> {\n const reader = body.getReader();\n const decoder = new TextDecoder();\n let buffer = '';\n\n try {\n while (true) {\n const { done, value } = await reader.read();\n\n if (done) {\n const tail = decoder.decode();\n if (tail) {\n buffer += tail;\n }\n // Process any remaining data in buffer\n if (buffer.trim()) {\n const event = parseSSEEvent(buffer);\n if (event !== null && event !== undefined) {\n yield event;\n }\n }\n break;\n }\n\n const chunk = decoder.decode(value, { stream: true });\n if (chunk) {\n buffer += chunk;\n }\n\n // Process complete events (separated by double newlines or \\r\\n\\r\\n)\n const events = buffer.split(/\\r?\\n\\r?\\n/);\n\n // Keep the last partial event in the buffer\n buffer = events.pop() ?? '';\n\n for (const eventText of events) {\n if (!eventText.trim()) continue;\n\n const event = parseSSEEvent(eventText);\n if (event === 'DONE') {\n return;\n }\n if (event !== null && event !== undefined) {\n yield event;\n }\n }\n }\n } finally {\n reader.releaseLock();\n }\n}\n\n/**\n * Parses a single SSE event block into a JSON object.\n *\n * Handles the following line prefixes:\n * - \"data:\" - Event data (multiple data lines are concatenated)\n * - \"event:\" - Event type (added to result as _eventType)\n * - \":\" - Comment (ignored, often used for keep-alive)\n * - Raw JSON starting with { or [ (provider-specific fallback)\n *\n * @param eventText - Raw text of a single SSE event block\n * @returns Parsed JSON object, 'DONE' for termination signal, or null for invalid/empty events\n */\nfunction parseSSEEvent(eventText: string): unknown | 'DONE' | null {\n const lines = eventText.split('\\n');\n let data = '';\n let eventType = '';\n\n for (const line of lines) {\n const normalizedLine = line.endsWith('\\r') ? line.slice(0, -1) : line;\n if (normalizedLine.startsWith('event:')) {\n let value = normalizedLine.slice(6);\n if (value.startsWith(' ')) value = value.slice(1);\n eventType = value;\n } else if (normalizedLine.startsWith('data:')) {\n let value = normalizedLine.slice(5);\n if (value.startsWith(' ')) value = value.slice(1);\n data += (data ? 
'\\n' : '') + value;\n } else if (normalizedLine.startsWith(':')) {\n continue;\n } else {\n const trimmedStart = normalizedLine.trimStart();\n if (trimmedStart.startsWith('{') || trimmedStart.startsWith('[')) {\n data += (data ? '\\n' : '') + trimmedStart;\n }\n }\n }\n\n if (!data) {\n return null;\n }\n\n if (data === '[DONE]') {\n return 'DONE';\n }\n\n try {\n const parsed = JSON.parse(data);\n\n if (eventType) {\n return { _eventType: eventType, ...parsed };\n }\n\n return parsed;\n } catch {\n return null;\n }\n}\n\n/**\n * Parses a simple text stream without SSE formatting.\n *\n * This is a simpler alternative to {@link parseSSEStream} for providers\n * that stream raw text deltas without SSE event wrappers. Each chunk\n * from the response body is decoded and yielded as-is.\n *\n * Use this for:\n * - Plain text streaming responses\n * - Providers with custom streaming formats\n * - Testing and debugging stream handling\n *\n * @param body - ReadableStream from fetch response body\n * @yields Decoded text strings from each stream chunk\n *\n * @example\n * ```typescript\n * const response = await doStreamFetch(url, init, config, 'custom', 'llm');\n *\n * for await (const text of parseSimpleTextStream(response.body!)) {\n * process.stdout.write(text);\n * }\n * ```\n */\nexport async function* parseSimpleTextStream(\n body: ReadableStream<Uint8Array>\n): AsyncGenerator<string, void, unknown> {\n const reader = body.getReader();\n const decoder = new TextDecoder();\n\n try {\n while (true) {\n const { done, value } = await reader.read();\n\n if (done) break;\n\n const text = decoder.decode(value, { stream: true });\n if (text) {\n yield text;\n }\n }\n const remaining = decoder.decode();\n if (remaining) {\n yield remaining;\n }\n } finally {\n reader.releaseLock();\n 
}\n}\n"],"mappings":";AAoCA,gBAAuB,eACrB,MACwC;AACxC,QAAM,SAAS,KAAK,UAAU;AAC9B,QAAM,UAAU,IAAI,YAAY;AAChC,MAAI,SAAS;AAEb,MAAI;AACF,WAAO,MAAM;AACX,YAAM,EAAE,MAAM,MAAM,IAAI,MAAM,OAAO,KAAK;AAE1C,UAAI,MAAM;AACR,cAAM,OAAO,QAAQ,OAAO;AAC5B,YAAI,MAAM;AACR,oBAAU;AAAA,QACZ;AAEA,YAAI,OAAO,KAAK,GAAG;AACjB,gBAAM,QAAQ,cAAc,MAAM;AAClC,cAAI,UAAU,QAAQ,UAAU,QAAW;AACzC,kBAAM;AAAA,UACR;AAAA,QACF;AACA;AAAA,MACF;AAEA,YAAM,QAAQ,QAAQ,OAAO,OAAO,EAAE,QAAQ,KAAK,CAAC;AACpD,UAAI,OAAO;AACT,kBAAU;AAAA,MACZ;AAGA,YAAM,SAAS,OAAO,MAAM,YAAY;AAGxC,eAAS,OAAO,IAAI,KAAK;AAEzB,iBAAW,aAAa,QAAQ;AAC9B,YAAI,CAAC,UAAU,KAAK,EAAG;AAEvB,cAAM,QAAQ,cAAc,SAAS;AACrC,YAAI,UAAU,QAAQ;AACpB;AAAA,QACF;AACA,YAAI,UAAU,QAAQ,UAAU,QAAW;AACzC,gBAAM;AAAA,QACR;AAAA,MACF;AAAA,IACF;AAAA,EACF,UAAE;AACA,WAAO,YAAY;AAAA,EACrB;AACF;AAcA,SAAS,cAAc,WAA4C;AACjE,QAAM,QAAQ,UAAU,MAAM,IAAI;AAClC,MAAI,OAAO;AACX,MAAI,YAAY;AAEhB,aAAW,QAAQ,OAAO;AACxB,UAAM,iBAAiB,KAAK,SAAS,IAAI,IAAI,KAAK,MAAM,GAAG,EAAE,IAAI;AACjE,QAAI,eAAe,WAAW,QAAQ,GAAG;AACvC,UAAI,QAAQ,eAAe,MAAM,CAAC;AAClC,UAAI,MAAM,WAAW,GAAG,EAAG,SAAQ,MAAM,MAAM,CAAC;AAChD,kBAAY;AAAA,IACd,WAAW,eAAe,WAAW,OAAO,GAAG;AAC7C,UAAI,QAAQ,eAAe,MAAM,CAAC;AAClC,UAAI,MAAM,WAAW,GAAG,EAAG,SAAQ,MAAM,MAAM,CAAC;AAChD,eAAS,OAAO,OAAO,MAAM;AAAA,IAC/B,WAAW,eAAe,WAAW,GAAG,GAAG;AACzC;AAAA,IACF,OAAO;AACL,YAAM,eAAe,eAAe,UAAU;AAC9C,UAAI,aAAa,WAAW,GAAG,KAAK,aAAa,WAAW,GAAG,GAAG;AAChE,iBAAS,OAAO,OAAO,MAAM;AAAA,MAC/B;AAAA,IACF;AAAA,EACF;AAEA,MAAI,CAAC,MAAM;AACT,WAAO;AAAA,EACT;AAEA,MAAI,SAAS,UAAU;AACrB,WAAO;AAAA,EACT;AAEA,MAAI;AACF,UAAM,SAAS,KAAK,MAAM,IAAI;AAE9B,QAAI,WAAW;AACb,aAAO,EAAE,YAAY,WAAW,GAAG,OAAO;AAAA,IAC5C;AAEA,WAAO;AAAA,EACT,QAAQ;AACN,WAAO;AAAA,EACT;AACF;AA0BA,gBAAuB,sBACrB,MACuC;AACvC,QAAM,SAAS,KAAK,UAAU;AAC9B,QAAM,UAAU,IAAI,YAAY;AAEhC,MAAI;AACF,WAAO,MAAM;AACX,YAAM,EAAE,MAAM,MAAM,IAAI,MAAM,OAAO,KAAK;AAE1C,UAAI,KAAM;AAEV,YAAM,OAAO,QAAQ,OAAO,OAAO,EAAE,QAAQ,KAAK,CAAC;AACnD,UAAI,MAAM;AACR,cAAM;AAAA,MACR;AAAA,IACF;AACA,UAAM,YAAY,QAAQ,OAAO;AACjC,QAAI,WAAW;AACb,YAAM;AAAA,IACR;AAAA,EACF,UAAE;AACA,WAAO,YAAY;AAAA,EACrB;AACF;","names":[]}
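The restored sse.ts source in the map above documents how parseSSEStream is meant to be consumed. A minimal sketch based on that JSDoc follows; the '@providerprotocol/ai/http' import path and the OpenAI-style chunk shape are assumptions, not something this diff confirms.

// Sketch based on the JSDoc in src/http/sse.ts; the import path is assumed.
import { parseSSEStream } from '@providerprotocol/ai/http';

async function printDeltas(response: Response): Promise<void> {
  if (!response.body) throw new Error('Response has no body');
  // parseSSEStream yields one parsed JSON value per SSE "data:" event and
  // stops at the OpenAI-style "[DONE]" sentinel.
  for await (const event of parseSSEStream(response.body)) {
    const chunk = event as { choices?: Array<{ delta?: { content?: string } }> };
    const delta = chunk.choices?.[0]?.delta?.content;
    if (delta) process.stdout.write(delta);
  }
}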
package/dist/google/index.d.ts
CHANGED
@@ -1,4 +1,4 @@
-import { a as ProviderConfig, g as Provider } from '../provider-DR1yins0.js';
+import { a as ProviderConfig, g as Provider } from '../provider-x4RocsnK.js';
 
 /**
  * Provider-specific parameters for Google Gemini API requests.
@@ -125,14 +125,37 @@ interface GoogleLLMParams {
     toolConfig?: GoogleToolConfig;
 }
 /**
- * Configuration for extended thinking/reasoning in Gemini 3+ models.
+ * Configuration for extended thinking/reasoning in Gemini 2.5+ and 3+ models.
  *
  * Enables models to spend additional compute on reasoning before
  * generating a response, improving quality for complex tasks.
+ *
+ * For Gemini 2.5 models: Use `thinkingBudget` to control token allocation.
+ * For Gemini 3+ models: Use `thinkingLevel` (recommended) to set reasoning depth.
+ *
+ * Set `includeThoughts: true` to receive thought/reasoning content in the response.
  */
 interface GoogleThinkingConfig {
-    /**
+    /**
+     * Token budget allocated for model thinking/reasoning (Gemini 2.5 models).
+     * - `-1`: Dynamic thinking (default)
+     * - `0`: Disable thinking (Flash models only)
+     * - `128-32768`: Specific token budget
+     */
     thinkingBudget?: number;
+    /**
+     * Thinking level for Gemini 3+ models (recommended over thinkingBudget).
+     * - `"minimal"`: Likely prevents thinking (Gemini 3 Flash only)
+     * - `"low"`: Minimizes latency and cost
+     * - `"medium"`: Balanced (Gemini 3 Flash only)
+     * - `"high"`: Maximizes reasoning depth (default for Gemini 3)
+     */
+    thinkingLevel?: 'minimal' | 'low' | 'medium' | 'high';
+    /**
+     * Whether to include thought summaries in the response.
+     * When true, response parts with `thought: true` contain reasoning content.
+     */
+    includeThoughts?: boolean;
 }
 /**
  * A single content turn in the Google conversation format.
@@ -160,6 +183,14 @@ type GooglePart = GoogleTextPart | GoogleImagePart | GoogleFunctionCallPart | Go
 interface GoogleTextPart {
     /** The text content. */
     text: string;
+    /** If true, this part contains thinking/reasoning content (Gemini 2.5+/3+). */
+    thought?: boolean;
+    /**
+     * Encrypted thought signature for Gemini 3+ models.
+     * Must be forwarded back in subsequent requests to maintain reasoning context.
+     * Required for Gemini 3 multi-turn conversations; recommended for Gemini 2.5.
+     */
+    thoughtSignature?: string;
 }
 /**
  * Inline image content part with base64-encoded data.
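Taken together, the new declarations cover both how thinking is requested and how it comes back in text parts. A short sketch of values these types accept, using the fields exactly as declared above; the interface is copied locally because this diff does not show whether it is re-exported from the package root.

// Local copy of the shape added in google/index.d.ts (re-export not confirmed by this diff).
interface GoogleThinkingConfig {
  thinkingBudget?: number;
  thinkingLevel?: 'minimal' | 'low' | 'medium' | 'high';
  includeThoughts?: boolean;
}

// Gemini 2.5 style: explicit token budget, with thought summaries returned.
const gemini25Thinking: GoogleThinkingConfig = {
  thinkingBudget: 2048,
  includeThoughts: true,
};

// Gemini 3 style: level-based control; "high" is the documented default.
const gemini3Thinking: GoogleThinkingConfig = {
  thinkingLevel: 'high',
  includeThoughts: true,
};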
package/dist/google/index.js
CHANGED
@@ -13,10 +13,10 @@ import {
   isAssistantMessage,
   isToolResultMessage,
   isUserMessage
-} from "../chunk-MF5ETY5O.js";
+} from "../chunk-6AZVUI6H.js";
 import {
   parseSSEStream
-} from "../chunk-NWS5IKNR.js";
+} from "../chunk-TOJCZMVU.js";
 import {
   resolveApiKey
 } from "../chunk-55X3W2MN.js";
@@ -147,8 +147,18 @@ function transformMessages(messages) {
       });
     } else if (isAssistantMessage(msg)) {
       const validContent = filterValidContent(msg.content);
-      const
+      const nonReasoningContent = validContent.filter((c) => c.type !== "reasoning");
+      const parts = nonReasoningContent.map(transformContentBlock);
       const googleMeta = msg.metadata?.google;
+      if (googleMeta?.thoughtSignature) {
+        for (let i = parts.length - 1; i >= 0; i--) {
+          const part = parts[i];
+          if (part && "text" in part) {
+            part.thoughtSignature = googleMeta.thoughtSignature;
+            break;
+          }
+        }
+      }
       if (googleMeta?.functionCallParts && googleMeta.functionCallParts.length > 0) {
         for (const fc of googleMeta.functionCallParts) {
           const part = {
@@ -236,17 +246,26 @@ function transformResponse(data) {
   if (!candidate) {
     throw new Error("No candidates in Google response");
   }
-  const
+  const content = [];
   const toolCalls = [];
   let structuredData;
+  let lastThoughtSignature;
   const functionCallParts = [];
   for (const part of candidate.content.parts) {
     if ("text" in part) {
-
-      if (
-
-
-
+      const textPart = part;
+      if (textPart.thoughtSignature) {
+        lastThoughtSignature = textPart.thoughtSignature;
+      }
+      if (textPart.thought) {
+        content.push({ type: "reasoning", text: textPart.text });
+      } else {
+        content.push({ type: "text", text: textPart.text });
+        if (structuredData === void 0) {
+          try {
+            structuredData = JSON.parse(textPart.text);
+          } catch {
+          }
+        }
       }
     } else if ("functionCall" in part) {
@@ -265,7 +284,7 @@ function transformResponse(data) {
     } else if ("codeExecutionResult" in part) {
       const codeResult = part;
       if (codeResult.codeExecutionResult.output) {
-
+        content.push({ type: "text", text: `
 \`\`\`
 ${codeResult.codeExecutionResult.output}\`\`\`
 ` });
@@ -273,14 +292,16 @@ ${codeResult.codeExecutionResult.output}\`\`\`
     }
   }
   const message = new AssistantMessage(
-
+    content,
     toolCalls.length > 0 ? toolCalls : void 0,
     {
       metadata: {
         google: {
           finishReason: candidate.finishReason,
           safetyRatings: candidate.safetyRatings,
-          functionCallParts: functionCallParts.length > 0 ? functionCallParts : void 0
+          functionCallParts: functionCallParts.length > 0 ? functionCallParts : void 0,
+          // Store thoughtSignature for multi-turn context preservation (Gemini 3+)
+          thoughtSignature: lastThoughtSignature
         }
       }
     }
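These hunks form a round trip for thoughtSignature: transformResponse records the last signature it sees under metadata.google, and transformMessages (above) copies it back onto the final text part of the echoed assistant turn. A hedged sketch of how calling code might read the new response shape; the block and metadata types are inferred from this diff, not from documented exports.

// Shapes inferred from the transformResponse hunk (assumptions, not a published API).
type GoogleContentBlock =
  | { type: 'text'; text: string }
  | { type: 'reasoning'; text: string };

interface GoogleAssistantResult {
  content: GoogleContentBlock[];
  metadata?: { google?: { thoughtSignature?: string } };
}

// Split visible text from thought summaries, and keep the signature so the next
// request can echo it back (required for Gemini 3 multi-turn reasoning context).
function splitGoogleResponse(message: GoogleAssistantResult) {
  const text = message.content.filter((c) => c.type === 'text').map((c) => c.text).join('');
  const reasoning = message.content.filter((c) => c.type === 'reasoning').map((c) => c.text).join('');
  return { text, reasoning, thoughtSignature: message.metadata?.google?.thoughtSignature };
}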
@@ -302,6 +323,8 @@ ${codeResult.codeExecutionResult.output}\`\`\`
 function createStreamState() {
   return {
     content: "",
+    reasoning: "",
+    thoughtSignature: void 0,
     toolCalls: [],
     finishReason: null,
     inputTokens: 0,
@@ -327,12 +350,25 @@ function transformStreamChunk(chunk, state) {
   }
   for (const part of candidate.content?.parts ?? []) {
     if ("text" in part) {
-
-
-
-
-
-
+      const textPart = part;
+      if (textPart.thoughtSignature) {
+        state.thoughtSignature = textPart.thoughtSignature;
+      }
+      if (textPart.thought) {
+        state.reasoning += textPart.text;
+        events.push({
+          type: StreamEventType.ReasoningDelta,
+          index: 0,
+          delta: { text: textPart.text }
+        });
+      } else {
+        state.content += textPart.text;
+        events.push({
+          type: StreamEventType.TextDelta,
+          index: 0,
+          delta: { text: textPart.text }
+        });
+      }
     } else if ("functionCall" in part) {
       const fc = part;
       const toolCallId = createGoogleToolCallId(fc.functionCall.name, state.toolCalls.length);
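Streaming now mirrors the non-streaming path: thought parts produce ReasoningDelta events while ordinary text still produces TextDelta, each carrying delta.text. A small consumer sketch; the literal event-type strings are assumptions, since the diff only references the StreamEventType members by name.

// Event shape mirrors the events.push(...) calls above; the string values of
// StreamEventType.TextDelta / ReasoningDelta are assumed for this sketch.
type GoogleStreamEvent =
  | { type: 'text_delta'; index: number; delta: { text: string } }
  | { type: 'reasoning_delta'; index: number; delta: { text: string } };

function collectGoogleStream(events: Iterable<GoogleStreamEvent>) {
  let text = '';
  let reasoning = '';
  for (const event of events) {
    if (event.type === 'reasoning_delta') {
      reasoning += event.delta.text; // thought summaries (parts with thought: true)
    } else {
      text += event.delta.text; // regular assistant output
    }
  }
  return { text, reasoning };
}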
@@ -374,12 +410,15 @@ ${codeResult.codeExecutionResult.output}\`\`\`
   return events;
 }
 function buildResponseFromState(state) {
-  const
+  const content = [];
   const toolCalls = [];
   let structuredData;
   const functionCallParts = [];
+  if (state.reasoning) {
+    content.push({ type: "reasoning", text: state.reasoning });
+  }
   if (state.content) {
-
+    content.push({ type: "text", text: state.content });
     try {
       structuredData = JSON.parse(state.content);
     } catch {
@@ -399,13 +438,14 @@ function buildResponseFromState(state) {
     });
   }
   const message = new AssistantMessage(
-
+    content,
     toolCalls.length > 0 ? toolCalls : void 0,
     {
       metadata: {
         google: {
           finishReason: state.finishReason,
-          functionCallParts: functionCallParts.length > 0 ? functionCallParts : void 0
+          functionCallParts: functionCallParts.length > 0 ? functionCallParts : void 0,
+          thoughtSignature: state.thoughtSignature
         }
       }
     }