illuma-agents 1.0.6 → 1.0.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cjs/events.cjs +34 -10
- package/dist/cjs/events.cjs.map +1 -1
- package/dist/cjs/llm/google/index.cjs +78 -9
- package/dist/cjs/llm/google/index.cjs.map +1 -1
- package/dist/cjs/llm/google/utils/common.cjs +185 -28
- package/dist/cjs/llm/google/utils/common.cjs.map +1 -1
- package/dist/cjs/messages/format.cjs +9 -1
- package/dist/cjs/messages/format.cjs.map +1 -1
- package/dist/cjs/tools/ToolNode.cjs +154 -55
- package/dist/cjs/tools/ToolNode.cjs.map +1 -1
- package/dist/esm/events.mjs +34 -10
- package/dist/esm/events.mjs.map +1 -1
- package/dist/esm/llm/google/index.mjs +79 -10
- package/dist/esm/llm/google/index.mjs.map +1 -1
- package/dist/esm/llm/google/utils/common.mjs +184 -30
- package/dist/esm/llm/google/utils/common.mjs.map +1 -1
- package/dist/esm/messages/format.mjs +9 -1
- package/dist/esm/messages/format.mjs.map +1 -1
- package/dist/esm/tools/ToolNode.mjs +155 -56
- package/dist/esm/tools/ToolNode.mjs.map +1 -1
- package/dist/types/events.d.ts +3 -1
- package/dist/types/llm/google/index.d.ts +10 -0
- package/dist/types/llm/google/types.d.ts +11 -1
- package/dist/types/llm/google/utils/common.d.ts +17 -2
- package/dist/types/messages/format.d.ts +7 -1
- package/dist/types/tools/ToolNode.d.ts +9 -1
- package/dist/types/types/stream.d.ts +1 -1
- package/dist/types/types/tools.d.ts +1 -1
- package/package.json +3 -3
- package/src/events.ts +37 -15
- package/src/llm/google/data/gettysburg10.wav +0 -0
- package/src/llm/google/data/hotdog.jpg +0 -0
- package/src/llm/google/index.ts +129 -14
- package/src/llm/google/llm.spec.ts +932 -0
- package/src/llm/google/types.ts +56 -43
- package/src/llm/google/utils/common.ts +873 -660
- package/src/messages/ensureThinkingBlock.test.ts +75 -0
- package/src/messages/format.ts +9 -1
- package/src/tools/ToolNode.ts +195 -64
- package/src/types/stream.ts +1 -1
- package/src/types/tools.ts +80 -80
package/dist/types/llm/google/utils/common.d.ts CHANGED
@@ -1,7 +1,16 @@
 import { POSSIBLE_ROLES, type Part, type Content, type EnhancedGenerateContentResponse, type FunctionDeclarationsTool as GoogleGenerativeAIFunctionDeclarationsTool } from '@google/generative-ai';
 import { BaseMessage, UsageMetadata } from '@langchain/core/messages';
 import { ChatGenerationChunk } from '@langchain/core/outputs';
+import type { ChatResult } from '@langchain/core/outputs';
 import { GoogleGenerativeAIToolType } from '../types';
+export declare const _FUNCTION_CALL_THOUGHT_SIGNATURES_MAP_KEY = "__gemini_function_call_thought_signatures__";
+/**
+ * Executes a function immediately and returns its result.
+ * Functional utility similar to an Immediately Invoked Function Expression (IIFE).
+ * @param fn The function to execute.
+ * @returns The result of invoking fn.
+ */
+export declare const iife: <T>(fn: () => T) => T;
 export declare function getMessageAuthor(message: BaseMessage): string;
 /**
  * Maps a message type to a Google Generative AI chat author.
@@ -10,10 +19,16 @@ export declare function getMessageAuthor(message: BaseMessage): string;
  * @returns The message type mapped to a Google Generative AI chat author.
  */
 export declare function convertAuthorToRole(author: string): (typeof POSSIBLE_ROLES)[number];
-export declare function convertMessageContentToParts(message: BaseMessage, isMultimodalModel: boolean, previousMessages: BaseMessage[]): Part[];
-export declare function convertBaseMessagesToContent(messages: BaseMessage[], isMultimodalModel: boolean, convertSystemMessageToHumanContent?: boolean): Content[] | undefined;
+export declare function convertMessageContentToParts(message: BaseMessage, isMultimodalModel: boolean, previousMessages: BaseMessage[], model?: string): Part[];
+export declare function convertBaseMessagesToContent(messages: BaseMessage[], isMultimodalModel: boolean, convertSystemMessageToHumanContent?: boolean, model?: string): Content[] | undefined;
 export declare function convertResponseContentToChatGenerationChunk(response: EnhancedGenerateContentResponse, extra: {
     usageMetadata?: UsageMetadata | undefined;
     index: number;
 }): ChatGenerationChunk | null;
+/**
+ * Maps a Google GenerateContentResult to a LangChain ChatResult
+ */
+export declare function mapGenerateContentResultToChatResult(response: EnhancedGenerateContentResponse, extra?: {
+    usageMetadata: UsageMetadata | undefined;
+}): ChatResult;
 export declare function convertToGenerativeAITools(tools: GoogleGenerativeAIToolType[]): GoogleGenerativeAIFunctionDeclarationsTool[];
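`iife` ships here as a declaration only; an implementation satisfying this signature is one line (a sketch for illustration, not the package's compiled source):

```typescript
// One-line implementation matching the declared signature above.
const iife = <T>(fn: () => T): T => fn();

// Example: compute a value with block-scoped logic in expression position.
const role = iife(() => {
  const hasToolCalls = false; // hypothetical flag for illustration
  return hasToolCalls ? 'function' : 'user';
});
```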
package/dist/types/messages/format.d.ts CHANGED
@@ -120,9 +120,15 @@ export declare const formatAgentMessages: (payload: TPayload, indexTokenCountMap
 export declare function shiftIndexTokenCountMap(indexTokenCountMap: Record<number, number>, instructionsTokenCount: number): Record<number, number>;
 /**
  * Ensures compatibility when switching from a non-thinking agent to a thinking-enabled agent.
- * Converts AI messages with tool calls (that lack thinking blocks) into buffer strings,
+ * Converts AI messages with tool calls (that lack thinking/reasoning blocks) into buffer strings,
  * avoiding the thinking block signature requirement.
  *
+ * Recognizes the following as valid thinking/reasoning blocks:
+ * - ContentTypes.THINKING (Anthropic)
+ * - ContentTypes.REASONING_CONTENT (Bedrock)
+ * - ContentTypes.REASONING (VertexAI / Google)
+ * - 'redacted_thinking'
+ *
  * @param messages - Array of messages to process
  * @param provider - The provider being used (unused but kept for future compatibility)
  * @returns The messages array with tool sequences converted to buffer strings if necessary
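A predicate matching the doc comment's list might look like the sketch below; the `ContentTypes` string values are assumptions mapped from the enum names, and `isThinkingBlock` is a hypothetical helper, not part of the package's API:

```typescript
// Hypothetical helper illustrating the doc comment above; the real check
// lives inside the ensureThinkingBlock logic and may differ in detail.
type ContentBlock = { type?: string };

const THINKING_TYPES = new Set([
  'thinking',          // assumed value of ContentTypes.THINKING (Anthropic)
  'reasoning_content', // assumed value of ContentTypes.REASONING_CONTENT (Bedrock)
  'reasoning',         // assumed value of ContentTypes.REASONING (VertexAI / Google)
  'redacted_thinking',
]);

const isThinkingBlock = (block: ContentBlock): boolean =>
  block.type != null && THINKING_TYPES.has(block.type);
```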
package/dist/types/tools/ToolNode.d.ts CHANGED
@@ -1,4 +1,5 @@
-import {
+import { ToolCall } from '@langchain/core/messages/tool';
+import { END, Command, MessagesAnnotation } from '@langchain/langgraph';
 import type { RunnableConfig } from '@langchain/core/runnables';
 import type { BaseMessage } from '@langchain/core/messages';
 import type * as t from '@/types';
@@ -8,6 +9,7 @@ export declare class ToolNode<T = any> extends RunnableCallable<T, T> {
     private toolMap;
     private loadRuntimeTools?;
     handleToolErrors: boolean;
+    trace: boolean;
     toolCallStepIds?: Map<string, string>;
     errorHandler?: t.ToolNodeConstructorParams['errorHandler'];
     private toolUsageCount;
@@ -17,6 +19,12 @@ export declare class ToolNode<T = any> extends RunnableCallable<T, T> {
      * @returns A ReadonlyMap where keys are tool names and values are their usage counts.
      */
     getToolUsageCounts(): ReadonlyMap<string, number>;
+    /**
+     * Runs a single tool call with error handling
+     */
+    protected runTool(call: ToolCall, config: RunnableConfig): Promise<BaseMessage | Command>;
     protected run(input: any, config: RunnableConfig): Promise<T>;
+    private isSendInput;
+    private isMessagesState;
 }
 export declare function toolsCondition<T extends string>(state: BaseMessage[] | typeof MessagesAnnotation.State, toolNode: T, invokedToolIds?: Set<string>): T | typeof END;
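The widened `runTool` return type (`Promise<BaseMessage | Command>`) reflects that LangGraph tools may return a `Command` instead of a plain message. A sketch of such a tool; the `billing_agent` node name is hypothetical:

```typescript
import { tool } from '@langchain/core/tools';
import { Command } from '@langchain/langgraph';
import { z } from 'zod';

// A tool that returns a Command instead of a ToolMessage; the new
// runTool signature lets ToolNode pass it through to the graph.
const transferToBilling = tool(
  async () => new Command({ goto: 'billing_agent' }), // hypothetical node name
  {
    name: 'transfer_to_billing',
    description: 'Transfer the conversation to the billing agent.',
    schema: z.object({}),
  }
);
```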
package/dist/types/types/stream.d.ts CHANGED
@@ -74,7 +74,7 @@ export type ToolErrorData = {
     name: string;
     error?: Error;
 } & Pick<ToolEndData, 'input'>;
-export type ToolEndCallback = (data: ToolEndData, metadata?: Record<string, unknown>) => void
+export type ToolEndCallback = (data: ToolEndData, metadata?: Record<string, unknown>) => Promise<void>;
 export type ProcessedToolCall = {
     name: string;
     args: string | Record<string, unknown>;
package/dist/types/types/tools.d.ts CHANGED
@@ -24,7 +24,7 @@ export type ToolNodeOptions = {
     handleToolErrors?: boolean;
     loadRuntimeTools?: ToolRefGenerator;
     toolCallStepIds?: Map<string, string>;
-    errorHandler?: (data: ToolErrorData, metadata?: Record<string, unknown>) => void
+    errorHandler?: (data: ToolErrorData, metadata?: Record<string, unknown>) => Promise<void>;
 };
 export type ToolNodeConstructorParams = ToolRefs & ToolNodeOptions;
 export type ToolEndEvent = {
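Both signature changes, `errorHandler` here and `ToolEndCallback` in stream.d.ts above, move from `void` to `Promise<void>`, so async callbacks are now awaited rather than fired and forgotten. A sketch, assuming both types are re-exported from the package root; `saveToolRun` and `reportError` are hypothetical application helpers:

```typescript
import type { ToolEndCallback, ToolNodeOptions } from 'illuma-agents'; // assumed re-export path

// Hypothetical persistence helpers for illustration.
declare function saveToolRun(output: unknown, metadata?: Record<string, unknown>): Promise<void>;
declare function reportError(name: string, error?: Error): Promise<void>;

const onToolEnd: ToolEndCallback = async (data, metadata) => {
  await saveToolRun(data.output, metadata); // now awaited by ToolEndHandler
};

const errorHandler: ToolNodeOptions['errorHandler'] = async (data) => {
  await reportError(data.name, data.error);
};
```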
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "illuma-agents",
-  "version": "1.0.6",
+  "version": "1.0.8",
   "main": "./dist/cjs/main.cjs",
   "module": "./dist/esm/main.mjs",
   "types": "./dist/types/index.d.ts",
@@ -94,8 +94,8 @@
     "@langchain/aws": "^0.1.15",
     "@langchain/core": "^0.3.79",
     "@langchain/deepseek": "^0.0.2",
-    "@langchain/google-genai": "^0.2.
-    "@langchain/google-vertexai": "^0.2.
+    "@langchain/google-genai": "^0.2.18",
+    "@langchain/google-vertexai": "^0.2.18",
     "@langchain/langgraph": "^0.4.9",
     "@langchain/mistralai": "^0.2.1",
     "@langchain/ollama": "^0.2.3",
package/src/events.ts CHANGED
@@ -6,6 +6,7 @@ import type {
   BaseMessageFields,
 } from '@langchain/core/messages';
 import type { MultiAgentGraph, StandardGraph } from '@/graphs';
+import type { Logger } from 'winston';
 import type * as t from '@/types';
 import { handleToolCalls } from '@/tools/handlers';
 import { Providers } from '@/common';
@@ -74,12 +75,15 @@ export class ModelEndHandler implements t.EventHandler {
 
 export class ToolEndHandler implements t.EventHandler {
   private callback?: t.ToolEndCallback;
+  private logger?: Logger;
   private omitOutput?: (name?: string) => boolean;
   constructor(
     callback?: t.ToolEndCallback,
+    logger?: Logger,
     omitOutput?: (name?: string) => boolean
   ) {
     this.callback = callback;
+    this.logger = logger;
     this.omitOutput = omitOutput;
   }
   async handle(
@@ -88,23 +92,41 @@ export class ToolEndHandler implements t.EventHandler {
     metadata?: Record<string, unknown>,
     graph?: StandardGraph | MultiAgentGraph
   ): Promise<void> {
-
-
-
-
+    try {
+      if (!graph || !metadata) {
+        if (this.logger) {
+          this.logger.warn(`Graph or metadata not found in ${event} event`);
+        } else {
+          console.warn(`Graph or metadata not found in ${event} event`);
+        }
+        return;
+      }
 
-
-
-
-
-
+      const toolEndData = data as t.ToolEndData | undefined;
+      if (!toolEndData?.output) {
+        if (this.logger) {
+          this.logger.warn('No output found in tool_end event');
+        } else {
+          console.warn('No output found in tool_end event');
+        }
+        return;
+      }
 
-
-
-
-
-
+      if (this.callback) {
+        await this.callback(toolEndData, metadata);
+      }
+      await graph.handleToolCallCompleted(
+        { input: toolEndData.input, output: toolEndData.output },
+        metadata,
+        this.omitOutput?.((toolEndData.output as ToolMessage | undefined)?.name)
+      );
+    } catch (error) {
+      if (this.logger) {
+        this.logger.error('Error handling tool_end event:', error);
+      } else {
+        console.error('Error handling tool_end event:', error);
+      }
+    }
   }
 }
 
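With the new optional `logger` parameter, `ToolEndHandler` prefers a Winston logger and falls back to `console` otherwise. A construction sketch, assuming the handler is importable from the package root; the omitted tool name is hypothetical:

```typescript
import winston from 'winston';
import { ToolEndHandler } from 'illuma-agents'; // assumed export path

const logger = winston.createLogger({
  transports: [new winston.transports.Console()],
});

const handler = new ToolEndHandler(
  async (data, metadata) => {
    // persist or stream the tool result; now awaited by the handler
  },
  logger,
  (toolName) => toolName === 'web_search' // hypothetical bulky-output tool to omit
);
```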
package/src/llm/google/data/gettysburg10.wav
Binary file
package/src/llm/google/data/hotdog.jpg
Binary file
package/src/llm/google/index.ts CHANGED
@@ -11,15 +11,30 @@ import type {
 import type { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager';
 import type { BaseMessage, UsageMetadata } from '@langchain/core/messages';
 import type { GeminiGenerationConfig } from '@langchain/google-common';
-import type { GeminiApiUsageMetadata } from './types';
+import type { GeminiApiUsageMetadata, InputTokenDetails } from './types';
 import type { GoogleClientOptions } from '@/types';
 import {
   convertResponseContentToChatGenerationChunk,
   convertBaseMessagesToContent,
+  mapGenerateContentResultToChatResult,
 } from './utils/common';
 
 export class CustomChatGoogleGenerativeAI extends ChatGoogleGenerativeAI {
   thinkingConfig?: GeminiGenerationConfig['thinkingConfig'];
+
+  /**
+   * Override to add gemini-3 model support for multimodal and function calling thought signatures
+   */
+  get _isMultimodalModel(): boolean {
+    return (
+      this.model.startsWith('gemini-1.5') ||
+      this.model.startsWith('gemini-2') ||
+      (this.model.startsWith('gemma-3-') &&
+        !this.model.startsWith('gemma-3-1b')) ||
+      this.model.startsWith('gemini-3')
+    );
+  }
+
   constructor(fields: GoogleClientOptions) {
     super(fields);
 
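The new `_isMultimodalModel` getter widens the base class's allow-list to cover gemini-3. A standalone restatement of its logic for illustration (model ids are examples):

```typescript
// Same predicate as the getter above, restated as a free function.
const isMultimodal = (model: string): boolean =>
  model.startsWith('gemini-1.5') ||
  model.startsWith('gemini-2') ||
  (model.startsWith('gemma-3-') && !model.startsWith('gemma-3-1b')) ||
  model.startsWith('gemini-3');

console.log(isMultimodal('gemini-3-pro-preview')); // true (newly covered)
console.log(isMultimodal('gemma-3-1b-it'));        // false (excluded prefix)
```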
@@ -111,6 +126,59 @@ export class CustomChatGoogleGenerativeAI extends ChatGoogleGenerativeAI {
     return 'IllumaGoogleGenerativeAI';
   }
 
+  /**
+   * Helper function to convert Gemini API usage metadata to LangChain format
+   * Includes support for cached tokens and tier-based tracking for gemini-3-pro-preview
+   */
+  private _convertToUsageMetadata(
+    usageMetadata: GeminiApiUsageMetadata | undefined,
+    model: string
+  ): UsageMetadata | undefined {
+    if (!usageMetadata) {
+      return undefined;
+    }
+
+    const output: UsageMetadata = {
+      input_tokens: usageMetadata.promptTokenCount ?? 0,
+      output_tokens:
+        (usageMetadata.candidatesTokenCount ?? 0) +
+        (usageMetadata.thoughtsTokenCount ?? 0),
+      total_tokens: usageMetadata.totalTokenCount ?? 0,
+    };
+
+    if (usageMetadata.cachedContentTokenCount) {
+      output.input_token_details ??= {};
+      output.input_token_details.cache_read =
+        usageMetadata.cachedContentTokenCount;
+    }
+
+    // gemini-3-pro-preview has bracket based tracking of tokens per request
+    if (model === 'gemini-3-pro-preview') {
+      const over200k = Math.max(
+        0,
+        (usageMetadata.promptTokenCount ?? 0) - 200000
+      );
+      const cachedOver200k = Math.max(
+        0,
+        (usageMetadata.cachedContentTokenCount ?? 0) - 200000
+      );
+      if (over200k) {
+        output.input_token_details = {
+          ...output.input_token_details,
+          over_200k: over200k,
+        } as InputTokenDetails;
+      }
+      if (cachedOver200k) {
+        output.input_token_details = {
+          ...output.input_token_details,
+          cache_read_over_200k: cachedOver200k,
+        } as InputTokenDetails;
+      }
+    }
+
+    return output;
+  }
+
   invocationParams(
     options?: this['ParsedCallOptions']
   ): Omit<GenerateContentRequest, 'contents'> {
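The tier logic only applies to `gemini-3-pro-preview`, where prompt and cached tokens beyond the 200k boundary are reported separately. A worked example with illustrative counts:

```typescript
// Illustrative raw counts from the Gemini API:
const raw = {
  promptTokenCount: 250_000,
  cachedContentTokenCount: 220_000,
  candidatesTokenCount: 1_000,
  thoughtsTokenCount: 500,
  totalTokenCount: 251_500,
};

// _convertToUsageMetadata(raw, 'gemini-3-pro-preview') yields:
// {
//   input_tokens: 250000,
//   output_tokens: 1500,            // candidates + thoughts
//   total_tokens: 251500,
//   input_token_details: {
//     cache_read: 220000,
//     over_200k: 50000,             // 250000 - 200000
//     cache_read_over_200k: 20000,  // 220000 - 200000
//   },
// }
```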
@@ -127,6 +195,60 @@ export class CustomChatGoogleGenerativeAI extends ChatGoogleGenerativeAI {
     return params;
   }
 
+  async _generate(
+    messages: BaseMessage[],
+    options: this['ParsedCallOptions'],
+    runManager?: CallbackManagerForLLMRun
+  ): Promise<import('@langchain/core/outputs').ChatResult> {
+    const prompt = convertBaseMessagesToContent(
+      messages,
+      this._isMultimodalModel,
+      this.useSystemInstruction,
+      this.model
+    );
+    let actualPrompt = prompt;
+    if (prompt?.[0].role === 'system') {
+      const [systemInstruction] = prompt;
+      /** @ts-ignore */
+      this.client.systemInstruction = systemInstruction;
+      actualPrompt = prompt.slice(1);
+    }
+    const parameters = this.invocationParams(options);
+    const request = {
+      ...parameters,
+      contents: actualPrompt,
+    };
+
+    const res = await this.caller.callWithOptions(
+      { signal: options.signal },
+      async () =>
+        /** @ts-ignore */
+        this.client.generateContent(request)
+    );
+
+    const response = res.response;
+    const usageMetadata = this._convertToUsageMetadata(
+      /** @ts-ignore */
+      response.usageMetadata,
+      this.model
+    );
+
+    /** @ts-ignore */
+    const generationResult = mapGenerateContentResultToChatResult(response, {
+      usageMetadata,
+    });
+
+    await runManager?.handleLLMNewToken(
+      generationResult.generations[0].text || '',
+      undefined,
+      undefined,
+      undefined,
+      undefined,
+      undefined
+    );
+    return generationResult;
+  }
+
   async *_streamResponseChunks(
     messages: BaseMessage[],
     options: this['ParsedCallOptions'],
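`_generate` is an internal LangChain hook, so it is exercised through the public `invoke` API. A minimal sketch, assuming `GoogleClientOptions` forwards `model` and `apiKey` to the underlying `ChatGoogleGenerativeAI` fields and that the class is exported from the package root:

```typescript
import { CustomChatGoogleGenerativeAI } from 'illuma-agents'; // assumed export path

const llm = new CustomChatGoogleGenerativeAI({
  model: 'gemini-3-pro-preview',
  apiKey: process.env.GOOGLE_API_KEY,
});

// invoke() routes through the overridden _generate above.
const result = await llm.invoke('Summarize this changelog in one sentence.');
console.log(result.usage_metadata); // built by _convertToUsageMetadata
```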
@@ -135,7 +257,8 @@ export class CustomChatGoogleGenerativeAI extends ChatGoogleGenerativeAI {
     const prompt = convertBaseMessagesToContent(
       messages,
       this._isMultimodalModel,
-      this.useSystemInstruction
+      this.useSystemInstruction,
+      this.model
     );
     let actualPrompt = prompt;
     if (prompt?.[0].role === 'system') {
@@ -166,18 +289,10 @@ export class CustomChatGoogleGenerativeAI extends ChatGoogleGenerativeAI {
       this.streamUsage !== false &&
       options.streamUsage !== false
     ) {
-
-
-
-
-      const output_tokens =
-        (genAIUsageMetadata?.candidatesTokenCount ?? 0) +
-        (genAIUsageMetadata?.thoughtsTokenCount ?? 0);
-      lastUsageMetadata = {
-        input_tokens: genAIUsageMetadata?.promptTokenCount ?? 0,
-        output_tokens,
-        total_tokens: genAIUsageMetadata?.totalTokenCount ?? 0,
-      };
+      lastUsageMetadata = this._convertToUsageMetadata(
+        response.usageMetadata as GeminiApiUsageMetadata | undefined,
+        this.model
+      );
     }
 
     const chunk = convertResponseContentToChatGenerationChunk(response, {