@smythos/sre 1.5.36 → 1.5.39

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (74)
  1. package/dist/index.js +64 -46
  2. package/dist/index.js.map +1 -1
  3. package/dist/types/Components/DataSourceLookup.class.d.ts +1 -0
  4. package/dist/types/Components/ECMASandbox.class.d.ts +14 -0
  5. package/dist/types/Components/index.d.ts +2 -0
  6. package/dist/types/Core/ConnectorsService.d.ts +2 -1
  7. package/dist/types/helpers/ECMASandbox.helper.d.ts +3 -0
  8. package/dist/types/helpers/Log.helper.d.ts +1 -1
  9. package/dist/types/index.d.ts +4 -1
  10. package/dist/types/subsystems/ComputeManager/Code.service/connectors/ECMASandbox.class.d.ts +19 -0
  11. package/dist/types/subsystems/LLMManager/LLM.helper.d.ts +21 -10
  12. package/dist/types/subsystems/LLMManager/LLM.service/LLMConnector.d.ts +5 -5
  13. package/dist/types/subsystems/LLMManager/LLM.service/connectors/Anthropic.class.d.ts +2 -3
  14. package/dist/types/subsystems/LLMManager/LLM.service/connectors/Bedrock.class.d.ts +2 -3
  15. package/dist/types/subsystems/LLMManager/LLM.service/connectors/Echo.class.d.ts +2 -3
  16. package/dist/types/subsystems/LLMManager/LLM.service/connectors/GoogleAI.class.d.ts +2 -3
  17. package/dist/types/subsystems/LLMManager/LLM.service/connectors/Groq.class.d.ts +2 -3
  18. package/dist/types/subsystems/LLMManager/LLM.service/connectors/Perplexity.class.d.ts +3 -4
  19. package/dist/types/subsystems/LLMManager/LLM.service/connectors/VertexAI.class.d.ts +15 -7
  20. package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/OpenAIConnector.class.d.ts +95 -0
  21. package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ChatCompletionsApiInterface.d.ts +87 -0
  22. package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/OpenAIApiInterface.d.ts +85 -0
  23. package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/OpenAIApiInterfaceFactory.d.ts +49 -0
  24. package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ResponsesApiInterface.d.ts +146 -0
  25. package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/constants.d.ts +10 -0
  26. package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/index.d.ts +4 -0
  27. package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/types.d.ts +38 -0
  28. package/dist/types/subsystems/LLMManager/LLM.service/connectors/xAI.class.d.ts +1 -2
  29. package/dist/types/subsystems/Security/Vault.service/connectors/JSONFileVault.class.d.ts +5 -0
  30. package/dist/types/types/LLM.types.d.ts +82 -37
  31. package/dist/types/utils/data.utils.d.ts +2 -1
  32. package/package.json +4 -3
  33. package/src/Components/APICall/APICall.class.ts +2 -1
  34. package/src/Components/Component.class.ts +1 -1
  35. package/src/Components/DataSourceLookup.class.ts +29 -10
  36. package/src/Components/ECMASandbox.class.ts +71 -0
  37. package/src/Components/GenAILLM.class.ts +1 -1
  38. package/src/Components/ServerlessCode.class.ts +2 -1
  39. package/src/Components/index.ts +2 -0
  40. package/src/Core/ConnectorsService.ts +3 -3
  41. package/src/helpers/ECMASandbox.helper.ts +54 -0
  42. package/src/helpers/Log.helper.ts +57 -17
  43. package/src/index.ts +188 -185
  44. package/src/index.ts.bak +188 -185
  45. package/src/subsystems/AgentManager/Agent.class.ts +11 -6
  46. package/src/subsystems/AgentManager/AgentRuntime.class.ts +13 -13
  47. package/src/subsystems/ComputeManager/Code.service/connectors/ECMASandbox.class.ts +131 -0
  48. package/src/subsystems/ComputeManager/Code.service/index.ts +2 -0
  49. package/src/subsystems/LLMManager/LLM.helper.ts +57 -27
  50. package/src/subsystems/LLMManager/LLM.inference.ts +4 -0
  51. package/src/subsystems/LLMManager/LLM.service/LLMConnector.ts +123 -28
  52. package/src/subsystems/LLMManager/LLM.service/connectors/Anthropic.class.ts +13 -14
  53. package/src/subsystems/LLMManager/LLM.service/connectors/Bedrock.class.ts +2 -7
  54. package/src/subsystems/LLMManager/LLM.service/connectors/Echo.class.ts +2 -6
  55. package/src/subsystems/LLMManager/LLM.service/connectors/GoogleAI.class.ts +8 -14
  56. package/src/subsystems/LLMManager/LLM.service/connectors/Groq.class.ts +2 -7
  57. package/src/subsystems/LLMManager/LLM.service/connectors/Perplexity.class.ts +2 -7
  58. package/src/subsystems/LLMManager/LLM.service/connectors/VertexAI.class.ts +121 -9
  59. package/src/subsystems/LLMManager/LLM.service/connectors/openai/OpenAIConnector.class.ts +455 -0
  60. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ChatCompletionsApiInterface.ts +528 -0
  61. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/OpenAIApiInterface.ts +100 -0
  62. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/OpenAIApiInterfaceFactory.ts +81 -0
  63. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ResponsesApiInterface.ts +853 -0
  64. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/constants.ts +37 -0
  65. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/index.ts +4 -0
  66. package/src/subsystems/LLMManager/LLM.service/connectors/openai/types.ts +37 -0
  67. package/src/subsystems/LLMManager/LLM.service/connectors/xAI.class.ts +0 -5
  68. package/src/subsystems/LLMManager/LLM.service/index.ts +1 -1
  69. package/src/subsystems/MemoryManager/Cache.service/connectors/RedisCache.class.ts +18 -0
  70. package/src/subsystems/MemoryManager/RuntimeContext.ts +33 -16
  71. package/src/subsystems/Security/Vault.service/connectors/JSONFileVault.class.ts +68 -42
  72. package/src/types/LLM.types.ts +91 -43
  73. package/src/utils/data.utils.ts +3 -2
  74. package/src/subsystems/LLMManager/LLM.service/connectors/OpenAI.class.ts +0 -848
@@ -0,0 +1,455 @@
1
+ import EventEmitter from 'events';
2
+ import OpenAI from 'openai';
3
+ import { toFile } from 'openai';
4
+ import { encodeChat } from 'gpt-tokenizer';
5
+
6
+ import { BUILT_IN_MODEL_PREFIX } from '@sre/constants';
7
+ import { BinaryInput } from '@sre/helpers/BinaryInput.helper';
8
+ import { AccessCandidate } from '@sre/Security/AccessControl/AccessCandidate.class';
9
+ import { AccessRequest } from '@sre/Security/AccessControl/AccessRequest.class';
10
+ import { LLMHelper } from '@sre/LLMManager/LLM.helper';
11
+
12
+ import {
13
+ TLLMParams,
14
+ ToolData,
15
+ TLLMMessageBlock,
16
+ TLLMToolResultMessageBlock,
17
+ TLLMMessageRole,
18
+ APIKeySource,
19
+ ILLMRequestFuncParams,
20
+ TOpenAIRequestBody,
21
+ TLLMChatResponse,
22
+ ILLMRequestContext,
23
+ BasicCredentials,
24
+ TLLMPreparedParams,
25
+ } from '@sre/types/LLM.types';
26
+
27
+ import { LLMConnector } from '../../LLMConnector';
28
+ import { SystemEvents } from '@sre/Core/SystemEvents';
29
+ import { ConnectorService } from '@sre/Core/ConnectorsService';
30
+ import { HandlerDependencies, TToolType } from './types';
31
+ import { OpenAIApiInterfaceFactory, OpenAIApiInterface } from './apiInterfaces';
32
+
33
+ export class OpenAIConnector extends LLMConnector {
34
+ public name = 'LLM:OpenAI';
35
+
36
+ private interfaceFactory: OpenAIApiInterfaceFactory;
37
+
38
+ constructor() {
39
+ super();
40
+
41
+ this.interfaceFactory = new OpenAIApiInterfaceFactory();
42
+ }
43
+
44
+ /**
45
+ * Get the appropriate API interface for the given interface type and context
46
+ */
47
+ private getApiInterface(interfaceType: string, context: ILLMRequestContext): OpenAIApiInterface {
48
+ const deps: HandlerDependencies = {
49
+ getClient: (context) => this.getClient(context),
50
+ reportUsage: (usage, metadata) => this.reportUsage(usage, metadata),
51
+ };
52
+
53
+ return this.interfaceFactory.createInterface(interfaceType, context, deps);
54
+ }
55
+
56
+ /**
57
+ * Determine the appropriate interface type based on context and capabilities
58
+ */
59
+ private getInterfaceType(context: ILLMRequestContext): string {
60
+ // Start with model-specified interface or default
61
+ let responseInterface = this.interfaceFactory.getInterfaceTypeFromModelInfo(context.modelInfo);
62
+
63
+ // Auto-switch to Responses API when web search is enabled
64
+ if (context.toolsInfo?.openai?.webSearch?.enabled === true) {
65
+ responseInterface = 'responses';
66
+ }
67
+
68
+ return responseInterface;
69
+ }
70
+
71
+ protected async getClient(params: ILLMRequestContext): Promise<OpenAI> {
72
+ const apiKey = (params.credentials as BasicCredentials)?.apiKey;
73
+ const baseURL = params?.modelInfo?.baseURL;
74
+
75
+ if (!apiKey) throw new Error('Please provide an API key for OpenAI');
76
+
77
+ const openai = new OpenAI({ baseURL, apiKey });
78
+
79
+ return openai;
80
+ }
81
+
82
+ protected async request({ acRequest, body, context }: ILLMRequestFuncParams): Promise<TLLMChatResponse> {
83
+ const _body = body as OpenAI.ChatCompletionCreateParams;
84
+
85
+ // #region Validate token limit
86
+ const messages = _body?.messages || [];
87
+ const lastMessage = messages[messages.length - 1];
88
+
89
+ const promptTokens = context?.hasFiles
90
+ ? await LLMHelper.countVisionPromptTokens(lastMessage?.content)
91
+ : encodeChat(messages as any, 'gpt-4')?.length;
92
+
93
+ await this.validateTokenLimit({
94
+ acRequest,
95
+ promptTokens,
96
+ context,
97
+ maxTokens: _body.max_completion_tokens,
98
+ });
99
+ // #endregion Validate token limit
100
+
101
+ const responseInterface = this.getInterfaceType(context);
102
+ const apiInterface = this.getApiInterface(responseInterface, context);
103
+
104
+ const result = await apiInterface.createRequest(body, context);
105
+
106
+ const message = result?.choices?.[0]?.message;
107
+ const finishReason = result?.choices?.[0]?.finish_reason;
108
+
109
+ let toolsData: ToolData[] = [];
110
+ let useTool = false;
111
+
112
+ if (finishReason === 'tool_calls') {
113
+ toolsData =
114
+ message?.tool_calls?.map((tool, index) => ({
115
+ index,
116
+ id: tool?.id,
117
+ type: tool?.type,
118
+ name: tool?.function?.name,
119
+ arguments: tool?.function?.arguments,
120
+ role: 'tool',
121
+ })) || [];
122
+
123
+ useTool = true;
124
+ }
125
+
126
+ const usage = result?.usage;
127
+ this.reportUsage(usage, {
128
+ modelEntryName: context.modelEntryName,
129
+ keySource: context.isUserKey ? APIKeySource.User : APIKeySource.Smyth,
130
+ agentId: context.agentId,
131
+ teamId: context.teamId,
132
+ });
133
+
134
+ return {
135
+ content: message?.content ?? '',
136
+ finishReason,
137
+ useTool,
138
+ toolsData,
139
+ message,
140
+ usage,
141
+ };
142
+ }
143
+
144
+ protected async streamRequest({ acRequest, body, context }: ILLMRequestFuncParams): Promise<EventEmitter> {
145
+ // #region Validate token limit
146
+ const messages = body?.messages || body?.input || [];
147
+ const lastMessage = messages[messages.length - 1];
148
+
149
+ const promptTokens = context?.hasFiles
150
+ ? await LLMHelper.countVisionPromptTokens(lastMessage?.content)
151
+ : encodeChat(messages as any, 'gpt-4')?.length;
152
+
153
+ await this.validateTokenLimit({
154
+ acRequest,
155
+ promptTokens,
156
+ context,
157
+ maxTokens: body.max_completion_tokens,
158
+ });
159
+ // #endregion Validate token limit
160
+
161
+ const responseInterface = this.getInterfaceType(context);
162
+ const apiInterface = this.getApiInterface(responseInterface, context);
163
+
164
+ const stream = await apiInterface.createStream(body, context);
165
+
166
+ const emitter = apiInterface.handleStream(stream, context);
167
+
168
+ return emitter;
169
+ }
170
+
171
+ // #region Image Generation, will be moved to a different subsystem
172
+ protected async imageGenRequest({ acRequest, body, context }: ILLMRequestFuncParams): Promise<OpenAI.ImagesResponse> {
173
+ const openai = await this.getClient(context);
174
+ const response = await openai.images.generate(body as OpenAI.Images.ImageGenerateParams);
175
+
176
+ return response;
177
+ }
178
+
179
+ protected async imageEditRequest({ acRequest, body, context }: ILLMRequestFuncParams): Promise<OpenAI.ImagesResponse> {
180
+ const _body = body as OpenAI.Images.ImageEditParams;
181
+
182
+ const openai = await this.getClient(context);
183
+ const response = await openai.images.edit(_body);
184
+
185
+ return response;
186
+ }
187
+ // #endregion
188
+
189
+ public formatToolsConfig({ type = 'function', toolDefinitions, toolChoice = 'auto', modelInfo = null }) {
190
+ let tools = [];
191
+
192
+ if (toolDefinitions && toolDefinitions.length > 0) {
193
+ const interfaceType = modelInfo?.interface || 'chat.completions';
194
+
195
+ const tempContext: ILLMRequestContext = {
196
+ modelEntryName: '',
197
+ agentId: '',
198
+ teamId: '',
199
+ isUserKey: false,
200
+ modelInfo,
201
+ credentials: null,
202
+ } as ILLMRequestContext;
203
+
204
+ const deps: HandlerDependencies = {
205
+ getClient: (context) => this.getClient(context),
206
+ reportUsage: (usage, metadata) => this.reportUsage(usage, metadata),
207
+ };
208
+
209
+ const apiInterface = this.interfaceFactory.createInterface(interfaceType, tempContext, deps);
210
+
211
+ // Transform tools using the interface
212
+ tools = apiInterface.transformToolsConfig({
213
+ type,
214
+ toolDefinitions,
215
+ toolChoice: toolChoice as OpenAI.ChatCompletionToolChoiceOption,
216
+ modelInfo,
217
+ });
218
+ }
219
+
220
+ return tools?.length > 0 ? { tools, tool_choice: toolChoice || 'auto' } : {};
221
+ }
222
+
223
+ public transformToolMessageBlocks({
224
+ messageBlock,
225
+ toolsData,
226
+ }: {
227
+ messageBlock: TLLMMessageBlock;
228
+ toolsData: ToolData[];
229
+ }): TLLMToolResultMessageBlock[] {
230
+ const messageBlocks: TLLMToolResultMessageBlock[] = [];
231
+
232
+ if (messageBlock) {
233
+ const transformedMessageBlock = {
234
+ ...messageBlock,
235
+ content: typeof messageBlock.content === 'object' ? JSON.stringify(messageBlock.content) : messageBlock.content,
236
+ };
237
+ if (transformedMessageBlock.tool_calls) {
238
+ for (let toolCall of transformedMessageBlock.tool_calls) {
239
+ toolCall.function.arguments =
240
+ typeof toolCall.function.arguments === 'object' ? JSON.stringify(toolCall.function.arguments) : toolCall.function.arguments;
241
+ }
242
+ }
243
+ messageBlocks.push(transformedMessageBlock);
244
+ }
245
+
246
+ const transformedToolsData = toolsData.map((toolData) => ({
247
+ tool_call_id: toolData.id,
248
+ role: TLLMMessageRole.Tool, // toolData.role as TLLMMessageRole, //should always be 'tool' for OpenAI
249
+ name: toolData.name,
250
+ content: typeof toolData.result === 'string' ? toolData.result : JSON.stringify(toolData.result), // Ensure content is a string
251
+ }));
252
+
253
+ return [...messageBlocks, ...transformedToolsData];
254
+ }
255
+
256
+ public getConsistentMessages(messages) {
257
+ const _messages = LLMHelper.removeDuplicateUserMessages(messages);
258
+
259
+ return _messages.map((message) => {
260
+ const _message = { ...message };
261
+ let textContent = '';
262
+
263
+ if (message?.parts) {
264
+ textContent = message.parts.map((textBlock) => textBlock?.text || '').join(' ');
265
+ } else if (Array.isArray(message?.content)) {
266
+ textContent = message.content.map((textBlock) => textBlock?.text || '').join(' ');
267
+ } else if (message?.content) {
268
+ textContent = message.content;
269
+ }
270
+
271
+ _message.content = textContent;
272
+
273
+ return _message;
274
+ });
275
+ }
276
+
277
+ private async validateTokenLimit({
278
+ acRequest,
279
+ maxTokens,
280
+ promptTokens,
281
+ context,
282
+ }: {
283
+ acRequest: AccessRequest;
284
+ maxTokens: number;
285
+ promptTokens: number;
286
+ context: ILLMRequestContext;
287
+ }): Promise<void> {
288
+ const provider = await this.getProvider(acRequest, context.modelEntryName);
289
+
290
+ await provider.validateTokensLimit({
291
+ model: context.modelEntryName,
292
+ promptTokens,
293
+ completionTokens: maxTokens,
294
+ hasAPIKey: context.isUserKey,
295
+ });
296
+ }
297
+
298
+ private async getProvider(acRequest: AccessRequest, modelEntryName: string) {
299
+ const modelsProviderConnector = ConnectorService.getModelsProviderConnector();
300
+ const modelsProvider = modelsProviderConnector.requester(acRequest.candidate as AccessCandidate);
301
+
302
+ return modelsProvider;
303
+ }
304
+
305
+ /**
306
+ * Prepare request body for OpenAI Responses API
307
+ * Uses MessageTransformer and ToolsTransformer for clean interface transformations
308
+ */
309
+
310
+ private async prepareImageGenerationBody(params: TLLMPreparedParams): Promise<OpenAI.Images.ImageGenerateParams> {
311
+ const { model, size, quality, n, responseFormat, style } = params;
312
+
313
+ const body: OpenAI.Images.ImageGenerateParams = {
314
+ prompt: params.prompt,
315
+ model: model as string,
316
+ size: size as OpenAI.Images.ImageGenerateParams['size'],
317
+ n: n || 1,
318
+ };
319
+
320
+ if (quality) {
321
+ body.quality = quality;
322
+ }
323
+
324
+ if (style) {
325
+ body.style = style;
326
+ }
327
+
328
+ return body;
329
+ }
330
+
331
+ private async prepareImageEditBody(params: TLLMPreparedParams): Promise<OpenAI.Images.ImageEditParams> {
332
+ const { model, size, n, responseFormat } = params;
333
+
334
+ const body: OpenAI.Images.ImageEditParams = {
335
+ prompt: params.prompt,
336
+ model: model as string,
337
+ size: size as OpenAI.Images.ImageEditParams['size'],
338
+ n: n || 1,
339
+ image: null,
340
+ };
341
+
342
+ const files: BinaryInput[] = params?.files || [];
343
+
344
+ if (files.length > 0) {
345
+ const images = await Promise.all(
346
+ files.map(
347
+ async (file) =>
348
+ await toFile(await file.getReadStream(), await file.getName(), {
349
+ type: file.mimetype,
350
+ })
351
+ )
352
+ );
353
+
354
+ // Assign only the first image file as required by the OpenAI image-edit endpoint
355
+ body.image = images[0];
356
+ }
357
+
358
+ return body;
359
+ }
360
+
361
+ protected async reqBodyAdapter(params: TLLMPreparedParams): Promise<TOpenAIRequestBody> {
362
+ // Handle special capabilities first (these override interface type)
363
+ if (params.capabilities?.imageGeneration === true) {
364
+ const capabilityType = params?.files?.length > 0 ? 'image-edit' : 'image-generation';
365
+ return this.prepareRequestBody(params, capabilityType);
366
+ }
367
+
368
+ // Create a minimal context to use the same interface selection logic
369
+ const minimalContext: ILLMRequestContext = {
370
+ modelInfo: params.modelInfo,
371
+ toolsInfo: params.toolsInfo,
372
+ } as ILLMRequestContext;
373
+
374
+ const responseInterface = this.getInterfaceType(minimalContext);
375
+
376
+ // Use interface-specific preparation
377
+ return this.prepareRequestBody(params, responseInterface);
378
+ }
379
+
380
+ private async prepareRequestBody(params: TLLMPreparedParams, preparationType: string): Promise<TOpenAIRequestBody> {
381
+ // Create a minimal context for body preparation - the interface may need access to model info
382
+ const minimalContext: ILLMRequestContext = {
383
+ modelInfo: params.modelInfo,
384
+ modelEntryName: params.modelEntryName,
385
+ agentId: params.agentId,
386
+ teamId: params.teamId,
387
+ isUserKey: params.isUserKey,
388
+ credentials: params.credentials,
389
+ hasFiles: params.files && params.files.length > 0,
390
+ toolsInfo: params.toolsInfo,
391
+ };
392
+
393
+ const preparers = {
394
+ 'chat.completions': async () => {
395
+ const apiInterface = this.getApiInterface('chat.completions', minimalContext);
396
+ return apiInterface.prepareRequestBody(params);
397
+ },
398
+ responses: async () => {
399
+ const apiInterface = this.getApiInterface('responses', minimalContext);
400
+ return apiInterface.prepareRequestBody(params);
401
+ },
402
+ 'image-generation': () => this.prepareImageGenerationBody(params),
403
+ 'image-edit': () => this.prepareImageEditBody(params),
404
+ // Future interfaces can be added here
405
+ };
406
+
407
+ const preparer = preparers[preparationType];
408
+ if (!preparer) {
409
+ throw new Error(`Unsupported preparation type: ${preparationType}`);
410
+ }
411
+
412
+ return preparer();
413
+ }
414
+
415
+ protected reportUsage(
416
+ usage: OpenAI.Completions.CompletionUsage & {
417
+ input_tokens?: number;
418
+ output_tokens?: number;
419
+ input_tokens_details?: { cached_tokens?: number };
420
+ prompt_tokens_details?: { cached_tokens?: number };
421
+ cost?: number; // for web search tool
422
+ },
423
+ metadata: { modelEntryName: string; keySource: APIKeySource; agentId: string; teamId: string }
424
+ ) {
425
+ // SmythOS (built-in) models have a prefix, so we need to remove it to get the model name
426
+ const modelName = metadata.modelEntryName.replace(BUILT_IN_MODEL_PREFIX, '');
427
+
428
+ const inputTokens = usage?.input_tokens || usage?.prompt_tokens - (usage?.prompt_tokens_details?.cached_tokens || 0); // Returned by the search tool
429
+
430
+ const outputTokens =
431
+ usage?.output_tokens || // Returned by the search tool
432
+ usage?.completion_tokens ||
433
+ 0;
434
+
435
+ const cachedInputTokens =
436
+ usage?.input_tokens_details?.cached_tokens || // Returned by the search tool
437
+ usage?.prompt_tokens_details?.cached_tokens ||
438
+ 0;
439
+
440
+ const usageData = {
441
+ sourceId: `llm:${modelName}`,
442
+ input_tokens: inputTokens,
443
+ output_tokens: outputTokens,
444
+ input_tokens_cache_write: 0,
445
+ input_tokens_cache_read: cachedInputTokens,
446
+ cost: usage?.cost || 0, // for web search tool
447
+ keySource: metadata.keySource,
448
+ agentId: metadata.agentId,
449
+ teamId: metadata.teamId,
450
+ };
451
+ SystemEvents.emit('USAGE:LLM', usageData);
452
+
453
+ return usageData;
454
+ }
455
+ }