@smythos/sre 1.5.36 → 1.5.39
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.js +64 -46
- package/dist/index.js.map +1 -1
- package/dist/types/Components/DataSourceLookup.class.d.ts +1 -0
- package/dist/types/Components/ECMASandbox.class.d.ts +14 -0
- package/dist/types/Components/index.d.ts +2 -0
- package/dist/types/Core/ConnectorsService.d.ts +2 -1
- package/dist/types/helpers/ECMASandbox.helper.d.ts +3 -0
- package/dist/types/helpers/Log.helper.d.ts +1 -1
- package/dist/types/index.d.ts +4 -1
- package/dist/types/subsystems/ComputeManager/Code.service/connectors/ECMASandbox.class.d.ts +19 -0
- package/dist/types/subsystems/LLMManager/LLM.helper.d.ts +21 -10
- package/dist/types/subsystems/LLMManager/LLM.service/LLMConnector.d.ts +5 -5
- package/dist/types/subsystems/LLMManager/LLM.service/connectors/Anthropic.class.d.ts +2 -3
- package/dist/types/subsystems/LLMManager/LLM.service/connectors/Bedrock.class.d.ts +2 -3
- package/dist/types/subsystems/LLMManager/LLM.service/connectors/Echo.class.d.ts +2 -3
- package/dist/types/subsystems/LLMManager/LLM.service/connectors/GoogleAI.class.d.ts +2 -3
- package/dist/types/subsystems/LLMManager/LLM.service/connectors/Groq.class.d.ts +2 -3
- package/dist/types/subsystems/LLMManager/LLM.service/connectors/Perplexity.class.d.ts +3 -4
- package/dist/types/subsystems/LLMManager/LLM.service/connectors/VertexAI.class.d.ts +15 -7
- package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/OpenAIConnector.class.d.ts +95 -0
- package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ChatCompletionsApiInterface.d.ts +87 -0
- package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/OpenAIApiInterface.d.ts +85 -0
- package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/OpenAIApiInterfaceFactory.d.ts +49 -0
- package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ResponsesApiInterface.d.ts +146 -0
- package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/constants.d.ts +10 -0
- package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/index.d.ts +4 -0
- package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/types.d.ts +38 -0
- package/dist/types/subsystems/LLMManager/LLM.service/connectors/xAI.class.d.ts +1 -2
- package/dist/types/subsystems/Security/Vault.service/connectors/JSONFileVault.class.d.ts +5 -0
- package/dist/types/types/LLM.types.d.ts +82 -37
- package/dist/types/utils/data.utils.d.ts +2 -1
- package/package.json +4 -3
- package/src/Components/APICall/APICall.class.ts +2 -1
- package/src/Components/Component.class.ts +1 -1
- package/src/Components/DataSourceLookup.class.ts +29 -10
- package/src/Components/ECMASandbox.class.ts +71 -0
- package/src/Components/GenAILLM.class.ts +1 -1
- package/src/Components/ServerlessCode.class.ts +2 -1
- package/src/Components/index.ts +2 -0
- package/src/Core/ConnectorsService.ts +3 -3
- package/src/helpers/ECMASandbox.helper.ts +54 -0
- package/src/helpers/Log.helper.ts +57 -17
- package/src/index.ts +188 -185
- package/src/index.ts.bak +188 -185
- package/src/subsystems/AgentManager/Agent.class.ts +11 -6
- package/src/subsystems/AgentManager/AgentRuntime.class.ts +13 -13
- package/src/subsystems/ComputeManager/Code.service/connectors/ECMASandbox.class.ts +131 -0
- package/src/subsystems/ComputeManager/Code.service/index.ts +2 -0
- package/src/subsystems/LLMManager/LLM.helper.ts +57 -27
- package/src/subsystems/LLMManager/LLM.inference.ts +4 -0
- package/src/subsystems/LLMManager/LLM.service/LLMConnector.ts +123 -28
- package/src/subsystems/LLMManager/LLM.service/connectors/Anthropic.class.ts +13 -14
- package/src/subsystems/LLMManager/LLM.service/connectors/Bedrock.class.ts +2 -7
- package/src/subsystems/LLMManager/LLM.service/connectors/Echo.class.ts +2 -6
- package/src/subsystems/LLMManager/LLM.service/connectors/GoogleAI.class.ts +8 -14
- package/src/subsystems/LLMManager/LLM.service/connectors/Groq.class.ts +2 -7
- package/src/subsystems/LLMManager/LLM.service/connectors/Perplexity.class.ts +2 -7
- package/src/subsystems/LLMManager/LLM.service/connectors/VertexAI.class.ts +121 -9
- package/src/subsystems/LLMManager/LLM.service/connectors/openai/OpenAIConnector.class.ts +455 -0
- package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ChatCompletionsApiInterface.ts +528 -0
- package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/OpenAIApiInterface.ts +100 -0
- package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/OpenAIApiInterfaceFactory.ts +81 -0
- package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ResponsesApiInterface.ts +853 -0
- package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/constants.ts +37 -0
- package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/index.ts +4 -0
- package/src/subsystems/LLMManager/LLM.service/connectors/openai/types.ts +37 -0
- package/src/subsystems/LLMManager/LLM.service/connectors/xAI.class.ts +0 -5
- package/src/subsystems/LLMManager/LLM.service/index.ts +1 -1
- package/src/subsystems/MemoryManager/Cache.service/connectors/RedisCache.class.ts +18 -0
- package/src/subsystems/MemoryManager/RuntimeContext.ts +33 -16
- package/src/subsystems/Security/Vault.service/connectors/JSONFileVault.class.ts +68 -42
- package/src/types/LLM.types.ts +91 -43
- package/src/utils/data.utils.ts +3 -2
- package/src/subsystems/LLMManager/LLM.service/connectors/OpenAI.class.ts +0 -848
|
@@ -0,0 +1,853 @@
|
|
|
1
|
+
import EventEmitter from 'events';
|
|
2
|
+
import OpenAI from 'openai';
|
|
3
|
+
import type { Stream } from 'openai/streaming';
|
|
4
|
+
|
|
5
|
+
import { BinaryInput } from '@sre/helpers/BinaryInput.helper';
|
|
6
|
+
import { AccessCandidate } from '@sre/Security/AccessControl/AccessCandidate.class';
|
|
7
|
+
import {
|
|
8
|
+
TLLMParams,
|
|
9
|
+
TLLMPreparedParams,
|
|
10
|
+
ILLMRequestContext,
|
|
11
|
+
TLLMMessageBlock,
|
|
12
|
+
ToolData,
|
|
13
|
+
TLLMToolResultMessageBlock,
|
|
14
|
+
TLLMMessageRole,
|
|
15
|
+
APIKeySource,
|
|
16
|
+
TLLMEvent,
|
|
17
|
+
OpenAIToolDefinition,
|
|
18
|
+
LegacyToolDefinition,
|
|
19
|
+
LLMModelInfo,
|
|
20
|
+
} from '@sre/types/LLM.types';
|
|
21
|
+
import { OpenAIApiInterface, ToolConfig } from './OpenAIApiInterface';
|
|
22
|
+
import { HandlerDependencies, TToolType } from '../types';
|
|
23
|
+
import { SUPPORTED_MIME_TYPES_MAP } from '@sre/constants';
|
|
24
|
+
import { MODELS_WITHOUT_TEMPERATURE_SUPPORT, SEARCH_TOOL_COSTS } from './constants';
|
|
25
|
+
|
|
26
|
+
// File size limits in bytes — enforced by validateFileSize() before attachments
// are base64-encoded into the request payload.
const MAX_IMAGE_SIZE = 20 * 1024 * 1024; // 20MB
const MAX_DOCUMENT_SIZE = 25 * 1024 * 1024; // 25MB

// Context-window size accepted by the OpenAI web_search tool.
type TSearchContextSize = 'low' | 'medium' | 'high';
// Approximate user-location hint passed to the web_search tool.
type TSearchLocation = {
    type: 'approximate';
    city?: string;
    country?: string;
    region?: string;
    timezone?: string;
};
|
|
38
|
+
|
|
39
|
+
/**
|
|
40
|
+
* OpenAI Responses API interface implementation
|
|
41
|
+
* Handles all Responses API-specific logic including:
|
|
42
|
+
* - Stream creation and handling
|
|
43
|
+
* - Request body preparation
|
|
44
|
+
* - Tool and message transformations
|
|
45
|
+
* - File attachment handling
|
|
46
|
+
*/
|
|
47
|
+
export class ResponsesApiInterface extends OpenAIApiInterface {
    // Connector-level callbacks (client factory, usage reporting) injected by the owner.
    private deps: HandlerDependencies;
    // MIME types OpenAI accepts for image / document attachments.
    private validImageMimeTypes = SUPPORTED_MIME_TYPES_MAP.OpenAI.image;
    private validDocumentMimeTypes = SUPPORTED_MIME_TYPES_MAP.OpenAI.document;

    constructor(context: ILLMRequestContext, deps: HandlerDependencies) {
        super(context);
        this.deps = deps;
    }
|
|
56
|
+
|
|
57
|
+
async createRequest(body: OpenAI.Responses.ResponseCreateParams, context: ILLMRequestContext): Promise<OpenAI.Responses.Response> {
|
|
58
|
+
const openai = await this.deps.getClient(context);
|
|
59
|
+
return await openai.responses.create({
|
|
60
|
+
...body,
|
|
61
|
+
stream: false,
|
|
62
|
+
});
|
|
63
|
+
}
|
|
64
|
+
|
|
65
|
+
async createStream(
|
|
66
|
+
body: OpenAI.Responses.ResponseCreateParams,
|
|
67
|
+
context: ILLMRequestContext
|
|
68
|
+
): Promise<Stream<OpenAI.Responses.ResponseStreamEvent>> {
|
|
69
|
+
const openai = await this.deps.getClient(context);
|
|
70
|
+
return (await openai.responses.create({
|
|
71
|
+
...body,
|
|
72
|
+
stream: true,
|
|
73
|
+
})) as Stream<OpenAI.Responses.ResponseStreamEvent>;
|
|
74
|
+
}
|
|
75
|
+
|
|
76
|
+
/**
 * Consume a Responses API stream and re-emit it as the connector's generic
 * EventEmitter protocol ('data' / 'content' / tool-info / usage / 'end').
 * The emitter is returned synchronously; the stream is drained in a detached
 * async IIFE so callers can attach listeners before any event fires.
 */
public handleStream(stream: Stream<OpenAI.Responses.ResponseStreamEvent>, context: ILLMRequestContext): EventEmitter {
    const emitter = new EventEmitter();
    const usage_data: any[] = []; // raw usage objects collected while draining
    const reportedUsage: any[] = []; // usage records returned by deps.reportUsage
    let finishReason = 'stop';

    // Process stream asynchronously while returning emitter immediately
    (async () => {
        let finalToolsData: ToolData[] = [];

        try {
            // Step 1: Process the stream (fills usage_data as a side effect)
            const streamResult = await this.processStream(stream, emitter, usage_data);
            finalToolsData = streamResult.toolsData;
            finishReason = streamResult.finishReason;

            // Step 2: Report usage statistics
            this.reportUsageStatistics(usage_data, context, reportedUsage);

            // Step 3: Emit final events ('end' is deferred via setImmediate there)
            this.emitFinalEvents(emitter, finalToolsData, reportedUsage, finishReason);
        } catch (error) {
            // Any failure while draining surfaces as a single 'error' event.
            emitter.emit('error', error);
        }
    })();

    return emitter;
}
|
|
104
|
+
|
|
105
|
+
/**
|
|
106
|
+
* Process the responses API stream format
|
|
107
|
+
*/
|
|
108
|
+
/**
 * Drain the Responses API event stream.
 * Emits text deltas as they arrive, accumulates function-call and web-search
 * tool invocations into ToolData entries, and appends any usage payloads to
 * the caller-owned `usage_data` array.
 *
 * @param stream     raw SDK event stream
 * @param emitter    target for 'data'/'content' events
 * @param usage_data mutated in place — one entry per usage payload seen
 * @returns accumulated tool calls and the stream's finish reason
 */
private async processStream(
    stream: Stream<OpenAI.Responses.ResponseStreamEvent>,
    emitter: EventEmitter,
    usage_data: any[]
): Promise<{ toolsData: ToolData[]; finishReason: string }> {
    let toolsData: ToolData[] = [];
    let finishReason = 'stop';

    for await (const part of stream) {
        // Handle different event types from the Responses API stream
        if ('type' in part) {
            const event = part.type;

            switch (event) {
                case 'response.output_text.delta': {
                    if ('delta' in part && part.delta) {
                        // Emit content in delta format for compatibility with the
                        // Chat Completions style consumers downstream.
                        const deltaMsg = {
                            role: 'assistant',
                            content: part.delta,
                        };
                        emitter.emit('data', deltaMsg);
                        emitter.emit('content', part.delta, 'assistant');
                    }
                    break;
                }
                case 'response.function_call_arguments.delta': {
                    // Function-call arguments stream incrementally, keyed by call_id.
                    // `any` cast works around missing fields in the SDK types.
                    const partAny = part as any;
                    if (partAny?.delta && partAny?.call_id) {
                        // Find or create tool data entry
                        let toolIndex = toolsData.findIndex((t) => t.id === partAny.call_id);
                        if (toolIndex === -1) {
                            toolIndex = toolsData.length;
                            toolsData.push({
                                index: toolIndex,
                                id: partAny.call_id,
                                type: 'function',
                                name: partAny?.name || '',
                                arguments: '',
                                role: 'tool',
                            });
                        }
                        // Arguments accumulate as a concatenated JSON string.
                        toolsData[toolIndex].arguments += partAny.delta;
                    }
                    break;
                }
                case 'response.web_search_call.started' as any:
                case 'response.web_search_call.completed' as any: {
                    // Web search events are newer event types not yet in the official
                    // SDK typings, hence the `as any` case labels.
                    const partAny = part as any;
                    if (partAny?.id) {
                        // Find or create web search tool data entry
                        let toolIndex = toolsData.findIndex((t) => t.id === partAny.id);
                        if (toolIndex === -1) {
                            toolIndex = toolsData.length;
                            toolsData.push({
                                index: toolIndex,
                                id: partAny.id,
                                type: TToolType.WebSearch,
                                name: 'web_search',
                                arguments: partAny?.query || '',
                                role: 'tool',
                            });
                        } else {
                            // Update existing entry (completed event may carry the query)
                            if (partAny?.query) {
                                toolsData[toolIndex].arguments = partAny.query;
                            }
                        }
                    }
                    break;
                }
                default: {
                    // Handle other event types including response completion.
                    // NOTE(review): this only ever re-asserts the default 'stop';
                    // non-'stop' finish reasons are never produced here — confirm
                    // whether incomplete/length terminations should be detected.
                    if (event.includes('done')) {
                        finishReason = 'stop';
                    }
                    break;
                }
            }
        }

        // Usage arrives attached to response-level events, not as its own event.
        if ('response' in part && (part as any).response?.usage) {
            usage_data.push((part as any).response.usage);
        }
    }

    return { toolsData: this.extractToolCalls(toolsData), finishReason };
}
|
|
199
|
+
|
|
200
|
+
/**
|
|
201
|
+
* Extract and format tool calls from the accumulated data
|
|
202
|
+
*/
|
|
203
|
+
private extractToolCalls(output: ToolData[]): ToolData[] {
    // Project each accumulated entry onto the canonical ToolData field set,
    // dropping anything else that may have been attached during streaming.
    return output.map(({ index, name, arguments: args, id, type, role }) => ({
        index,
        name,
        arguments: args,
        id,
        type,
        role,
    }));
}
|
|
213
|
+
|
|
214
|
+
/**
|
|
215
|
+
* Report usage statistics
|
|
216
|
+
*/
|
|
217
|
+
private reportUsageStatistics(usage_data: any[], context: ILLMRequestContext, reportedUsage: any[]): void {
    // Report normal token usage
    usage_data.forEach((usage) => {
        // BUGFIX: the Responses API reports `input_tokens`/`output_tokens`
        // (per OpenAI's ResponseUsage shape), while the shared reporter expects
        // Chat Completions style `prompt_tokens`/`completion_tokens`. The old
        // mapping only read the Chat Completions names, so Responses usage was
        // always reported as 0 tokens. Accept both spellings.
        const convertedUsage = {
            ...usage,
            completion_tokens: usage.completion_tokens ?? usage.output_tokens ?? 0,
            prompt_tokens: usage.prompt_tokens ?? usage.input_tokens ?? 0,
            total_tokens: usage.total_tokens ?? 0,
        };
        const reported = this.deps.reportUsage(convertedUsage, this.buildUsageContext(context));
        reportedUsage.push(reported);
    });

    // Web search is billed per call rather than per token — report it separately.
    if (context.toolsInfo?.openai?.webSearch?.enabled) {
        const searchUsage = this.calculateSearchToolUsage(context);
        const reported = this.deps.reportUsage(searchUsage, this.buildUsageContext(context));
        reportedUsage.push(reported);
    }
}
|
|
238
|
+
|
|
239
|
+
/**
|
|
240
|
+
* Emit final events
|
|
241
|
+
*/
|
|
242
|
+
private emitFinalEvents(emitter: EventEmitter, toolsData: ToolData[], reportedUsage: any[], finishReason: string): void {
    // Surface tool invocations first so consumers see them before 'end'.
    if (toolsData.length > 0) {
        emitter.emit(TLLMEvent.ToolInfo, toolsData);
    }

    // Anything other than a clean stop is reported as an interruption.
    const interrupted = finishReason !== 'stop';
    if (interrupted) {
        emitter.emit('interrupted', finishReason);
    }

    // Defer 'end' one tick so listeners attached in the same turn still receive it.
    setImmediate(() => emitter.emit('end', toolsData, reportedUsage, finishReason));
}
|
|
258
|
+
|
|
259
|
+
/**
|
|
260
|
+
* Build usage context parameters from request context
|
|
261
|
+
*/
|
|
262
|
+
private buildUsageContext(context: ILLMRequestContext) {
    // User-supplied keys are attributed differently from platform keys.
    const keySource = context.isUserKey ? APIKeySource.User : APIKeySource.Smyth;
    const { modelEntryName, agentId, teamId } = context;
    return { modelEntryName, keySource, agentId, teamId };
}
|
|
270
|
+
|
|
271
|
+
/**
|
|
272
|
+
* Calculate search tool usage with cost
|
|
273
|
+
*/
|
|
274
|
+
private calculateSearchToolUsage(context: ILLMRequestContext) {
    // Strip the registry prefix before the cost lookup.
    const modelName = context.modelEntryName?.replace('smythos/', '');
    const contextSize = context.toolsInfo?.openai?.webSearch?.contextSize;

    // Web search is a flat per-call cost; token counts are intentionally zero.
    return {
        cost: this.getSearchToolCost(modelName, contextSize),
        completion_tokens: 0,
        prompt_tokens: 0,
        total_tokens: 0,
    };
}
|
|
285
|
+
|
|
286
|
+
/**
 * Build the Responses API request body from prepared LLM params:
 * messages (with tool-message transformation applied), sampling options,
 * function tools, and the optional web-search tool.
 */
public async prepareRequestBody(params: TLLMPreparedParams): Promise<OpenAI.Responses.ResponseCreateParams> {
    let input = await this.prepareInputMessages(params);

    // Apply tool message transformation to input messages.
    // There's a difference in the tools message data structures between
    // `Chat Completions` and the `Responses` interface. Since we don't have
    // enough context for the interface in `transformToolMessageBlocks`, we
    // perform the transformation here so it's compatible with `Responses`.
    input = this.applyToolMessageTransformation(input);

    const body: OpenAI.Responses.ResponseCreateParams = {
        model: params.model as string,
        input,
    };

    // Handle max tokens (Responses API name: max_output_tokens)
    if (params?.maxTokens !== undefined) {
        body.max_output_tokens = params.maxTokens;
    }

    // o3-pro does not support temperature.
    // NOTE(review): this compares the raw modelEntryName, while
    // calculateSearchToolUsage strips the 'smythos/' prefix first — confirm
    // MODELS_WITHOUT_TEMPERATURE_SUPPORT entries use the prefixed form.
    if (params?.temperature !== undefined && !MODELS_WITHOUT_TEMPERATURE_SUPPORT.includes(params.modelEntryName)) {
        body.temperature = params.temperature;
    }

    if (params?.topP !== undefined) {
        body.top_p = params.topP;
    }

    let tools: OpenAI.Responses.Tool[] = [];

    if (params?.toolsConfig?.tools && params?.toolsConfig?.tools?.length > 0) {
        tools = await this.prepareFunctionTools(params);
    }

    // Add null safety check before accessing toolsInfo
    if (params.toolsInfo?.openai?.webSearch?.enabled) {
        const searchTool = this.prepareWebSearchTool(params);
        tools.push(searchTool);
    }

    // tool_choice is only meaningful when at least one tool is attached.
    if (tools.length > 0) {
        body.tools = tools;

        if (params?.toolsConfig?.tool_choice) {
            body.tool_choice = params?.toolsConfig?.tool_choice as any;
        }
    }

    return body;
}
|
|
335
|
+
|
|
336
|
+
/**
|
|
337
|
+
* Type guard to check if a tool is an OpenAI tool definition
|
|
338
|
+
*/
|
|
339
|
+
private isOpenAIToolDefinition(tool: OpenAIToolDefinition | LegacyToolDefinition): tool is OpenAIToolDefinition {
    // Only the OpenAI-native shape carries a JSON-schema `parameters` field.
    const hasJsonSchemaParameters = 'parameters' in tool;
    return hasJsonSchemaParameters;
}
|
|
342
|
+
|
|
343
|
+
/**
|
|
344
|
+
* Transform OpenAI tool definitions to Responses.Tool format
|
|
345
|
+
*/
|
|
346
|
+
public transformToolsConfig(config: ToolConfig): OpenAI.Responses.Tool[] {
    return config.toolDefinitions.map((tool) => {
        // Native definitions already carry a JSON schema; legacy ones are
        // reassembled from their flat properties/requiredFields fields.
        const parameters = this.isOpenAIToolDefinition(tool)
            ? tool.parameters
            : {
                  type: 'object',
                  properties: tool.properties || {},
                  required: tool.requiredFields || [],
              };

        return {
            type: 'function' as const,
            name: tool.name,
            description: tool.description,
            parameters,
            strict: false, // required by the OpenAI Responses API
        } as OpenAI.Responses.Tool;
    });
}
|
|
373
|
+
|
|
374
|
+
/**
|
|
375
|
+
* Transform assistant message block with tool calls for Responses API
|
|
376
|
+
*/
|
|
377
|
+
private transformAssistantMessageBlock(messageBlock: TLLMMessageBlock): TLLMToolResultMessageBlock {
    // Flatten content to a plain string and normalize any attached tool calls.
    const block: TLLMToolResultMessageBlock = {
        ...messageBlock,
        content: this.normalizeContent(messageBlock.content),
    };

    if (block.tool_calls) {
        block.tool_calls = this.transformToolCalls(block.tool_calls);
    }

    return block;
}
|
|
390
|
+
|
|
391
|
+
/**
|
|
392
|
+
* Transform individual tool calls to ensure proper formatting
|
|
393
|
+
*/
|
|
394
|
+
private transformToolCalls(toolCalls: ToolData[]): ToolData[] {
    return toolCalls.map((toolCall) => {
        // The Responses API requires stringified JSON arguments; normalize the
        // nested function arguments and the root-level copy (kept for
        // backward compatibility) independently.
        const nestedArguments = this.normalizeToolArguments(toolCall.function?.arguments || toolCall.arguments);
        const rootArguments = this.normalizeToolArguments(toolCall.arguments);

        return {
            ...toolCall,
            function: {
                ...toolCall.function,
                arguments: nestedArguments,
            },
            arguments: rootArguments,
        };
    });
}
|
|
406
|
+
|
|
407
|
+
/**
|
|
408
|
+
* Transform tool results with comprehensive error handling and type support
|
|
409
|
+
*/
|
|
410
|
+
private transformToolResults(toolsData: ToolData[]): TLLMToolResultMessageBlock[] {
    // Drop incomplete entries, then convert each survivor to a result message.
    const completeEntries = toolsData.filter((toolData) => this.isValidToolData(toolData));
    return completeEntries.map((toolData) => this.createToolResultMessage(toolData));
}
|
|
413
|
+
|
|
414
|
+
/**
|
|
415
|
+
* Create a tool result message for the Responses API format
|
|
416
|
+
*/
|
|
417
|
+
private createToolResultMessage(toolData: ToolData): TLLMToolResultMessageBlock {
    // Errors take precedence over any result payload.
    const content = toolData.error ? this.formatToolError(toolData) : this.formatToolResult(toolData);

    return {
        tool_call_id: toolData.id,
        role: TLLMMessageRole.Tool,
        name: toolData.name,
        content,
    };
}
|
|
432
|
+
|
|
433
|
+
/**
|
|
434
|
+
* Format tool result content based on type and handle special cases
|
|
435
|
+
*/
|
|
436
|
+
private formatToolResult(toolData: ToolData): string {
    const result = toolData.result;

    // Strings pass through untouched.
    if (typeof result === 'string') {
        return result;
    }

    // FIX: the web-search special case previously sat AFTER the generic
    // string/object branches, which made it unreachable for the common result
    // types. Hoisting it here is output-identical today (formatWebSearchResult
    // serializes strings/objects the same way) but makes the intended routing
    // actually take effect and survive future formatter changes.
    if (this.isWebSearchTool(toolData)) {
        return this.formatWebSearchResult(result);
    }

    // Structured results are pretty-printed as JSON.
    if (typeof result === 'object' && result !== null) {
        try {
            return JSON.stringify(result, null, 2);
        } catch (error) {
            // e.g. circular references
            return `[Error serializing result: ${error instanceof Error ? error.message : 'Unknown error'}]`;
        }
    }

    // Handle undefined/null results
    if (result === undefined || result === null) {
        return `[Tool ${toolData.name} completed with no result]`;
    }

    // Fallback for primitives (numbers, booleans)
    return String(result);
}
|
|
465
|
+
|
|
466
|
+
/**
|
|
467
|
+
* Format tool error messages with context
|
|
468
|
+
*/
|
|
469
|
+
private formatToolError(toolData: ToolData): string {
    // Fall back to a generic message when the error payload is empty.
    const message = toolData.error || 'Unknown error occurred';
    return `[Tool Error in ${toolData.name}]: ${message}`;
}
|
|
473
|
+
|
|
474
|
+
/**
|
|
475
|
+
* Normalize content to string format for Responses API
|
|
476
|
+
*/
|
|
477
|
+
/**
 * Normalize arbitrary message content to the plain-string form the
 * Responses API expects: strings pass through, arrays are flattened to
 * their text parts, objects are JSON-serialized.
 */
private normalizeContent(content: any): string {
    if (typeof content === 'string') {
        return content;
    }

    if (Array.isArray(content)) {
        // Handle array content by extracting text parts.
        // FIX: removed the dead `item?.type === 'text' && item?.text` branch —
        // the preceding `item?.text` check already returned for every item it
        // could match, so it was unreachable.
        return content
            .map((item) => {
                if (typeof item === 'string') return item;
                if (item?.text) return item.text;
                return JSON.stringify(item);
            })
            .join(' ');
    }

    if (typeof content === 'object' && content !== null) {
        try {
            return JSON.stringify(content);
        } catch (error) {
            // e.g. circular references
            return '[Error serializing content]';
        }
    }

    // null/undefined become '', other primitives stringify.
    return String(content || '');
}
|
|
504
|
+
|
|
505
|
+
/**
|
|
506
|
+
* Normalize tool arguments to string format for Responses API
|
|
507
|
+
*/
|
|
508
|
+
private normalizeToolArguments(args: any): string {
    // Missing arguments become an empty JSON object.
    if (args === undefined || args === null) {
        return '{}';
    }

    if (typeof args === 'string') {
        // Already a string — keep it if it parses as JSON, otherwise
        // JSON-quote it so the API receives valid JSON.
        try {
            JSON.parse(args);
            return args;
        } catch {
            return JSON.stringify(args);
        }
    }

    if (typeof args === 'object') {
        try {
            return JSON.stringify(args);
        } catch {
            return '{}'; // unserializable (e.g. circular) — fall back to empty object
        }
    }

    // Primitives (number, boolean) serialize directly.
    return JSON.stringify(args);
}
|
|
535
|
+
|
|
536
|
+
/**
|
|
537
|
+
* Validate if tool data is complete and valid for transformation
|
|
538
|
+
*/
|
|
539
|
+
private isValidToolData(toolData: ToolData): boolean {
    // An entry is usable only with an id, a name, and either a result or an error.
    if (!toolData || !toolData.id || !toolData.name) return false;
    return toolData.result !== undefined || toolData.error !== undefined;
}
|
|
542
|
+
|
|
543
|
+
/**
|
|
544
|
+
* Check if the tool is a web search tool based on type or name
|
|
545
|
+
*/
|
|
546
|
+
private isWebSearchTool(toolData: ToolData): boolean {
    // Explicit type wins; otherwise fall back to a name heuristic.
    if (toolData.type === TToolType.WebSearch) return true;
    const loweredName = toolData.name?.toLowerCase();
    return Boolean(loweredName?.includes('search') || loweredName?.includes('web'));
}
|
|
551
|
+
|
|
552
|
+
/**
|
|
553
|
+
* Format web search results with better structure
|
|
554
|
+
*/
|
|
555
|
+
private formatWebSearchResult(result: any): string {
    if (!result) return '[Web search completed with no results]';

    // Well-formed string results pass through unchanged.
    if (typeof result === 'string') {
        return result;
    }

    // Structured results (results/items/data or any other shape) all
    // pretty-print identically today.
    if (typeof result === 'object') {
        try {
            return JSON.stringify(result, null, 2);
        } catch (error) {
            return '[Error formatting web search results]';
        }
    }

    return String(result);
}
|
|
578
|
+
|
|
579
|
+
async handleFileAttachments(files: BinaryInput[], agentId: string, messages: any[]): Promise<any[]> {
|
|
580
|
+
if (files.length === 0) return messages;
|
|
581
|
+
|
|
582
|
+
const uploadedFiles = await this.uploadFiles(files, agentId);
|
|
583
|
+
const validImageFiles = this.getValidImageFiles(uploadedFiles);
|
|
584
|
+
const validDocumentFiles = this.getValidDocumentFiles(uploadedFiles);
|
|
585
|
+
|
|
586
|
+
// Process images and documents with Responses API specific formatting
|
|
587
|
+
const imageData = await this.processImageData(validImageFiles, agentId);
|
|
588
|
+
const documentData = await this.processDocumentData(validDocumentFiles, agentId);
|
|
589
|
+
|
|
590
|
+
// Find the last user message and add files to it
|
|
591
|
+
for (let i = messages.length - 1; i >= 0; i--) {
|
|
592
|
+
if (messages[i].role === 'user') {
|
|
593
|
+
// Ensure content is an array before pushing files
|
|
594
|
+
if (typeof messages[i].content === 'string') {
|
|
595
|
+
messages[i].content = [{ type: 'input_text', text: messages[i].content }];
|
|
596
|
+
} else if (!Array.isArray(messages[i].content)) {
|
|
597
|
+
messages[i].content = [];
|
|
598
|
+
}
|
|
599
|
+
messages[i].content.push(...imageData, ...documentData);
|
|
600
|
+
break;
|
|
601
|
+
}
|
|
602
|
+
}
|
|
603
|
+
|
|
604
|
+
// If no user message found, create one with files
|
|
605
|
+
if (!messages.some((item) => item.role === 'user')) {
|
|
606
|
+
messages.push({
|
|
607
|
+
role: 'user',
|
|
608
|
+
content: [...imageData, ...documentData],
|
|
609
|
+
});
|
|
610
|
+
}
|
|
611
|
+
|
|
612
|
+
return messages;
|
|
613
|
+
}
|
|
614
|
+
|
|
615
|
+
/**
|
|
616
|
+
* Get valid image files based on supported MIME types
|
|
617
|
+
*/
|
|
618
|
+
private getValidImageFiles(files: BinaryInput[]): BinaryInput[] {
    // Keep only files whose MIME type OpenAI accepts as an image.
    return files.filter((candidate) => this.validImageMimeTypes.includes(candidate?.mimetype));
}
|
|
621
|
+
|
|
622
|
+
/**
|
|
623
|
+
* Get valid document files based on supported MIME types
|
|
624
|
+
*/
|
|
625
|
+
private getValidDocumentFiles(files: BinaryInput[]): BinaryInput[] {
    // Keep only files whose MIME type OpenAI accepts as a document.
    return files.filter((candidate) => this.validDocumentMimeTypes.includes(candidate?.mimetype));
}
|
|
628
|
+
|
|
629
|
+
/**
|
|
630
|
+
* Upload files to storage
|
|
631
|
+
*/
|
|
632
|
+
private async uploadFiles(files: BinaryInput[], agentId: string): Promise<BinaryInput[]> {
|
|
633
|
+
const promises = files.map((file) => {
|
|
634
|
+
const binaryInput = BinaryInput.from(file);
|
|
635
|
+
return binaryInput.upload(AccessCandidate.agent(agentId)).then(() => binaryInput);
|
|
636
|
+
});
|
|
637
|
+
|
|
638
|
+
return Promise.all(promises);
|
|
639
|
+
}
|
|
640
|
+
|
|
641
|
+
/**
|
|
642
|
+
* Process image files with Responses API specific formatting
|
|
643
|
+
*/
|
|
644
|
+
private async processImageData(files: BinaryInput[], agentId: string): Promise<any[]> {
|
|
645
|
+
if (files.length === 0) return [];
|
|
646
|
+
|
|
647
|
+
const imageData = [];
|
|
648
|
+
for (const file of files) {
|
|
649
|
+
await this.validateFileSize(file, MAX_IMAGE_SIZE, 'Image');
|
|
650
|
+
|
|
651
|
+
const bufferData = await file.readData(AccessCandidate.agent(agentId));
|
|
652
|
+
const base64Data = bufferData.toString('base64');
|
|
653
|
+
const url = `data:${file.mimetype};base64,${base64Data}`;
|
|
654
|
+
|
|
655
|
+
imageData.push({
|
|
656
|
+
type: 'input_image',
|
|
657
|
+
image_url: url,
|
|
658
|
+
});
|
|
659
|
+
}
|
|
660
|
+
|
|
661
|
+
return imageData;
|
|
662
|
+
}
|
|
663
|
+
|
|
664
|
+
/**
|
|
665
|
+
* Process document files with Responses API specific formatting
|
|
666
|
+
*/
|
|
667
|
+
private async processDocumentData(files: BinaryInput[], agentId: string): Promise<any[]> {
|
|
668
|
+
if (files.length === 0) return [];
|
|
669
|
+
|
|
670
|
+
const documentData = [];
|
|
671
|
+
for (const file of files) {
|
|
672
|
+
await this.validateFileSize(file, MAX_DOCUMENT_SIZE, 'Document');
|
|
673
|
+
|
|
674
|
+
const bufferData = await file.readData(AccessCandidate.agent(agentId));
|
|
675
|
+
const base64Data = bufferData.toString('base64');
|
|
676
|
+
const fileData = `data:${file.mimetype};base64,${base64Data}`;
|
|
677
|
+
const filename = await file.getName();
|
|
678
|
+
|
|
679
|
+
documentData.push({
|
|
680
|
+
type: 'input_file',
|
|
681
|
+
file: {
|
|
682
|
+
file_data: fileData,
|
|
683
|
+
filename,
|
|
684
|
+
},
|
|
685
|
+
});
|
|
686
|
+
}
|
|
687
|
+
|
|
688
|
+
return documentData;
|
|
689
|
+
}
|
|
690
|
+
|
|
691
|
+
/**
|
|
692
|
+
* Validate file size before processing
|
|
693
|
+
*/
|
|
694
|
+
private async validateFileSize(file: BinaryInput, maxSize: number, fileType: string): Promise<void> {
|
|
695
|
+
await file.ready();
|
|
696
|
+
const fileInfo = await file.getJsonData(AccessCandidate.agent('temp'));
|
|
697
|
+
if (fileInfo.size > maxSize) {
|
|
698
|
+
throw new Error(`${fileType} file size (${fileInfo.size} bytes) exceeds maximum allowed size of ${maxSize} bytes`);
|
|
699
|
+
}
|
|
700
|
+
}
|
|
701
|
+
|
|
702
|
+
getInterfaceName(): string {
|
|
703
|
+
return 'responses';
|
|
704
|
+
}
|
|
705
|
+
|
|
706
|
+
validateParameters(params: TLLMParams): boolean {
|
|
707
|
+
// Basic validation for Responses API parameters
|
|
708
|
+
return !!params.model;
|
|
709
|
+
}
|
|
710
|
+
|
|
711
|
+
/**
|
|
712
|
+
* Prepare input messages for Responses API
|
|
713
|
+
*/
|
|
714
|
+
private async prepareInputMessages(params: TLLMParams): Promise<any[]> {
|
|
715
|
+
const messages = params?.messages || [];
|
|
716
|
+
const files: BinaryInput[] = params?.files || [];
|
|
717
|
+
|
|
718
|
+
// Start with raw messages - transformation now happens in applyToolMessageTransformation
|
|
719
|
+
let input = [...messages];
|
|
720
|
+
|
|
721
|
+
// Handle files if present
|
|
722
|
+
if (files.length > 0) {
|
|
723
|
+
input = await this.handleFileAttachments(files, params.agentId, input);
|
|
724
|
+
}
|
|
725
|
+
|
|
726
|
+
return input;
|
|
727
|
+
}
|
|
728
|
+
|
|
729
|
+
/**
|
|
730
|
+
* Prepare tools for request
|
|
731
|
+
*/
|
|
732
|
+
private async prepareFunctionTools(params: TLLMParams): Promise<OpenAI.Responses.Tool[]> {
|
|
733
|
+
const tools: OpenAI.Responses.Tool[] = [];
|
|
734
|
+
|
|
735
|
+
// Add regular function tools
|
|
736
|
+
if (params?.toolsConfig?.tools && params?.toolsConfig?.tools?.length > 0) {
|
|
737
|
+
// Now we can pass the tools directly to transformToolsConfig
|
|
738
|
+
// which handles type detection and conversion properly
|
|
739
|
+
const toolsConfig = this.transformToolsConfig({
|
|
740
|
+
type: 'function',
|
|
741
|
+
toolDefinitions: params.toolsConfig.tools as (OpenAIToolDefinition | LegacyToolDefinition)[],
|
|
742
|
+
toolChoice: 'auto',
|
|
743
|
+
modelInfo: (params.modelInfo as LLMModelInfo) || null,
|
|
744
|
+
});
|
|
745
|
+
tools.push(...toolsConfig);
|
|
746
|
+
}
|
|
747
|
+
|
|
748
|
+
return tools;
|
|
749
|
+
}
|
|
750
|
+
|
|
751
|
+
/**
|
|
752
|
+
* Get web search tool configuration for OpenAI Responses API
|
|
753
|
+
* According to OpenAI documentation: https://platform.openai.com/docs/api-reference/responses/create
|
|
754
|
+
*/
|
|
755
|
+
private prepareWebSearchTool(params: TLLMPreparedParams): OpenAI.Responses.WebSearchTool {
|
|
756
|
+
const webSearch = params?.toolsInfo?.openai?.webSearch;
|
|
757
|
+
const contextSize = webSearch?.contextSize;
|
|
758
|
+
const searchCity = webSearch?.city;
|
|
759
|
+
const searchCountry = webSearch?.country;
|
|
760
|
+
const searchRegion = webSearch?.region;
|
|
761
|
+
const searchTimezone = webSearch?.timezone;
|
|
762
|
+
|
|
763
|
+
// Prepare location object - build incrementally if any location parameters exist
|
|
764
|
+
const userLocation: TSearchLocation = {
|
|
765
|
+
type: 'approximate', // Required, always be 'approximate' when we implement location
|
|
766
|
+
};
|
|
767
|
+
|
|
768
|
+
// Add location fields if they exist
|
|
769
|
+
if (searchCity) userLocation.city = searchCity;
|
|
770
|
+
if (searchCountry) userLocation.country = searchCountry;
|
|
771
|
+
if (searchRegion) userLocation.region = searchRegion;
|
|
772
|
+
if (searchTimezone) userLocation.timezone = searchTimezone;
|
|
773
|
+
|
|
774
|
+
// Only include location in config if we have actual location data
|
|
775
|
+
const hasLocationData = searchCity || searchCountry || searchRegion || searchTimezone;
|
|
776
|
+
|
|
777
|
+
// Configure web search tool according to OpenAI Responses API specification
|
|
778
|
+
const searchTool: OpenAI.Responses.WebSearchTool = {
|
|
779
|
+
type: 'web_search_preview' as any, // Use correct type as per OpenAI docs
|
|
780
|
+
};
|
|
781
|
+
|
|
782
|
+
// Add optional configuration properties
|
|
783
|
+
const webSearchConfig: any = {};
|
|
784
|
+
|
|
785
|
+
if (contextSize) {
|
|
786
|
+
webSearchConfig.search_context_size = contextSize;
|
|
787
|
+
}
|
|
788
|
+
|
|
789
|
+
if (hasLocationData) {
|
|
790
|
+
webSearchConfig.user_location = userLocation;
|
|
791
|
+
}
|
|
792
|
+
|
|
793
|
+
return { ...searchTool, ...webSearchConfig };
|
|
794
|
+
}
|
|
795
|
+
|
|
796
|
+
private applyToolMessageTransformation(input: any[]): any[] {
|
|
797
|
+
const transformedMessages: any[] = [];
|
|
798
|
+
|
|
799
|
+
input.forEach((message) => {
|
|
800
|
+
if (message.role === 'assistant' && message.tool_calls) {
|
|
801
|
+
// Split assistant message with tool_calls into separate items (Responses API format)
|
|
802
|
+
if (message.content) {
|
|
803
|
+
transformedMessages.push({
|
|
804
|
+
role: 'assistant',
|
|
805
|
+
content: typeof message.content === 'object' ? JSON.stringify(message.content) : message.content,
|
|
806
|
+
});
|
|
807
|
+
}
|
|
808
|
+
|
|
809
|
+
message.tool_calls.forEach((toolCall) => {
|
|
810
|
+
transformedMessages.push({
|
|
811
|
+
type: 'function_call',
|
|
812
|
+
name: toolCall.function.name,
|
|
813
|
+
arguments:
|
|
814
|
+
typeof toolCall.function.arguments === 'object'
|
|
815
|
+
? JSON.stringify(toolCall.function.arguments)
|
|
816
|
+
: toolCall.function.arguments,
|
|
817
|
+
call_id: toolCall.id,
|
|
818
|
+
});
|
|
819
|
+
});
|
|
820
|
+
} else if (message.role === 'tool') {
|
|
821
|
+
// Transform tool message to function_call_output (Responses API format)
|
|
822
|
+
transformedMessages.push({
|
|
823
|
+
type: 'function_call_output',
|
|
824
|
+
call_id: message.tool_call_id,
|
|
825
|
+
output: typeof message.content === 'string' ? message.content : JSON.stringify(message.content),
|
|
826
|
+
});
|
|
827
|
+
} else {
|
|
828
|
+
transformedMessages.push(message);
|
|
829
|
+
}
|
|
830
|
+
});
|
|
831
|
+
|
|
832
|
+
return transformedMessages;
|
|
833
|
+
}
|
|
834
|
+
|
|
835
|
+
/**
|
|
836
|
+
* Get search tool cost for a specific model and context size
|
|
837
|
+
*/
|
|
838
|
+
private getSearchToolCost(modelName: string, contextSize: string): number {
|
|
839
|
+
const normalizedModelName = modelName?.replace('smythos/', '');
|
|
840
|
+
|
|
841
|
+
// Check normal models first
|
|
842
|
+
if (SEARCH_TOOL_COSTS.normalModels[normalizedModelName]) {
|
|
843
|
+
return SEARCH_TOOL_COSTS.normalModels[normalizedModelName][contextSize] || 0;
|
|
844
|
+
}
|
|
845
|
+
|
|
846
|
+
// Check mini models
|
|
847
|
+
if (SEARCH_TOOL_COSTS.miniModels[normalizedModelName]) {
|
|
848
|
+
return SEARCH_TOOL_COSTS.miniModels[normalizedModelName][contextSize] || 0;
|
|
849
|
+
}
|
|
850
|
+
|
|
851
|
+
return 0;
|
|
852
|
+
}
|
|
853
|
+
}
|