@ank1015/providers 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +453 -0
- package/biome.json +43 -0
- package/dist/agent/agent-loop.d.ts +5 -0
- package/dist/agent/agent-loop.d.ts.map +1 -0
- package/dist/agent/agent-loop.js +219 -0
- package/dist/agent/agent-loop.js.map +1 -0
- package/dist/agent/types.d.ts +67 -0
- package/dist/agent/types.d.ts.map +1 -0
- package/dist/agent/types.js +3 -0
- package/dist/agent/types.js.map +1 -0
- package/dist/index.d.ts +10 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +29 -0
- package/dist/index.js.map +1 -0
- package/dist/models.d.ts +3 -0
- package/dist/models.d.ts.map +1 -0
- package/dist/models.generated.d.ts +247 -0
- package/dist/models.generated.d.ts.map +1 -0
- package/dist/models.generated.js +315 -0
- package/dist/models.generated.js.map +1 -0
- package/dist/models.js +41 -0
- package/dist/models.js.map +1 -0
- package/dist/providers/convert.d.ts +6 -0
- package/dist/providers/convert.d.ts.map +1 -0
- package/dist/providers/convert.js +207 -0
- package/dist/providers/convert.js.map +1 -0
- package/dist/providers/google.d.ts +26 -0
- package/dist/providers/google.d.ts.map +1 -0
- package/dist/providers/google.js +434 -0
- package/dist/providers/google.js.map +1 -0
- package/dist/providers/openai.d.ts +17 -0
- package/dist/providers/openai.d.ts.map +1 -0
- package/dist/providers/openai.js +396 -0
- package/dist/providers/openai.js.map +1 -0
- package/dist/stream.d.ts +4 -0
- package/dist/stream.d.ts.map +1 -0
- package/dist/stream.js +40 -0
- package/dist/stream.js.map +1 -0
- package/dist/test-google-agent-loop.d.ts +2 -0
- package/dist/test-google-agent-loop.d.ts.map +1 -0
- package/dist/test-google-agent-loop.js +186 -0
- package/dist/test-google-agent-loop.js.map +1 -0
- package/dist/test-google.d.ts +2 -0
- package/dist/test-google.d.ts.map +1 -0
- package/dist/test-google.js +41 -0
- package/dist/test-google.js.map +1 -0
- package/dist/types.d.ts +187 -0
- package/dist/types.d.ts.map +1 -0
- package/dist/types.js +10 -0
- package/dist/types.js.map +1 -0
- package/dist/utils/event-stream.d.ts +16 -0
- package/dist/utils/event-stream.d.ts.map +1 -0
- package/dist/utils/event-stream.js +61 -0
- package/dist/utils/event-stream.js.map +1 -0
- package/dist/utils/json-parse.d.ts +9 -0
- package/dist/utils/json-parse.d.ts.map +1 -0
- package/dist/utils/json-parse.js +32 -0
- package/dist/utils/json-parse.js.map +1 -0
- package/dist/utils/sanitize-unicode.d.ts +22 -0
- package/dist/utils/sanitize-unicode.d.ts.map +1 -0
- package/dist/utils/sanitize-unicode.js +29 -0
- package/dist/utils/sanitize-unicode.js.map +1 -0
- package/dist/utils/validation.d.ts +11 -0
- package/dist/utils/validation.d.ts.map +1 -0
- package/dist/utils/validation.js +61 -0
- package/dist/utils/validation.js.map +1 -0
- package/package.json +33 -0
- package/src/agent/agent-loop.ts +275 -0
- package/src/agent/types.ts +80 -0
- package/src/index.ts +72 -0
- package/src/models.generated.ts +314 -0
- package/src/models.ts +45 -0
- package/src/providers/convert.ts +222 -0
- package/src/providers/google.ts +496 -0
- package/src/providers/openai.ts +437 -0
- package/src/stream.ts +60 -0
- package/src/types.ts +198 -0
- package/src/utils/event-stream.ts +60 -0
- package/src/utils/json-parse.ts +28 -0
- package/src/utils/sanitize-unicode.ts +25 -0
- package/src/utils/validation.ts +69 -0
- package/test/core/agent-loop.test.ts +958 -0
- package/test/core/stream.test.ts +409 -0
- package/test/data/red-circle.png +0 -0
- package/test/data/superintelligentwill.pdf +0 -0
- package/test/edge-cases/general.test.ts +565 -0
- package/test/integration/e2e.test.ts +530 -0
- package/test/models/cost.test.ts +499 -0
- package/test/models/registry.test.ts +298 -0
- package/test/providers/convert.test.ts +846 -0
- package/test/providers/google-schema.test.ts +666 -0
- package/test/providers/google-stream.test.ts +369 -0
- package/test/providers/openai-stream.test.ts +251 -0
- package/test/utils/event-stream.test.ts +289 -0
- package/test/utils/json-parse.test.ts +344 -0
- package/test/utils/sanitize-unicode.test.ts +329 -0
- package/test/utils/validation.test.ts +614 -0
- package/tsconfig.json +21 -0
- package/vitest.config.ts +9 -0
package/test/providers/convert.test.ts
@@ -0,0 +1,846 @@
import { describe, it, expect } from 'vitest';
import { buildOpenAIMessages, buildGoogleMessages } from '../../src/providers/convert';
import { Context, Model, UserMessage, ToolResultMessage, NativeOpenAIMessage } from '../../src/types';

// Mock models for testing
const mockOpenAIModel: Model<'openai'> = {
  id: 'test-openai',
  name: 'Test OpenAI',
  api: 'openai',
  baseUrl: 'https://api.openai.com',
  reasoning: false,
  input: ['text', 'image', 'file'],
  cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
  contextWindow: 128000,
  maxTokens: 4096,
};

const mockGoogleModel: Model<'google'> = {
  id: 'test-google',
  name: 'Test Google',
  api: 'google',
  baseUrl: 'https://generativelanguage.googleapis.com',
  reasoning: false,
  input: ['text', 'image', 'file'],
  cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
  contextWindow: 128000,
  maxTokens: 8192,
};

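// The suites below pin down the conversion layer: provider-agnostic
// Context/Message values in, provider-native request payloads out.
// Rough usage sketch, inferred from these tests (not an API reference):
//   const input = buildOpenAIMessages(model, { systemPrompt, messages });
//   // -> items shaped for the OpenAI Responses API `input` array
//   const contents = buildGoogleMessages(model, { messages });
//   // -> Content objects shaped for Gemini's `contents` array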
describe('buildOpenAIMessages', () => {
  describe('System prompt conversion', () => {
    it('should convert system prompt to developer role', () => {
      const context: Context = {
        systemPrompt: 'You are a helpful assistant.',
        messages: [],
      };

      const result = buildOpenAIMessages(mockOpenAIModel, context);

      expect(result).toHaveLength(1);
      expect(result[0]).toEqual({
        role: 'developer',
        content: 'You are a helpful assistant.',
      });
    });

    it('should handle missing system prompt', () => {
      const context: Context = {
        messages: [],
      };

      const result = buildOpenAIMessages(mockOpenAIModel, context);

      expect(result).toHaveLength(0);
    });

    it('should sanitize unicode surrogates in system prompt', () => {
      const context: Context = {
        systemPrompt: 'Test \uD800 invalid surrogate',
        messages: [],
      };

      const result = buildOpenAIMessages(mockOpenAIModel, context);

      expect(result[0]).toMatchObject({
        role: 'developer',
      });
      // Surrogate should be removed by sanitizeSurrogates
      expect((result[0] as any).content).not.toContain('\uD800');
    });
  });

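  // User content parts map onto OpenAI Responses API input types:
  // text -> input_text, image -> input_image (base64 data URL),
  // file -> input_file. Parts outside the model's declared `input`
  // capabilities are dropped rather than sent.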
  describe('User message conversion', () => {
    it('should convert user message with text content', () => {
      const userMessage: UserMessage = {
        role: 'user',
        content: [{ type: 'text', content: 'Hello, world!' }],
        timestamp: Date.now(),
      };

      const context: Context = {
        messages: [userMessage],
      };

      const result = buildOpenAIMessages(mockOpenAIModel, context);

      expect(result).toHaveLength(1);
      expect(result[0]).toEqual({
        role: 'user',
        content: [
          {
            type: 'input_text',
            text: 'Hello, world!',
          },
        ],
      });
    });

    it('should convert user message with image content', () => {
      const userMessage: UserMessage = {
        role: 'user',
        content: [
          { type: 'text', content: 'Look at this image:' },
          { type: 'image', data: 'base64data', mimeType: 'image/jpeg' },
        ],
        timestamp: Date.now(),
      };

      const context: Context = {
        messages: [userMessage],
      };

      const result = buildOpenAIMessages(mockOpenAIModel, context);

      expect(result).toHaveLength(1);
      expect(result[0]).toMatchObject({
        role: 'user',
        content: [
          { type: 'input_text', text: 'Look at this image:' },
          {
            type: 'input_image',
            detail: 'auto',
            image_url: 'data:image/jpeg;base64,base64data',
          },
        ],
      });
    });

    it('should convert user message with file content', () => {
      const userMessage: UserMessage = {
        role: 'user',
        content: [
          { type: 'text', content: 'Here is a PDF:' },
          { type: 'file', data: 'base64pdfdata', mimeType: 'application/pdf' },
        ],
        timestamp: Date.now(),
      };

      const context: Context = {
        messages: [userMessage],
      };

      const result = buildOpenAIMessages(mockOpenAIModel, context);

      expect(result).toHaveLength(1);
      expect(result[0]).toMatchObject({
        role: 'user',
        content: [
          { type: 'input_text', text: 'Here is a PDF:' },
          {
            type: 'input_file',
            file_data: 'data:application/pdf;base64,base64pdfdata',
          },
        ],
      });
    });

    it('should convert user message with mixed content', () => {
      const userMessage: UserMessage = {
        role: 'user',
        content: [
          { type: 'text', content: 'Check this out:' },
          { type: 'image', data: 'imagedata', mimeType: 'image/png' },
          { type: 'text', content: 'And this file:' },
          { type: 'file', data: 'filedata', mimeType: 'text/plain' },
        ],
        timestamp: Date.now(),
      };

      const context: Context = {
        messages: [userMessage],
      };

      const result = buildOpenAIMessages(mockOpenAIModel, context);

      expect(result).toHaveLength(1);
      expect((result[0] as any).content).toHaveLength(4);
    });

    it('should skip images when model does not support them', () => {
      const modelWithoutImages: Model<'openai'> = {
        ...mockOpenAIModel,
        input: ['text'],
      };

      const userMessage: UserMessage = {
        role: 'user',
        content: [
          { type: 'text', content: 'Text only' },
          { type: 'image', data: 'imagedata', mimeType: 'image/png' },
        ],
        timestamp: Date.now(),
      };

      const context: Context = {
        messages: [userMessage],
      };

      const result = buildOpenAIMessages(modelWithoutImages, context);

      expect(result).toHaveLength(1);
      expect((result[0] as any).content).toHaveLength(1);
      expect((result[0] as any).content[0]).toEqual({
        type: 'input_text',
        text: 'Text only',
      });
    });
  });

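  // Tool results become `function_call_output` items keyed by call_id;
  // when a result carries non-text content, a "(see attached)" text part
  // is added so the model is told an attachment accompanies the output.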
  describe('Tool result conversion', () => {
    it('should convert tool result with text content', () => {
      const toolResult: ToolResultMessage = {
        role: 'toolResult',
        toolName: 'calculator',
        toolCallId: 'call_123',
        content: [{ type: 'text', content: 'Result: 42' }],
        isError: false,
        timestamp: Date.now(),
      };

      const context: Context = {
        messages: [toolResult],
      };

      const result = buildOpenAIMessages(mockOpenAIModel, context);

      expect(result).toHaveLength(1);
      expect(result[0]).toEqual({
        type: 'function_call_output',
        call_id: 'call_123',
        output: [
          {
            type: 'input_text',
            text: 'Result: 42',
          },
        ],
      });
    });

    it('should convert tool result with image content', () => {
      const toolResult: ToolResultMessage = {
        role: 'toolResult',
        toolName: 'screenshot',
        toolCallId: 'call_456',
        content: [
          { type: 'text', content: '(see attached)' },
          { type: 'image', data: 'screenshotdata', mimeType: 'image/png' },
        ],
        isError: false,
        timestamp: Date.now(),
      };

      const context: Context = {
        messages: [toolResult],
      };

      const result = buildOpenAIMessages(mockOpenAIModel, context);

      expect(result).toHaveLength(1);
      expect((result[0] as any).output).toHaveLength(2);
    });

    it('should convert tool result with error flag', () => {
      const toolResult: ToolResultMessage = {
        role: 'toolResult',
        toolName: 'failing_tool',
        toolCallId: 'call_789',
        content: [{ type: 'text', content: 'Error: Something went wrong' }],
        isError: true,
        error: {
          message: 'Something went wrong',
          name: 'Error',
        },
        timestamp: Date.now(),
      };

      const context: Context = {
        messages: [toolResult],
      };

      const result = buildOpenAIMessages(mockOpenAIModel, context);

      expect(result).toHaveLength(1);
      expect((result[0] as any).call_id).toBe('call_789');
    });

    it('should add "(see attached)" text for non-text content', () => {
      const toolResult: ToolResultMessage = {
        role: 'toolResult',
        toolName: 'image_tool',
        toolCallId: 'call_999',
        content: [{ type: 'image', data: 'imagedata', mimeType: 'image/png' }],
        isError: false,
        timestamp: Date.now(),
      };

      const context: Context = {
        messages: [toolResult],
      };

      const result = buildOpenAIMessages(mockOpenAIModel, context);

      const output = (result[0] as any).output;
      expect(output).toContainEqual({
        type: 'input_text',
        text: '(see attached)',
      });
    });
  });

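  // Assistant turns are stored as the provider's native response object,
  // tagged with `_provider`, and replayed as the original Responses API
  // output items (message, function_call, reasoning). Cross-provider
  // replay is rejected outright rather than approximated.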
  describe('Assistant message conversion', () => {
    it('should convert native OpenAI assistant message', () => {
      const assistantMessage: NativeOpenAIMessage = {
        role: 'assistant',
        _provider: 'openai',
        message: {
          id: 'resp_123',
          object: 'response',
          created_at: 1740855869,
          output_text: '',
          status: 'completed',
          incomplete_details: null,
          parallel_tool_calls: false,
          error: null,
          instructions: null,
          max_output_tokens: null,
          model: 'gpt-4o-mini-2024-07-18',
          user: undefined,
          metadata: {},
          previous_response_id: null,
          temperature: 1,
          text: {},
          tool_choice: 'auto',
          tools: [],
          top_p: 1,
          truncation: 'disabled',
          output: [
            {
              type: 'message',
              role: 'assistant',
              content: [{ type: 'output_text', text: 'Hello!', annotations: [] }],
              id: '',
              status: 'completed',
            },
          ],
          usage: { input_tokens: 10, output_tokens: 5, total_tokens: 15, input_tokens_details: { cached_tokens: 0 }, output_tokens_details: { reasoning_tokens: 0 } },
        },
      };

      const context: Context = {
        messages: [assistantMessage],
      };

      const result = buildOpenAIMessages(mockOpenAIModel, context);

      expect(result).toHaveLength(1);
      expect(result[0]).toEqual({
        type: 'message',
        role: 'assistant',
        content: [{ type: 'output_text', text: 'Hello!', annotations: [] }],
        id: '',
        status: 'completed',
      });
    });

    it('should handle assistant message with function call', () => {
      const assistantMessage: NativeOpenAIMessage = {
        role: 'assistant',
        _provider: 'openai',
        message: {
          output: [
            {
              type: 'function_call',
              call_id: 'call_123',
              name: 'calculator',
              arguments: '{"expression": "2 + 2"}',
            },
          ],
          id: 'resp_123',
          object: 'response',
          created_at: 1740855869,
          output_text: '',
          status: 'completed',
          incomplete_details: null,
          parallel_tool_calls: false,
          error: null,
          instructions: null,
          max_output_tokens: null,
          model: 'gpt-4o-mini-2024-07-18',
          user: undefined,
          metadata: {},
          previous_response_id: null,
          temperature: 1,
          text: {},
          tool_choice: 'auto',
          tools: [],
          top_p: 1,
          truncation: 'disabled',
          usage: { input_tokens: 10, output_tokens: 5, total_tokens: 15, input_tokens_details: { cached_tokens: 0 }, output_tokens_details: { reasoning_tokens: 0 } },
        },
      };

      const context: Context = {
        messages: [assistantMessage],
      };

      const result = buildOpenAIMessages(mockOpenAIModel, context);

      expect(result).toHaveLength(1);
      expect(result[0]).toMatchObject({
        type: 'function_call',
        name: 'calculator',
      });
    });

    it('should handle assistant message with reasoning', () => {
      const assistantMessage: NativeOpenAIMessage = {
        role: 'assistant',
        _provider: 'openai',
        message: {
          id: 'resp_789',
          object: 'response',
          output: [
            {
              type: 'reasoning',
              id: '',
              summary: [],
            },
          ],
          created_at: 1740855869,
          output_text: '',
          status: 'completed',
          incomplete_details: null,
          parallel_tool_calls: false,
          error: null,
          instructions: null,
          max_output_tokens: null,
          model: 'gpt-4o-mini-2024-07-18',
          user: undefined,
          metadata: {},
          previous_response_id: null,
          temperature: 1,
          text: {},
          tool_choice: 'auto',
          tools: [],
          top_p: 1,
          truncation: 'disabled',
          usage: { input_tokens: 10, output_tokens: 5, total_tokens: 15, input_tokens_details: { cached_tokens: 0 }, output_tokens_details: { reasoning_tokens: 0 } },
        },
      };

      const context: Context = {
        messages: [assistantMessage],
      };

      const result = buildOpenAIMessages(mockOpenAIModel, context);

      expect(result).toHaveLength(1);
      expect(result[0]).toMatchObject({
        type: 'reasoning',
      });
    });

    it('should throw error for cross-provider conversion (Google → OpenAI)', () => {
      const googleAssistantMessage = {
        role: 'assistant' as const,
        _provider: 'google' as const,
        message: {} as any,
      };

      const context: Context = {
        messages: [googleAssistantMessage],
      };

      expect(() => buildOpenAIMessages(mockOpenAIModel, context)).toThrow(
        /Cannot convert google assistant message to openai format/
      );
    });
  });

  describe('Multiple messages', () => {
    it('should handle multiple messages in sequence', () => {
      const context: Context = {
        systemPrompt: 'System prompt',
        messages: [
          {
            role: 'user',
            content: [{ type: 'text', content: 'Question 1' }],
            timestamp: Date.now(),
          },
          {
            role: 'user',
            content: [{ type: 'text', content: 'Question 2' }],
            timestamp: Date.now(),
          },
        ],
      };

      const result = buildOpenAIMessages(mockOpenAIModel, context);

      expect(result).toHaveLength(3); // system + 2 user messages
    });

    it('should handle conversation with tool calls', () => {
      const context: Context = {
        messages: [
          {
            role: 'user',
            content: [{ type: 'text', content: 'Calculate 2 + 2' }],
            timestamp: Date.now(),
          },
          {
            role: 'toolResult',
            toolName: 'calculator',
            toolCallId: 'call_123',
            content: [{ type: 'text', content: '4' }],
            isError: false,
            timestamp: Date.now(),
          },
        ],
      };

      const result = buildOpenAIMessages(mockOpenAIModel, context);

      expect(result).toHaveLength(2);
    });
  });

  describe('Empty messages', () => {
    it('should handle empty messages array', () => {
      const context: Context = {
        messages: [],
      };

      const result = buildOpenAIMessages(mockOpenAIModel, context);

      expect(result).toHaveLength(0);
    });
  });
});

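// buildGoogleMessages targets Gemini's generateContent shape: each turn is
// a Content ({ role, parts }), and binary payloads travel as inlineData
// ({ mimeType, data }) parts. No system-prompt case appears here; the
// Google provider presumably sends it out-of-band (e.g. Gemini's
// systemInstruction), though that is an inference these tests do not show.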
describe('buildGoogleMessages', () => {
  describe('User message conversion', () => {
    it('should convert user message with text content', () => {
      const userMessage: UserMessage = {
        role: 'user',
        content: [{ type: 'text', content: 'Hello, Gemini!' }],
        timestamp: Date.now(),
      };

      const context: Context = {
        messages: [userMessage],
      };

      const result = buildGoogleMessages(mockGoogleModel, context);

      expect(result).toHaveLength(1);
      expect(result[0]).toEqual({
        role: 'user',
        parts: [{ text: 'Hello, Gemini!' }],
      });
    });

    it('should convert user message with image content', () => {
      const userMessage: UserMessage = {
        role: 'user',
        content: [
          { type: 'text', content: 'Describe this image:' },
          { type: 'image', data: 'base64imagedata', mimeType: 'image/jpeg' },
        ],
        timestamp: Date.now(),
      };

      const context: Context = {
        messages: [userMessage],
      };

      const result = buildGoogleMessages(mockGoogleModel, context);

      expect(result).toHaveLength(1);
      expect(result[0]).toMatchObject({
        role: 'user',
        parts: [
          { text: 'Describe this image:' },
          { inlineData: { mimeType: 'image/jpeg', data: 'base64imagedata' } },
        ],
      });
    });

    it('should convert user message with file content', () => {
      const userMessage: UserMessage = {
        role: 'user',
        content: [
          { type: 'text', content: 'Analyze this document:' },
          { type: 'file', data: 'base64filedata', mimeType: 'application/pdf' },
        ],
        timestamp: Date.now(),
      };

      const context: Context = {
        messages: [userMessage],
      };

      const result = buildGoogleMessages(mockGoogleModel, context);

      expect(result).toHaveLength(1);
      expect(result[0]).toMatchObject({
        role: 'user',
        parts: [
          { text: 'Analyze this document:' },
          { inlineData: { mimeType: 'application/pdf', data: 'base64filedata' } },
        ],
      });
    });

    it('should handle mixed content types', () => {
      const userMessage: UserMessage = {
        role: 'user',
        content: [
          { type: 'text', content: 'First text' },
          { type: 'image', data: 'img1', mimeType: 'image/png' },
          { type: 'text', content: 'Second text' },
          { type: 'file', data: 'file1', mimeType: 'text/plain' },
        ],
        timestamp: Date.now(),
      };

      const context: Context = {
        messages: [userMessage],
      };

      const result = buildGoogleMessages(mockGoogleModel, context);

      expect(result).toHaveLength(1);
      expect((result[0] as any).parts).toHaveLength(4);
    });
  });

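  // Gemini has no dedicated tool role: results go back as a 'user' turn
  // whose functionResponse part echoes the call id and tool name, folds
  // isError into the response payload, and (unlike the OpenAI path) uses
  // the default marker '(see attached:)' when no text content exists.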
  describe('Tool result conversion', () => {
    it('should convert tool result with text content', () => {
      const toolResult: ToolResultMessage = {
        role: 'toolResult',
        toolName: 'search',
        toolCallId: 'call_123',
        content: [{ type: 'text', content: 'Search results here' }],
        isError: false,
        timestamp: Date.now(),
      };

      const context: Context = {
        messages: [toolResult],
      };

      const result = buildGoogleMessages(mockGoogleModel, context);

      expect(result).toHaveLength(1);
      expect(result[0]).toMatchObject({
        role: 'user',
        parts: [
          {
            functionResponse: {
              id: 'call_123',
              name: 'search',
              response: {
                result: 'Search results here',
                isError: false,
              },
            },
          },
        ],
      });
    });

    it('should convert tool result with error flag', () => {
      const toolResult: ToolResultMessage = {
        role: 'toolResult',
        toolName: 'failing_tool',
        toolCallId: 'call_456',
        content: [{ type: 'text', content: 'Error occurred' }],
        isError: true,
        error: {
          message: 'Something went wrong',
        },
        timestamp: Date.now(),
      };

      const context: Context = {
        messages: [toolResult],
      };

      const result = buildGoogleMessages(mockGoogleModel, context);

      expect(result).toHaveLength(1);
      expect((result[0] as any).parts[0].functionResponse.response.isError).toBe(true);
    });

    it('should handle tool result with image content', () => {
      const toolResult: ToolResultMessage = {
        role: 'toolResult',
        toolName: 'screenshot',
        toolCallId: 'call_789',
        content: [
          { type: 'text', content: 'Screenshot taken' },
          { type: 'image', data: 'screenshotdata', mimeType: 'image/png' },
        ],
        isError: false,
        timestamp: Date.now(),
      };

      const context: Context = {
        messages: [toolResult],
      };

      const result = buildGoogleMessages(mockGoogleModel, context);

      expect(result).toHaveLength(1);
      const functionResponse = (result[0] as any).parts[0].functionResponse;
      expect(functionResponse.parts).toHaveLength(1); // Image part
      expect(functionResponse.response.result).toBe('Screenshot taken');
    });

    it('should use default text when no text content present', () => {
      const toolResult: ToolResultMessage = {
        role: 'toolResult',
        toolName: 'image_tool',
        toolCallId: 'call_999',
        content: [{ type: 'image', data: 'imagedata', mimeType: 'image/png' }],
        isError: false,
        timestamp: Date.now(),
      };

      const context: Context = {
        messages: [toolResult],
      };

      const result = buildGoogleMessages(mockGoogleModel, context);

      const functionResponse = (result[0] as any).parts[0].functionResponse;
      expect(functionResponse.response.result).toBe('(see attached:)');
    });
  });

  describe('Assistant message conversion', () => {
    it('should throw error for cross-provider conversion (OpenAI → Google)', () => {
      const openaiAssistantMessage: NativeOpenAIMessage = {
        role: 'assistant',
        _provider: 'openai',
        message: {} as any,
      };

      const context: Context = {
        messages: [openaiAssistantMessage],
      };

      expect(() => buildGoogleMessages(mockGoogleModel, context)).toThrow(
        /Cannot convert openai assistant message to google format/
      );
    });
  });

  describe('Multiple messages', () => {
    it('should handle multiple user messages', () => {
      const context: Context = {
        messages: [
          {
            role: 'user',
            content: [{ type: 'text', content: 'First message' }],
            timestamp: Date.now(),
          },
          {
            role: 'user',
            content: [{ type: 'text', content: 'Second message' }],
            timestamp: Date.now(),
          },
        ],
      };

      const result = buildGoogleMessages(mockGoogleModel, context);

      expect(result).toHaveLength(2);
    });
  });

  describe('Empty messages', () => {
    it('should handle empty messages array', () => {
      const context: Context = {
        messages: [],
      };

      const result = buildGoogleMessages(mockGoogleModel, context);

      expect(result).toHaveLength(0);
    });
  });
});

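// Sanity checks that both builders agree on message counts and tool-call
// identity for identical inputs, even though their payload shapes differ.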
describe('Cross-provider consistency', () => {
  it('should handle same user message for both providers', () => {
    const userMessage: UserMessage = {
      role: 'user',
      content: [{ type: 'text', content: 'Hello!' }],
      timestamp: Date.now(),
    };

    const context: Context = {
      messages: [userMessage],
    };

    const openaiResult = buildOpenAIMessages(mockOpenAIModel, context);
    const googleResult = buildGoogleMessages(mockGoogleModel, context);

    // Both should produce exactly 1 message
    expect(openaiResult).toHaveLength(1);
    expect(googleResult).toHaveLength(1);

    // Both should have role 'user'
    expect((openaiResult[0] as any).role).toBe('user');
    expect((googleResult[0] as any).role).toBe('user');
  });

  it('should handle same tool result for both providers', () => {
    const toolResult: ToolResultMessage = {
      role: 'toolResult',
      toolName: 'test_tool',
      toolCallId: 'call_123',
      content: [{ type: 'text', content: 'Result' }],
      isError: false,
      timestamp: Date.now(),
    };

    const context: Context = {
      messages: [toolResult],
    };

    const openaiResult = buildOpenAIMessages(mockOpenAIModel, context);
    const googleResult = buildGoogleMessages(mockGoogleModel, context);

    // Both should produce exactly 1 message
    expect(openaiResult).toHaveLength(1);
    expect(googleResult).toHaveLength(1);

    // Both should reference the tool call ID
    expect((openaiResult[0] as any).call_id).toBe('call_123');
    expect((googleResult[0] as any).parts[0].functionResponse.id).toBe('call_123');
  });
});