@theia/ai-openai 1.66.0-next.73 → 1.66.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. package/lib/browser/openai-frontend-application-contribution.d.ts.map +1 -1
  2. package/lib/browser/openai-frontend-application-contribution.js +11 -4
  3. package/lib/browser/openai-frontend-application-contribution.js.map +1 -1
  4. package/lib/common/openai-language-models-manager.d.ts +6 -0
  5. package/lib/common/openai-language-models-manager.d.ts.map +1 -1
  6. package/lib/common/openai-preferences.d.ts +1 -0
  7. package/lib/common/openai-preferences.d.ts.map +1 -1
  8. package/lib/common/openai-preferences.js +17 -1
  9. package/lib/common/openai-preferences.js.map +1 -1
  10. package/lib/node/openai-backend-module.d.ts.map +1 -1
  11. package/lib/node/openai-backend-module.js +2 -0
  12. package/lib/node/openai-backend-module.js.map +1 -1
  13. package/lib/node/openai-language-model.d.ts +8 -2
  14. package/lib/node/openai-language-model.d.ts.map +1 -1
  15. package/lib/node/openai-language-model.js +43 -42
  16. package/lib/node/openai-language-model.js.map +1 -1
  17. package/lib/node/openai-language-models-manager-impl.d.ts +2 -0
  18. package/lib/node/openai-language-models-manager-impl.d.ts.map +1 -1
  19. package/lib/node/openai-language-models-manager-impl.js +9 -2
  20. package/lib/node/openai-language-models-manager-impl.js.map +1 -1
  21. package/lib/node/openai-model-utils.spec.d.ts +1 -3
  22. package/lib/node/openai-model-utils.spec.d.ts.map +1 -1
  23. package/lib/node/openai-model-utils.spec.js +250 -23
  24. package/lib/node/openai-model-utils.spec.js.map +1 -1
  25. package/lib/node/openai-request-api-context.d.ts +4 -0
  26. package/lib/node/openai-request-api-context.d.ts.map +1 -0
  27. package/lib/node/openai-request-api-context.js +18 -0
  28. package/lib/node/openai-request-api-context.js.map +1 -0
  29. package/lib/node/openai-response-api-utils.d.ts +42 -0
  30. package/lib/node/openai-response-api-utils.d.ts.map +1 -0
  31. package/lib/node/openai-response-api-utils.js +677 -0
  32. package/lib/node/openai-response-api-utils.js.map +1 -0
  33. package/package.json +7 -7
  34. package/src/browser/openai-frontend-application-contribution.ts +10 -4
  35. package/src/common/openai-language-models-manager.ts +6 -0
  36. package/src/common/openai-preferences.ts +18 -0
  37. package/src/node/openai-backend-module.ts +2 -0
  38. package/src/node/openai-language-model.ts +59 -42
  39. package/src/node/openai-language-models-manager-impl.ts +8 -1
  40. package/src/node/openai-model-utils.spec.ts +257 -22
  41. package/src/node/openai-request-api-context.ts +23 -0
  42. package/src/node/openai-response-api-utils.ts +801 -0
@@ -13,31 +13,35 @@
13
13
  //
14
14
  // SPDX-License-Identifier: EPL-2.0 OR GPL-2.0-only WITH Classpath-exception-2.0
15
15
  // *****************************************************************************
16
- const { expect } = require('chai');
17
- const { OpenAiModelUtils } = require('./openai-language-model');
16
+ import { expect } from 'chai';
17
+ import { OpenAiModelUtils } from './openai-language-model';
18
+ import { LanguageModelMessage } from '@theia/ai-core';
19
+ import { OpenAiResponseApiUtils } from './openai-response-api-utils';
20
+
18
21
  const utils = new OpenAiModelUtils();
22
+ const responseUtils = new OpenAiResponseApiUtils();
19
23
 
20
24
  describe('OpenAiModelUtils - processMessages', () => {
21
25
  describe("when developerMessageSettings is 'skip'", () => {
22
26
  it('should remove all system messages', () => {
23
- const messages = [
27
+ const messages: LanguageModelMessage[] = [
24
28
  { actor: 'system', type: 'text', text: 'system message' },
25
29
  { actor: 'user', type: 'text', text: 'user message' },
26
30
  { actor: 'system', type: 'text', text: 'another system message' },
27
31
  ];
28
- const result = utils.processMessages(messages, 'skip');
32
+ const result = utils.processMessages(messages, 'skip', 'gpt-4');
29
33
  expect(result).to.deep.equal([
30
34
  { role: 'user', content: 'user message' }
31
35
  ]);
32
36
  });
33
37
 
34
38
  it('should do nothing if there is no system message', () => {
35
- const messages = [
39
+ const messages: LanguageModelMessage[] = [
36
40
  { actor: 'user', type: 'text', text: 'user message' },
37
41
  { actor: 'user', type: 'text', text: 'another user message' },
38
42
  { actor: 'ai', type: 'text', text: 'ai message' }
39
43
  ];
40
- const result = utils.processMessages(messages, 'skip');
44
+ const result = utils.processMessages(messages, 'skip', 'gpt-4');
41
45
  expect(result).to.deep.equal([
42
46
  { role: 'user', content: 'user message' },
43
47
  { role: 'user', content: 'another user message' },
@@ -48,12 +52,12 @@ describe('OpenAiModelUtils - processMessages', () => {
48
52
 
49
53
  describe("when developerMessageSettings is 'mergeWithFollowingUserMessage'", () => {
50
54
  it('should merge the system message with the next user message, assign role user, and remove the system message', () => {
51
- const messages = [
55
+ const messages: LanguageModelMessage[] = [
52
56
  { actor: 'system', type: 'text', text: 'system msg' },
53
57
  { actor: 'user', type: 'text', text: 'user msg' },
54
58
  { actor: 'ai', type: 'text', text: 'ai message' }
55
59
  ];
56
- const result = utils.processMessages(messages, 'mergeWithFollowingUserMessage');
60
+ const result = utils.processMessages(messages, 'mergeWithFollowingUserMessage', 'gpt-4');
57
61
  expect(result).to.deep.equal([
58
62
  { role: 'user', content: 'system msg\nuser msg' },
59
63
  { role: 'assistant', content: 'ai message' }
@@ -61,11 +65,11 @@ describe('OpenAiModelUtils - processMessages', () => {
61
65
  });
62
66
 
63
67
  it('should create a new user message if no user message exists, and remove the system message', () => {
64
- const messages = [
68
+ const messages: LanguageModelMessage[] = [
65
69
  { actor: 'system', type: 'text', text: 'system only msg' },
66
70
  { actor: 'ai', type: 'text', text: 'ai message' }
67
71
  ];
68
- const result = utils.processMessages(messages, 'mergeWithFollowingUserMessage');
72
+ const result = utils.processMessages(messages, 'mergeWithFollowingUserMessage', 'gpt-4');
69
73
  expect(result).to.deep.equal([
70
74
  { role: 'user', content: 'system only msg' },
71
75
  { role: 'assistant', content: 'ai message' }
@@ -73,14 +77,14 @@ describe('OpenAiModelUtils - processMessages', () => {
73
77
  });
74
78
 
75
79
  it('should create a merge multiple system message with the next user message', () => {
76
- const messages = [
80
+ const messages: LanguageModelMessage[] = [
77
81
  { actor: 'user', type: 'text', text: 'user message' },
78
82
  { actor: 'system', type: 'text', text: 'system message' },
79
83
  { actor: 'system', type: 'text', text: 'system message2' },
80
84
  { actor: 'user', type: 'text', text: 'user message2' },
81
85
  { actor: 'ai', type: 'text', text: 'ai message' }
82
86
  ];
83
- const result = utils.processMessages(messages, 'mergeWithFollowingUserMessage');
87
+ const result = utils.processMessages(messages, 'mergeWithFollowingUserMessage', 'gpt-4');
84
88
  expect(result).to.deep.equal([
85
89
  { role: 'user', content: 'user message' },
86
90
  { role: 'user', content: 'system message\nsystem message2\nuser message2' },
@@ -89,13 +93,13 @@ describe('OpenAiModelUtils - processMessages', () => {
89
93
  });
90
94
 
91
95
  it('should create a new user message from several system messages if the next message is not a user message', () => {
92
- const messages = [
96
+ const messages: LanguageModelMessage[] = [
93
97
  { actor: 'user', type: 'text', text: 'user message' },
94
98
  { actor: 'system', type: 'text', text: 'system message' },
95
99
  { actor: 'system', type: 'text', text: 'system message2' },
96
100
  { actor: 'ai', type: 'text', text: 'ai message' }
97
101
  ];
98
- const result = utils.processMessages(messages, 'mergeWithFollowingUserMessage');
102
+ const result = utils.processMessages(messages, 'mergeWithFollowingUserMessage', 'gpt-4');
99
103
  expect(result).to.deep.equal([
100
104
  { role: 'user', content: 'user message' },
101
105
  { role: 'user', content: 'system message\nsystem message2' },
@@ -106,13 +110,13 @@ describe('OpenAiModelUtils - processMessages', () => {
106
110
 
107
111
  describe('when no special merging or skipping is needed', () => {
108
112
  it('should leave messages unchanged in ordering and assign roles based on developerMessageSettings', () => {
109
- const messages = [
113
+ const messages: LanguageModelMessage[] = [
110
114
  { actor: 'user', type: 'text', text: 'user message' },
111
115
  { actor: 'system', type: 'text', text: 'system message' },
112
116
  { actor: 'ai', type: 'text', text: 'ai message' }
113
117
  ];
114
118
  // Using a developerMessageSettings that is not merge/skip, e.g., 'developer'
115
- const result = utils.processMessages(messages, 'developer');
119
+ const result = utils.processMessages(messages, 'developer', 'gpt-4');
116
120
  expect(result).to.deep.equal([
117
121
  { role: 'user', content: 'user message' },
118
122
  { role: 'developer', content: 'system message' },
@@ -123,12 +127,12 @@ describe('OpenAiModelUtils - processMessages', () => {
123
127
 
124
128
  describe('role assignment for system messages when developerMessageSettings is one of the role strings', () => {
125
129
  it('should assign role as specified for a system message when developerMessageSettings is "user"', () => {
126
- const messages = [
130
+ const messages: LanguageModelMessage[] = [
127
131
  { actor: 'system', type: 'text', text: 'system msg' },
128
132
  { actor: 'ai', type: 'text', text: 'ai msg' }
129
133
  ];
130
134
  // Since the first message is system and developerMessageSettings is not merge/skip, ordering is not adjusted
131
- const result = utils.processMessages(messages, 'user');
135
+ const result = utils.processMessages(messages, 'user', 'gpt-4');
132
136
  expect(result).to.deep.equal([
133
137
  { role: 'user', content: 'system msg' },
134
138
  { role: 'assistant', content: 'ai msg' }
@@ -136,11 +140,11 @@ describe('OpenAiModelUtils - processMessages', () => {
136
140
  });
137
141
 
138
142
  it('should assign role as specified for a system message when developerMessageSettings is "system"', () => {
139
- const messages = [
143
+ const messages: LanguageModelMessage[] = [
140
144
  { actor: 'system', type: 'text', text: 'system msg' },
141
145
  { actor: 'ai', type: 'text', text: 'ai msg' }
142
146
  ];
143
- const result = utils.processMessages(messages, 'system');
147
+ const result = utils.processMessages(messages, 'system', 'gpt-4');
144
148
  expect(result).to.deep.equal([
145
149
  { role: 'system', content: 'system msg' },
146
150
  { role: 'assistant', content: 'ai msg' }
@@ -148,12 +152,12 @@ describe('OpenAiModelUtils - processMessages', () => {
148
152
  });
149
153
 
150
154
  it('should assign role as specified for a system message when developerMessageSettings is "developer"', () => {
151
- const messages = [
155
+ const messages: LanguageModelMessage[] = [
152
156
  { actor: 'system', type: 'text', text: 'system msg' },
153
157
  { actor: 'user', type: 'text', text: 'user msg' },
154
158
  { actor: 'ai', type: 'text', text: 'ai msg' }
155
159
  ];
156
- const result = utils.processMessages(messages, 'developer');
160
+ const result = utils.processMessages(messages, 'developer', 'gpt-4');
157
161
  expect(result).to.deep.equal([
158
162
  { role: 'developer', content: 'system msg' },
159
163
  { role: 'user', content: 'user msg' },
@@ -162,3 +166,234 @@ describe('OpenAiModelUtils - processMessages', () => {
162
166
  });
163
167
  });
164
168
  });
169
+
170
+ describe('OpenAiModelUtils - processMessagesForResponseApi', () => {
171
+ describe("when developerMessageSettings is 'skip'", () => {
172
+ it('should remove all system messages and return no instructions', () => {
173
+ const messages: LanguageModelMessage[] = [
174
+ { actor: 'system', type: 'text', text: 'system message' },
175
+ { actor: 'user', type: 'text', text: 'user message' },
176
+ { actor: 'system', type: 'text', text: 'another system message' },
177
+ ];
178
+ const result = responseUtils.processMessages(messages, 'skip', 'gpt-4');
179
+ expect(result.instructions).to.be.undefined;
180
+ expect(result.input).to.deep.equal([
181
+ {
182
+ type: 'message',
183
+ role: 'user',
184
+ content: [{ type: 'input_text', text: 'user message' }]
185
+ }
186
+ ]);
187
+ });
188
+ });
189
+
190
+ describe("when developerMessageSettings is 'mergeWithFollowingUserMessage'", () => {
191
+ it('should merge system message with user message and return no instructions', () => {
192
+ const messages: LanguageModelMessage[] = [
193
+ { actor: 'system', type: 'text', text: 'system msg' },
194
+ { actor: 'user', type: 'text', text: 'user msg' },
195
+ { actor: 'ai', type: 'text', text: 'ai message' }
196
+ ];
197
+ const result = responseUtils.processMessages(messages, 'mergeWithFollowingUserMessage', 'gpt-4');
198
+ expect(result.instructions).to.be.undefined;
199
+ expect(result.input).to.have.lengthOf(2);
200
+ expect(result.input[0]).to.deep.equal({
201
+ type: 'message',
202
+ role: 'user',
203
+ content: [{ type: 'input_text', text: 'system msg\nuser msg' }]
204
+ });
205
+ const assistantMessage = result.input[1];
206
+ expect(assistantMessage).to.deep.include({
207
+ type: 'message',
208
+ role: 'assistant',
209
+ status: 'completed',
210
+ content: [{ type: 'output_text', text: 'ai message', annotations: [] }]
211
+ });
212
+ if (assistantMessage.type === 'message' && 'id' in assistantMessage) {
213
+ expect(assistantMessage.id).to.be.a('string').and.to.match(/^msg_/);
214
+ } else {
215
+ throw new Error('Expected assistant message to have an id');
216
+ }
217
+ });
218
+ });
219
+
220
+ describe('when system messages should be converted to instructions', () => {
221
+ it('should extract system messages as instructions and convert other messages to input items', () => {
222
+ const messages: LanguageModelMessage[] = [
223
+ { actor: 'system', type: 'text', text: 'You are a helpful assistant' },
224
+ { actor: 'user', type: 'text', text: 'Hello!' },
225
+ { actor: 'ai', type: 'text', text: 'Hi there!' }
226
+ ];
227
+ const result = responseUtils.processMessages(messages, 'developer', 'gpt-4');
228
+ expect(result.instructions).to.equal('You are a helpful assistant');
229
+ expect(result.input).to.have.lengthOf(2);
230
+ expect(result.input[0]).to.deep.equal({
231
+ type: 'message',
232
+ role: 'user',
233
+ content: [{ type: 'input_text', text: 'Hello!' }]
234
+ });
235
+ const assistantMessage = result.input[1];
236
+ expect(assistantMessage).to.deep.include({
237
+ type: 'message',
238
+ role: 'assistant',
239
+ status: 'completed',
240
+ content: [{ type: 'output_text', text: 'Hi there!', annotations: [] }]
241
+ });
242
+ if (assistantMessage.type === 'message' && 'id' in assistantMessage) {
243
+ expect(assistantMessage.id).to.be.a('string').and.to.match(/^msg_/);
244
+ } else {
245
+ throw new Error('Expected assistant message to have an id');
246
+ }
247
+ });
248
+
249
+ it('should combine multiple system messages into instructions', () => {
250
+ const messages: LanguageModelMessage[] = [
251
+ { actor: 'system', type: 'text', text: 'You are helpful' },
252
+ { actor: 'system', type: 'text', text: 'Be concise' },
253
+ { actor: 'user', type: 'text', text: 'What is 2+2?' }
254
+ ];
255
+ const result = responseUtils.processMessages(messages, 'developer', 'gpt-4');
256
+ expect(result.instructions).to.equal('You are helpful\nBe concise');
257
+ expect(result.input).to.deep.equal([
258
+ {
259
+ type: 'message',
260
+ role: 'user',
261
+ content: [{ type: 'input_text', text: 'What is 2+2?' }]
262
+ }
263
+ ]);
264
+ });
265
+ });
266
+
267
+ describe('tool use and tool result messages', () => {
268
+ it('should convert tool use messages to function calls', () => {
269
+ const messages: LanguageModelMessage[] = [
270
+ { actor: 'user', type: 'text', text: 'Calculate 2+2' },
271
+ {
272
+ actor: 'ai',
273
+ type: 'tool_use',
274
+ id: 'call_123',
275
+ name: 'calculator',
276
+ input: { expression: '2+2' }
277
+ }
278
+ ];
279
+ const result = responseUtils.processMessages(messages, 'developer', 'gpt-4');
280
+ expect(result.input).to.deep.equal([
281
+ {
282
+ type: 'message',
283
+ role: 'user',
284
+ content: [{ type: 'input_text', text: 'Calculate 2+2' }]
285
+ },
286
+ {
287
+ type: 'function_call',
288
+ call_id: 'call_123',
289
+ name: 'calculator',
290
+ arguments: '{"expression":"2+2"}'
291
+ }
292
+ ]);
293
+ });
294
+
295
+ it('should convert tool result messages to function call outputs', () => {
296
+ const messages: LanguageModelMessage[] = [
297
+ {
298
+ actor: 'user',
299
+ type: 'tool_result',
300
+ name: 'calculator',
301
+ tool_use_id: 'call_123',
302
+ content: '4'
303
+ }
304
+ ];
305
+ const result = responseUtils.processMessages(messages, 'developer', 'gpt-4');
306
+ expect(result.input).to.deep.equal([
307
+ {
308
+ type: 'function_call_output',
309
+ call_id: 'call_123',
310
+ output: '4'
311
+ }
312
+ ]);
313
+ });
314
+
315
+ it('should stringify non-string tool result content', () => {
316
+ const messages: LanguageModelMessage[] = [
317
+ {
318
+ actor: 'user',
319
+ type: 'tool_result',
320
+ name: 'data_processor',
321
+ tool_use_id: 'call_456',
322
+ content: { result: 'success', data: [1, 2, 3] }
323
+ }
324
+ ];
325
+ const result = responseUtils.processMessages(messages, 'developer', 'gpt-4');
326
+ expect(result.input).to.deep.equal([
327
+ {
328
+ type: 'function_call_output',
329
+ call_id: 'call_456',
330
+ output: '{"result":"success","data":[1,2,3]}'
331
+ }
332
+ ]);
333
+ });
334
+ });
335
+
336
+ describe('image messages', () => {
337
+ it('should convert base64 image messages to input image items', () => {
338
+ const messages: LanguageModelMessage[] = [
339
+ {
340
+ actor: 'user',
341
+ type: 'image',
342
+ image: {
343
+ mimeType: 'image/png',
344
+ base64data: 'iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNkYPhfDwAChwGA60e6kgAAAABJRU5ErkJggg=='
345
+ }
346
+ }
347
+ ];
348
+ const result = responseUtils.processMessages(messages, 'developer', 'gpt-4');
349
+ expect(result.input).to.deep.equal([
350
+ {
351
+ type: 'message',
352
+ role: 'user',
353
+ content: [{
354
+ type: 'input_image',
355
+ detail: 'auto',
356
+ image_url: 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNkYPhfDwAChwGA60e6kgAAAABJRU5ErkJggg=='
357
+ }]
358
+ }
359
+ ]);
360
+ });
361
+
362
+ it('should convert URL image messages to input image items', () => {
363
+ const messages: LanguageModelMessage[] = [
364
+ {
365
+ actor: 'user',
366
+ type: 'image',
367
+ image: {
368
+ url: 'https://example.com/image.png'
369
+ }
370
+ }
371
+ ];
372
+ const result = responseUtils.processMessages(messages, 'developer', 'gpt-4');
373
+ expect(result.input).to.deep.equal([
374
+ {
375
+ type: 'message',
376
+ role: 'user',
377
+ content: [{
378
+ type: 'input_image',
379
+ detail: 'auto',
380
+ image_url: 'https://example.com/image.png'
381
+ }]
382
+ }
383
+ ]);
384
+ });
385
+ });
386
+
387
+ describe('error handling', () => {
388
+ it('should throw error for unknown message types', () => {
389
+ const invalidMessage = {
390
+ actor: 'user',
391
+ type: 'unknown_type',
392
+ someProperty: 'value'
393
+ };
394
+ const messages = [invalidMessage] as unknown as LanguageModelMessage[];
395
+ expect(() => responseUtils.processMessages(messages, 'developer', 'gpt-4'))
396
+ .to.throw('unhandled case');
397
+ });
398
+ });
399
+ });
@@ -0,0 +1,23 @@
1
+ // *****************************************************************************
2
+ // Copyright (C) 2025 EclipseSource GmbH.
3
+ //
4
+ // This program and the accompanying materials are made available under the
5
+ // terms of the Eclipse Public License v. 2.0 which is available at
6
+ // http://www.eclipse.org/legal/epl-2.0.
7
+ //
8
+ // This Source Code may also be made available under the following Secondary
9
+ // Licenses when the conditions for such availability set forth in the Eclipse
10
+ // Public License v. 2.0 are satisfied: GNU General Public License, version 2
11
+ // with the GNU Classpath Exception which is available at
12
+ // https://www.gnu.org/software/classpath/license.html.
13
+ //
14
+ // SPDX-License-Identifier: EPL-2.0 OR GPL-2.0-only WITH Classpath-exception-2.0
15
+ // *****************************************************************************
16
+
17
+ interface OpenAIRequestApiContext {
18
+ parent?: OpenAIRequestApiContext;
19
+
20
+ }
21
+
22
+ // export class OpenAIRequestApiContext {
23
+ // }