@theia/ai-openai 1.58.2 → 1.59.0-next.62
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +11 -9
- package/lib/browser/openai-frontend-application-contribution.d.ts.map +1 -1
- package/lib/browser/openai-frontend-application-contribution.js +11 -7
- package/lib/browser/openai-frontend-application-contribution.js.map +1 -1
- package/lib/browser/openai-preferences.d.ts.map +1 -1
- package/lib/browser/openai-preferences.js +29 -15
- package/lib/browser/openai-preferences.js.map +1 -1
- package/lib/common/openai-language-models-manager.d.ts +9 -2
- package/lib/common/openai-language-models-manager.d.ts.map +1 -1
- package/lib/node/openai-backend-module.d.ts.map +1 -1
- package/lib/node/openai-backend-module.js +2 -0
- package/lib/node/openai-backend-module.js.map +1 -1
- package/lib/node/openai-language-model.d.ts +29 -7
- package/lib/node/openai-language-model.d.ts.map +1 -1
- package/lib/node/openai-language-model.js +93 -103
- package/lib/node/openai-language-model.js.map +1 -1
- package/lib/node/openai-language-models-manager-impl.d.ts +2 -0
- package/lib/node/openai-language-models-manager-impl.d.ts.map +1 -1
- package/lib/node/openai-language-models-manager-impl.js +7 -2
- package/lib/node/openai-language-models-manager-impl.js.map +1 -1
- package/lib/node/openai-model-utils.spec.d.ts +4 -0
- package/lib/node/openai-model-utils.spec.d.ts.map +1 -0
- package/lib/node/openai-model-utils.spec.js +155 -0
- package/lib/node/openai-model-utils.spec.js.map +1 -0
- package/lib/node/openai-streaming-iterator.d.ts +21 -0
- package/lib/node/openai-streaming-iterator.d.ts.map +1 -0
- package/lib/node/openai-streaming-iterator.js +126 -0
- package/lib/node/openai-streaming-iterator.js.map +1 -0
- package/lib/node/openai-streaming-iterator.spec.d.ts +2 -0
- package/lib/node/openai-streaming-iterator.spec.d.ts.map +1 -0
- package/lib/node/openai-streaming-iterator.spec.js +208 -0
- package/lib/node/openai-streaming-iterator.spec.js.map +1 -0
- package/package.json +7 -7
- package/src/browser/openai-frontend-application-contribution.ts +9 -5
- package/src/browser/openai-preferences.ts +36 -15
- package/src/common/openai-language-models-manager.ts +10 -2
- package/src/node/openai-backend-module.ts +2 -0
- package/src/node/openai-language-model.ts +106 -108
- package/src/node/openai-language-models-manager-impl.ts +9 -3
- package/src/node/openai-model-utils.spec.ts +164 -0
- package/src/node/openai-streaming-iterator.spec.ts +254 -0
- package/src/node/openai-streaming-iterator.ts +124 -0
- package/lib/package.spec.d.ts +0 -1
- package/lib/package.spec.d.ts.map +0 -1
- package/lib/package.spec.js +0 -26
- package/lib/package.spec.js.map +0 -1
- package/src/package.spec.ts +0 -28
package/src/node/openai-language-model.ts:

@@ -20,17 +20,20 @@ import {
     LanguageModelRequest,
     LanguageModelRequestMessage,
     LanguageModelResponse,
-    LanguageModelStreamResponsePart,
    LanguageModelTextResponse
 } from '@theia/ai-core';
 import { CancellationToken } from '@theia/core';
+import { injectable } from '@theia/core/shared/inversify';
 import { OpenAI, AzureOpenAI } from 'openai';
 import { ChatCompletionStream } from 'openai/lib/ChatCompletionStream';
 import { RunnableToolFunctionWithoutParse } from 'openai/lib/RunnableFunction';
 import { ChatCompletionMessageParam } from 'openai/resources';
+import { StreamingAsyncIterator } from './openai-streaming-iterator';

 export const OpenAiModelIdentifier = Symbol('OpenAiModelIdentifier');

+export type DeveloperMessageSettings = 'user' | 'system' | 'developer' | 'mergeWithFollowingUserMessage' | 'skip';
+
 export class OpenAiModel implements LanguageModel {

     /**
@@ -39,7 +42,7 @@ export class OpenAiModel {
     * @param enableStreaming whether the streaming API shall be used
     * @param apiKey a function that returns the API key to use for this model, called on each request
     * @param apiVersion a function that returns the OpenAPI version to use for this model, called on each request
-    * @param
+    * @param developerMessageSettings how to handle system messages
     * @param url the OpenAI API compatible endpoint where the model is hosted. If not provided the default OpenAI endpoint will be used.
     * @param defaultRequestSettings optional default settings for requests made using this model.
     */
@@ -49,9 +52,11 @@
         public enableStreaming: boolean,
         public apiKey: () => string | undefined,
         public apiVersion: () => string | undefined,
-        public
+        public supportsStructuredOutput: boolean,
         public url: string | undefined,
-        public
+        public openAiModelUtils: OpenAiModelUtils,
+        public developerMessageSettings: DeveloperMessageSettings = 'developer',
+        public defaultRequestSettings?: { [key: string]: unknown },
     ) { }

     protected getSettings(request: LanguageModelRequest): Record<string, unknown> {
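The constructor now takes `supportsStructuredOutput` as plain data instead of deriving it from the model name (the old hard-coded check is removed further below), plus the shared `OpenAiModelUtils` helper and a `developerMessageSettings` strategy defaulting to `'developer'`. A hedged sketch of the new signature in use; the leading `id`/`model` parameters are assumed unchanged from 1.58.2 and all values are illustrative:

```ts
// Illustrative only; real instances are created by
// OpenAiLanguageModelsManagerImpl from an OpenAiModelDescription.
const model = new OpenAiModel(
    'openai/gpt-4o',                  // id (assumed leading parameter)
    'gpt-4o',                         // model (assumed leading parameter)
    true,                             // enableStreaming
    () => process.env.OPENAI_API_KEY, // apiKey, resolved on each request
    () => undefined,                  // apiVersion (Azure OpenAI only)
    true,                             // supportsStructuredOutput
    undefined,                        // url: default OpenAI endpoint
    new OpenAiModelUtils(),           // message processing utility
    'developer',                      // developerMessageSettings
    { temperature: 0 }                // defaultRequestSettings (optional)
);
```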
@@ -66,23 +71,23 @@
         const settings = this.getSettings(request);
         const openai = this.initializeOpenAi();

+        if (request.response_format?.type === 'json_schema' && this.supportsStructuredOutput) {
+            return this.handleStructuredOutputRequest(openai, request);
+        }
+
         if (this.isNonStreamingModel(this.model) || (typeof settings.stream === 'boolean' && !settings.stream)) {
             return this.handleNonStreamingRequest(openai, request);
         }

-        if (request.response_format?.type === 'json_schema' && this.supportsStructuredOutput()) {
-            return this.handleStructuredOutputRequest(openai, request);
-        }
         if (cancellationToken?.isCancellationRequested) {
             return { text: '' };
         }
-
         let runner: ChatCompletionStream;
         const tools = this.createTools(request);
         if (tools) {
             runner = openai.beta.chat.completions.runTools({
                 model: this.model,
-                messages: request.messages
+                messages: this.processMessages(request.messages),
                 stream: true,
                 tools: tools,
                 tool_choice: 'auto',
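The structured-output check now runs before the non-streaming branch and reads the new boolean flag, so a `json_schema` response format is honored even for models configured as non-streaming. A hedged sketch of a request that would take this path; the message and schema literals are invented for illustration, and `request()` is assumed to be the unchanged `LanguageModel` entry point:

```ts
const response = await model.request({
    messages: [{ actor: 'user', type: 'text', query: 'List three colors.' }],
    response_format: {
        type: 'json_schema',
        json_schema: {
            name: 'colors',  // hypothetical schema
            schema: {
                type: 'object',
                properties: { colors: { type: 'array', items: { type: 'string' } } },
                required: ['colors']
            }
        }
    }
});
```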
@@ -91,81 +96,20 @@
         } else {
             runner = openai.beta.chat.completions.stream({
                 model: this.model,
-                messages: request.messages
+                messages: this.processMessages(request.messages),
                 stream: true,
                 ...settings
             });
         }
-        cancellationToken?.onCancellationRequested(() => {
-            runner.abort();
-        });

-
-
-        let resolve: ((part: LanguageModelStreamResponsePart) => void) | undefined;
-        runner.on('error', error => {
-            console.error('Error in OpenAI chat completion stream:', error);
-            runnerEnd = true;
-            resolve?.({ content: error.message });
-        });
-        // we need to also listen for the emitted errors, as otherwise any error actually thrown by the API will not be caught
-        runner.emitted('error').then(error => {
-            console.error('Error in OpenAI chat completion stream:', error);
-            runnerEnd = true;
-            resolve?.({ content: error.message });
-        });
-        runner.emitted('abort').then(() => {
-            // cancel async iterator
-            runnerEnd = true;
-        });
-        runner.on('message', message => {
-            if (message.role === 'tool') {
-                resolve?.({ tool_calls: [{ id: message.tool_call_id, finished: true, result: this.getCompletionContent(message) }] });
-            }
-            console.debug('Received Open AI message', JSON.stringify(message));
-        });
-        runner.once('end', () => {
-            runnerEnd = true;
-            // eslint-disable-next-line @typescript-eslint/no-explicit-any
-            resolve?.(runner.finalChatCompletion as any);
-        });
-        if (cancellationToken?.isCancellationRequested) {
-            return { text: '' };
-        }
-        const asyncIterator = {
-            async *[Symbol.asyncIterator](): AsyncIterator<LanguageModelStreamResponsePart> {
-                runner.on('chunk', chunk => {
-                    if (cancellationToken?.isCancellationRequested) {
-                        resolve = undefined;
-                        return;
-                    }
-                    if (resolve && chunk.choices[0]?.delta) {
-                        resolve({ ...chunk.choices[0]?.delta });
-                    }
-                });
-                while (!runnerEnd) {
-                    if (cancellationToken?.isCancellationRequested) {
-                        throw new Error('Iterator canceled');
-                    }
-                    const promise = new Promise<LanguageModelStreamResponsePart>((res, rej) => {
-                        resolve = res;
-                        cancellationToken?.onCancellationRequested(() => {
-                            rej(new Error('Canceled'));
-                            runnerEnd = true; // Stop the iterator
-                        });
-                    });
-                    yield promise;
-                }
-            }
-        };
-        return { stream: asyncIterator };
+        return { stream: new StreamingAsyncIterator(runner, cancellationToken) };
     }

     protected async handleNonStreamingRequest(openai: OpenAI, request: LanguageModelRequest): Promise<LanguageModelTextResponse> {
         const settings = this.getSettings(request);
         const response = await openai.chat.completions.create({
             model: this.model,
-            messages: request.messages
+            messages: this.processMessages(request.messages),
             ...settings
         });

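All of the removed promise juggling now lives in the new `StreamingAsyncIterator` class (`openai-streaming-iterator.ts`, 124 lines, with its own 254-line spec, both listed above). Its actual implementation is not part of this hunk; as rough orientation, the underlying pattern is an event-to-async-iterator bridge that buffers pushed chunks until a consumer awaits them. A minimal, hypothetical sketch of that pattern:

```ts
// Minimal sketch of the push-buffer pattern; the real StreamingAsyncIterator
// additionally subscribes to the ChatCompletionStream events ('chunk',
// 'message', 'error', 'abort', 'end') and honors the CancellationToken.
class PushAsyncIterator<T> implements AsyncIterable<T> {
    private buffer: T[] = [];
    private waiting: Array<(result: IteratorResult<T>) => void> = [];
    private done = false;

    /** Called from event handlers to feed the consumer. */
    push(value: T): void {
        const waiter = this.waiting.shift();
        if (waiter) {
            waiter({ value, done: false });
        } else {
            this.buffer.push(value);
        }
    }

    /** Called on 'end'/'abort' to release all pending consumers. */
    end(): void {
        this.done = true;
        this.waiting.forEach(waiter => waiter({ value: undefined as never, done: true }));
        this.waiting = [];
    }

    [Symbol.asyncIterator](): AsyncIterator<T> {
        return {
            next: (): Promise<IteratorResult<T>> => {
                if (this.buffer.length > 0) {
                    return Promise.resolve({ value: this.buffer.shift()!, done: false });
                }
                if (this.done) {
                    return Promise.resolve({ value: undefined as never, done: true });
                }
                return new Promise(resolve => this.waiting.push(resolve));
            }
        };
    }
}
```

Compared with the removed inline iterator, a queue like this does not drop chunks that arrive before the first `next()` call and can be unit tested in isolation, which is presumably why the extracted class ships with its own spec.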
@@ -176,43 +120,16 @@
         };
     }

-    protected toOpenAIMessage(message: LanguageModelRequestMessage): ChatCompletionMessageParam {
-        return {
-            role: this.toOpenAiRole(message),
-            content: message.query || ''
-        };
-    }
-
-    protected toOpenAiRole(message: LanguageModelRequestMessage): 'developer' | 'user' | 'assistant' {
-        switch (message.actor) {
-            case 'system':
-                return this.supportsDeveloperMessage ? 'developer' : 'user';
-            case 'ai':
-                return 'assistant';
-            default:
-                return 'user';
-        }
-    }
-
     protected isNonStreamingModel(_model: string): boolean {
         return !this.enableStreaming;
     }

-    protected supportsStructuredOutput(): boolean {
-        // see https://platform.openai.com/docs/models/gpt-4o
-        return [
-            'gpt-4o',
-            'gpt-4o-2024-08-06',
-            'gpt-4o-mini'
-        ].includes(this.model);
-    }
-
     protected async handleStructuredOutputRequest(openai: OpenAI, request: LanguageModelRequest): Promise<LanguageModelParsedResponse> {
         const settings = this.getSettings(request);
         // TODO implement tool support for structured output (parse() seems to require different tool format)
         const result = await openai.beta.chat.completions.parse({
             model: this.model,
-            messages: request.messages
+            messages: this.processMessages(request.messages),
             response_format: request.response_format,
             ...settings
         });
@@ -226,13 +143,6 @@
         };
     }

-    private getCompletionContent(message: OpenAI.Chat.Completions.ChatCompletionToolMessageParam): string {
-        if (Array.isArray(message.content)) {
-            return message.content.join('');
-        }
-        return message.content;
-    }
-
     protected createTools(request: LanguageModelRequest): RunnableToolFunctionWithoutParse[] | undefined {
         return request.tools?.map(tool => ({
             type: 'function',
@@ -260,4 +170,92 @@
             return new OpenAI({ apiKey: apiKey ?? 'no-key', baseURL: this.url });
         }
     }
+
+    protected processMessages(messages: LanguageModelRequestMessage[]): ChatCompletionMessageParam[] {
+        return this.openAiModelUtils.processMessages(messages, this.developerMessageSettings, this.model);
+    }
+}
+
+/**
+ * Utility class for processing messages for the OpenAI language model.
+ *
+ * Adopters can rebind this class to implement custom message processing behavior.
+ */
+@injectable()
+export class OpenAiModelUtils {
+
+    protected processSystemMessages(
+        messages: LanguageModelRequestMessage[],
+        developerMessageSettings: DeveloperMessageSettings
+    ): LanguageModelRequestMessage[] {
+        if (developerMessageSettings === 'skip') {
+            return messages.filter(message => message.actor !== 'system');
+        } else if (developerMessageSettings === 'mergeWithFollowingUserMessage') {
+            const updated = messages.slice();
+            for (let i = updated.length - 1; i >= 0; i--) {
+                if (updated[i].actor === 'system') {
+                    if (i + 1 < updated.length && updated[i + 1].actor === 'user') {
+                        // Merge system message with the next user message
+                        updated[i + 1] = {
+                            ...updated[i + 1],
+                            query: updated[i].query + '\n' + updated[i + 1].query
+                        };
+                        updated.splice(i, 1);
+                    } else {
+                        // The message directly after is not a user message (or none exists), so create a new user message right after
+                        updated.splice(i + 1, 0, { actor: 'user', type: 'text', query: updated[i].query });
+                        updated.splice(i, 1);
+                    }
+                }
+            }
+            return updated;
+        }
+        return messages;
+    }
+
+    protected toOpenAiRole(
+        message: LanguageModelRequestMessage,
+        developerMessageSettings: DeveloperMessageSettings
+    ): 'developer' | 'user' | 'assistant' | 'system' {
+        if (message.actor === 'system') {
+            if (developerMessageSettings === 'user' || developerMessageSettings === 'system' || developerMessageSettings === 'developer') {
+                return developerMessageSettings;
+            } else {
+                return 'developer';
+            }
+        } else if (message.actor === 'ai') {
+            return 'assistant';
+        }
+        return 'user';
+    }
+
+    protected toOpenAIMessage(
+        message: LanguageModelRequestMessage,
+        developerMessageSettings: DeveloperMessageSettings
+    ): ChatCompletionMessageParam {
+        return {
+            role: this.toOpenAiRole(message, developerMessageSettings),
+            content: message.query || ''
+        };
+    }
+
+    /**
+     * Processes the provided list of messages by applying system message adjustments and converting
+     * them to the format expected by the OpenAI API.
+     *
+     * Adopters can rebind this processing to implement custom behavior.
+     *
+     * @param messages the list of messages to process.
+     * @param developerMessageSettings how system and developer messages are handled during processing.
+     * @param model the OpenAI model identifier. Currently not used, but allows subclasses to implement model-specific behavior.
+     * @returns an array of messages formatted for the OpenAI API.
+     */
+    processMessages(
+        messages: LanguageModelRequestMessage[],
+        developerMessageSettings: DeveloperMessageSettings,
+        model: string
+    ): ChatCompletionMessageParam[] {
+        const processed = this.processSystemMessages(messages, developerMessageSettings);
+        return processed.map(m => this.toOpenAIMessage(m, developerMessageSettings));
+    }
 }
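Because `OpenAiModelUtils` is `@injectable()` and bound in `openai-backend-module.ts` (the +2 lines listed above), adopters can rebind it to customize message conversion, as the class comment suggests. A hedged sketch of such a rebinding; the import path and the model name being checked are illustrative:

```ts
import { LanguageModelRequestMessage } from '@theia/ai-core';
import { ContainerModule, injectable } from '@theia/core/shared/inversify';
import { ChatCompletionMessageParam } from 'openai/resources';
import { DeveloperMessageSettings, OpenAiModelUtils } from '@theia/ai-openai/lib/node/openai-language-model';

@injectable()
class CustomOpenAiModelUtils extends OpenAiModelUtils {
    override processMessages(
        messages: LanguageModelRequestMessage[],
        developerMessageSettings: DeveloperMessageSettings,
        model: string
    ): ChatCompletionMessageParam[] {
        // Hypothetical tweak: downgrade system messages to user messages
        // for one specific model, delegate to the default otherwise.
        const settings = model === 'my-local-model' ? 'user' : developerMessageSettings;
        return super.processMessages(messages, settings, model);
    }
}

export default new ContainerModule((bind, unbind, isBound, rebind) => {
    rebind(OpenAiModelUtils).to(CustomOpenAiModelUtils).inSingletonScope();
});
```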
package/src/node/openai-language-models-manager-impl.ts:

@@ -16,12 +16,15 @@

 import { LanguageModelRegistry } from '@theia/ai-core';
 import { inject, injectable } from '@theia/core/shared/inversify';
-import { OpenAiModel } from './openai-language-model';
+import { OpenAiModel, OpenAiModelUtils } from './openai-language-model';
 import { OpenAiLanguageModelsManager, OpenAiModelDescription } from '../common';

 @injectable()
 export class OpenAiLanguageModelsManagerImpl implements OpenAiLanguageModelsManager {

+    @inject(OpenAiModelUtils)
+    protected readonly openAiModelUtils: OpenAiModelUtils;
+
     protected _apiKey: string | undefined;
     protected _apiVersion: string | undefined;

@@ -70,7 +73,8 @@ export class OpenAiLanguageModelsManagerImpl implements OpenAiLanguageModelsManager {
                 model.url = modelDescription.url;
                 model.apiKey = apiKeyProvider;
                 model.apiVersion = apiVersionProvider;
-                model.
+                model.developerMessageSettings = modelDescription.developerMessageSettings || 'developer';
+                model.supportsStructuredOutput = modelDescription.supportsStructuredOutput;
                 model.defaultRequestSettings = modelDescription.defaultRequestSettings;
             } else {
                 this.languageModelRegistry.addLanguageModels([
@@ -80,8 +84,10 @@ export class OpenAiLanguageModelsManagerImpl implements OpenAiLanguageModelsManager {
                         modelDescription.enableStreaming,
                         apiKeyProvider,
                         apiVersionProvider,
-                        modelDescription.
+                        modelDescription.supportsStructuredOutput,
                         modelDescription.url,
+                        this.openAiModelUtils,
+                        modelDescription.developerMessageSettings,
                         modelDescription.defaultRequestSettings
                     )
                 ]);
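The `OpenAiModelDescription` interface in `openai-language-models-manager.ts` (+10 -2 above) carries the two new options across the RPC boundary. A sketch of a description using them; the fields not visible in these hunks and the `createOrUpdateLanguageModels` entry point are assumed unchanged from 1.58.2, and all values are illustrative:

```ts
const description: OpenAiModelDescription = {
    id: 'openai/my-local-model',              // hypothetical identifier
    model: 'my-local-model',                  // hypothetical model name
    url: 'http://localhost:8000/v1',          // OpenAI-compatible endpoint
    enableStreaming: true,
    supportsStructuredOutput: false,          // bypasses the json_schema path
    developerMessageSettings: 'mergeWithFollowingUserMessage',
    defaultRequestSettings: { temperature: 0 }
};
manager.createOrUpdateLanguageModels(description);
```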
package/src/node/openai-model-utils.spec.ts (new file):

@@ -0,0 +1,164 @@
+// *****************************************************************************
+// Copyright (C) 2024 EclipseSource GmbH.
+//
+// This program and the accompanying materials are made available under the
+// terms of the Eclipse Public License v. 2.0 which is available at
+// http://www.eclipse.org/legal/epl-2.0.
+//
+// This Source Code may also be made available under the following Secondary
+// Licenses when the conditions for such availability set forth in the Eclipse
+// Public License v. 2.0 are satisfied: GNU General Public License, version 2
+// with the GNU Classpath Exception which is available at
+// https://www.gnu.org/software/classpath/license.html.
+//
+// SPDX-License-Identifier: EPL-2.0 OR GPL-2.0-only WITH Classpath-exception-2.0
+// *****************************************************************************
+const { expect } = require('chai');
+const { OpenAiModelUtils } = require('./openai-language-model');
+const utils = new OpenAiModelUtils();
+
+describe('OpenAiModelUtils - processMessages', () => {
+    describe("when developerMessageSettings is 'skip'", () => {
+        it('should remove all system messages', () => {
+            const messages = [
+                { actor: 'system', type: 'text', query: 'system message' },
+                { actor: 'user', type: 'text', query: 'user message' },
+                { actor: 'system', type: 'text', query: 'another system message' },
+            ];
+            const result = utils.processMessages(messages, 'skip');
+            expect(result).to.deep.equal([
+                { role: 'user', content: 'user message' }
+            ]);
+        });
+
+        it('should do nothing if there is no system message', () => {
+            const messages = [
+                { actor: 'user', type: 'text', query: 'user message' },
+                { actor: 'user', type: 'text', query: 'another user message' },
+                { actor: 'ai', type: 'text', query: 'ai message' }
+            ];
+            const result = utils.processMessages(messages, 'skip');
+            expect(result).to.deep.equal([
+                { role: 'user', content: 'user message' },
+                { role: 'user', content: 'another user message' },
+                { role: 'assistant', content: 'ai message' }
+            ]);
+        });
+    });
+
+    describe("when developerMessageSettings is 'mergeWithFollowingUserMessage'", () => {
+        it('should merge the system message with the next user message, assign role user, and remove the system message', () => {
+            const messages = [
+                { actor: 'system', type: 'text', query: 'system msg' },
+                { actor: 'user', type: 'text', query: 'user msg' },
+                { actor: 'ai', type: 'text', query: 'ai message' }
+            ];
+            const result = utils.processMessages(messages, 'mergeWithFollowingUserMessage');
+            expect(result).to.deep.equal([
+                { role: 'user', content: 'system msg\nuser msg' },
+                { role: 'assistant', content: 'ai message' }
+            ]);
+        });
+
+        it('should create a new user message if no user message exists, and remove the system message', () => {
+            const messages = [
+                { actor: 'system', type: 'text', query: 'system only msg' },
+                { actor: 'ai', type: 'text', query: 'ai message' }
+            ];
+            const result = utils.processMessages(messages, 'mergeWithFollowingUserMessage');
+            expect(result).to.deep.equal([
+                { role: 'user', content: 'system only msg' },
+                { role: 'assistant', content: 'ai message' }
+            ]);
+        });
+
+        it('should create a merge multiple system message with the next user message', () => {
+            const messages = [
+                { actor: 'user', type: 'text', query: 'user message' },
+                { actor: 'system', type: 'text', query: 'system message' },
+                { actor: 'system', type: 'text', query: 'system message2' },
+                { actor: 'user', type: 'text', query: 'user message2' },
+                { actor: 'ai', type: 'text', query: 'ai message' }
+            ];
+            const result = utils.processMessages(messages, 'mergeWithFollowingUserMessage');
+            expect(result).to.deep.equal([
+                { role: 'user', content: 'user message' },
+                { role: 'user', content: 'system message\nsystem message2\nuser message2' },
+                { role: 'assistant', content: 'ai message' }
+            ]);
+        });
+
+        it('should create a new user message from several system messages if the next message is not a user message', () => {
+            const messages = [
+                { actor: 'user', type: 'text', query: 'user message' },
+                { actor: 'system', type: 'text', query: 'system message' },
+                { actor: 'system', type: 'text', query: 'system message2' },
+                { actor: 'ai', type: 'text', query: 'ai message' }
+            ];
+            const result = utils.processMessages(messages, 'mergeWithFollowingUserMessage');
+            expect(result).to.deep.equal([
+                { role: 'user', content: 'user message' },
+                { role: 'user', content: 'system message\nsystem message2' },
+                { role: 'assistant', content: 'ai message' }
+            ]);
+        });
+    });
+
+    describe('when no special merging or skipping is needed', () => {
+        it('should leave messages unchanged in ordering and assign roles based on developerMessageSettings', () => {
+            const messages = [
+                { actor: 'user', type: 'text', query: 'user message' },
+                { actor: 'system', type: 'text', query: 'system message' },
+                { actor: 'ai', type: 'text', query: 'ai message' }
+            ];
+            // Using a developerMessageSettings that is not merge/skip, e.g., 'developer'
+            const result = utils.processMessages(messages, 'developer');
+            expect(result).to.deep.equal([
+                { role: 'user', content: 'user message' },
+                { role: 'developer', content: 'system message' },
+                { role: 'assistant', content: 'ai message' }
+            ]);
+        });
+    });
+
+    describe('role assignment for system messages when developerMessageSettings is one of the role strings', () => {
+        it('should assign role as specified for a system message when developerMessageSettings is "user"', () => {
+            const messages = [
+                { actor: 'system', type: 'text', query: 'system msg' },
+                { actor: 'ai', type: 'text', query: 'ai msg' }
+            ];
+            // Since the first message is system and developerMessageSettings is not merge/skip, ordering is not adjusted
+            const result = utils.processMessages(messages, 'user');
+            expect(result).to.deep.equal([
+                { role: 'user', content: 'system msg' },
+                { role: 'assistant', content: 'ai msg' }
+            ]);
+        });
+
+        it('should assign role as specified for a system message when developerMessageSettings is "system"', () => {
+            const messages = [
+                { actor: 'system', type: 'text', query: 'system msg' },
+                { actor: 'ai', type: 'text', query: 'ai msg' }
+            ];
+            const result = utils.processMessages(messages, 'system');
+            expect(result).to.deep.equal([
+                { role: 'system', content: 'system msg' },
+                { role: 'assistant', content: 'ai msg' }
+            ]);
+        });
+
+        it('should assign role as specified for a system message when developerMessageSettings is "developer"', () => {
+            const messages = [
+                { actor: 'system', type: 'text', query: 'system msg' },
+                { actor: 'user', type: 'text', query: 'user msg' },
+                { actor: 'ai', type: 'text', query: 'ai msg' }
+            ];
+            const result = utils.processMessages(messages, 'developer');
+            expect(result).to.deep.equal([
+                { role: 'developer', content: 'system msg' },
+                { role: 'user', content: 'user msg' },
+                { role: 'assistant', content: 'ai msg' }
+            ]);
+        });
+    });
+});