@theia/ai-openai 1.55.1 → 1.57.0-next.112
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +56 -1
- package/lib/browser/openai-frontend-application-contribution.d.ts +8 -0
- package/lib/browser/openai-frontend-application-contribution.d.ts.map +1 -1
- package/lib/browser/openai-frontend-application-contribution.js +93 -39
- package/lib/browser/openai-frontend-application-contribution.js.map +1 -1
- package/lib/browser/openai-preferences.d.ts.map +1 -1
- package/lib/browser/openai-preferences.js +25 -1
- package/lib/browser/openai-preferences.js.map +1 -1
- package/lib/common/openai-language-models-manager.d.ts +23 -0
- package/lib/common/openai-language-models-manager.d.ts.map +1 -1
- package/lib/node/openai-backend-module.d.ts.map +1 -1
- package/lib/node/openai-backend-module.js +6 -1
- package/lib/node/openai-backend-module.js.map +1 -1
- package/lib/node/openai-language-model.d.ts +23 -6
- package/lib/node/openai-language-model.d.ts.map +1 -1
- package/lib/node/openai-language-model.js +83 -46
- package/lib/node/openai-language-model.js.map +1 -1
- package/lib/node/openai-language-models-manager-impl.d.ts +3 -0
- package/lib/node/openai-language-models-manager-impl.d.ts.map +1 -1
- package/lib/node/openai-language-models-manager-impl.js +31 -8
- package/lib/node/openai-language-models-manager-impl.js.map +1 -1
- package/package.json +8 -8
- package/src/browser/openai-frontend-application-contribution.ts +116 -43
- package/src/browser/openai-preferences.ts +25 -1
- package/src/common/openai-language-models-manager.ts +21 -0
- package/src/node/openai-backend-module.ts +7 -1
- package/src/node/openai-language-model.ts +93 -50
- package/src/node/openai-language-models-manager-impl.ts +43 -8
package/src/browser/openai-preferences.ts

```diff
@@ -34,7 +34,7 @@ export const OpenAiPreferencesSchema: PreferenceSchema = {
             type: 'array',
             description: 'Official OpenAI models to use',
             title: AI_CORE_PREFERENCES_TITLE,
-            default: ['gpt-4o', 'gpt-4o-2024-…
+            default: ['gpt-4o', 'gpt-4o-2024-11-20', 'gpt-4o-2024-08-06', 'gpt-4o-mini', 'o1', 'o1-mini', 'o3-mini'],
             items: {
                 type: 'string'
             }
@@ -50,6 +50,14 @@ export const OpenAiPreferencesSchema: PreferenceSchema = {
                 \n\
                 - provide an `apiKey` to access the API served at the given url. Use `true` to indicate the use of the global OpenAI API key.\
                 \n\
+                - provide an `apiVersion` to access the API served at the given url in Azure. Use `true` to indicate the use of the global OpenAI API version.\
+                \n\
+                - specify `supportsDeveloperMessage: false` to indicate that the developer role shall not be used.\
+                \n\
+                - specify `supportsStructuredOutput: false` to indicate that structured output shall not be used.\
+                \n\
+                - specify `enableStreaming: false` to indicate that streaming shall not be used.\
+                \n\
                 Refer to [our documentation](https://theia-ide.org/docs/user_ai/#openai-compatible-models-eg-via-vllm) for more information.',
             default: [],
             items: {
@@ -71,6 +79,22 @@ export const OpenAiPreferencesSchema: PreferenceSchema = {
                     type: ['string', 'boolean'],
                     title: 'Either the key to access the API served at the given url or `true` to use the global OpenAI API key',
                 },
+                apiVersion: {
+                    type: ['string', 'boolean'],
+                    title: 'Either the version to access the API served at the given url in Azure or `true` to use the global OpenAI API version',
+                },
+                supportsDeveloperMessage: {
+                    type: 'boolean',
+                    title: 'Indicates whether the model supports the `developer` role. `true` by default.',
+                },
+                supportsStructuredOutput: {
+                    type: 'boolean',
+                    title: 'Indicates whether the model supports structured output. `true` by default.',
+                },
+                enableStreaming: {
+                    type: 'boolean',
+                    title: 'Indicates whether the streaming API shall be used. `true` by default.',
+                }
             }
         }
     }
```
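Taken together, these hunks extend each custom-model entry with four new switches next to the existing fields such as `model`, `url`, and `apiKey`. A minimal sketch of an entry for an OpenAI-compatible server such as vLLM; the endpoint, ids, and chosen values are illustrative, not taken from this diff:

```ts
// Illustrative custom-model entry matching the extended schema above.
const customOpenAiModels = [{
    model: 'my-vllm-model',
    url: 'http://localhost:8000/v1',
    id: 'vllm/my-model',
    apiKey: true,                     // `true` = fall back to the global OpenAI API key
    supportsDeveloperMessage: false,  // send system prompts with the 'user' role instead
    supportsStructuredOutput: false,  // never route requests to the structured-output path
    enableStreaming: false            // use the non-streaming completions endpoint
}];
```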
package/src/common/openai-language-models-manager.ts

```diff
@@ -32,10 +32,31 @@ export interface OpenAiModelDescription {
      * The key for the model. If 'true' is provided the global OpenAI API key will be used.
      */
     apiKey: string | true | undefined;
+    /**
+     * The version for the api. If 'true' is provided the global OpenAI version will be used.
+     */
+    apiVersion: string | true | undefined;
+    /**
+     * Indicate whether the streaming API shall be used.
+     */
+    enableStreaming: boolean;
+    /**
+     * Flag to configure whether the OpenAPI model supports the `developer` role. Default is `true`.
+     */
+    supportsDeveloperMessage: boolean;
+    /**
+     * Flag to configure whether the OpenAPI model supports structured output. Default is `true`.
+     */
+    supportsStructuredOutput: boolean;
+    /**
+     * Default request settings for the OpenAI model.
+     */
+    defaultRequestSettings?: { [key: string]: unknown };
 }
 export interface OpenAiLanguageModelsManager {
     apiKey: string | undefined;
     setApiKey(key: string | undefined): void;
+    setApiVersion(version: string | undefined): void;
     createOrUpdateLanguageModels(...models: OpenAiModelDescription[]): Promise<void>;
     removeLanguageModels(...modelIds: string[]): void
 }
```
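The manager interface gains `setApiVersion` alongside `setApiKey`, and `createOrUpdateLanguageModels` now carries the full per-model description. A hedged sketch of backend-side registration against this interface; all values are illustrative:

```ts
// Hedged sketch: registering a model without a frontend, per the interface above.
import { OpenAiLanguageModelsManager } from '@theia/ai-openai/lib/common/openai-language-models-manager';

async function registerDefaultModel(manager: OpenAiLanguageModelsManager): Promise<void> {
    await manager.createOrUpdateLanguageModels({
        id: 'openai/gpt-4o',
        model: 'gpt-4o',
        url: undefined,            // default OpenAI endpoint
        apiKey: true,              // resolve the global API key on every request
        apiVersion: undefined,     // only needed for Azure-hosted endpoints
        enableStreaming: true,
        supportsDeveloperMessage: true,
        supportsStructuredOutput: true
    });
}
```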
package/src/node/openai-backend-module.ts

```diff
@@ -18,13 +18,19 @@ import { ContainerModule } from '@theia/core/shared/inversify';
 import { OPENAI_LANGUAGE_MODELS_MANAGER_PATH, OpenAiLanguageModelsManager } from '../common/openai-language-models-manager';
 import { ConnectionHandler, RpcConnectionHandler } from '@theia/core';
 import { OpenAiLanguageModelsManagerImpl } from './openai-language-models-manager-impl';
+import { ConnectionContainerModule } from '@theia/core/lib/node/messaging/connection-container-module';
 
 export const OpenAiModelFactory = Symbol('OpenAiModelFactory');
 
-…
+// We use a connection module to handle AI services separately for each frontend.
+const openAiConnectionModule = ConnectionContainerModule.create(({ bind, bindBackendService, bindFrontendService }) => {
     bind(OpenAiLanguageModelsManagerImpl).toSelf().inSingletonScope();
     bind(OpenAiLanguageModelsManager).toService(OpenAiLanguageModelsManagerImpl);
     bind(ConnectionHandler).toDynamicValue(ctx =>
         new RpcConnectionHandler(OPENAI_LANGUAGE_MODELS_MANAGER_PATH, () => ctx.container.get(OpenAiLanguageModelsManager))
     ).inSingletonScope();
 });
+
+export default new ContainerModule(bind => {
+    bind(ConnectionContainerModule).toConstantValue(openAiConnectionModule);
+});
```
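With `ConnectionContainerModule`, every frontend connection gets its own `OpenAiLanguageModelsManagerImpl`, so per-user keys and versions set via `setApiKey`/`setApiVersion` no longer bleed across connections. The frontend reaches its instance over the same RPC path; a hedged sketch of the counterpart binding using Theia's long-standing `WebSocketConnectionProvider` proxy pattern (this package's actual frontend module is not part of the hunks shown):

```ts
// Hedged sketch of the frontend-side counterpart binding.
import { ContainerModule } from '@theia/core/shared/inversify';
import { WebSocketConnectionProvider } from '@theia/core/lib/browser';
import { OPENAI_LANGUAGE_MODELS_MANAGER_PATH, OpenAiLanguageModelsManager } from '../common/openai-language-models-manager';

export default new ContainerModule(bind => {
    // createProxy returns an RPC proxy whose calls are served by the
    // per-connection OpenAiLanguageModelsManagerImpl bound on the backend.
    bind(OpenAiLanguageModelsManager).toDynamicValue(ctx =>
        WebSocketConnectionProvider.createProxy<OpenAiLanguageModelsManager>(ctx.container, OPENAI_LANGUAGE_MODELS_MANAGER_PATH)
    ).inSingletonScope();
});
```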
package/src/node/openai-language-model.ts

```diff
@@ -24,49 +24,59 @@ import {
     LanguageModelTextResponse
 } from '@theia/ai-core';
 import { CancellationToken } from '@theia/core';
-import OpenAI from 'openai';
+import { OpenAI, AzureOpenAI } from 'openai';
 import { ChatCompletionStream } from 'openai/lib/ChatCompletionStream';
 import { RunnableToolFunctionWithoutParse } from 'openai/lib/RunnableFunction';
 import { ChatCompletionMessageParam } from 'openai/resources';
 
 export const OpenAiModelIdentifier = Symbol('OpenAiModelIdentifier');
 
-function toOpenAIMessage(message: LanguageModelRequestMessage): ChatCompletionMessageParam {
-    return {
-        role: toOpenAiRole(message),
-        content: message.query || ''
-    };
-}
-
-function toOpenAiRole(message: LanguageModelRequestMessage): 'system' | 'user' | 'assistant' {
-    switch (message.actor) {
-        case 'system':
-            return 'system';
-        case 'ai':
-            return 'assistant';
-        default:
-            return 'user';
-    }
-}
-
 export class OpenAiModel implements LanguageModel {
 
     /**
      * @param id the unique id for this language model. It will be used to identify the model in the UI.
      * @param model the model id as it is used by the OpenAI API
-     * @param …
+     * @param enableStreaming whether the streaming API shall be used
+     * @param apiKey a function that returns the API key to use for this model, called on each request
+     * @param apiVersion a function that returns the OpenAPI version to use for this model, called on each request
+     * @param supportsDeveloperMessage whether the model supports the `developer` role
+     * @param url the OpenAI API compatible endpoint where the model is hosted. If not provided the default OpenAI endpoint will be used.
+     * @param defaultRequestSettings optional default settings for requests made using this model.
      */
-    constructor(
+    constructor(
+        public readonly id: string,
+        public model: string,
+        public enableStreaming: boolean,
+        public apiKey: () => string | undefined,
+        public apiVersion: () => string | undefined,
+        public supportsDeveloperMessage: boolean,
+        public supportsStructuredOutput: boolean,
+        public url: string | undefined,
+        public defaultRequestSettings?: { [key: string]: unknown }
+    ) { }
+
+    protected getSettings(request: LanguageModelRequest): Record<string, unknown> {
+        const settings = request.settings ? request.settings : this.defaultRequestSettings;
+        if (!settings) {
+            return {};
+        }
+        return settings;
+    }
 
     async request(request: LanguageModelRequest, cancellationToken?: CancellationToken): Promise<LanguageModelResponse> {
+        const settings = this.getSettings(request);
         const openai = this.initializeOpenAi();
 
-        if (…
+        if (request.response_format?.type === 'json_schema' && this.supportsStructuredOutput) {
+            return this.handleStructuredOutputRequest(openai, request);
+        }
+
+        if (this.isNonStreamingModel(this.model) || (typeof settings.stream === 'boolean' && !settings.stream)) {
             return this.handleNonStreamingRequest(openai, request);
         }
 
-        if (…
-            return …
+        if (cancellationToken?.isCancellationRequested) {
+            return { text: '' };
         }
 
         let runner: ChatCompletionStream;
@@ -74,18 +84,18 @@ export class OpenAiModel implements LanguageModel {
         if (tools) {
             runner = openai.beta.chat.completions.runTools({
                 model: this.model,
-                messages: request.messages.map(toOpenAIMessage),
+                messages: request.messages.map(this.toOpenAIMessage.bind(this)),
                 stream: true,
                 tools: tools,
                 tool_choice: 'auto',
-                ...…
+                ...settings
             });
         } else {
             runner = openai.beta.chat.completions.stream({
                 model: this.model,
-                messages: request.messages.map(toOpenAIMessage),
+                messages: request.messages.map(this.toOpenAIMessage.bind(this)),
                 stream: true,
-                ...…
+                ...settings
             });
         }
         cancellationToken?.onCancellationRequested(() => {
@@ -94,42 +104,57 @@ export class OpenAiModel implements LanguageModel {
 
         let runnerEnd = false;
 
-        let resolve: (part: LanguageModelStreamResponsePart) => void;
+        let resolve: ((part: LanguageModelStreamResponsePart) => void) | undefined;
         runner.on('error', error => {
             console.error('Error in OpenAI chat completion stream:', error);
             runnerEnd = true;
-            resolve({ content: error.message });
+            resolve?.({ content: error.message });
         });
         // we need to also listen for the emitted errors, as otherwise any error actually thrown by the API will not be caught
         runner.emitted('error').then(error => {
             console.error('Error in OpenAI chat completion stream:', error);
             runnerEnd = true;
-            resolve({ content: error.message });
+            resolve?.({ content: error.message });
         });
         runner.emitted('abort').then(() => {
-            // …
+            // cancel async iterator
+            runnerEnd = true;
         });
         runner.on('message', message => {
             if (message.role === 'tool') {
-                resolve({ tool_calls: [{ id: message.tool_call_id, finished: true, result: this.getCompletionContent(message) }] });
+                resolve?.({ tool_calls: [{ id: message.tool_call_id, finished: true, result: this.getCompletionContent(message) }] });
             }
             console.debug('Received Open AI message', JSON.stringify(message));
         });
         runner.once('end', () => {
             runnerEnd = true;
             // eslint-disable-next-line @typescript-eslint/no-explicit-any
-            resolve(runner.finalChatCompletion as any);
+            resolve?.(runner.finalChatCompletion as any);
         });
+        if (cancellationToken?.isCancellationRequested) {
+            return { text: '' };
+        }
         const asyncIterator = {
             async *[Symbol.asyncIterator](): AsyncIterator<LanguageModelStreamResponsePart> {
                 runner.on('chunk', chunk => {
-                    if (…
+                    if (cancellationToken?.isCancellationRequested) {
+                        resolve = undefined;
+                        return;
+                    }
+                    if (resolve && chunk.choices[0]?.delta) {
                         resolve({ ...chunk.choices[0]?.delta });
                     }
                 });
                 while (!runnerEnd) {
+                    if (cancellationToken?.isCancellationRequested) {
+                        throw new Error('Iterator canceled');
+                    }
                     const promise = new Promise<LanguageModelStreamResponsePart>((res, rej) => {
                         resolve = res;
+                        cancellationToken?.onCancellationRequested(() => {
+                            rej(new Error('Canceled'));
+                            runnerEnd = true; // Stop the iterator
+                        });
                     });
                     yield promise;
                 }
```
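A hedged sketch of driving this stream from a caller, assuming the `@theia/ai-core` convention that a streaming response exposes its parts as an async iterable named `stream` (the request-message shape is likewise assumed, not shown in these hunks):

```ts
// Hedged usage sketch; response/message shapes assumed from @theia/ai-core.
import { CancellationTokenSource } from '@theia/core';

async function streamOnce(model: OpenAiModel): Promise<void> {
    const cancel = new CancellationTokenSource();
    const response = await model.request(
        { messages: [{ actor: 'user', type: 'text', query: 'Say hello' }] },
        cancel.token
    );
    if ('stream' in response) {
        for await (const part of response.stream) {
            if ('content' in part && part.content) {
                process.stdout.write(part.content); // incremental delta text
            }
        }
    }
    // Calling cancel.cancel() mid-iteration trips the checks above: the pending
    // promise rejects with 'Canceled' and the iterator throws 'Iterator canceled'.
}
```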
```diff
@@ -139,10 +164,11 @@ export class OpenAiModel implements LanguageModel {
     }
 
     protected async handleNonStreamingRequest(openai: OpenAI, request: LanguageModelRequest): Promise<LanguageModelTextResponse> {
+        const settings = this.getSettings(request);
         const response = await openai.chat.completions.create({
             model: this.model,
-            messages: request.messages.map(toOpenAIMessage),
-            ...…
+            messages: request.messages.map(this.toOpenAIMessage.bind(this)),
+            ...settings
         });
 
         const message = response.choices[0].message;
@@ -152,26 +178,36 @@ export class OpenAiModel implements LanguageModel {
         };
     }
 
-    protected …
-        return …
+    protected toOpenAIMessage(message: LanguageModelRequestMessage): ChatCompletionMessageParam {
+        return {
+            role: this.toOpenAiRole(message),
+            content: message.query || ''
+        };
+    }
+
+    protected toOpenAiRole(message: LanguageModelRequestMessage): 'developer' | 'user' | 'assistant' {
+        switch (message.actor) {
+            case 'system':
+                return this.supportsDeveloperMessage ? 'developer' : 'user';
+            case 'ai':
+                return 'assistant';
+            default:
+                return 'user';
+        }
     }
 
-    protected …
-…
-        return [
-            'gpt-4o',
-            'gpt-4o-2024-08-06',
-            'gpt-4o-mini'
-        ].includes(this.model);
+    protected isNonStreamingModel(_model: string): boolean {
+        return !this.enableStreaming;
     }
 
     protected async handleStructuredOutputRequest(openai: OpenAI, request: LanguageModelRequest): Promise<LanguageModelParsedResponse> {
+        const settings = this.getSettings(request);
         // TODO implement tool support for structured output (parse() seems to require different tool format)
         const result = await openai.beta.chat.completions.parse({
             model: this.model,
-            messages: request.messages.map(toOpenAIMessage),
+            messages: request.messages.map(this.toOpenAIMessage.bind(this)),
             response_format: request.response_format,
-            ...…
+            ...settings
         });
         const message = result.choices[0].message;
         if (message.refusal || message.parsed === undefined) {
@@ -207,7 +243,14 @@ export class OpenAiModel implements LanguageModel {
         if (!apiKey && !(this.url)) {
             throw new Error('Please provide OPENAI_API_KEY in preferences or via environment variable');
         }
-…
-…
+
+        const apiVersion = this.apiVersion();
+        if (apiVersion) {
+            // We need to hand over "some" key, even if a custom url is not key protected as otherwise the OpenAI client will throw an error
+            return new AzureOpenAI({ apiKey: apiKey ?? 'no-key', baseURL: this.url, apiVersion: apiVersion });
+        } else {
+            // We need to hand over "some" key, even if a custom url is not key protected as otherwise the OpenAI client will throw an error
+            return new OpenAI({ apiKey: apiKey ?? 'no-key', baseURL: this.url });
+        }
     }
 }
```
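Since the client choice hinges entirely on whether `apiVersion` resolves to a value, pointing a model at Azure OpenAI only requires that one extra field. A hedged sketch of such a description; the deployment URL, key, and version string are placeholders:

```ts
// Hedged sketch of an Azure-targeting model description; all values are placeholders.
const azureModelDescription = {
    id: 'azure/gpt-4o',
    model: 'gpt-4o',                  // the Azure deployment's model
    url: 'https://my-resource.openai.azure.com/openai/deployments/gpt-4o',
    apiKey: 'my-azure-key',
    apiVersion: '2024-06-01',         // any resolved version selects the AzureOpenAI client
    enableStreaming: true,
    supportsDeveloperMessage: true,
    supportsStructuredOutput: true
};
```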
package/src/node/openai-language-models-manager-impl.ts

```diff
@@ -23,6 +23,7 @@ import { OpenAiLanguageModelsManager, OpenAiModelDescription } from '../common';
 export class OpenAiLanguageModelsManagerImpl implements OpenAiLanguageModelsManager {
 
     protected _apiKey: string | undefined;
+    protected _apiVersion: string | undefined;
 
     @inject(LanguageModelRegistry)
     protected readonly languageModelRegistry: LanguageModelRegistry;
@@ -31,6 +32,10 @@ export class OpenAiLanguageModelsManagerImpl implements OpenAiLanguageModelsMana
         return this._apiKey ?? process.env.OPENAI_API_KEY;
     }
 
+    get apiVersion(): string | undefined {
+        return this._apiVersion ?? process.env.OPENAI_API_VERSION;
+    }
+
     // Triggered from frontend. In case you want to use the models on the backend
     // without a frontend then call this yourself
     async createOrUpdateLanguageModels(...modelDescriptions: OpenAiModelDescription[]): Promise<void> {
@@ -45,21 +50,43 @@ export class OpenAiLanguageModelsManagerImpl implements OpenAiLanguageModelsMana
             }
             return undefined;
         };
+        const apiVersionProvider = () => {
+            if (modelDescription.apiVersion === true) {
+                return this.apiVersion;
+            }
+            if (modelDescription.apiVersion) {
+                return modelDescription.apiVersion;
+            }
+            return undefined;
+        };
+
         if (model) {
             if (!(model instanceof OpenAiModel)) {
-                console.warn(`…
+                console.warn(`OpenAI: model ${modelDescription.id} is not an OpenAI model`);
                 continue;
             }
-            if (!modelDescription.url) {
-                // This seems to be an official model, but it was already created. This can happen during the initializing of more than one frontend.
-                console.info(`Open AI: skip creating model ${modelDescription.id} because it already exists`);
-                continue;
-            }
-            model.url = modelDescription.url;
             model.model = modelDescription.model;
+            model.enableStreaming = modelDescription.enableStreaming;
+            model.url = modelDescription.url;
             model.apiKey = apiKeyProvider;
+            model.apiVersion = apiVersionProvider;
+            model.supportsDeveloperMessage = modelDescription.supportsDeveloperMessage;
+            model.supportsStructuredOutput = modelDescription.supportsStructuredOutput;
+            model.defaultRequestSettings = modelDescription.defaultRequestSettings;
         } else {
-            this.languageModelRegistry.addLanguageModels([…
+            this.languageModelRegistry.addLanguageModels([
+                new OpenAiModel(
+                    modelDescription.id,
+                    modelDescription.model,
+                    modelDescription.enableStreaming,
+                    apiKeyProvider,
+                    apiVersionProvider,
+                    modelDescription.supportsDeveloperMessage,
+                    modelDescription.supportsStructuredOutput,
+                    modelDescription.url,
+                    modelDescription.defaultRequestSettings
+                )
+            ]);
         }
     }
 }
@@ -75,4 +102,12 @@ export class OpenAiLanguageModelsManagerImpl implements OpenAiLanguageModelsMana
             this._apiKey = undefined;
         }
     }
+
+    setApiVersion(apiVersion: string | undefined): void {
+        if (apiVersion) {
+            this._apiVersion = apiVersion;
+        } else {
+            this._apiVersion = undefined;
+        }
+    }
 }
```