@deepseekdev/coder 1.0.81 → 1.0.83
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
- package/dist/contracts/agent-schemas.json +0 -152
- package/dist/contracts/unified-schema.json +19 -479
- package/dist/core/embeddingProviders.d.ts +2 -29
- package/dist/core/embeddingProviders.d.ts.map +1 -1
- package/dist/core/embeddingProviders.js +3 -101
- package/dist/core/embeddingProviders.js.map +1 -1
- package/dist/core/modelDiscovery.d.ts +1 -1
- package/dist/core/modelDiscovery.d.ts.map +1 -1
- package/dist/core/modelDiscovery.js +5 -349
- package/dist/core/modelDiscovery.js.map +1 -1
- package/dist/headless/interactiveShell.js +1 -1
- package/dist/headless/interactiveShell.js.map +1 -1
- package/dist/plugins/providers/index.d.ts.map +1 -1
- package/dist/plugins/providers/index.js +1 -7
- package/dist/plugins/providers/index.js.map +1 -1
- package/dist/ui/UnifiedUIRenderer.d.ts +3 -1
- package/dist/ui/UnifiedUIRenderer.d.ts.map +1 -1
- package/dist/ui/UnifiedUIRenderer.js +14 -16
- package/dist/ui/UnifiedUIRenderer.js.map +1 -1
- package/package.json +1 -1
- package/dist/plugins/providers/anthropic/index.d.ts +0 -9
- package/dist/plugins/providers/anthropic/index.d.ts.map +0 -1
- package/dist/plugins/providers/anthropic/index.js +0 -48
- package/dist/plugins/providers/anthropic/index.js.map +0 -1
- package/dist/plugins/providers/openai/index.d.ts +0 -10
- package/dist/plugins/providers/openai/index.d.ts.map +0 -1
- package/dist/plugins/providers/openai/index.js +0 -47
- package/dist/plugins/providers/openai/index.js.map +0 -1
- package/dist/plugins/providers/xai/index.d.ts +0 -10
- package/dist/plugins/providers/xai/index.d.ts.map +0 -1
- package/dist/plugins/providers/xai/index.js +0 -47
- package/dist/plugins/providers/xai/index.js.map +0 -1
package/dist/core/embeddingProviders.js

@@ -3,82 +3,9 @@
  *
  * Supports:
  * - Simple (built-in, no dependencies)
- * - OpenAI embeddings
- * - Ollama local embeddings
+ * - DeepSeek embeddings
  * - Custom providers via interface
  */
-export class OpenAIEmbeddingProvider {
-    name = 'openai';
-    dimension;
-    apiKey;
-    model;
-    baseUrl;
-    constructor(config) {
-        this.apiKey = config.apiKey;
-        this.model = config.model || 'text-embedding-3-small';
-        this.baseUrl = config.baseUrl || 'https://api.openai.com/v1';
-        // Set dimension based on model
-        switch (this.model) {
-            case 'text-embedding-3-large':
-                this.dimension = 3072;
-                break;
-            case 'text-embedding-ada-002':
-                this.dimension = 1536;
-                break;
-            case 'text-embedding-3-small':
-            default:
-                this.dimension = 1536;
-                break;
-        }
-    }
-    async embed(text) {
-        const response = await fetch(`${this.baseUrl}/embeddings`, {
-            method: 'POST',
-            headers: {
-                'Content-Type': 'application/json',
-                'Authorization': `Bearer ${this.apiKey}`,
-            },
-            body: JSON.stringify({
-                model: this.model,
-                input: text,
-            }),
-        });
-        if (!response.ok) {
-            const error = await response.text();
-            throw new Error(`OpenAI embedding failed: ${response.status} ${error}`);
-        }
-        const data = await response.json();
-        return data.data[0]?.embedding || [];
-    }
-}
-export class OllamaEmbeddingProvider {
-    name = 'ollama';
-    dimension = 4096; // Default for most Ollama models
-    model;
-    baseUrl;
-    constructor(config = {}) {
-        this.model = config.model || 'nomic-embed-text';
-        this.baseUrl = config.baseUrl || 'http://localhost:11434';
-    }
-    async embed(text) {
-        const response = await fetch(`${this.baseUrl}/api/embeddings`, {
-            method: 'POST',
-            headers: {
-                'Content-Type': 'application/json',
-            },
-            body: JSON.stringify({
-                model: this.model,
-                prompt: text,
-            }),
-        });
-        if (!response.ok) {
-            const error = await response.text();
-            throw new Error(`Ollama embedding failed: ${response.status} ${error}`);
-        }
-        const data = await response.json();
-        return data.embedding || [];
-    }
-}
 export class DeepSeekEmbeddingProvider {
     name = 'deepseek';
     dimension = 1024;
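
The net effect of this hunk: OpenAIEmbeddingProvider and OllamaEmbeddingProvider are deleted outright, leaving DeepSeekEmbeddingProvider (name 'deepseek', fixed dimension 1024) as the only remote provider in the module. A minimal usage sketch; the `{ apiKey }` constructor shape is an assumption carried over from the removed providers, and the import path is illustrative:

    import { DeepSeekEmbeddingProvider } from '@deepseekdev/coder/dist/core/embeddingProviders.js';

    // Constructor options assumed to mirror the removed providers' { apiKey, baseUrl? } shape.
    const provider = new DeepSeekEmbeddingProvider({ apiKey: process.env['DEEPSEEK_API_KEY'] });
    const vector = await provider.embed('hello world');
    console.log(provider.name, provider.dimension, vector.length); // 'deepseek' 1024 ...
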
@@ -158,22 +85,6 @@ export class CachedEmbeddingProvider {
 export function createEmbeddingProvider(options) {
     let provider;
     switch (options.type) {
-        case 'openai':
-            if (!options.apiKey) {
-                throw new Error('OpenAI embedding provider requires apiKey');
-            }
-            provider = new OpenAIEmbeddingProvider({
-                apiKey: options.apiKey,
-                model: options.model,
-                baseUrl: options.baseUrl,
-            });
-            break;
-        case 'ollama':
-            provider = new OllamaEmbeddingProvider({
-                model: options.model,
-                baseUrl: options.baseUrl,
-            });
-            break;
         case 'deepseek':
             if (!options.apiKey) {
                 throw new Error('DeepSeek embedding provider requires apiKey');
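
Callers that passed `type: 'openai'` or `type: 'ollama'` to the factory now fall through to the remaining switch cases. The surviving call, assuming the options shape visible in the retained context:

    import { createEmbeddingProvider } from '@deepseekdev/coder/dist/core/embeddingProviders.js';

    // 'deepseek' still validates apiKey, per the retained context lines above.
    const embedder = createEmbeddingProvider({
        type: 'deepseek',
        apiKey: process.env['DEEPSEEK_API_KEY'],
        useCache: true, // caching option visible in the autoDetect hunk below
    });
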
@@ -217,16 +128,8 @@ export function createEmbeddingProvider(options) {
  * Auto-detect best available embedding provider
  */
 export function autoDetectEmbeddingProvider() {
-    // Check for API keys in environment
-    const openaiKey = process.env['OPENAI_API_KEY'];
+    // Check for DeepSeek API key in environment
     const deepseekKey = process.env['DEEPSEEK_API_KEY'];
-    if (openaiKey) {
-        return createEmbeddingProvider({
-            type: 'openai',
-            apiKey: openaiKey,
-            useCache: true,
-        });
-    }
     if (deepseekKey) {
         return createEmbeddingProvider({
             type: 'deepseek',
@@ -234,8 +137,7 @@ export function autoDetectEmbeddingProvider() {
             useCache: true,
         });
     }
-    //
-    // Could add a ping check here, but for now just use simple
+    // Fall back to simple embedding provider
     return createEmbeddingProvider({ type: 'simple' });
 }
 //# sourceMappingURL=embeddingProviders.js.map
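
Auto-detection therefore reduces to a two-step decision; a behavioral sketch (import path illustrative):

    import { autoDetectEmbeddingProvider } from '@deepseekdev/coder/dist/core/embeddingProviders.js';

    // With DEEPSEEK_API_KEY set: a cached DeepSeek-backed provider.
    process.env['DEEPSEEK_API_KEY'] = 'sk-placeholder';
    const a = autoDetectEmbeddingProvider();
    // Without it: the built-in 'simple' provider (OPENAI_API_KEY is no longer consulted).
    delete process.env['DEEPSEEK_API_KEY'];
    const b = autoDetectEmbeddingProvider();
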
package/dist/core/embeddingProviders.js.map

@@ -1 +1 @@
(regenerated source map for embeddingProviders.js; minified mappings omitted)
package/dist/core/modelDiscovery.d.ts

@@ -65,7 +65,7 @@ export declare function sortModelsByPriority(provider: ProviderId, models: strin
  */
 export declare function getBestModel(provider: ProviderId, models: string[]): string;
 /**
- * Check if a provider is configured (has API key
+ * Check if a provider is configured (has API key)
  */
 export declare function isProviderConfigured(providerId: ProviderId): boolean;
 /**
package/dist/core/modelDiscovery.d.ts.map

@@ -1 +1 @@
(regenerated source map for modelDiscovery.d.ts; minified mappings omitted)
package/dist/core/modelDiscovery.js

@@ -21,12 +21,7 @@ const CACHE_FILE = join(CACHE_DIR, 'discovered-models.json');
  */
 const CACHE_EXPIRATION_MS = 24 * 60 * 60 * 1000;
 const MODEL_PROVIDER_HINTS = [
-    { provider: 'openai', patterns: [/^gpt/i, /^o1/i, /^text-embedding/i] },
-    { provider: 'anthropic', patterns: [/^claude/i] },
-    { provider: 'google', patterns: [/^gemini/i] },
     { provider: 'deepseek', patterns: [/^deepseek/i] },
-    { provider: 'ollama', patterns: [/^llama/i, /^mistral/i, /^gemma/i, /^phi/i, /^vicuna/i] },
-    { provider: 'xai', patterns: [/^grok/i] },
 ];
 /**
  * Infer provider from a model identifier.
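
With only the deepseek entry left, hint-based inference can no longer match GPT, Claude, Gemini, Llama, or Grok model IDs. The matching step in isolation (the hint table is from the diff; `inferProvider` is a stand-in name, since the real function is only referenced by its doc comment here):

    const MODEL_PROVIDER_HINTS = [
        { provider: 'deepseek', patterns: [/^deepseek/i] },
    ];
    function inferProvider(modelId) {
        const hint = MODEL_PROVIDER_HINTS.find(h => h.patterns.some(p => p.test(modelId)));
        return hint ? hint.provider : null; // fallback value is an assumption
    }
    console.log(inferProvider('deepseek-chat')); // 'deepseek'
    console.log(inferProvider('gpt-4'));         // null (previously inferred 'openai')
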
@@ -82,153 +77,6 @@ async function saveDiscoveredModels(models) {
         logDebug('Failed to save discovered models cache:', safeErrorMessage(error));
     }
 }
-/**
- * Discover models from OpenAI
- */
-async function discoverOpenAIModels(apiKey) {
-    const provider = 'openai';
-    try {
-        const response = await fetch('https://api.openai.com/v1/models', {
-            headers: {
-                'Authorization': `Bearer ${apiKey}`,
-            },
-        });
-        if (!response.ok) {
-            throw new Error(`API returned ${response.status}: ${response.statusText}`);
-        }
-        const data = await response.json();
-        // Filter for GPT models only and create ModelConfig objects
-        const models = data.data
-            .filter(model => {
-                // Allow both dash and non-dash variants for newer reasoning SKUs (o1/o3)
-                return (model.id.startsWith('gpt-') ||
-                    model.id.startsWith('o1') ||
-                    model.id.startsWith('o3') ||
-                    model.id.startsWith('codex-'));
-            })
-            .map(model => ({
-                id: model.id,
-                label: model.id,
-                provider,
-                description: `OpenAI ${model.id} (auto-discovered)`,
-                capabilities: ['chat', 'tools', 'streaming'],
-            }));
-        return {
-            provider,
-            success: true,
-            models,
-        };
-    }
-    catch (error) {
-        return {
-            provider,
-            success: false,
-            models: [],
-            error: error instanceof Error ? error.message : String(error),
-        };
-    }
-}
-/**
- * Discover models from Anthropic
- */
-async function discoverAnthropicModels(apiKey) {
-    const provider = 'anthropic';
-    try {
-        const response = await fetch('https://api.anthropic.com/v1/models', {
-            headers: {
-                'x-api-key': apiKey,
-                'anthropic-version': '2023-06-01',
-            },
-        });
-        if (!response.ok) {
-            throw new Error(`API returned ${response.status}: ${response.statusText}`);
-        }
-        const data = await response.json();
-        const models = data.data
-            .filter(model => model.type === 'model')
-            .map(model => ({
-                id: model.id,
-                label: model.display_name || model.id,
-                provider,
-                description: `Anthropic ${model.display_name || model.id} (auto-discovered)`,
-                capabilities: ['chat', 'reasoning', 'tools', 'streaming'],
-            }));
-        return {
-            provider,
-            success: true,
-            models,
-        };
-    }
-    catch (error) {
-        return {
-            provider,
-            success: false,
-            models: [],
-            error: error instanceof Error ? error.message : String(error),
-        };
-    }
-}
-/**
- * Discover models from Google Gemini
- * Note: Google's models API often requires special permissions.
- * Falls back to known models if API access fails.
- */
-async function discoverGoogleModels(apiKey) {
-    const provider = 'google';
-    // Known Google Gemini models (fallback if API doesn't work) - Updated Dec 2025
-    const knownModels = [
-        { id: 'gemini-3.0-pro', label: 'Gemini 3.0 Pro', provider, description: 'Latest Gemini with best reasoning and coding', capabilities: ['chat', 'reasoning', 'tools', 'streaming', 'multimodal'] },
-        { id: 'gemini-3.0-flash', label: 'Gemini 3.0 Flash', provider, description: 'Fast Gemini 3.0 model', capabilities: ['chat', 'reasoning', 'tools', 'streaming', 'multimodal'] },
-        { id: 'gemini-2.5-pro', label: 'Gemini 2.5 Pro', provider, description: 'Capable Gemini model with advanced reasoning', capabilities: ['chat', 'reasoning', 'tools', 'streaming', 'multimodal'] },
-        { id: 'gemini-2.5-flash', label: 'Gemini 2.5 Flash', provider, description: 'Fast Gemini 2.5 with reasoning', capabilities: ['chat', 'reasoning', 'tools', 'streaming', 'multimodal'] },
-        { id: 'gemini-2.0-flash', label: 'Gemini 2.0 Flash', provider, description: 'Gemini 2.0 with native tool use', capabilities: ['chat', 'tools', 'streaming', 'multimodal'] },
-    ];
-    try {
-        // SECURITY: Use x-goog-api-key header instead of URL query parameter
-        // to prevent API key leakage in logs, error messages, and browser history
-        const response = await fetch('https://generativelanguage.googleapis.com/v1beta/models', {
-            headers: {
-                'x-goog-api-key': apiKey,
-            },
-            signal: AbortSignal.timeout(24 * 60 * 60 * 1000),
-        });
-        if (!response.ok) {
-            // API access restricted - return known models with warning
-            return {
-                provider,
-                success: true, // Consider it success with fallback
-                models: knownModels,
-            };
-        }
-        const data = await response.json();
-        const models = data.models
-            .filter(model => model.name.includes('gemini') &&
-                model.supportedGenerationMethods?.includes('generateContent'))
-            .map(model => {
-                const id = model.name.replace('models/', '');
-                return {
-                    id,
-                    label: id,
-                    provider,
-                    description: `${model.displayName} (auto-discovered)`,
-                    capabilities: ['chat', 'reasoning', 'tools', 'streaming', 'multimodal'],
-                };
-            });
-        return {
-            provider,
-            success: true,
-            models: models.length > 0 ? models : knownModels,
-        };
-    }
-    catch {
-        // Network error or timeout - return known models
-        return {
-            provider,
-            success: true,
-            models: knownModels,
-        };
-    }
-}
 /**
  * Discover models from DeepSeek (OpenAI-compatible)
  */
@@ -266,126 +114,6 @@ async function discoverDeepSeekModels(apiKey) {
         };
     }
 }
-/**
- * Discover models from xAI (OpenAI-compatible)
- */
-async function discoverXAIModels(apiKey) {
-    const provider = 'xai';
-    try {
-        const response = await fetch('https://api.x.ai/v1/models', {
-            headers: {
-                'Authorization': `Bearer ${apiKey}`,
-            },
-        });
-        if (!response.ok) {
-            throw new Error(`API returned ${response.status}: ${response.statusText}`);
-        }
-        const data = await response.json();
-        const models = data.data.map(model => ({
-            id: model.id,
-            label: model.id,
-            provider,
-            description: `xAI ${model.id} (auto-discovered)`,
-            capabilities: ['chat', 'reasoning', 'tools', 'streaming'],
-        }));
-        return {
-            provider,
-            success: true,
-            models,
-        };
-    }
-    catch (error) {
-        return {
-            provider,
-            success: false,
-            models: [],
-            error: error instanceof Error ? error.message : String(error),
-        };
-    }
-}
-/**
- * Discover models from Ollama (local)
- */
-async function discoverOllamaModels() {
-    const provider = 'ollama';
-    const baseURL = process.env['OLLAMA_BASE_URL'] || 'http://localhost:11434';
-    try {
-        const response = await fetch(`${baseURL}/api/tags`, {
-            signal: AbortSignal.timeout(24 * 60 * 60 * 1000), // 24 hour timeout
-        });
-        if (!response.ok) {
-            throw new Error(`API returned ${response.status}: ${response.statusText}`);
-        }
-        const data = await response.json();
-        const models = (data.models || []).map(model => ({
-            id: model.name,
-            label: model.name,
-            provider,
-            description: `Local Ollama model: ${model.name} (auto-discovered)`,
-            capabilities: ['chat', 'tools', 'streaming'],
-        }));
-        return {
-            provider,
-            success: true,
-            models,
-        };
-    }
-    catch (error) {
-        return {
-            provider,
-            success: false,
-            models: [],
-            error: error instanceof Error ? error.message : String(error),
-        };
-    }
-}
-/**
- * Discover models from Qwen (Alibaba Cloud DashScope - OpenAI-compatible)
- */
-async function discoverQwenModels(apiKey) {
-    const provider = 'qwen';
-    try {
-        // DashScope uses OpenAI-compatible API
-        const response = await fetch('https://dashscope.aliyuncs.com/compatible-mode/v1/models', {
-            headers: {
-                'Authorization': `Bearer ${apiKey}`,
-            },
-        });
-        if (!response.ok) {
-            throw new Error(`API returned ${response.status}: ${response.statusText}`);
-        }
-        const data = await response.json();
-        // Filter for Qwen models and prioritize latest versions
-        const qwenModels = data.data
-            .filter(model => model.id.toLowerCase().includes('qwen'))
-            .map(model => ({
-                id: model.id,
-                label: model.id,
-                provider,
-                description: `Alibaba Qwen ${model.id} (auto-discovered)`,
-                capabilities: ['chat', 'reasoning', 'tools', 'streaming'],
-            }));
-        return {
-            provider,
-            success: true,
-            models: qwenModels,
-        };
-    }
-    catch (error) {
-        // Fallback to known models if API fails
-        const fallbackModels = [
-            { id: 'qwen-max', label: 'Qwen Max', provider, description: 'Qwen Max - most capable', capabilities: ['chat', 'reasoning', 'tools', 'streaming'] },
-            { id: 'qwen-plus', label: 'Qwen Plus', provider, description: 'Qwen Plus - balanced', capabilities: ['chat', 'reasoning', 'tools', 'streaming'] },
-            { id: 'qwen-turbo', label: 'Qwen Turbo', provider, description: 'Qwen Turbo - fast', capabilities: ['chat', 'tools', 'streaming'] },
-        ];
-        return {
-            provider,
-            success: false,
-            models: fallbackModels,
-            error: error instanceof Error ? error.message : String(error),
-        };
-    }
-}
 /**
  * Discover models from all configured providers
  *
@@ -434,7 +162,7 @@ export async function discoverAllModels() {
         // Promise rejected (shouldn't happen with our error handling, but be safe)
         const providerId = index < providers.length
             ? providers[index].id
-            : '
+            : 'deepseek';
         return {
             provider: providerId,
             success: false,
@@ -448,13 +176,6 @@
             totalModelsDiscovered += result.models.length;
         }
         else if (result.error) {
-            // Don't add Ollama connection errors (it's often not running)
-            if (result.provider === 'ollama' &&
-                (result.error.includes('ECONNREFUSED') ||
-                    result.error.includes('fetch failed') ||
-                    result.error.includes('Connection failed'))) {
-                continue;
-            }
             errors.push(`${result.provider}: ${result.error}`);
         }
     }
@@ -508,29 +229,6 @@ const MODEL_PRIORITIES = {
         'deepseek-chat': 90,
         'deepseek-coder': 85,
     },
-    openai: {
-        'gpt-5.2-pro': 110,
-        'gpt-5.2-codex': 105,
-        'gpt-5.2-codex-mini': 100,
-        'gpt-5': 95,
-        'gpt-4': 90,
-        'o3': 85,
-        'o1': 80,
-    },
-    anthropic: {
-        'claude-opus-4-5': 110,
-        'claude-sonnet-4-5': 105,
-        'claude-haiku-4-5': 100,
-        'claude-3': 90,
-    },
-    xai: {
-        'grok-4-1-fast-reasoning': 100,
-        'grok-4': 90,
-    },
-    google: {
-        'gemini-3.0-pro': 100,
-        'gemini-2.5-flash': 90,
-    },
 };
 /**
  * Get model priority for sorting
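
Only the deepseek priority table survives, so ranking carries signal only for that provider: 'deepseek-chat' (90) outranks 'deepseek-coder' (85). A worked example against the retained `getBestModel` context (import path illustrative):

    import { getBestModel } from '@deepseekdev/coder/dist/core/modelDiscovery.js';

    getBestModel('deepseek', ['deepseek-coder', 'deepseek-chat']); // 'deepseek-chat' (90 > 85)
    // For the removed providers no table remains, so sorting has no signal and the
    // retained fallback applies: sorted[0] ?? models[0] ?? ''.
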
@@ -573,18 +271,12 @@ export function getBestModel(provider, models) {
     return sorted[0] ?? models[0] ?? '';
 }
 /**
- * Check if a provider is configured (has API key
+ * Check if a provider is configured (has API key)
  */
 export function isProviderConfigured(providerId) {
     const config = PROVIDER_CONFIGS.find(p => p.id === providerId);
     if (!config)
         return false;
-    // Ollama is special - it's available if the server is running (no API key needed)
-    if (providerId === 'ollama') {
-        // We'll check this via actual connection, return true for now
-        // The actual check happens in getConfiguredProviders
-        return true;
-    }
     // Check main env var
     if (process.env[config.envVar]) {
         return true;
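
isProviderConfigured now treats every provider uniformly: look up its PROVIDER_CONFIGS entry and test the environment variable(s). A behavioral sketch; whether 'ollama' still has a PROVIDER_CONFIGS entry at all is not shown in this diff:

    import { isProviderConfigured } from '@deepseekdev/coder/dist/core/modelDiscovery.js';

    process.env['DEEPSEEK_API_KEY'] = 'sk-placeholder';
    isProviderConfigured('deepseek'); // true (env var present)
    isProviderConfigured('ollama');   // no longer unconditionally true; false if its
                                      // config entry or env var is missing
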
@@ -604,17 +296,9 @@ export function isProviderConfigured(providerId) {
  */
 export function getProvidersStatus() {
     return PROVIDER_CONFIGS.map(config => {
-        let configured = false;
-        if (config.id === 'ollama') {
-
-            // Mark as potentially available
-            configured = !!process.env['OLLAMA_BASE_URL'] || true; // Always show Ollama as option
-        }
-        else {
-            configured = !!process.env[config.envVar];
-            if (!configured && config.altEnvVars) {
-                configured = config.altEnvVars.some(v => !!process.env[v]);
-            }
+        let configured = !!process.env[config.envVar];
+        if (!configured && config.altEnvVars) {
+            configured = config.altEnvVars.some(v => !!process.env[v]);
         }
         return {
             id: config.id,
@@ -695,34 +379,6 @@ async function quickFetchProviderModels(providerId, apiKey, timeoutMs = 24 * 60
 export async function quickCheckProviders() {
     const checks = [];
     for (const config of PROVIDER_CONFIGS) {
-        // Handle Ollama separately (no API key needed)
-        if (config.id === 'ollama') {
-            checks.push((async () => {
-                try {
-                    const baseURL = process.env['OLLAMA_BASE_URL'] || 'http://localhost:11434';
-                    const response = await fetch(`${baseURL}/api/tags`, {
-                        signal: AbortSignal.timeout(24 * 60 * 60 * 1000),
-                    });
-                    if (response.ok) {
-                        const data = await response.json();
-                        const models = data.models?.map(m => m.name) || [];
-                        return {
-                            provider: 'ollama',
-                            available: models.length > 0,
-                            latestModel: models[0] || config.defaultLatestModel,
-                        };
-                    }
-                }
-                catch { /* ignore */ }
-                return {
-                    provider: 'ollama',
-                    available: false,
-                    latestModel: config.defaultLatestModel,
-                    error: 'Not running',
-                };
-            })());
-            continue;
-        }
         // Check for API key
         let apiKey = process.env[config.envVar];
         if (!apiKey && config.altEnvVars) {