ohlcv-ai 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +202 -0
- package/README.md +3 -0
- package/dist/aliyun/index.d.ts.map +1 -0
- package/dist/aliyun/model.d.ts.map +1 -0
- package/dist/deepseek/index.d.ts.map +1 -0
- package/dist/deepseek/model.d.ts.map +1 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +40 -0
- package/dist/openai/index.d.ts.map +1 -0
- package/dist/openai/model.d.ts.map +1 -0
- package/dist/types.d.ts.map +1 -0
- package/package.json +42 -0
- package/src/aliyun/index.ts +446 -0
- package/src/aliyun/model.ts +475 -0
- package/src/deepseek/index.ts +479 -0
- package/src/deepseek/model.ts +455 -0
- package/src/index.ts +50 -0
- package/src/openai/index.ts +766 -0
- package/src/openai/model.ts +430 -0
- package/src/types.ts +12 -0
- package/tsconfig.json +26 -0
package/src/openai/model.ts ADDED
@@ -0,0 +1,430 @@
export enum OpenAIModelType {
  // GPT-4 Series
  GPT4 = 'gpt-4',
  GPT4_0314 = 'gpt-4-0314',
  GPT4_0613 = 'gpt-4-0613',
  GPT4_32K = 'gpt-4-32k',
  GPT4_32K_0314 = 'gpt-4-32k-0314',
  GPT4_32K_0613 = 'gpt-4-32k-0613',
  GPT4_TURBO = 'gpt-4-turbo',
  GPT4_TURBO_PREVIEW = 'gpt-4-turbo-preview',
  GPT4_TURBO_2024_04_09 = 'gpt-4-turbo-2024-04-09',
  GPT4_OMNI = 'gpt-4o',
  GPT4_OMNI_2024_05_13 = 'gpt-4o-2024-05-13',
  GPT4_OMNI_MINI = 'gpt-4o-mini',
  GPT4_OMNI_MINI_2024_07_18 = 'gpt-4o-mini-2024-07-18',
  // GPT-3.5 Series
  GPT3_5_TURBO = 'gpt-3.5-turbo',
  GPT3_5_TURBO_0125 = 'gpt-3.5-turbo-0125',
  GPT3_5_TURBO_1106 = 'gpt-3.5-turbo-1106',
  GPT3_5_TURBO_INSTRUCT = 'gpt-3.5-turbo-instruct',
  GPT3_5_TURBO_16K = 'gpt-3.5-turbo-16k',
  GPT3_5_TURBO_16K_0613 = 'gpt-3.5-turbo-16k-0613',
  // GPT-3 Series
  DAVINCI_002 = 'davinci-002',
  BABBAGE_002 = 'babbage-002',
  TEXT_DAVINCI_003 = 'text-davinci-003',
  TEXT_DAVINCI_002 = 'text-davinci-002',
  TEXT_DAVINCI_001 = 'text-davinci-001',
  TEXT_CURIE_001 = 'text-curie-001',
  TEXT_BABBAGE_001 = 'text-babbage-001',
  TEXT_ADA_001 = 'text-ada-001',
  // Embedding Models
  TEXT_EMBEDDING_ADA_002 = 'text-embedding-ada-002',
  TEXT_EMBEDDING_3_SMALL = 'text-embedding-3-small',
  TEXT_EMBEDDING_3_LARGE = 'text-embedding-3-large',
  // DALL-E Image Generation
  DALL_E_2 = 'dall-e-2',
  DALL_E_3 = 'dall-e-3',
  // Whisper Audio
  WHISPER_1 = 'whisper-1',
  // TTS Text-to-Speech
  TTS_1 = 'tts-1',
  TTS_1_HD = 'tts-1-hd',
  // Moderation Models
  MODERATION_LATEST = 'text-moderation-latest',
  MODERATION_STABLE = 'text-moderation-stable',

  // Fine-tuned Models
  GPT3_5_TURBO_FINETUNED = 'ft:gpt-3.5-turbo-0125:personal:',
  GPT4_FINETUNED = 'ft:gpt-4-0125-preview:personal:',

  // Vision Models
  GPT4_VISION_PREVIEW = 'gpt-4-vision-preview',
}

export interface OpenAIModel {
  name: string;
  displayName: string;
  endpoint: string;
  format: 'openai';
  description: string;
  maxTokens?: number;
  contextLength?: number;
  capabilities: string[];
  inputCostPer1KTokens?: number;  // Input token cost (USD per 1K tokens)
  outputCostPer1KTokens?: number; // Output token cost (USD per 1K tokens)
  supportedFeatures?: string[];   // Supported API features
}

export const OPENAI_MODELS: Map<OpenAIModelType, OpenAIModel> = new Map([
  // GPT-4 Series
  [
    OpenAIModelType.GPT4,
    {
      name: OpenAIModelType.GPT4,
      displayName: 'GPT-4',
      endpoint: 'https://api.openai.com/v1/chat/completions',
      format: 'openai',
      description: 'Powerful multi-purpose model for complex tasks',
      maxTokens: 8192,
      contextLength: 8192,
      capabilities: ['chat', 'text-generation', 'reasoning', 'analysis'],
      inputCostPer1KTokens: 0.03,
      outputCostPer1KTokens: 0.06,
      supportedFeatures: ['chat', 'function-calling']
    }
  ],
  [
    OpenAIModelType.GPT4_TURBO,
    {
      name: OpenAIModelType.GPT4_TURBO,
      displayName: 'GPT-4 Turbo',
      endpoint: 'https://api.openai.com/v1/chat/completions',
      format: 'openai',
      description: 'Enhanced GPT-4 with 128K context, knowledge cutoff April 2023',
      maxTokens: 4096,
      contextLength: 128000,
      capabilities: ['chat', 'text-generation', 'reasoning', 'analysis', 'vision'],
      inputCostPer1KTokens: 0.01,
      outputCostPer1KTokens: 0.03,
      supportedFeatures: ['chat', 'function-calling', 'vision', 'json-mode']
    }
  ],
  [
    OpenAIModelType.GPT4_OMNI,
    {
      name: OpenAIModelType.GPT4_OMNI,
      displayName: 'GPT-4o',
      endpoint: 'https://api.openai.com/v1/chat/completions',
      format: 'openai',
      description: 'Versatile model supporting text, images, audio with fast response',
      maxTokens: 4096,
      contextLength: 128000,
      capabilities: ['chat', 'text-generation', 'vision', 'audio-processing', 'multimodal'],
      inputCostPer1KTokens: 0.005,
      outputCostPer1KTokens: 0.015,
      supportedFeatures: ['chat', 'function-calling', 'vision', 'audio', 'json-mode']
    }
  ],
  [
    OpenAIModelType.GPT4_OMNI_MINI,
    {
      name: OpenAIModelType.GPT4_OMNI_MINI,
      displayName: 'GPT-4o Mini',
      endpoint: 'https://api.openai.com/v1/chat/completions',
      format: 'openai',
      description: 'Compact and efficient version of GPT-4o with lower cost',
      maxTokens: 16384,
      contextLength: 128000,
      capabilities: ['chat', 'text-generation', 'vision'],
      inputCostPer1KTokens: 0.00015,
      outputCostPer1KTokens: 0.0006,
      supportedFeatures: ['chat', 'function-calling', 'vision', 'json-mode']
    }
  ],

  // GPT-3.5 Series
  [
    OpenAIModelType.GPT3_5_TURBO,
    {
      name: OpenAIModelType.GPT3_5_TURBO,
      displayName: 'GPT-3.5 Turbo',
      endpoint: 'https://api.openai.com/v1/chat/completions',
      format: 'openai',
      description: 'Fast and cost-effective, suitable for most conversational tasks',
      maxTokens: 4096,
      contextLength: 16385,
      capabilities: ['chat', 'text-generation', 'code-generation'],
      inputCostPer1KTokens: 0.0005,
      outputCostPer1KTokens: 0.0015,
      supportedFeatures: ['chat', 'function-calling']
    }
  ],
  [
    OpenAIModelType.GPT3_5_TURBO_INSTRUCT,
    {
      name: OpenAIModelType.GPT3_5_TURBO_INSTRUCT,
      displayName: 'GPT-3.5 Turbo Instruct',
      endpoint: 'https://api.openai.com/v1/completions',
      format: 'openai',
      description: 'Instruction-tuned version for text completion tasks',
      maxTokens: 4096,
      contextLength: 4097,
      capabilities: ['text-completion', 'instruction-following'],
      inputCostPer1KTokens: 0.0015,
      outputCostPer1KTokens: 0.0020,
      supportedFeatures: ['completions']
    }
  ],

  // Embedding Models
  [
    OpenAIModelType.TEXT_EMBEDDING_ADA_002,
    {
      name: OpenAIModelType.TEXT_EMBEDDING_ADA_002,
      displayName: 'Text Embedding Ada 002',
      endpoint: 'https://api.openai.com/v1/embeddings',
      format: 'openai',
      description: 'Text embedding model, 1536 dimensions, suitable for retrieval and similarity',
      contextLength: 8191,
      capabilities: ['embeddings', 'semantic-search'],
      inputCostPer1KTokens: 0.0001,
      supportedFeatures: ['embeddings']
    }
  ],
  [
    OpenAIModelType.TEXT_EMBEDDING_3_SMALL,
    {
      name: OpenAIModelType.TEXT_EMBEDDING_3_SMALL,
      displayName: 'Text Embedding 3 Small',
      endpoint: 'https://api.openai.com/v1/embeddings',
      format: 'openai',
      description: 'Small text embedding model, 1536 dimensions, balance of performance and cost',
      contextLength: 8191,
      capabilities: ['embeddings', 'semantic-search'],
      inputCostPer1KTokens: 0.00002,
      supportedFeatures: ['embeddings']
    }
  ],

  // DALL-E Image Generation
  [
    OpenAIModelType.DALL_E_3,
    {
      name: OpenAIModelType.DALL_E_3,
      displayName: 'DALL-E 3',
      endpoint: 'https://api.openai.com/v1/images/generations',
      format: 'openai',
      description: 'Advanced image generation model producing high-quality, high-resolution images',
      capabilities: ['image-generation', 'creative-design'],
      inputCostPer1KTokens: 0.04, // Cost per image
      supportedFeatures: ['image-generation', 'variations', 'edits']
    }
  ],

  // Whisper Speech Recognition
  [
    OpenAIModelType.WHISPER_1,
    {
      name: OpenAIModelType.WHISPER_1,
      displayName: 'Whisper',
      endpoint: 'https://api.openai.com/v1/audio/transcriptions',
      format: 'openai',
      description: 'Speech recognition model supporting multilingual transcription and translation',
      capabilities: ['speech-recognition', 'audio-transcription', 'translation'],
      inputCostPer1KTokens: 0.006, // Cost per minute of audio
      supportedFeatures: ['transcriptions', 'translations']
    }
  ],

  // TTS Text-to-Speech
  [
    OpenAIModelType.TTS_1_HD,
    {
      name: OpenAIModelType.TTS_1_HD,
      displayName: 'TTS-1 HD',
      endpoint: 'https://api.openai.com/v1/audio/speech',
      format: 'openai',
      description: 'High-quality text-to-speech with multiple voice options',
      capabilities: ['speech-synthesis', 'text-to-speech'],
      inputCostPer1KTokens: 0.015, // Cost per thousand characters
      supportedFeatures: ['speech', 'voice-selection']
    }
  ],

  // Moderation Models
  [
    OpenAIModelType.MODERATION_LATEST,
    {
      name: OpenAIModelType.MODERATION_LATEST,
      displayName: 'Moderation Latest',
      endpoint: 'https://api.openai.com/v1/moderations',
      format: 'openai',
      description: 'Content moderation model for detecting harmful content',
      capabilities: ['content-moderation', 'safety'],
      inputCostPer1KTokens: 0.0001,
      supportedFeatures: ['moderation']
    }
  ],
]);

// Helper functions
export function getOpenAIModel(type: OpenAIModelType): OpenAIModel | undefined {
  return OPENAI_MODELS.get(type);
}

export function getAllOpenAIModels(): OpenAIModel[] {
  return Array.from(OPENAI_MODELS.values());
}

export function getOpenAIModelByName(name: string): OpenAIModel | undefined {
  for (const model of OPENAI_MODELS.values()) {
    if (model.name === name) {
      return model;
    }
  }
  return undefined;
}

export function getAvailableOpenAIModelTypes(): OpenAIModelType[] {
  return Array.from(OPENAI_MODELS.keys());
}

// Category-based helper functions
export function getChatModels(): OpenAIModel[] {
  return getAllOpenAIModels().filter(model =>
    model.capabilities.includes('chat')
  );
}

export function getCompletionModels(): OpenAIModel[] {
  return getAllOpenAIModels().filter(model =>
    model.capabilities.includes('text-completion')
  );
}

export function getEmbeddingModels(): OpenAIModel[] {
  return getAllOpenAIModels().filter(model =>
    model.capabilities.includes('embeddings')
  );
}

export function getVisionModelsOpenAI(): OpenAIModel[] {
  return getAllOpenAIModels().filter(model =>
    model.capabilities.includes('vision') ||
    model.capabilities.includes('image-generation')
  );
}

export function getAudioModelsOpenAI(): OpenAIModel[] {
  return getAllOpenAIModels().filter(model =>
    model.capabilities.includes('audio-processing') ||
    model.capabilities.includes('speech-recognition') ||
    model.capabilities.includes('speech-synthesis')
  );
}

export function getMultimodalModelsOpenAI(): OpenAIModel[] {
  return getAllOpenAIModels().filter(model =>
    model.capabilities.includes('multimodal')
  );
}

export function getLatestModels(): OpenAIModel[] {
  const latestModels = [
    OpenAIModelType.GPT4_OMNI,
    OpenAIModelType.GPT4_OMNI_MINI,
    OpenAIModelType.GPT4_TURBO,
    OpenAIModelType.GPT3_5_TURBO,
    OpenAIModelType.TEXT_EMBEDDING_3_SMALL,
    OpenAIModelType.DALL_E_3,
  ];
  return getAllOpenAIModels().filter(model =>
    latestModels.includes(model.name as OpenAIModelType)
  );
}

export function getCostEfficientModels(): OpenAIModel[] {
  return getAllOpenAIModels()
    .filter(model => model.inputCostPer1KTokens && model.inputCostPer1KTokens < 0.001)
    .sort((a, b) => (a.inputCostPer1KTokens || 0) - (b.inputCostPer1KTokens || 0));
}

export function getHighContextModels(): OpenAIModel[] {
  return getAllOpenAIModels()
    .filter(model => model.contextLength && model.contextLength >= 128000)
    .sort((a, b) => (b.contextLength || 0) - (a.contextLength || 0));
}

// Cost estimation helper
export interface CostEstimate {
  inputTokens: number;
  outputTokens: number;
  inputCost: number;
  outputCost: number;
  totalCost: number;
}

export function estimateCost(
  model: OpenAIModel,
  inputTokens: number,
  outputTokens: number = 0
): CostEstimate {
  const inputCost = ((model.inputCostPer1KTokens || 0) / 1000) * inputTokens;
  const outputCost = ((model.outputCostPer1KTokens || 0) / 1000) * outputTokens;
  return {
    inputTokens,
    outputTokens,
    inputCost,
    outputCost,
    totalCost: inputCost + outputCost
  };
}

// Model suggestion
export function suggestModel(
  requirements: {
    taskType: 'chat' | 'completion' | 'embedding' | 'image' | 'audio';
    budget?: number;
    contextLength?: number;
    features?: string[];
  }
): OpenAIModel[] {
  let candidates = getAllOpenAIModels();
  // Filter by task type
  switch (requirements.taskType) {
    case 'chat':
      candidates = candidates.filter(m => m.capabilities.includes('chat'));
      break;
    case 'completion':
      candidates = candidates.filter(m => m.capabilities.includes('text-completion'));
      break;
    case 'embedding':
      candidates = candidates.filter(m => m.capabilities.includes('embeddings'));
      break;
    case 'image':
      candidates = candidates.filter(m =>
        m.capabilities.includes('image-generation') ||
        m.capabilities.includes('vision')
      );
      break;
    case 'audio':
      candidates = candidates.filter(m =>
        m.capabilities.includes('speech-recognition') ||
        m.capabilities.includes('speech-synthesis')
      );
      break;
  }
  // Filter by context length
  if (requirements.contextLength) {
    candidates = candidates.filter(m =>
      m.contextLength && m.contextLength >= requirements.contextLength!
    );
  }
  // Filter by feature requirements
  if (requirements.features && requirements.features.length > 0) {
    candidates = candidates.filter(m =>
      requirements.features!.every(feature =>
        m.supportedFeatures?.includes(feature) || m.capabilities.includes(feature)
      )
    );
  }
  // Sort by budget if provided
  if (requirements.budget) {
    candidates.sort((a, b) =>
      (a.inputCostPer1KTokens || 0) - (b.inputCostPer1KTokens || 0)
    );
  }
  return candidates.slice(0, 5); // Return top 5 recommendations
}
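
For context, a minimal usage sketch of the helpers defined in this file (not part of the published package; the import path and token counts are illustrative assumptions):

import {
  OpenAIModelType,
  getOpenAIModel,
  estimateCost,
  suggestModel
} from './openai/model'; // illustrative path, relative to src/

// Look up a model's metadata and estimate the cost of one call.
const gpt4o = getOpenAIModel(OpenAIModelType.GPT4_OMNI);
if (gpt4o) {
  // 1,200 prompt tokens and 300 completion tokens are example numbers.
  const cost = estimateCost(gpt4o, 1200, 300);
  console.log(`${gpt4o.displayName}: $${cost.totalCost.toFixed(4)}`); // $0.0105
}

// Ask for chat models with a 128K context that support JSON mode;
// suggestModel returns at most five candidates.
const picks = suggestModel({
  taskType: 'chat',
  contextLength: 128000,
  features: ['json-mode']
});
console.log(picks.map(m => m.displayName));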
package/src/types.ts ADDED
package/tsconfig.json ADDED
@@ -0,0 +1,26 @@
{
  "compilerOptions": {
    "target": "ES2020",
    "module": "commonjs",
    "lib": ["ES2020"],
    "outDir": "./dist",
    "rootDir": "./src",
    "strict": true,
    "esModuleInterop": true,
    "skipLibCheck": true,
    "forceConsistentCasingInFileNames": true,
    "moduleResolution": "node",
    "baseUrl": ".",
    "paths": {
      "@/*": ["src/*"]
    },
    "declaration": true,
    "declarationMap": true,
    "sourceMap": true,
    "removeComments": false,
    "experimentalDecorators": true,
    "emitDecoratorMetadata": true
  },
  "include": ["src/**/*"],
  "exclude": ["node_modules", "dist", "**/*.test.ts", "**/*.spec.ts"]
}
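
One caveat on the config above: TypeScript's "paths" mapping only affects compile-time resolution; with "module": "commonjs" the emitted JavaScript still contains the literal "@/..." specifier, so runtime resolution needs a bundler or a register hook such as the tsconfig-paths package. A sketch of what the alias permits in source (the usage itself is illustrative):

// Resolved to src/openai/model.ts via the "@/*" path alias at compile time.
import { OpenAIModelType } from '@/openai/model';

console.log(OpenAIModelType.GPT4_OMNI); // 'gpt-4o'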