@cogitator-ai/models 1.1.0 → 3.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/README.md +477 -24
  2. package/package.json +2 -2
package/README.md CHANGED
@@ -1,6 +1,6 @@
  # @cogitator-ai/models

- Dynamic model registry with pricing information for Cogitator. Fetches up-to-date model data from LiteLLM.
+ Dynamic model registry with pricing information for Cogitator. Fetches up-to-date model data from LiteLLM and provides built-in fallbacks for major providers.

  ## Installation

@@ -8,51 +8,504 @@ Dynamic model registry with pricing information for Cogitator. Fetches up-to-dat
  pnpm add @cogitator-ai/models
  ```

- ## Usage
+ ## Features

- ### Get Model Information
+ - **Dynamic Data** - Fetches the latest model info from LiteLLM
+ - **Pricing Information** - Input/output costs per million tokens
+ - **Capability Tracking** - Vision, tools, streaming, and JSON mode support
+ - **Multi-Provider** - OpenAI, Anthropic, Google, Ollama, Azure, AWS, and more
+ - **Caching** - Memory or file-based cache with configurable TTL
+ - **Fallback** - Built-in models when external data is unavailable
+ - **Filtering** - Query models by provider, capability, or price
+
+ ---
+
+ ## Quick Start

  ```typescript
- import { getModel, getPrice, listModels } from '@cogitator-ai/models';
+ import { initializeModels, getModel, getPrice, listModels } from '@cogitator-ai/models';
+
+ await initializeModels();

- // Get model details
- const model = await getModel('gpt-4o');
- console.log(model.contextWindow); // 128000
+ const model = getModel('gpt-4o');
+ console.log(model?.contextWindow);
+ console.log(model?.capabilities?.supportsVision);

- // Get pricing
- const price = await getPrice('claude-sonnet-4-20250514');
- console.log(price.inputPerMillion); // $3.00
- console.log(price.outputPerMillion); // $15.00
+ const price = getPrice('claude-sonnet-4-20250514');
+ console.log(`Input: $${price?.input}/M tokens`);
+ console.log(`Output: $${price?.output}/M tokens`);

- // List all models
- const models = await listModels({
+ const toolModels = listModels({
+   supportsTools: true,
    provider: 'openai',
-   hasToolCalling: true,
  });
  ```

- ### Built-in Providers
+ ---

- - **OpenAI**: GPT-4o, GPT-4o Mini, o1, o3-mini
- - **Anthropic**: Claude Sonnet 4, Claude 3.5 Sonnet/Haiku, Claude 3 Opus
- - **Google**: Gemini 2.5 Pro/Flash, Gemini 2.0 Flash, Gemini 1.5 Pro/Flash
+ ## Model Registry

- ### Caching
+ The `ModelRegistry` class manages model data with caching and auto-refresh.

- Model data is cached for 24 hours with automatic refresh:
+ ### Initialization

  ```typescript
  import { ModelRegistry } from '@cogitator-ai/models';

  const registry = new ModelRegistry({
-   cacheTtl: 3600 * 1000, // 1 hour
-   cacheDir: './cache',
+   cache: {
+     ttl: 24 * 60 * 60 * 1000,
+     storage: 'file',
+     filePath: './cache/models.json',
+   },
+   autoRefresh: true,
+   refreshInterval: 24 * 60 * 60 * 1000,
+   fallbackToBuiltin: true,
+ });
+
+ await registry.initialize();
+ ```
+
+ ### Configuration Options
+
+ ```typescript
+ interface RegistryOptions {
+   cache?: CacheOptions;
+   autoRefresh?: boolean;
+   refreshInterval?: number;
+   fallbackToBuiltin?: boolean;
+ }
+
+ interface CacheOptions {
+   ttl: number;
+   storage: 'memory' | 'file';
+   filePath?: string;
+ }
+ ```
+
+ | Option              | Default    | Description                          |
+ | ------------------- | ---------- | ------------------------------------ |
+ | `cache.ttl`         | 24 hours   | Cache time-to-live in milliseconds   |
+ | `cache.storage`     | `'memory'` | Storage backend                      |
+ | `cache.filePath`    | -          | File path for file-based cache       |
+ | `autoRefresh`       | `false`    | Enable automatic background refresh  |
+ | `refreshInterval`   | 24 hours   | Refresh interval in milliseconds     |
+ | `fallbackToBuiltin` | `true`     | Use built-in models on fetch failure |
+
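+ For example, since every `RegistryOptions` field is optional, a registry can rely entirely on the defaults above (in-memory cache, 24-hour TTL, no background refresh, fallback to built-in models). A minimal sketch:
+
+ ```typescript
+ import { ModelRegistry } from '@cogitator-ai/models';
+
+ // No options set: the defaults from the table above apply.
+ const registry = new ModelRegistry({});
+ await registry.initialize();
+
+ console.log(registry.getModelCount());
+ ```
+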
+ ### Registry Methods
+
+ ```typescript
+ await registry.initialize();
+
+ const model = registry.getModel('gpt-4o');
+
+ const price = registry.getPrice('claude-3-5-sonnet-20241022');
+
+ const models = registry.listModels({
+   provider: 'anthropic',
+   supportsVision: true,
+ });
+
+ const providers = registry.listProviders();
+
+ const provider = registry.getProvider('openai');
+
+ console.log(registry.getModelCount());
+ console.log(registry.isInitialized());
+
+ // Force an immediate refresh of model data
+ await registry.refresh();
+
+ // Stop background auto-refresh and shut the registry down
+ registry.shutdown();
+ ```
+
+ ---
+
+ ## Global Functions
+
+ For convenience, the package provides global functions that use a default registry:
+
+ ```typescript
+ import {
+   initializeModels,
+   getModel,
+   getPrice,
+   listModels,
+   getModelRegistry,
+   shutdownModels,
+ } from '@cogitator-ai/models';
+
+ await initializeModels();
+
+ const model = getModel('gpt-4o-mini');
+ const price = getPrice('gpt-4o-mini');
+ const allModels = listModels();
+
+ const registry = getModelRegistry();
+ const count = registry.getModelCount();
+
+ shutdownModels();
+ ```
+
+ ---
+
+ ## Model Information
+
+ ### ModelInfo Type
+
+ ```typescript
+ interface ModelInfo {
+   id: string;
+   provider: string;
+   displayName: string;
+   pricing: ModelPricing;
+   contextWindow: number;
+   maxOutputTokens?: number;
+   capabilities?: ModelCapabilities;
+   deprecated?: boolean;
+   aliases?: string[];
+ }
+
+ interface ModelPricing {
+   input: number; // USD per million input tokens
+   output: number; // USD per million output tokens
+   inputCached?: number;
+   outputCached?: number;
+ }
+
+ interface ModelCapabilities {
+   supportsVision?: boolean;
+   supportsTools?: boolean;
+   supportsFunctions?: boolean;
+   supportsStreaming?: boolean;
+   supportsJson?: boolean;
+ }
+ ```
+
+ ### Example Model
+
+ ```typescript
+ const model = getModel('gpt-4o');
+ // {
+ //   id: 'gpt-4o',
+ //   provider: 'openai',
+ //   displayName: 'GPT-4o',
+ //   pricing: { input: 2.5, output: 10 },
+ //   contextWindow: 128000,
+ //   maxOutputTokens: 16384,
+ //   capabilities: {
+ //     supportsVision: true,
+ //     supportsTools: true,
+ //     supportsStreaming: true,
+ //     supportsJson: true,
+ //   }
+ // }
+ ```
+
+ ---
+
+ ## Filtering Models
+
+ Pass a `ModelFilter` to `listModels` to narrow results:
+
+ ```typescript
+ interface ModelFilter {
+   provider?: string;
+   supportsTools?: boolean;
+   supportsVision?: boolean;
+   minContextWindow?: number;
+   maxPricePerMillion?: number;
+   excludeDeprecated?: boolean;
+ }
+ ```
+
+ ### Filter Examples
+
+ ```typescript
+ const openaiModels = listModels({
+   provider: 'openai',
+ });
+
+ const visionModels = listModels({
+   supportsVision: true,
+ });
+
+ const toolModels = listModels({
+   supportsTools: true,
+   excludeDeprecated: true,
+ });
+
+ const largeContext = listModels({
+   minContextWindow: 100000,
+ });
+
+ const cheapModels = listModels({
+   maxPricePerMillion: 1.0,
+ });
+
+ const anthropicVision = listModels({
+   provider: 'anthropic',
+   supportsVision: true,
+   supportsTools: true,
+ });
+ ```
+
+ ---
+
+ ## Providers
+
+ ### Built-in Providers
+
+ ```typescript
+ import { BUILTIN_PROVIDERS } from '@cogitator-ai/models';
+ ```
+
+ | Provider     | Website                |
+ | ------------ | ---------------------- |
+ | OpenAI       | openai.com             |
+ | Anthropic    | anthropic.com          |
+ | Google       | ai.google.dev          |
+ | Ollama       | ollama.com             |
+ | Azure OpenAI | azure.microsoft.com    |
+ | AWS Bedrock  | aws.amazon.com/bedrock |
+ | Mistral AI   | mistral.ai             |
+ | Cohere       | cohere.com             |
+ | Groq         | groq.com               |
+ | Together AI  | together.ai            |
+ | Fireworks AI | fireworks.ai           |
+ | DeepInfra    | deepinfra.com          |
+ | Perplexity   | perplexity.ai          |
+ | Replicate    | replicate.com          |
+ | xAI          | x.ai                   |
+
+ ### Provider Information
+
+ ```typescript
+ interface ProviderInfo {
+   id: string;
+   name: string;
+   website?: string;
+   models: string[];
+ }
+
+ const providers = registry.listProviders();
+ const openai = registry.getProvider('openai');
+ console.log(openai?.models.length);
+ ```
+
+ ---
+
+ ## Built-in Models
+
+ Fallback models are available when LiteLLM data cannot be fetched (a usage sketch follows the lists below):
+
+ ```typescript
+ import {
+   BUILTIN_MODELS,
+   OPENAI_MODELS,
+   ANTHROPIC_MODELS,
+   GOOGLE_MODELS,
+ } from '@cogitator-ai/models';
+ ```
+
+ ### OpenAI Models
+
+ - gpt-4o
+ - gpt-4o-mini
+ - o1
+ - o1-mini
+ - o3-mini
+
+ ### Anthropic Models
+
+ - claude-sonnet-4-20250514
+ - claude-3-5-sonnet-20241022
+ - claude-3-5-haiku-20241022
+ - claude-3-opus-20240229
+
+ ### Google Models
+
+ - gemini-2.5-pro
+ - gemini-2.5-flash
+ - gemini-2.0-flash
+ - gemini-1.5-pro
+ - gemini-1.5-flash
+
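+ These lists are what the registry falls back to when `fallbackToBuiltin` is enabled (the default) and the LiteLLM fetch fails, so the ids above stay resolvable even without network access. A minimal sketch of that path:
+
+ ```typescript
+ import { ModelRegistry } from '@cogitator-ai/models';
+
+ // If the LiteLLM fetch fails, initialize() falls back to the built-in models above.
+ const registry = new ModelRegistry({ fallbackToBuiltin: true });
+ await registry.initialize();
+
+ // Built-in entries such as 'gpt-4o' or 'claude-3-5-haiku-20241022' are still available.
+ console.log(registry.getModel('gpt-4o')?.pricing);
+ ```
+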
+ ---
+
+ ## Caching
+
+ ### Memory Cache
+
+ ```typescript
+ const registry = new ModelRegistry({
+   cache: {
+     ttl: 60 * 60 * 1000,
+     storage: 'memory',
+   },
+ });
+ ```
+
+ ### File Cache
+
+ ```typescript
+ const registry = new ModelRegistry({
+   cache: {
+     ttl: 24 * 60 * 60 * 1000,
+     storage: 'file',
+     filePath: './cache/models.json',
+   },
+ });
+ ```
+
+ ### ModelCache Class
+
+ ```typescript
+ import { ModelCache } from '@cogitator-ai/models';
+
+ const cache = new ModelCache({
+   ttl: 3600000,
+   storage: 'file',
+   filePath: './models-cache.json',
+ });
+
+ // Read cached models (subject to the TTL)
+ const models = await cache.get();
+
+ // Write models to the cache
+ await cache.set(models);
+
+ // Read the last cached data even after the TTL has expired
+ const staleData = await cache.getStale();
+ ```
+
+ ---
+
+ ## Data Fetching
+
+ ### LiteLLM Integration
+
+ ```typescript
+ import { fetchLiteLLMData, transformLiteLLMData } from '@cogitator-ai/models';
+
+ // Download the raw LiteLLM model data
+ const rawData = await fetchLiteLLMData();
+
+ // Convert LiteLLM entries into ModelInfo records
+ const models = transformLiteLLMData(rawData);
+ ```
+
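+ These helpers pair naturally with `ModelCache` above for a manual refresh flow: serve fresh cached data, re-fetch when the cache has expired, and fall back to stale data if the fetch fails. A sketch (assuming `cache.get()` returns nothing once the TTL has expired):
+
+ ```typescript
+ import { ModelCache, fetchLiteLLMData, transformLiteLLMData } from '@cogitator-ai/models';
+
+ async function loadModels(cache: ModelCache) {
+   // Fresh cache hit: use it as-is.
+   const cached = await cache.get();
+   if (cached) return cached;
+
+   try {
+     // Expired or empty cache: fetch and transform the latest LiteLLM data.
+     const models = transformLiteLLMData(await fetchLiteLLMData());
+     await cache.set(models);
+     return models;
+   } catch {
+     // Fetch failure: fall back to whatever was cached last, even if stale.
+     return cache.getStale();
+   }
+ }
+ ```
+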
+ ### LiteLLM Data Structure
+
+ ```typescript
+ interface LiteLLMModelEntry {
+   max_tokens?: number;
+   max_input_tokens?: number;
+   max_output_tokens?: number;
+   input_cost_per_token?: number;
+   output_cost_per_token?: number;
+   litellm_provider?: string;
+   supports_function_calling?: boolean;
+   supports_vision?: boolean;
+   deprecation_date?: string;
+ }
+ ```
+
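+ LiteLLM reports costs per token, whereas `ModelPricing` is expressed per million tokens, so the transform scales prices accordingly. A simplified sketch of that mapping (not the library's exact implementation):
+
+ ```typescript
+ function toPricing(entry: LiteLLMModelEntry): ModelPricing {
+   // Per-token costs scaled to per-million-token prices.
+   return {
+     input: (entry.input_cost_per_token ?? 0) * 1_000_000,
+     output: (entry.output_cost_per_token ?? 0) * 1_000_000,
+   };
+ }
+ ```
+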
+ ---
+
+ ## Examples
+
+ ### Cost Calculator
+
+ ```typescript
+ import { getPrice } from '@cogitator-ai/models';
+
+ function calculateCost(modelId: string, inputTokens: number, outputTokens: number): number | null {
+   const price = getPrice(modelId);
+   if (!price) return null;
+
+   const inputCost = (inputTokens / 1_000_000) * price.input;
+   const outputCost = (outputTokens / 1_000_000) * price.output;
+
+   return inputCost + outputCost;
+ }
+
+ const cost = calculateCost('gpt-4o', 10000, 2000);
+ console.log(`Cost: $${cost?.toFixed(4)}`);
+ ```
+
+ ### Model Selector
+
+ ```typescript
+ import { listModels } from '@cogitator-ai/models';
+
+ function selectBestModel(options: {
+   needsVision?: boolean;
+   needsTools?: boolean;
+   maxCost?: number;
+   minContext?: number;
+ }): string | null {
+   const models = listModels({
+     supportsVision: options.needsVision,
+     supportsTools: options.needsTools,
+     maxPricePerMillion: options.maxCost,
+     minContextWindow: options.minContext,
+     excludeDeprecated: true,
+   });
+
+   if (models.length === 0) return null;
+
+   // Sort by the average of input/output price, cheapest first
+   models.sort((a, b) => {
+     const aPrice = (a.pricing.input + a.pricing.output) / 2;
+     const bPrice = (b.pricing.input + b.pricing.output) / 2;
+     return aPrice - bPrice;
+   });
+
+   return models[0].id;
+ }
+
+ const cheapTool = selectBestModel({
+   needsTools: true,
+   maxCost: 2.0,
  });
  ```

- ## Documentation
+ ### Provider Dashboard
+
+ ```typescript
+ import { getModelRegistry, initializeModels } from '@cogitator-ai/models';
+
+ async function showDashboard() {
+   await initializeModels();
+   const registry = getModelRegistry();
+
+   console.log(`Total models: ${registry.getModelCount()}`);
+   console.log();
+
+   for (const provider of registry.listProviders()) {
+     const models = registry.listModels({ provider: provider.id });
+     console.log(`${provider.name}: ${models.length} models`);
+
+     // Skip the average for providers with no models to avoid dividing by zero
+     if (models.length === 0) continue;
+
+     const avgPrice =
+       models.reduce((sum, m) => sum + (m.pricing.input + m.pricing.output) / 2, 0) / models.length;
+     console.log(` Avg price: $${avgPrice.toFixed(2)}/M tokens`);
+   }
+ }
+ ```
+
+ ---
+
+ ## Type Reference
+
+ ```typescript
+ import type {
+   ModelInfo,
+   ModelPricing,
+   ModelCapabilities,
+   ModelFilter,
+   ProviderInfo,
+   CacheOptions,
+   RegistryOptions,
+   LiteLLMModelEntry,
+   LiteLLMModelData,
+ } from '@cogitator-ai/models';
+ ```

- See the [Cogitator documentation](https://github.com/eL1fe/cogitator) for full API reference.
+ ---

  ## License

package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@cogitator-ai/models",
-   "version": "1.1.0",
+   "version": "3.0.0",
    "description": "Dynamic model registry with pricing for Cogitator",
    "type": "module",
    "main": "./dist/index.js",
@@ -23,7 +23,7 @@
      "vitest": "^2.1.8"
    },
    "peerDependencies": {
-     "@cogitator-ai/types": "0.3.1"
+     "@cogitator-ai/types": "0.5.0"
    },
    "repository": {
      "type": "git",