aimodels 0.1.2 → 0.2.1

This diff shows the changes between publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
package/README.md CHANGED
@@ -17,36 +17,76 @@ import { models } from 'aimodels';
  console.log(models.all);
 
  // Get list of all providers
- console.log(models.providers); // ['openai', 'anthropic', 'meta', ...]
+ console.log(models.providers); // ['openai', 'anthropic', 'mistral', ...]
+
+ // Get list of model creators
+ console.log(models.creators); // ['meta', 'mistral', ...]
 
  // Get models from a specific provider
- const openAiModels = models.from('openai');
+ const openAiModels = models.fromProvider('openai');
+
+ // Get models from a specific creator
+ const metaModels = models.fromCreator('meta');
 
  // Find a specific model
- const model = models.find('deepseek-r1');
- console.log(model.contextWindow);
- console.log(model.providers);
- console.log(model.pricing.input);
+ const model = models.find('gpt-4');
+ console.log(model?.context.total); // Context window size
+ console.log(model?.providers); // ['openai']
+
+ // Get model pricing for a specific provider
+ const price = models.getPrice('gpt-4', 'openai');
+ console.log(price); // { type: 'token', input: 0.03, output: 0.06 }
 
- // Filter models by capability
- const chatModels = models.withCapability('chat');
+ // Filter models by capabilities
+ const chatModels = models.can('chat');
+ const multimodalModels = models.can('chat', 'img-in');
 
  // Filter by context window
  const largeContextModels = models.withMinContext(32768);
-
- // Filter by maximum price
- const affordableModels = models.withMaxPrice(0.01); // Max $0.01 per 1K tokens
  ```
 
  ## Features
 
- - Comprehensive database of AI models
+ - Comprehensive database of AI models from major providers (OpenAI, Anthropic, Mistral, etc.)
  - Normalized data structure for easy comparison
- - Regular updates with new models
- - TypeScript support
+ - Model capabilities (chat, img-in, img-out, function-out, etc.)
+ - Context window information
+ - Pricing information per provider
+ - Creator and provider associations
+ - TypeScript support with full type safety
  - Zero dependencies
  - Universal JavaScript support (Node.js, browsers, Deno)
+ - Regular updates with new models
+
+
+ ## Types
 
+ ```typescript
+ interface Model {
+ id: string; // Unique model identifier
+ name: string; // Display name
+ can: string[]; // Capabilities (chat, img-in, img-out, etc.)
+ providers: string[]; // Available providers
+ context: {
+ total: number; // Total context window size
+ };
+ license: string; // License or creator
+ }
+
+ type ModelPrice =
+ | { type: 'token'; input: number; output: number } // Price per 1K tokens
+ | { type: 'image'; price: number; size: string } // Price per image
+ | { type: 'character'; price: number } // Price per character
+ | { type: 'minute'; price: number }; // Price per minute
+
+ interface Provider {
+ id: string; // Provider identifier
+ name: string; // Display name
+ websiteUrl: string; // Provider's website
+ apiUrl: string; // API documentation URL
+ models: Record<string, ModelPrice>; // Model pricing
+ }
+ ```
 
  ## License
 
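Read together, the README hunks above amount to an API rename between 0.1.x and 0.2.x. A minimal migration sketch, based only on the examples above (the `gpt-4` ID is taken from the README sample and may not be present in the shipped data):

```typescript
import { models } from 'aimodels';

// 0.1.x                          ->  0.2.x
// models.from('openai')          ->  models.fromProvider('openai')
// models.withCapability('chat')  ->  models.can('chat')
// model.contextWindow            ->  model.context.total
// model.pricing.input            ->  models.getPrice(id, provider)
// models.withMaxPrice(0.01)      ->  no longer documented; compare getPrice() results instead

const model = models.find('gpt-4');          // Model | undefined
console.log(model?.context.total);           // context window in tokens

const price = models.getPrice('gpt-4', 'openai');
if (price?.type === 'token') {
  console.log(price.input, price.output);    // per 1K tokens, per the README comment
}
```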
package/dist/index.d.mts CHANGED
@@ -7,6 +7,18 @@ interface ModelPrice {
  type: 'token';
  }
 
+ /**
+ * Defines all possible model capabilities
+ */
+ type Capability = "chat" | "reason" | "text-in" | "text-out" | "img-in" | "img-out" | "sound-in" | "sound-out" | "json-out" | "function-out" | "vectors-out";
+
+ declare class ModelCollection extends Array<Model> {
+ constructor(models: Model[]);
+ can(...capabilities: Capability[]): ModelCollection;
+ know(...languages: string[]): ModelCollection;
+ filter(predicate: (value: Model, index: number, array: Model[]) => boolean): ModelCollection;
+ slice(start?: number, end?: number): ModelCollection;
+ }
  interface ModelContext {
  /** Maximum total tokens (input + output) */
  total: number;
@@ -23,27 +35,29 @@ interface Model {
  /** List of providers that can serve this model */
  providers: string[];
  /** Model capabilities */
- can: string[];
+ can: Capability[];
+ /** Languages the model knows */
+ languages?: string[];
  /** Context window information */
  context: ModelContext;
  }
- interface ModelCollection {
+ interface ModelsAPI {
  /** All available models */
- all: Model[];
+ all: ModelCollection;
  /** List of all creators */
  creators: string[];
  /** List of all providers */
  providers: string[];
  /** Get models from a specific creator */
- fromCreator(creator: string): Model[];
+ fromCreator(creator: string): ModelCollection;
  /** Get models from a specific provider */
- fromProvider(provider: string): Model[];
+ fromProvider(provider: string): ModelCollection;
  /** Find a specific model by ID */
  find(id: string): Model | undefined;
  /** Filter models by one or more capabilities (all must be present) */
- can(...capabilities: string[]): Model[];
+ can(...capabilities: string[]): ModelCollection;
  /** Filter models by minimum context window */
- withMinContext(tokens: number): Model[];
+ withMinContext(tokens: number): ModelCollection;
  /** Get pricing for a model from a specific provider */
  getPrice(modelId: string, provider: string): ModelPrice | undefined;
  }
@@ -70,6 +84,6 @@ var creators$1 = {
  creators: creators
  };
 
- declare const models: ModelCollection;
+ declare const models: ModelsAPI;
 
  export { creators$1 as creators, models };
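The new declarations type capabilities as a closed `Capability` union and make every filter return a `ModelCollection`, an `Array<Model>` subclass, instead of a plain `Model[]`. A small sketch written against these declarations only (the chained `know()` call is left in a comment because no language data is visible in this diff):

```typescript
import { models } from 'aimodels';

// ModelsAPI.can() narrows by capability; the Capability union
// ("chat" | "reason" | "img-in" | ...) keeps the arguments checked.
const visionChat = models.can('chat', 'img-in');

// The result is declared as ModelCollection (an Array<Model> subclass),
// so ordinary iteration and array access still work.
for (const m of visionChat) {
  console.log(m.id, m.providers, m.context.total);
}

// Per the declarations, ModelCollection itself chains further, e.g.
// visionChat.know('es') to narrow by language ('es' is an illustrative tag).
```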
package/dist/index.d.ts CHANGED
@@ -7,6 +7,18 @@ interface ModelPrice {
  type: 'token';
  }
 
+ /**
+ * Defines all possible model capabilities
+ */
+ type Capability = "chat" | "reason" | "text-in" | "text-out" | "img-in" | "img-out" | "sound-in" | "sound-out" | "json-out" | "function-out" | "vectors-out";
+
+ declare class ModelCollection extends Array<Model> {
+ constructor(models: Model[]);
+ can(...capabilities: Capability[]): ModelCollection;
+ know(...languages: string[]): ModelCollection;
+ filter(predicate: (value: Model, index: number, array: Model[]) => boolean): ModelCollection;
+ slice(start?: number, end?: number): ModelCollection;
+ }
  interface ModelContext {
  /** Maximum total tokens (input + output) */
  total: number;
@@ -23,27 +35,29 @@ interface Model {
  /** List of providers that can serve this model */
  providers: string[];
  /** Model capabilities */
- can: string[];
+ can: Capability[];
+ /** Languages the model knows */
+ languages?: string[];
  /** Context window information */
  context: ModelContext;
  }
- interface ModelCollection {
+ interface ModelsAPI {
  /** All available models */
- all: Model[];
+ all: ModelCollection;
  /** List of all creators */
  creators: string[];
  /** List of all providers */
  providers: string[];
  /** Get models from a specific creator */
- fromCreator(creator: string): Model[];
+ fromCreator(creator: string): ModelCollection;
  /** Get models from a specific provider */
- fromProvider(provider: string): Model[];
+ fromProvider(provider: string): ModelCollection;
  /** Find a specific model by ID */
  find(id: string): Model | undefined;
  /** Filter models by one or more capabilities (all must be present) */
- can(...capabilities: string[]): Model[];
+ can(...capabilities: string[]): ModelCollection;
  /** Filter models by minimum context window */
- withMinContext(tokens: number): Model[];
+ withMinContext(tokens: number): ModelCollection;
  /** Get pricing for a model from a specific provider */
  getPrice(modelId: string, provider: string): ModelPrice | undefined;
  }
@@ -70,6 +84,6 @@ var creators$1 = {
  creators: creators
  };
 
- declare const models: ModelCollection;
+ declare const models: ModelsAPI;
 
  export { creators$1 as creators, models };
package/dist/index.js CHANGED
@@ -25,15 +25,46 @@ __export(index_exports, {
  });
  module.exports = __toCommonJS(index_exports);
 
- // src/data/models/openai.json
- var openai_default = {
+ // src/types/models.ts
+ var ModelCollection = class _ModelCollection extends Array {
+ constructor(models2) {
+ super(...models2);
+ Object.setPrototypeOf(this, _ModelCollection.prototype);
+ }
+ can(...capabilities) {
+ return new _ModelCollection(
+ this.filter((model) => capabilities.every((cap) => model.can.includes(cap)))
+ );
+ }
+ know(...languages) {
+ return new _ModelCollection(
+ this.filter((model) => languages.every((lang) => model.languages?.includes(lang)))
+ );
+ }
+ // Override array methods to return ModelCollection
+ filter(predicate) {
+ return new _ModelCollection(super.filter(predicate));
+ }
+ slice(start, end) {
+ return new _ModelCollection(super.slice(start, end));
+ }
+ };
+
+ // src/data/models/openai-models.json
+ var openai_models_default = {
  models: [
  {
  id: "whisper-1",
  name: "Whisper-1",
  license: "proprietary",
- providers: ["openai"],
- can: ["sound-in", "text-out"],
+ providers: [
+ "openai",
+ "azure"
+ ],
+ can: [
+ "sound-in",
+ "text-out"
+ ],
  context: {
  total: null,
  maxOutput: null
@@ -43,8 +74,14 @@ var openai_default = {
  id: "tts-1",
  name: "TTS-1",
  license: "proprietary",
- providers: ["openai"],
- can: ["text-in", "sound-out"],
+ providers: [
+ "openai",
+ "azure"
+ ],
+ can: [
+ "text-in",
+ "sound-out"
+ ],
  context: {
  total: null,
  maxOutput: null
@@ -54,8 +91,14 @@ var openai_default = {
  id: "tts-1-hd",
  name: "TTS-1 HD",
  license: "proprietary",
- providers: ["openai"],
- can: ["text-in", "sound-out"],
+ providers: [
+ "openai",
+ "azure"
+ ],
+ can: [
+ "text-in",
+ "sound-out"
+ ],
  context: {
  total: null,
  maxOutput: null
@@ -65,8 +108,16 @@ var openai_default = {
  id: "gpt-4o",
  name: "GPT-4O",
  license: "proprietary",
- providers: ["openai"],
- can: ["chat", "img-in", "json-out", "function-out"],
+ providers: [
+ "openai",
+ "azure"
+ ],
+ can: [
+ "chat",
+ "img-in",
+ "json-out",
+ "function-out"
+ ],
  context: {
  total: 128e3,
  maxOutput: 16384
@@ -76,8 +127,16 @@ var openai_default = {
  id: "gpt-4o-mini",
  name: "GPT-4O Mini",
  license: "proprietary",
- providers: ["openai"],
- can: ["chat", "img-in", "json-out", "function-out"],
+ providers: [
+ "openai",
+ "azure"
+ ],
+ can: [
+ "chat",
+ "img-in",
+ "json-out",
+ "function-out"
+ ],
  context: {
  total: 128e3,
  maxOutput: 16384
@@ -87,8 +146,17 @@ var openai_default = {
  id: "o1",
  name: "OpenAI O1",
  license: "proprietary",
- providers: ["openai"],
- can: ["chat", "img-in", "json-out", "function-out"],
+ providers: [
+ "openai",
+ "azure"
+ ],
+ can: [
+ "chat",
+ "img-in",
+ "json-out",
+ "function-out",
+ "reason"
+ ],
  context: {
  total: 2e5,
  maxOutput: 1e5
@@ -98,8 +166,16 @@ var openai_default = {
  id: "o1-mini",
  name: "OpenAI O1 Mini",
  license: "proprietary",
- providers: ["openai"],
- can: ["chat", "json-out", "function-out"],
+ providers: [
+ "openai",
+ "azure"
+ ],
+ can: [
+ "chat",
+ "json-out",
+ "function-out",
+ "reason"
+ ],
  context: {
  total: 128e3,
  maxOutput: 65536
@@ -109,8 +185,16 @@ var openai_default = {
  id: "o3-mini",
  name: "OpenAI O3 Mini",
  license: "proprietary",
- providers: ["openai"],
- can: ["chat", "json-out", "function-out"],
+ providers: [
+ "openai",
+ "azure"
+ ],
+ can: [
+ "chat",
+ "json-out",
+ "function-out",
+ "reason"
+ ],
  context: {
  total: 2e5,
  maxOutput: 1e5
@@ -120,8 +204,16 @@ var openai_default = {
  id: "gpt-4o-audio-preview",
  name: "GPT-4O Audio",
  license: "proprietary",
- providers: ["openai"],
- can: ["chat", "sound-in", "json-out", "function-out"],
+ providers: [
+ "openai",
+ "azure"
+ ],
+ can: [
+ "chat",
+ "sound-in",
+ "json-out",
+ "function-out"
+ ],
  context: {
  total: 128e3,
  maxOutput: 16384
@@ -131,8 +223,16 @@ var openai_default = {
  id: "gpt-4o-realtime-preview",
  name: "GPT-4O Realtime",
  license: "proprietary",
- providers: ["openai"],
- can: ["chat", "sound-in", "json-out", "function-out"],
+ providers: [
+ "openai",
+ "azure"
+ ],
+ can: [
+ "chat",
+ "sound-in",
+ "json-out",
+ "function-out"
+ ],
  context: {
  total: 128e3,
  maxOutput: 4096
@@ -142,8 +242,13 @@ var openai_default = {
  id: "dall-e-3",
  name: "DALL-E 3",
  license: "proprietary",
- providers: ["openai"],
- can: ["img-out"],
+ providers: [
+ "openai",
+ "azure"
+ ],
+ can: [
+ "img-out"
+ ],
  context: {
  maxOutput: 1,
  sizes: [
@@ -160,14 +265,14 @@ var openai_default = {
  ]
  };
 
- // src/data/models/anthropic.json
- var anthropic_default = {
+ // src/data/models/anthropic-models.json
+ var anthropic_models_default = {
  models: [
  {
  id: "claude-3-opus",
  name: "Claude 3 Opus",
  license: "proprietary",
- providers: ["anthropic"],
+ providers: ["anthropic", "aws", "google"],
  can: ["chat", "img-in", "json-out", "function-out"],
  context: {
  total: 2e5,
@@ -178,7 +283,7 @@ var anthropic_default = {
  id: "claude-3-sonnet",
  name: "Claude 3 Sonnet",
  license: "proprietary",
- providers: ["anthropic"],
+ providers: ["anthropic", "aws", "google"],
  can: ["chat", "img-in", "json-out", "function-out"],
  context: {
  total: 2e5,
@@ -189,7 +294,7 @@ var anthropic_default = {
  id: "claude-3-haiku",
  name: "Claude 3 Haiku",
  license: "proprietary",
- providers: ["anthropic"],
+ providers: ["anthropic", "aws", "google"],
  can: ["chat", "img-in", "json-out", "function-out"],
  context: {
  total: 2e5,
@@ -199,8 +304,8 @@ var anthropic_default = {
  ]
  };
 
- // src/data/models/meta.json
- var meta_default = {
+ // src/data/models/meta-models.json
+ var meta_models_default = {
  models: [
  {
  id: "llama2-70b-4096",
@@ -216,15 +321,83 @@ var meta_default = {
  ]
  };
 
- // src/data/models/mistral.json
- var mistral_default = {
+ // src/data/models/mistral-models.json
+ var mistral_models_default = {
  models: [
  {
- id: "mixtral-8x7b-32768",
- name: "Mixtral 8x7B",
+ id: "mistral-large-2402",
+ name: "Mistral Large",
+ license: "proprietary",
+ providers: ["mistral", "azure"],
+ can: [
+ "chat",
+ "text-in",
+ "text-out",
+ "json-out",
+ "function-out"
+ ],
+ context: {
+ total: 32768,
+ maxOutput: 4096
+ }
+ },
+ {
+ id: "mistral-small-2402",
+ name: "Mistral Small",
+ license: "proprietary",
+ providers: ["mistral", "azure"],
+ can: [
+ "chat",
+ "text-in",
+ "text-out",
+ "json-out",
+ "function-out"
+ ],
+ context: {
+ total: 32768,
+ maxOutput: 4096
+ }
+ },
+ {
+ id: "mistral-medium",
+ name: "Mistral Medium",
+ license: "proprietary",
+ providers: ["mistral"],
+ can: [
+ "chat",
+ "text-in",
+ "text-out"
+ ],
+ context: {
+ total: 32768,
+ maxOutput: 4096
+ }
+ },
+ {
+ id: "open-mistral-7b",
+ name: "Open Mistral 7B",
  license: "apache-2.0",
- providers: ["groq"],
- can: ["chat", "json-out", "function-out"],
+ providers: ["mistral", "groq"],
+ can: [
+ "chat",
+ "text-in",
+ "text-out"
+ ],
+ context: {
+ total: 32768,
+ maxOutput: 4096
+ }
+ },
+ {
+ id: "open-mixtral-8x7b",
+ name: "Open Mixtral 8x7B",
+ license: "apache-2.0",
+ providers: ["mistral", "groq"],
+ can: [
+ "chat",
+ "text-in",
+ "text-out"
+ ],
  context: {
  total: 32768,
  maxOutput: 4096
@@ -236,15 +409,15 @@ var mistral_default = {
  // src/builders/models.ts
  function buildAllModels() {
  return [
- ...openai_default.models,
- ...anthropic_default.models,
- ...meta_default.models,
- ...mistral_default.models
+ ...openai_models_default.models,
+ ...anthropic_models_default.models,
+ ...meta_models_default.models,
+ ...mistral_models_default.models
  ];
  }
 
- // src/data/providers/openai.json
- var openai_default2 = {
+ // src/data/providers/openai-provider.json
+ var openai_provider_default = {
  id: "openai",
  name: "OpenAI",
  websiteUrl: "https://openai.com/",
@@ -334,8 +507,8 @@ var openai_default2 = {
  }
  };
 
- // src/data/providers/anthropic.json
- var anthropic_default2 = {
+ // src/data/providers/anthropic-provider.json
+ var anthropic_provider_default = {
  id: "anthropic",
  name: "Anthropic",
  websiteUrl: "https://www.anthropic.com/",
@@ -360,8 +533,8 @@ var anthropic_default2 = {
  }
  };
 
- // src/data/providers/mistral.json
- var mistral_default2 = {
+ // src/data/providers/mistral-provider.json
+ var mistral_provider_default = {
  id: "mistral",
  name: "Mistral",
  websiteUrl: "https://mistral.ai/",
@@ -430,9 +603,9 @@ function validateProvider(raw) {
  }
  function buildAllProviders() {
  return [
- validateProvider(openai_default2),
- validateProvider(anthropic_default2),
- validateProvider(mistral_default2)
+ validateProvider(openai_provider_default),
+ validateProvider(anthropic_provider_default),
+ validateProvider(mistral_provider_default)
  ];
  }
  function buildProvidersData() {
@@ -467,7 +640,7 @@ var creators_default = {
  var allModels = buildAllModels();
  var providersData = buildProvidersData();
  var models = {
- all: allModels,
+ all: new ModelCollection(allModels),
  get creators() {
  return Object.keys(creators_default.creators);
  },
@@ -475,25 +648,33 @@ var models = {
  return providersData.providers.map((p) => p.id);
  },
  fromCreator(creator) {
- return allModels.filter(
- (model) => model.license.startsWith(creator) || // For open source models
- providersData.providers.find((p) => p.id === creator)?.models[model.id]
- // For proprietary models
+ return new ModelCollection(
+ allModels.filter(
+ (model) => model.license.startsWith(creator) || // For open source models
+ providersData.providers.find((p) => p.id === creator)?.models[model.id]
+ // For proprietary models
+ )
  );
  },
  fromProvider(provider) {
- return allModels.filter((model) => model.providers.includes(provider));
+ return new ModelCollection(
+ allModels.filter((model) => model.providers.includes(provider))
+ );
  },
  find(id) {
  return allModels.find((model) => model.id === id);
  },
  can(...capabilities) {
- return allModels.filter(
- (model) => capabilities.every((capability) => model.can.includes(capability))
+ return new ModelCollection(
+ allModels.filter(
+ (model) => capabilities.every((capability) => model.can.includes(capability))
+ )
  );
  },
  withMinContext(tokens) {
- return allModels.filter((model) => model.context.total >= tokens);
+ return new ModelCollection(
+ allModels.filter((model) => model.context.total >= tokens)
+ );
  },
  getPrice(modelId, provider) {
  const providerData = providersData.providers.find((p) => p.id === provider);
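In the rebuilt bundle, `ModelCollection` is a native `Array` subclass whose constructor re-applies the prototype with `Object.setPrototypeOf`, the usual guard for keeping subclass methods reachable when class output is downleveled or a built-in is extended. A standalone sketch of the same pattern, not the package's code (`TypedList` and `where` are invented names, and results are built from a plain array copy to sidestep `Symbol.species` construction):

```typescript
class TypedList<T> extends Array<T> {
  constructor(items: T[] = []) {
    super(...items);
    // Restore the subclass prototype so instance methods stay available
    // even if the constructor runs against a downleveled Array base.
    Object.setPrototypeOf(this, TypedList.prototype);
  }

  // A chainable filter that returns the subclass, in the spirit of
  // ModelCollection.can()/know(); it copies to a plain array first so
  // the result is not species-constructed through this constructor.
  where(predicate: (value: T) => boolean): TypedList<T> {
    return new TypedList(Array.from(this).filter(predicate));
  }
}

const evens = new TypedList([1, 2, 3, 4]).where((n) => n % 2 === 0);
console.log(evens instanceof TypedList, [...evens]); // true [ 2, 4 ]
```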
package/dist/index.mjs CHANGED
@@ -1,12 +1,43 @@
- // src/data/models/openai.json
- var openai_default = {
+ // src/types/models.ts
+ var ModelCollection = class _ModelCollection extends Array {
+ constructor(models2) {
+ super(...models2);
+ Object.setPrototypeOf(this, _ModelCollection.prototype);
+ }
+ can(...capabilities) {
+ return new _ModelCollection(
+ this.filter((model) => capabilities.every((cap) => model.can.includes(cap)))
+ );
+ }
+ know(...languages) {
+ return new _ModelCollection(
+ this.filter((model) => languages.every((lang) => model.languages?.includes(lang)))
+ );
+ }
+ // Override array methods to return ModelCollection
+ filter(predicate) {
+ return new _ModelCollection(super.filter(predicate));
+ }
+ slice(start, end) {
+ return new _ModelCollection(super.slice(start, end));
+ }
+ };
+
+ // src/data/models/openai-models.json
+ var openai_models_default = {
  models: [
  {
  id: "whisper-1",
  name: "Whisper-1",
  license: "proprietary",
- providers: ["openai"],
- can: ["sound-in", "text-out"],
+ providers: [
+ "openai",
+ "azure"
+ ],
+ can: [
+ "sound-in",
+ "text-out"
+ ],
  context: {
  total: null,
  maxOutput: null
@@ -16,8 +47,14 @@ var openai_default = {
  id: "tts-1",
  name: "TTS-1",
  license: "proprietary",
- providers: ["openai"],
- can: ["text-in", "sound-out"],
+ providers: [
+ "openai",
+ "azure"
+ ],
+ can: [
+ "text-in",
+ "sound-out"
+ ],
  context: {
  total: null,
  maxOutput: null
@@ -27,8 +64,14 @@ var openai_default = {
  id: "tts-1-hd",
  name: "TTS-1 HD",
  license: "proprietary",
- providers: ["openai"],
- can: ["text-in", "sound-out"],
+ providers: [
+ "openai",
+ "azure"
+ ],
+ can: [
+ "text-in",
+ "sound-out"
+ ],
  context: {
  total: null,
  maxOutput: null
@@ -38,8 +81,16 @@ var openai_default = {
  id: "gpt-4o",
  name: "GPT-4O",
  license: "proprietary",
- providers: ["openai"],
- can: ["chat", "img-in", "json-out", "function-out"],
+ providers: [
+ "openai",
+ "azure"
+ ],
+ can: [
+ "chat",
+ "img-in",
+ "json-out",
+ "function-out"
+ ],
  context: {
  total: 128e3,
  maxOutput: 16384
@@ -49,8 +100,16 @@ var openai_default = {
  id: "gpt-4o-mini",
  name: "GPT-4O Mini",
  license: "proprietary",
- providers: ["openai"],
- can: ["chat", "img-in", "json-out", "function-out"],
+ providers: [
+ "openai",
+ "azure"
+ ],
+ can: [
+ "chat",
+ "img-in",
+ "json-out",
+ "function-out"
+ ],
  context: {
  total: 128e3,
  maxOutput: 16384
@@ -60,8 +119,17 @@ var openai_default = {
  id: "o1",
  name: "OpenAI O1",
  license: "proprietary",
- providers: ["openai"],
- can: ["chat", "img-in", "json-out", "function-out"],
+ providers: [
+ "openai",
+ "azure"
+ ],
+ can: [
+ "chat",
+ "img-in",
+ "json-out",
+ "function-out",
+ "reason"
+ ],
  context: {
  total: 2e5,
  maxOutput: 1e5
@@ -71,8 +139,16 @@ var openai_default = {
  id: "o1-mini",
  name: "OpenAI O1 Mini",
  license: "proprietary",
- providers: ["openai"],
- can: ["chat", "json-out", "function-out"],
+ providers: [
+ "openai",
+ "azure"
+ ],
+ can: [
+ "chat",
+ "json-out",
+ "function-out",
+ "reason"
+ ],
  context: {
  total: 128e3,
  maxOutput: 65536
@@ -82,8 +158,16 @@ var openai_default = {
  id: "o3-mini",
  name: "OpenAI O3 Mini",
  license: "proprietary",
- providers: ["openai"],
- can: ["chat", "json-out", "function-out"],
+ providers: [
+ "openai",
+ "azure"
+ ],
+ can: [
+ "chat",
+ "json-out",
+ "function-out",
+ "reason"
+ ],
  context: {
  total: 2e5,
  maxOutput: 1e5
@@ -93,8 +177,16 @@ var openai_default = {
  id: "gpt-4o-audio-preview",
  name: "GPT-4O Audio",
  license: "proprietary",
- providers: ["openai"],
- can: ["chat", "sound-in", "json-out", "function-out"],
+ providers: [
+ "openai",
+ "azure"
+ ],
+ can: [
+ "chat",
+ "sound-in",
+ "json-out",
+ "function-out"
+ ],
  context: {
  total: 128e3,
  maxOutput: 16384
@@ -104,8 +196,16 @@ var openai_default = {
  id: "gpt-4o-realtime-preview",
  name: "GPT-4O Realtime",
  license: "proprietary",
- providers: ["openai"],
- can: ["chat", "sound-in", "json-out", "function-out"],
+ providers: [
+ "openai",
+ "azure"
+ ],
+ can: [
+ "chat",
+ "sound-in",
+ "json-out",
+ "function-out"
+ ],
  context: {
  total: 128e3,
  maxOutput: 4096
@@ -115,8 +215,13 @@ var openai_default = {
  id: "dall-e-3",
  name: "DALL-E 3",
  license: "proprietary",
- providers: ["openai"],
- can: ["img-out"],
+ providers: [
+ "openai",
+ "azure"
+ ],
+ can: [
+ "img-out"
+ ],
  context: {
  maxOutput: 1,
  sizes: [
@@ -133,14 +238,14 @@ var openai_default = {
  ]
  };
 
- // src/data/models/anthropic.json
- var anthropic_default = {
+ // src/data/models/anthropic-models.json
+ var anthropic_models_default = {
  models: [
  {
  id: "claude-3-opus",
  name: "Claude 3 Opus",
  license: "proprietary",
- providers: ["anthropic"],
+ providers: ["anthropic", "aws", "google"],
  can: ["chat", "img-in", "json-out", "function-out"],
  context: {
  total: 2e5,
@@ -151,7 +256,7 @@ var anthropic_default = {
  id: "claude-3-sonnet",
  name: "Claude 3 Sonnet",
  license: "proprietary",
- providers: ["anthropic"],
+ providers: ["anthropic", "aws", "google"],
  can: ["chat", "img-in", "json-out", "function-out"],
  context: {
  total: 2e5,
@@ -162,7 +267,7 @@ var anthropic_default = {
  id: "claude-3-haiku",
  name: "Claude 3 Haiku",
  license: "proprietary",
- providers: ["anthropic"],
+ providers: ["anthropic", "aws", "google"],
  can: ["chat", "img-in", "json-out", "function-out"],
  context: {
  total: 2e5,
@@ -172,8 +277,8 @@ var anthropic_default = {
  ]
  };
 
- // src/data/models/meta.json
- var meta_default = {
+ // src/data/models/meta-models.json
+ var meta_models_default = {
  models: [
  {
  id: "llama2-70b-4096",
@@ -189,15 +294,83 @@ var meta_default = {
  ]
  };
 
- // src/data/models/mistral.json
- var mistral_default = {
+ // src/data/models/mistral-models.json
+ var mistral_models_default = {
  models: [
  {
- id: "mixtral-8x7b-32768",
- name: "Mixtral 8x7B",
+ id: "mistral-large-2402",
+ name: "Mistral Large",
+ license: "proprietary",
+ providers: ["mistral", "azure"],
+ can: [
+ "chat",
+ "text-in",
+ "text-out",
+ "json-out",
+ "function-out"
+ ],
+ context: {
+ total: 32768,
+ maxOutput: 4096
+ }
+ },
+ {
+ id: "mistral-small-2402",
+ name: "Mistral Small",
+ license: "proprietary",
+ providers: ["mistral", "azure"],
+ can: [
+ "chat",
+ "text-in",
+ "text-out",
+ "json-out",
+ "function-out"
+ ],
+ context: {
+ total: 32768,
+ maxOutput: 4096
+ }
+ },
+ {
+ id: "mistral-medium",
+ name: "Mistral Medium",
+ license: "proprietary",
+ providers: ["mistral"],
+ can: [
+ "chat",
+ "text-in",
+ "text-out"
+ ],
+ context: {
+ total: 32768,
+ maxOutput: 4096
+ }
+ },
+ {
+ id: "open-mistral-7b",
+ name: "Open Mistral 7B",
  license: "apache-2.0",
- providers: ["groq"],
- can: ["chat", "json-out", "function-out"],
+ providers: ["mistral", "groq"],
+ can: [
+ "chat",
+ "text-in",
+ "text-out"
+ ],
+ context: {
+ total: 32768,
+ maxOutput: 4096
+ }
+ },
+ {
+ id: "open-mixtral-8x7b",
+ name: "Open Mixtral 8x7B",
+ license: "apache-2.0",
+ providers: ["mistral", "groq"],
+ can: [
+ "chat",
+ "text-in",
+ "text-out"
+ ],
  context: {
  total: 32768,
  maxOutput: 4096
@@ -209,15 +382,15 @@ var mistral_default = {
  // src/builders/models.ts
  function buildAllModels() {
  return [
- ...openai_default.models,
- ...anthropic_default.models,
- ...meta_default.models,
- ...mistral_default.models
+ ...openai_models_default.models,
+ ...anthropic_models_default.models,
+ ...meta_models_default.models,
+ ...mistral_models_default.models
  ];
  }
 
- // src/data/providers/openai.json
- var openai_default2 = {
+ // src/data/providers/openai-provider.json
+ var openai_provider_default = {
  id: "openai",
  name: "OpenAI",
  websiteUrl: "https://openai.com/",
@@ -307,8 +480,8 @@ var openai_default2 = {
  }
  };
 
- // src/data/providers/anthropic.json
- var anthropic_default2 = {
+ // src/data/providers/anthropic-provider.json
+ var anthropic_provider_default = {
  id: "anthropic",
  name: "Anthropic",
  websiteUrl: "https://www.anthropic.com/",
@@ -333,8 +506,8 @@ var anthropic_default2 = {
  }
  };
 
- // src/data/providers/mistral.json
- var mistral_default2 = {
+ // src/data/providers/mistral-provider.json
+ var mistral_provider_default = {
  id: "mistral",
  name: "Mistral",
  websiteUrl: "https://mistral.ai/",
@@ -403,9 +576,9 @@ function validateProvider(raw) {
  }
  function buildAllProviders() {
  return [
- validateProvider(openai_default2),
- validateProvider(anthropic_default2),
- validateProvider(mistral_default2)
+ validateProvider(openai_provider_default),
+ validateProvider(anthropic_provider_default),
+ validateProvider(mistral_provider_default)
  ];
  }
  function buildProvidersData() {
@@ -440,7 +613,7 @@ var creators_default = {
  var allModels = buildAllModels();
  var providersData = buildProvidersData();
  var models = {
- all: allModels,
+ all: new ModelCollection(allModels),
  get creators() {
  return Object.keys(creators_default.creators);
  },
@@ -448,25 +621,33 @@ var models = {
  return providersData.providers.map((p) => p.id);
  },
  fromCreator(creator) {
- return allModels.filter(
- (model) => model.license.startsWith(creator) || // For open source models
- providersData.providers.find((p) => p.id === creator)?.models[model.id]
- // For proprietary models
+ return new ModelCollection(
+ allModels.filter(
+ (model) => model.license.startsWith(creator) || // For open source models
+ providersData.providers.find((p) => p.id === creator)?.models[model.id]
+ // For proprietary models
+ )
  );
  },
  fromProvider(provider) {
- return allModels.filter((model) => model.providers.includes(provider));
+ return new ModelCollection(
+ allModels.filter((model) => model.providers.includes(provider))
+ );
  },
  find(id) {
  return allModels.find((model) => model.id === id);
  },
  can(...capabilities) {
- return allModels.filter(
- (model) => capabilities.every((capability) => model.can.includes(capability))
+ return new ModelCollection(
+ allModels.filter(
+ (model) => capabilities.every((capability) => model.can.includes(capability))
+ )
  );
  },
  withMinContext(tokens) {
- return allModels.filter((model) => model.context.total >= tokens);
+ return new ModelCollection(
+ allModels.filter((model) => model.context.total >= tokens)
+ );
  },
  getPrice(modelId, provider) {
  const providerData = providersData.providers.find((p) => p.id === provider);
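dist/index.mjs carries the same changes as dist/index.js: the bundle ships twice, once as CommonJS (the "main" entry) and once as ESM (the "module" entry), as the package.json diff below shows. Either entry exposes the same `models` object; a minimal consumption sketch:

```typescript
// ESM entry (dist/index.mjs, via the "module" field)
import { models } from 'aimodels';
console.log(models.providers);

// CommonJS entry (dist/index.js, via the "main" field):
// const { models } = require('aimodels');
```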
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "aimodels",
- "version": "0.1.2",
+ "version": "0.2.1",
  "description": "A collection of AI model specifications across different providers",
  "main": "dist/index.js",
  "module": "dist/index.mjs",