aimodels 0.2.3 → 0.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -13,36 +13,23 @@ npm install aimodels
  ```typescript
  import { models } from 'aimodels';
 
- // Get all available models
- console.log(models.all);
-
- // Get list of all providers
- console.log(models.providers); // ['openai', 'anthropic', 'mistral', ...]
-
- // Get list of model creators
- console.log(models.creators); // ['meta', 'mistral', ...]
+ // Find models by capability
+ const chatModels = models.can('chat');
+ const multimodalModels = models.can('chat', 'img-in');
 
- // Get models from a specific provider
- const openAiModels = models.fromProvider('openai');
+ // Find models by provider
+ const openaiModels = models.fromProvider('openai');
 
- // Get models from a specific creator
+ // Find models by creator
  const metaModels = models.fromCreator('meta');
 
- // Find a specific model
- const model = models.find('gpt-4');
+ // Find models by context window
+ const largeContextModels = models.withMinContext(32768);
+
+ // Find specific model
+ const model = models.id('gpt-4');
  console.log(model?.context.total); // Context window size
  console.log(model?.providers); // ['openai']
-
- // Get model pricing for a specific provider
- const price = models.getPrice('gpt-4', 'openai');
- console.log(price); // { type: 'token', input: 0.03, output: 0.06 }
-
- // Filter models by capabilities
- const chatModels = models.can('chat');
- const multimodalModels = models.can('chat', 'img-in');
-
- // Filter by context window
- const largeContextModels = models.withMinContext(32768);
  ```
 
  ## Features
@@ -51,43 +38,53 @@ const largeContextModels = models.withMinContext(32768);
  - Normalized data structure for easy comparison
  - Model capabilities (chat, img-in, img-out, function-out, etc.)
  - Context window information
- - Pricing information per provider
  - Creator and provider associations
  - TypeScript support with full type safety
  - Zero dependencies
  - Universal JavaScript support (Node.js, browsers, Deno)
  - Regular updates with new models
 
-
  ## Types
 
+ ### Model
  ```typescript
  interface Model {
- id: string; // Unique model identifier
- name: string; // Display name
- can: string[]; // Capabilities (chat, img-in, img-out, etc.)
- providers: string[]; // Available providers
- context: {
- total: number; // Total context window size
- };
- license: string; // License or creator
+ /** Unique identifier */
+ id: string;
+ /** Display name */
+ name: string;
+ /** Model capabilities */
+ can: Capability[];
+ /** Available providers */
+ providers: string[];
+ /** Context window information */
+ context: ModelContext;
+ /** License or creator */
+ license: string;
  }
+ ```
 
- type ModelPrice =
- | { type: 'token'; input: number; output: number } // Price per 1K tokens
- | { type: 'image'; price: number; size: string } // Price per image
- | { type: 'character'; price: number } // Price per character
- | { type: 'minute'; price: number }; // Price per minute
-
- interface Provider {
- id: string; // Provider identifier
- name: string; // Display name
- websiteUrl: string; // Provider's website
- apiUrl: string; // API documentation URL
- models: Record<string, ModelPrice>; // Model pricing
- }
+ ### Capabilities
+ ```typescript
+ type Capability =
+ | "chat" // shortcut for "text-in" and "text-out"
+ | "reason" // when the model spends some tokens on reasoning
+ | "text-in" // process text input
+ | "text-out" // output text
+ | "img-in" // understand images
+ | "img-out" // generate images
+ | "sound-in" // process audio input
+ | "sound-out" // generate audio/speech
+ | "json-out" // structured JSON output
+ | "function-out" // function calling
+ | "vectors-out"; // output vector embeddings
  ```
 
+ For more detailed information, see:
+ - [Model Capabilities](/docs/model-capabilities.md)
+ - [Model Structure](/docs/model-structure.md)
+ - [Providers](/docs/providers.md)
+
  ## License
 
  MIT
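
A minimal migration sketch for the README changes above, assuming the 0.3.0 entry point exports the same `models` collection shown in the diff; the removed pricing API has no replacement in this package, so that part is only noted in comments:

```typescript
import { models } from 'aimodels';

// 0.2.3: const model = models.find('gpt-4');
const model = models.id('gpt-4'); // 0.3.0 lookup by id

// 0.2.3 pricing helpers (models.getPrice, ModelPrice, Provider.models) are gone,
// so price data now has to come from outside this package.

// Capability and context filters keep their 0.2.3 names.
const multimodal = models.can('chat', 'img-in');
const largeContext = models.withMinContext(32768);

console.log(model?.providers, multimodal.length, largeContext.length);
```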
package/dist/index.d.mts CHANGED
@@ -30,17 +30,28 @@ declare class ModelCollection extends Array<Model> {
  /** Filter models by minimum context window size */
  withMinContext(tokens: number): ModelCollection;
  }
- interface ModelContext {
+ interface TokenModelContext {
  /** Maximum total tokens (input + output) */
- total: number;
+ total: number | null;
  /** Maximum output tokens */
+ maxOutput: number | null;
+ }
+ interface ImageModelContext {
+ /** Maximum outputs per request */
  maxOutput: number;
+ /** Available image sizes */
+ sizes: string[];
+ /** Available quality settings */
+ qualities: string[];
  }
+ type ModelContext = TokenModelContext | ImageModelContext;
  interface Model {
  /** Unique identifier */
  id: string;
  /** Display name */
  name: string;
+ /** Creator of the model */
+ creator: string;
  /** License type (e.g., "proprietary", "apache-2.0", "llama-2-community") */
  license: string;
  /** List of providers that can serve this model */
package/dist/index.d.ts CHANGED
@@ -30,17 +30,28 @@ declare class ModelCollection extends Array<Model> {
  /** Filter models by minimum context window size */
  withMinContext(tokens: number): ModelCollection;
  }
- interface ModelContext {
+ interface TokenModelContext {
  /** Maximum total tokens (input + output) */
- total: number;
+ total: number | null;
  /** Maximum output tokens */
+ maxOutput: number | null;
+ }
+ interface ImageModelContext {
+ /** Maximum outputs per request */
  maxOutput: number;
+ /** Available image sizes */
+ sizes: string[];
+ /** Available quality settings */
+ qualities: string[];
  }
+ type ModelContext = TokenModelContext | ImageModelContext;
  interface Model {
  /** Unique identifier */
  id: string;
  /** Display name */
  name: string;
+ /** Creator of the model */
+ creator: string;
  /** License type (e.g., "proprietary", "apache-2.0", "llama-2-community") */
  license: string;
  /** List of providers that can serve this model */
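
Both declaration files now type `Model.context` as the union `TokenModelContext | ImageModelContext`. A consumer-side narrowing sketch, mirroring the `"total" in context` check used inside `withMinContext` in dist/index.js below (the return type of `models.id` is assumed to be `Model | undefined`, as the README examples suggest):

```typescript
import { models } from 'aimodels';

const model = models.id('gemini-1.5-pro');
if (model) {
  const context = model.context;
  if ('total' in context) {
    // TokenModelContext: limits may be null when unknown
    console.log(`token window: ${context.total ?? 'unknown'}, max output: ${context.maxOutput ?? 'unknown'}`);
  } else {
    // ImageModelContext: per-request output limit plus size/quality options
    console.log(`images per request: ${context.maxOutput}, sizes: ${context.sizes.join(', ')}`);
  }
}
```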
package/dist/index.js CHANGED
@@ -64,12 +64,16 @@ var ModelCollection = class _ModelCollection extends Array {
  }
  /** Filter models by minimum context window size */
  withMinContext(tokens) {
- return this.filter((model) => model.context.total >= tokens);
+ return this.filter((model) => {
+ const context = model.context;
+ return "total" in context && context.total !== null && context.total >= tokens;
+ });
  }
  };
 
  // src/data/models/openai-models.json
  var openai_models_default = {
+ creator: "openai",
  models: [
  {
  id: "whisper-large-v3",
@@ -143,7 +147,7 @@ var openai_models_default = {
  },
  {
  id: "gpt-4o",
- name: "GPT-4O",
+ name: "GPT-4o",
  license: "proprietary",
  providers: [
  "openai",
@@ -304,13 +308,25 @@ var openai_models_default = {
 
  // src/data/models/anthropic-models.json
  var anthropic_models_default = {
+ creator: "anthropic",
  models: [
  {
  id: "claude-3-opus",
  name: "Claude 3 Opus",
  license: "proprietary",
- providers: ["anthropic", "aws", "google"],
- can: ["chat", "img-in", "json-out", "function-out"],
+ providers: [
+ "anthropic",
+ "aws",
+ "google"
+ ],
+ can: [
+ "chat",
+ "text-in",
+ "text-out",
+ "img-in",
+ "json-out",
+ "function-out"
+ ],
  context: {
  total: 2e5,
  maxOutput: 4096
@@ -320,8 +336,19 @@ var anthropic_models_default = {
  id: "claude-3-sonnet",
  name: "Claude 3 Sonnet",
  license: "proprietary",
- providers: ["anthropic", "aws", "google"],
- can: ["chat", "img-in", "json-out", "function-out"],
+ providers: [
+ "anthropic",
+ "aws",
+ "google"
+ ],
+ can: [
+ "chat",
+ "text-in",
+ "text-out",
+ "img-in",
+ "json-out",
+ "function-out"
+ ],
  context: {
  total: 2e5,
  maxOutput: 4096
@@ -331,8 +358,19 @@ var anthropic_models_default = {
  id: "claude-3-haiku",
  name: "Claude 3 Haiku",
  license: "proprietary",
- providers: ["anthropic", "aws", "google"],
- can: ["chat", "img-in", "json-out", "function-out"],
+ providers: [
+ "anthropic",
+ "aws",
+ "google"
+ ],
+ can: [
+ "chat",
+ "text-in",
+ "text-out",
+ "img-in",
+ "json-out",
+ "function-out"
+ ],
  context: {
  total: 2e5,
  maxOutput: 4096
@@ -343,6 +381,7 @@ var anthropic_models_default = {
 
  // src/data/models/meta-models.json
  var meta_models_default = {
+ creator: "meta",
  models: [
  {
  id: "llama3-70b-8192",
@@ -404,6 +443,7 @@ var meta_models_default = {
 
  // src/data/models/mistral-models.json
  var mistral_models_default = {
+ creator: "mistral",
  models: [
  {
  id: "mistral-large-2402",
@@ -489,12 +529,93 @@ var mistral_models_default = {
 
  // src/data/models/google-models.json
  var google_models_default = {
+ creator: "google",
  models: [
  {
- id: "gemma2-9b-it",
- name: "Gemma 2 9B Instruct",
+ id: "gemini-2.0-flash",
+ name: "Gemini 2.0 Flash",
+ license: "proprietary",
+ providers: ["google"],
+ can: ["chat", "text-in", "text-out", "json-out", "function-out", "img-in", "sound-in", "reason"],
+ context: {
+ total: 1048576,
+ maxOutput: 8192
+ },
+ aliases: ["gemini-2.0-flash-001"]
+ },
+ {
+ id: "gemini-2.0-flash-lite",
+ name: "Gemini 2.0 Flash Lite",
+ license: "proprietary",
+ providers: ["google"],
+ can: ["chat", "text-in", "text-out", "json-out", "function-out", "img-in", "sound-in"],
+ context: {
+ total: 1048576,
+ maxOutput: 8192
+ },
+ aliases: ["gemini-2.0-flash-lite-preview-02-05"]
+ },
+ {
+ id: "gemini-1.5-flash",
+ name: "Gemini 1.5 Flash",
+ license: "proprietary",
+ providers: ["google"],
+ can: ["chat", "text-in", "text-out", "json-out", "function-out", "img-in", "sound-in"],
+ context: {
+ total: 2097152,
+ maxOutput: 8192
+ }
+ },
+ {
+ id: "gemini-1.5-flash-8b",
+ name: "Gemini 1.5 Flash 8B",
+ license: "proprietary",
+ providers: ["google"],
+ can: ["chat", "text-in", "text-out", "json-out", "function-out", "img-in", "sound-in"],
+ context: {
+ total: 2097152,
+ maxOutput: 8192
+ }
+ },
+ {
+ id: "gemini-1.5-pro",
+ name: "Gemini 1.5 Pro",
+ license: "proprietary",
+ providers: ["google"],
+ can: ["chat", "text-in", "text-out", "json-out", "function-out", "img-in", "sound-in", "reason"],
+ context: {
+ total: 2097152,
+ maxOutput: 8192
+ },
+ aliases: ["gemini-1.5-pro-latest", "gemini-1.5-pro-001", "gemini-1.5-pro-002"]
+ },
+ {
+ id: "text-embedding-004",
+ name: "Text Embedding 004",
+ license: "proprietary",
+ providers: ["google"],
+ can: ["text-in", "vectors-out"],
+ context: {
+ total: 2048,
+ maxOutput: 768
+ }
+ },
+ {
+ id: "gemma-7b-it",
+ name: "Gemma 7B Instruct",
  license: "apache-2.0",
- providers: ["groq"],
+ providers: ["google"],
+ can: ["chat", "text-in", "text-out", "json-out", "function-out"],
+ context: {
+ total: 8192,
+ maxOutput: 4096
+ }
+ },
+ {
+ id: "gemma-2b-it",
+ name: "Gemma 2B Instruct",
+ license: "apache-2.0",
+ providers: ["google"],
  can: ["chat", "text-in", "text-out", "json-out", "function-out"],
  context: {
  total: 8192,
@@ -506,6 +627,7 @@ var google_models_default = {
 
  // src/data/models/deepseek-models.json
  var deepseek_models_default = {
+ creator: "deepseek",
  models: [
  {
  id: "deepseek-chat",
@@ -545,16 +667,144 @@ var deepseek_models_default = {
  ]
  };
 
+ // src/data/models/xai-models.json
+ var xai_models_default = {
+ creator: "xai",
+ models: [
+ {
+ id: "grok-2-vision-1212",
+ name: "Grok 2 Vision",
+ creator: "xai",
+ license: "proprietary",
+ providers: ["xai"],
+ can: ["chat", "text-in", "text-out", "img-in"],
+ context: {
+ total: 32768,
+ maxOutput: 4096
+ },
+ aliases: ["grok-2-vision", "grok-2-vision-latest"]
+ },
+ {
+ id: "grok-2-1212",
+ name: "Grok 2",
+ creator: "xai",
+ license: "proprietary",
+ providers: ["xai"],
+ can: ["chat", "text-in", "text-out"],
+ context: {
+ total: 131072,
+ maxOutput: 4096
+ },
+ aliases: ["grok-2", "grok-2-latest"]
+ },
+ {
+ id: "grok-vision-beta",
+ name: "Grok Vision Beta",
+ creator: "xai",
+ license: "proprietary",
+ providers: ["xai"],
+ can: ["chat", "text-in", "text-out", "img-in"],
+ context: {
+ total: 8192,
+ maxOutput: 4096
+ }
+ },
+ {
+ id: "grok-beta",
+ name: "Grok Beta",
+ creator: "xai",
+ license: "proprietary",
+ providers: ["xai"],
+ can: ["chat", "text-in", "text-out"],
+ context: {
+ total: 131072,
+ maxOutput: 4096
+ }
+ }
+ ]
+ };
+
  // src/builders/models.ts
+ function validateModel(raw) {
+ if (typeof raw !== "object" || raw === null) {
+ throw new Error("Model data must be an object");
+ }
+ const model = raw;
+ const stringFields = ["id", "name", "creator", "license"];
+ for (const field of stringFields) {
+ if (typeof model[field] !== "string") {
+ throw new Error(`Model ${field} must be a string`);
+ }
+ }
+ if (!Array.isArray(model.providers)) {
+ throw new Error("Model providers must be an array");
+ }
+ if (!model.providers.every((p) => typeof p === "string")) {
+ throw new Error("Model providers must be strings");
+ }
+ if (!Array.isArray(model.can)) {
+ throw new Error("Model capabilities must be an array");
+ }
+ const validCapabilities = [
+ "chat",
+ "reason",
+ "text-in",
+ "text-out",
+ "img-in",
+ "img-out",
+ "sound-in",
+ "sound-out",
+ "json-out",
+ "function-out",
+ "vectors-out"
+ ];
+ if (!model.can.every((c) => validCapabilities.includes(c))) {
+ throw new Error(`Model has invalid capabilities: ${model.can.join(", ")}`);
+ }
+ if (typeof model.context !== "object" || model.context === null) {
+ throw new Error("Model context must be an object");
+ }
+ const context = model.context;
+ if (Array.isArray(context.sizes) && Array.isArray(context.qualities)) {
+ if (typeof context.maxOutput !== "number") {
+ throw new Error("Image model context.maxOutput must be a number");
+ }
+ } else {
+ if (typeof context.total !== "number" && context.total !== null) {
+ throw new Error("Token model context.total must be a number or null");
+ }
+ if (typeof context.maxOutput !== "number" && context.maxOutput !== null) {
+ throw new Error("Token model context.maxOutput must be a number or null");
+ }
+ }
+ return {
+ id: model.id,
+ name: model.name,
+ creator: model.creator,
+ license: model.license,
+ providers: model.providers,
+ can: model.can,
+ context: model.context,
+ ...model.languages ? { languages: model.languages } : {}
+ };
+ }
  function buildAllModels() {
- return [
- ...openai_models_default.models,
- ...anthropic_models_default.models,
- ...meta_models_default.models,
- ...mistral_models_default.models,
- ...google_models_default.models,
- ...deepseek_models_default.models
+ const allModelData = [
+ openai_models_default,
+ anthropic_models_default,
+ meta_models_default,
+ mistral_models_default,
+ google_models_default,
+ deepseek_models_default,
+ xai_models_default
  ];
+ const allModels2 = allModelData.flatMap(
+ (data) => data.models.map((model) => ({
+ ...model,
+ creator: data.creator
+ }))
+ );
+ return allModels2.map((model) => validateModel(model));
  }
 
  // src/data/providers/openai-provider.json
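
The observable effect of the new `buildAllModels` step is that every model is stamped with the `creator` of its data file before validation. A short sketch based on the data shown above (expected values follow from the diff; the `models.id` lookup is the one documented in the README):

```typescript
import { models } from 'aimodels';

// Creator is taken from the data file that defines the model.
const claude = models.id('claude-3-opus');
console.log(claude?.creator);   // 'anthropic'
console.log(claude?.providers); // ['anthropic', 'aws', 'google']

// The xai entries also declare creator per model, but the file-level value is
// spread in last by buildAllModels, so the result is the same either way.
const grok = models.id('grok-2-1212');
console.log(grok?.creator);     // 'xai'
```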
package/dist/index.mjs CHANGED
@@ -36,12 +36,16 @@ var ModelCollection = class _ModelCollection extends Array {
  }
  /** Filter models by minimum context window size */
  withMinContext(tokens) {
- return this.filter((model) => model.context.total >= tokens);
+ return this.filter((model) => {
+ const context = model.context;
+ return "total" in context && context.total !== null && context.total >= tokens;
+ });
  }
  };
 
  // src/data/models/openai-models.json
  var openai_models_default = {
+ creator: "openai",
  models: [
  {
  id: "whisper-large-v3",
@@ -115,7 +119,7 @@ var openai_models_default = {
  },
  {
  id: "gpt-4o",
- name: "GPT-4O",
+ name: "GPT-4o",
  license: "proprietary",
  providers: [
  "openai",
@@ -276,13 +280,25 @@ var openai_models_default = {
 
  // src/data/models/anthropic-models.json
  var anthropic_models_default = {
+ creator: "anthropic",
  models: [
  {
  id: "claude-3-opus",
  name: "Claude 3 Opus",
  license: "proprietary",
- providers: ["anthropic", "aws", "google"],
- can: ["chat", "img-in", "json-out", "function-out"],
+ providers: [
+ "anthropic",
+ "aws",
+ "google"
+ ],
+ can: [
+ "chat",
+ "text-in",
+ "text-out",
+ "img-in",
+ "json-out",
+ "function-out"
+ ],
  context: {
  total: 2e5,
  maxOutput: 4096
@@ -292,8 +308,19 @@ var anthropic_models_default = {
  id: "claude-3-sonnet",
  name: "Claude 3 Sonnet",
  license: "proprietary",
- providers: ["anthropic", "aws", "google"],
- can: ["chat", "img-in", "json-out", "function-out"],
+ providers: [
+ "anthropic",
+ "aws",
+ "google"
+ ],
+ can: [
+ "chat",
+ "text-in",
+ "text-out",
+ "img-in",
+ "json-out",
+ "function-out"
+ ],
  context: {
  total: 2e5,
  maxOutput: 4096
@@ -303,8 +330,19 @@ var anthropic_models_default = {
  id: "claude-3-haiku",
  name: "Claude 3 Haiku",
  license: "proprietary",
- providers: ["anthropic", "aws", "google"],
- can: ["chat", "img-in", "json-out", "function-out"],
+ providers: [
+ "anthropic",
+ "aws",
+ "google"
+ ],
+ can: [
+ "chat",
+ "text-in",
+ "text-out",
+ "img-in",
+ "json-out",
+ "function-out"
+ ],
  context: {
  total: 2e5,
  maxOutput: 4096
@@ -315,6 +353,7 @@ var anthropic_models_default = {
 
  // src/data/models/meta-models.json
  var meta_models_default = {
+ creator: "meta",
  models: [
  {
  id: "llama3-70b-8192",
@@ -376,6 +415,7 @@ var meta_models_default = {
 
  // src/data/models/mistral-models.json
  var mistral_models_default = {
+ creator: "mistral",
  models: [
  {
  id: "mistral-large-2402",
@@ -461,12 +501,93 @@ var mistral_models_default = {
 
  // src/data/models/google-models.json
  var google_models_default = {
+ creator: "google",
  models: [
  {
- id: "gemma2-9b-it",
- name: "Gemma 2 9B Instruct",
+ id: "gemini-2.0-flash",
+ name: "Gemini 2.0 Flash",
+ license: "proprietary",
+ providers: ["google"],
+ can: ["chat", "text-in", "text-out", "json-out", "function-out", "img-in", "sound-in", "reason"],
+ context: {
+ total: 1048576,
+ maxOutput: 8192
+ },
+ aliases: ["gemini-2.0-flash-001"]
+ },
+ {
+ id: "gemini-2.0-flash-lite",
+ name: "Gemini 2.0 Flash Lite",
+ license: "proprietary",
+ providers: ["google"],
+ can: ["chat", "text-in", "text-out", "json-out", "function-out", "img-in", "sound-in"],
+ context: {
+ total: 1048576,
+ maxOutput: 8192
+ },
+ aliases: ["gemini-2.0-flash-lite-preview-02-05"]
+ },
+ {
+ id: "gemini-1.5-flash",
+ name: "Gemini 1.5 Flash",
+ license: "proprietary",
+ providers: ["google"],
+ can: ["chat", "text-in", "text-out", "json-out", "function-out", "img-in", "sound-in"],
+ context: {
+ total: 2097152,
+ maxOutput: 8192
+ }
+ },
+ {
+ id: "gemini-1.5-flash-8b",
+ name: "Gemini 1.5 Flash 8B",
+ license: "proprietary",
+ providers: ["google"],
+ can: ["chat", "text-in", "text-out", "json-out", "function-out", "img-in", "sound-in"],
+ context: {
+ total: 2097152,
+ maxOutput: 8192
+ }
+ },
+ {
+ id: "gemini-1.5-pro",
+ name: "Gemini 1.5 Pro",
+ license: "proprietary",
+ providers: ["google"],
+ can: ["chat", "text-in", "text-out", "json-out", "function-out", "img-in", "sound-in", "reason"],
+ context: {
+ total: 2097152,
+ maxOutput: 8192
+ },
+ aliases: ["gemini-1.5-pro-latest", "gemini-1.5-pro-001", "gemini-1.5-pro-002"]
+ },
+ {
+ id: "text-embedding-004",
+ name: "Text Embedding 004",
+ license: "proprietary",
+ providers: ["google"],
+ can: ["text-in", "vectors-out"],
+ context: {
+ total: 2048,
+ maxOutput: 768
+ }
+ },
+ {
+ id: "gemma-7b-it",
+ name: "Gemma 7B Instruct",
  license: "apache-2.0",
- providers: ["groq"],
+ providers: ["google"],
+ can: ["chat", "text-in", "text-out", "json-out", "function-out"],
+ context: {
+ total: 8192,
+ maxOutput: 4096
+ }
+ },
+ {
+ id: "gemma-2b-it",
+ name: "Gemma 2B Instruct",
+ license: "apache-2.0",
+ providers: ["google"],
  can: ["chat", "text-in", "text-out", "json-out", "function-out"],
  context: {
  total: 8192,
@@ -478,6 +599,7 @@ var google_models_default = {
 
  // src/data/models/deepseek-models.json
  var deepseek_models_default = {
+ creator: "deepseek",
  models: [
  {
  id: "deepseek-chat",
@@ -517,16 +639,144 @@ var deepseek_models_default = {
  ]
  };
 
+ // src/data/models/xai-models.json
+ var xai_models_default = {
+ creator: "xai",
+ models: [
+ {
+ id: "grok-2-vision-1212",
+ name: "Grok 2 Vision",
+ creator: "xai",
+ license: "proprietary",
+ providers: ["xai"],
+ can: ["chat", "text-in", "text-out", "img-in"],
+ context: {
+ total: 32768,
+ maxOutput: 4096
+ },
+ aliases: ["grok-2-vision", "grok-2-vision-latest"]
+ },
+ {
+ id: "grok-2-1212",
+ name: "Grok 2",
+ creator: "xai",
+ license: "proprietary",
+ providers: ["xai"],
+ can: ["chat", "text-in", "text-out"],
+ context: {
+ total: 131072,
+ maxOutput: 4096
+ },
+ aliases: ["grok-2", "grok-2-latest"]
+ },
+ {
+ id: "grok-vision-beta",
+ name: "Grok Vision Beta",
+ creator: "xai",
+ license: "proprietary",
+ providers: ["xai"],
+ can: ["chat", "text-in", "text-out", "img-in"],
+ context: {
+ total: 8192,
+ maxOutput: 4096
+ }
+ },
+ {
+ id: "grok-beta",
+ name: "Grok Beta",
+ creator: "xai",
+ license: "proprietary",
+ providers: ["xai"],
+ can: ["chat", "text-in", "text-out"],
+ context: {
+ total: 131072,
+ maxOutput: 4096
+ }
+ }
+ ]
+ };
+
  // src/builders/models.ts
+ function validateModel(raw) {
+ if (typeof raw !== "object" || raw === null) {
+ throw new Error("Model data must be an object");
+ }
+ const model = raw;
+ const stringFields = ["id", "name", "creator", "license"];
+ for (const field of stringFields) {
+ if (typeof model[field] !== "string") {
+ throw new Error(`Model ${field} must be a string`);
+ }
+ }
+ if (!Array.isArray(model.providers)) {
+ throw new Error("Model providers must be an array");
+ }
+ if (!model.providers.every((p) => typeof p === "string")) {
+ throw new Error("Model providers must be strings");
+ }
+ if (!Array.isArray(model.can)) {
+ throw new Error("Model capabilities must be an array");
+ }
+ const validCapabilities = [
+ "chat",
+ "reason",
+ "text-in",
+ "text-out",
+ "img-in",
+ "img-out",
+ "sound-in",
+ "sound-out",
+ "json-out",
+ "function-out",
+ "vectors-out"
+ ];
+ if (!model.can.every((c) => validCapabilities.includes(c))) {
+ throw new Error(`Model has invalid capabilities: ${model.can.join(", ")}`);
+ }
+ if (typeof model.context !== "object" || model.context === null) {
+ throw new Error("Model context must be an object");
+ }
+ const context = model.context;
+ if (Array.isArray(context.sizes) && Array.isArray(context.qualities)) {
+ if (typeof context.maxOutput !== "number") {
+ throw new Error("Image model context.maxOutput must be a number");
+ }
+ } else {
+ if (typeof context.total !== "number" && context.total !== null) {
+ throw new Error("Token model context.total must be a number or null");
+ }
+ if (typeof context.maxOutput !== "number" && context.maxOutput !== null) {
+ throw new Error("Token model context.maxOutput must be a number or null");
+ }
+ }
+ return {
+ id: model.id,
+ name: model.name,
+ creator: model.creator,
+ license: model.license,
+ providers: model.providers,
+ can: model.can,
+ context: model.context,
+ ...model.languages ? { languages: model.languages } : {}
+ };
+ }
  function buildAllModels() {
- return [
- ...openai_models_default.models,
- ...anthropic_models_default.models,
- ...meta_models_default.models,
- ...mistral_models_default.models,
- ...google_models_default.models,
- ...deepseek_models_default.models
+ const allModelData = [
+ openai_models_default,
+ anthropic_models_default,
+ meta_models_default,
+ mistral_models_default,
+ google_models_default,
+ deepseek_models_default,
+ xai_models_default
  ];
+ const allModels2 = allModelData.flatMap(
+ (data) => data.models.map((model) => ({
+ ...model,
+ creator: data.creator
+ }))
+ );
+ return allModels2.map((model) => validateModel(model));
  }
 
  // src/data/providers/openai-provider.json
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "aimodels",
- "version": "0.2.3",
+ "version": "0.3.0",
  "description": "A collection of AI model specifications across different providers",
  "main": "dist/index.js",
  "module": "dist/index.mjs",
@@ -29,7 +29,11 @@
  "version": "git add -A",
  "postversion": "git push && git push --tags",
  "prepublishOnly": "npm run clean && npm run typecheck && npm test && npm run lint",
- "postpublish": "npm run clean"
+ "postpublish": "npm run clean",
+ "rules": "airul generate",
+ "rules:comment": "# Generate AI rules from documentation",
+ "pregenerate": "[ -f .airul.json ] || airul init",
+ "postinstall": "echo \"\nRun 'npm run rules' to generate AI rules from your documentation\""
  },
  "keywords": [
  "ai",
@@ -60,6 +64,7 @@
  "@types/node": "^20.0.0",
  "@typescript-eslint/eslint-plugin": "^6.0.0",
  "@typescript-eslint/parser": "^6.0.0",
+ "airul": "^0.1.10",
  "eslint": "^8.0.0",
  "tsup": "^8.0.0",
  "typescript": "^5.0.0"