aimodels 0.3.12 → 0.4.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +133 -66
- package/dist/index.d.ts +204 -113
- package/dist/index.js +2431 -2171
- package/package.json +20 -13
- package/LICENSE +0 -21
- package/dist/index.d.mts +0 -212
- package/dist/index.mjs +0 -2279
package/README.md
CHANGED
@@ -1,6 +1,15 @@
-#
+# aimodels
 
-A collection of AI model specifications across different providers. This package provides normalized data about AI models, including their capabilities, context windows, and pricing information.
+A collection of AI model specifications across different providers. This universal JavaScript package provides normalized data about AI models, including their capabilities, context windows, and pricing information. Works in any JavaScript runtime (Node.js, browsers, Deno).
+
+## Use cases
+
+aimodels is useful when you need to programmatically access info about AI models and their capabilities. It's ideal for frameworks or applications that interact with different models, need to run inference across various providers or show info about models in the UI.
+
+aimodels powers:
+- [aimodels.dev](https://aimodels.dev) - a website about AI models
+- [aiwrapper](https://github.com/mitkury/aiwrapper) - an AI wrapper for running AI models ([aiwrapper.dev](https://aiwrapper.dev))
+- [Supa](https://github.com/supaorg/supa) - an open alternative to ChatGPT ([supa.cloud](https://supa.cloud))
 
 ## Installation
 
@@ -13,48 +22,120 @@ npm install aimodels
 ```typescript
 import { models } from 'aimodels';
 
-//
+// 1. Get all models that support chat functionality
 const chatModels = models.canChat();
-
-
-
-//
-const
-
-
-
-//
-const speechModels = models.canHear().canSpeak();
-
-// Text processing
-const textProcessors = models.canRead().canWrite();
-
-// Available fluent API methods:
-// - canChat() - models with chat capability
-// - canReason() - models with reasoning capability
-// - canRead() - models that can process text input
-// - canWrite() - models that can output text
-// - canSee() - models that understand images
-// - canGenerateImages() - models that can create images
-// - canHear() - models that understand audio
-// - canSpeak() - models that can generate speech
-// - canOutputJSON() - models that provide structured JSON output
-// - canCallFunctions() - models with function calling capability
-// - canGenerateEmbeddings() - models that output vector embeddings
-
-// Find models by provider
-const openaiModels = models.fromProvider('openai');
-
-// Find models by creator
-const metaModels = models.fromCreator('meta');
-
-// Find models by context window
-const largeContextModels = models.withMinContext(32768);
-
-// Find specific model
+console.log(`Available chat models: ${chatModels.length}`);
+// Example output: "Available chat models: 99"
+
+// 2. Find all chat models with vision capabilities from OpenAI
+const visionModelsFromOpenAI = models.canChat().canSee().fromProvider('openai');
+console.log(visionModelsFromOpenAI.map(model => model.name));
+// Example output: ["GPT-4o", "GPT-4 Vision", ...]
+
+// 3. Check if a specific model can process images
 const model = models.id('gpt-4o');
-
-console.log(model
+if (model?.canSee()) {
+  console.log(`${model.name} can process images`);
+  // Enable image upload in your UI
+  const allowAttachingImgs = true;
+}
+
+// You can also check multiple capabilities with a single method
+if (model?.can('img-in', 'chat')) {
+  console.log(`${model.name} can both chat and understand images`);
+}
+
+// And use capability checks to make UI decisions
+function renderModelControls(model) {
+  return {
+    showImageUpload: model.canSee(),
+    showAudioRecorder: model.canHear(),
+    showFunctionEditor: model.canCallFunctions(),
+    showResponseFormatting: model.canOutputJSON(),
+  };
+}
+
+// 4. Make decisions based on context window size
+function selectModelBasedOnInputLength(inputTokens) {
+  // Find models that can handle your content's size
+  const suitableModels = models.canChat().filter(model =>
+    (model.context.total || 0) >= inputTokens
+  );
+
+  // Sort by context window size (smallest suitable model first)
+  return suitableModels.sort((a, b) =>
+    (a.context.total || 0) - (b.context.total || 0)
+  )[0];
+}
+
+const contentLength = 10000; // tokens
+const recommendedModel = selectModelBasedOnInputLength(contentLength);
+console.log(`Recommended model: ${recommendedModel?.name}`);
+
+// 5. Utility function to trim chat messages to fit a model's context window
+function trimChatHistory(messages, model, reserveTokens = 500) {
+  // Only proceed if we have a valid model with a context window
+  if (!model || !model.context?.total) {
+    console.warn('Invalid model or missing context window information');
+    return messages;
+  }
+
+  const contextWindow = model.context.total;
+  let totalTokens = 0;
+  const availableTokens = contextWindow - reserveTokens;
+  const trimmedMessages = [];
+
+  // This is a simplified token counting approach
+  // In production, you may use a proper tokenizer for your model
+  for (const msg of messages.reverse()) {
+    // If the model can't process images, remove any image attachments
+    if (!model.canSee() && msg.attachments?.some(a => a.type === 'image')) {
+      msg.attachments = msg.attachments.filter(a => a.type !== 'image');
+    }
+
+    const estimatedTokens = JSON.stringify(msg).length / 4;
+    if (totalTokens + estimatedTokens <= availableTokens) {
+      trimmedMessages.unshift(msg);
+      totalTokens += estimatedTokens;
+    } else {
+      break;
+    }
+  }
+
+  return trimmedMessages;
+}
+
+// Example usage
+const chatHistory = [/* array of message objects */];
+const gpt4 = models.id('gpt-4');
+const fittedMessages = trimChatHistory(chatHistory, gpt4);
+```
+
+### Available API Methods
+
+```typescript
+// Capability methods
+models.canChat() // Models with chat capability
+models.canReason() // Models with reasoning capability
+models.canRead() // Models that can process text input
+models.canWrite() // Models that can output text
+models.canSee() // Models that understand images
+models.canGenerateImages() // Models that can create images
+models.canHear() // Models that understand audio
+models.canSpeak() // Models that can generate speech
+models.canOutputJSON() // Models that provide structured JSON output
+models.canCallFunctions() // Models with function calling capability
+models.canGenerateEmbeddings() // Models that output vector embeddings
+
+// Provider and creator methods
+models.fromProvider('openai') // Find models by provider
+models.fromCreator('meta') // Find models by creator
+
+// Context window methods
+models.withMinContext(32768) // Find models with at least this context size
+
+// Model lookup
+models.id('gpt-4o') // Find a specific model by ID
 ```
 
 ## Features
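One query method from the type declarations that the new README examples don't demonstrate is `know()`, which filters a collection by supported languages. A minimal sketch of combining it with the capability filters above; the language tags are assumptions, since neither the README nor the declarations spell out the expected format.

```typescript
import { models } from 'aimodels';

// Chat models that list both English and Japanese among their languages.
// 'en' and 'ja' are assumed tags; check the package data for the exact values it uses.
const bilingualChatModels = models.canChat().know('en', 'ja');
console.log(bilingualChatModels.map(model => model.name));
```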
@@ -69,31 +150,17 @@ console.log(model?.providers); // ['openai']
 - Universal JavaScript support (Node.js, browsers, Deno)
 - Regular updates with new models
 
-##
+## License
 
-
-```typescript
-interface Model {
-  /** Unique identifier */
-  id: string;
-  /** Display name */
-  name: string;
-  /** Model capabilities */
-  can: Capability[];
-  /** Available providers */
-  providers: string[];
-  /** Context window information */
-  context: ModelContext;
-  /** License or creator */
-  license: string;
-}
-```
+MIT
 
-
-- [Model Capabilities](/docs/model-capabilities.md)
-- [Model Structure](/docs/model-structure.md)
-- [Providers](/docs/providers.md)
+## Development
 
-
+### Build Process
 
-
+This project uses [tsup](https://github.com/egoist/tsup) (built on esbuild) for bundling. The build process is simple:
+
+```bash
+# Build the complete package and test
+npm run build
+```
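The `trimChatHistory` example added to the README estimates tokens as `JSON.stringify(msg).length / 4`. Below is a minimal variation of the same idea with the counter injected by the caller; the `countTokens` parameter and the `ChatMessage` shape are hypothetical and not part of the package, and the context lookup assumes a token-based context with a `total` field, as in the README example.

```typescript
import { Model } from 'aimodels';

// Hypothetical message shape; aimodels does not define one.
interface ChatMessage {
  role: string;
  content: string;
}

// Same trimming idea as the README example, but the token counter is injected,
// so a real tokenizer can replace the rough length / 4 heuristic.
function trimChatHistoryWith(
  messages: ChatMessage[],
  model: Model | undefined,
  countTokens: (msg: ChatMessage) => number = (msg) => JSON.stringify(msg).length / 4,
  reserveTokens = 500,
): ChatMessage[] {
  // Assumes a token-based context exposing `total`, like the README's chat models.
  const total = (model?.context as { total?: number } | undefined)?.total;
  if (!total) return messages;

  const available = total - reserveTokens;
  const kept: ChatMessage[] = [];
  let used = 0;

  // Walk from the newest message backwards, keeping as many as fit.
  for (const msg of [...messages].reverse()) {
    const cost = countTokens(msg);
    if (used + cost > available) break;
    kept.unshift(msg);
    used += cost;
  }
  return kept;
}
```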
package/dist/index.d.ts
CHANGED
@@ -1,46 +1,59 @@
+interface TokenBasedPricePerMillionTokens {
+    /** Price per million input tokens */
+    input: number;
+    /** Price per million output tokens */
+    output: number;
+    /** Price type */
+    type: 'token';
+}
+interface ImagePrice {
+    /** Price per image */
+    price: number;
+    /** Image size */
+    size: string;
+    /** Price type */
+    type: 'image';
+    /** Price unit */
+    unit: 'per_image';
+}
+
+/** Core organization data */
+interface Organization {
+    /** Unique identifier (e.g., "openai", "meta", "anthropic") */
+    id: string;
+    /** Display name (e.g., "OpenAI", "Meta", "Anthropic") */
+    name: string;
+    /** Organization's main website URL */
+    websiteUrl: string;
+    /** Organization's country of origin */
+    country: string;
+    /** Year founded */
+    founded: number;
+}
+
+interface ProviderSource {
+    id: string;
+    apiUrl: string;
+    apiDocsUrl: string;
+    pricing: Record<string, TokenBasedPricePerMillionTokens | ImagePrice>;
+}
+/** Provider-specific data */
+interface Provider extends Organization {
+    /** Provider's API endpoint URL */
+    apiUrl: string;
+    /** Provider's API documentation URL */
+    apiDocsUrl: string;
+    /** Whether this is a local provider */
+    isLocal?: number;
+    /** Model pricing information */
+    pricing: Record<string, TokenBasedPricePerMillionTokens | ImagePrice>;
+}
+
 /**
  * Defines all possible model capabilities
  */
 type Capability = "chat" | "reason" | "txt-in" | "txt-out" | "img-in" | "img-out" | "audio-in" | "audio-out" | "json-out" | "fn-out" | "vec-out";
 
-declare class ModelCollection extends Array<Model> {
-    /** Create a new ModelCollection from an array of models */
-    constructor(models?: Model[]);
-    /** Filter models by one or more capabilities (all must be present) */
-    can(...capabilities: Capability[]): ModelCollection;
-    /**
-     * Fluent capability filters for better readability
-     * Each method filters models by a specific capability
-     */
-    canChat(): ModelCollection;
-    canReason(): ModelCollection;
-    canRead(): ModelCollection;
-    canWrite(): ModelCollection;
-    canSee(): ModelCollection;
-    canGenerateImages(): ModelCollection;
-    canHear(): ModelCollection;
-    canSpeak(): ModelCollection;
-    canOutputJSON(): ModelCollection;
-    canCallFunctions(): ModelCollection;
-    canGenerateEmbeddings(): ModelCollection;
-    /** Filter models by one or more languages (all must be supported) */
-    know(...languages: string[]): ModelCollection;
-    /** Override array filter to return ModelCollection */
-    filter(predicate: (value: Model, index: number, array: Model[]) => boolean): ModelCollection;
-    /** Override array slice to return ModelCollection */
-    slice(start?: number, end?: number): ModelCollection;
-    /** Find a model by its ID or alias */
-    id(modelId: string): Model | undefined;
-    /** Get models available from a specific provider */
-    fromProvider(provider: string): ModelCollection;
-    /** Get models available from a specific creator */
-    fromCreator(creator: string): ModelCollection;
-    /** Filter models by minimum context window size */
-    withMinContext(tokens: number): ModelCollection;
-    /** Get all providers from all models in the collection deduplicated */
-    getProviders(): string[];
-    getCreators(): string[];
-}
 interface BaseContext {
     /** The type discriminator */
     type: string;
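The block of types added at the top of the declarations (`TokenBasedPricePerMillionTokens`, `ImagePrice`, `Organization`, `Provider`) makes pricing data part of the public surface. A minimal sketch of how they compose, narrowing the pricing union on its `type` discriminator; it assumes `pricing` is keyed by model id, which the declarations don't state explicitly, and the model id and token counts are illustrative.

```typescript
import { models } from 'aimodels';

// Rough cost estimate for one request against the first provider of a model.
function estimateCostUSD(modelId: string, inputTokens: number, outputTokens: number): number | undefined {
  const model = models.id(modelId);
  const provider = model?.providers[0];

  // Assumption: pricing entries are keyed by model id.
  const price = provider?.pricing[modelId];
  if (!price || price.type !== 'token') return undefined;

  // TokenBasedPricePerMillionTokens: input/output are per million tokens.
  return (inputTokens / 1_000_000) * price.input + (outputTokens / 1_000_000) * price.output;
}

console.log(estimateCostUSD('gpt-4o', 12_000, 800));
```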
@@ -114,99 +127,177 @@ interface EmbeddingContext extends BaseContext {
     normalized?: boolean;
 }
 type ModelContext = TokenContext | CharacterContext | ImageContext | AudioInputContext | AudioOutputContext | EmbeddingContext;
-
+
+/**
+ * Represents the raw model data exactly as it appears in the source files.
+ * This is a direct representation of the JSON structure.
+ */
+interface ModelSource {
     /** Unique identifier */
     id: string;
     /** Display name */
-    name
-    /** Creator of the model */
-    creator: string;
-    /** License type (e.g., "proprietary", "apache-2.0", "llama-2-community") */
-    license: string;
-    /** List of providers that can serve this model */
-    providers: string[];
+    name?: string;
     /** Model capabilities */
-
+    capabilities?: Capability[];
+    /** Available providers */
+    providerIds?: string[];
+    /** Context window information */
+    context?: ModelContext;
+    /** Organization that created this model */
+    creatorId?: string;
+    /** License type (e.g., "proprietary", "apache-2.0", "llama-2-community") */
+    license?: string;
     /** Languages the model knows */
     languages?: string[];
     /** Alternative identifiers for this model */
     aliases?: string[];
-    /** Context window information */
-    context: ModelContext;
     /** Base model ID this model extends */
     extends?: string;
     /** Properties that override the base model */
-    overrides?: Partial<Omit<
+    overrides?: Partial<Omit<ModelSource, 'id' | 'extends' | 'overrides'>>;
 }
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+/**
+ * Enhanced Model class that provides functionality on top of raw model data.
+ * Handles inheritance resolution and provides access to related objects.
+ */
+declare class Model {
+    private source;
+    constructor(source: ModelSource);
+    get id(): string;
+    get extends(): string | undefined;
+    get overrides(): Partial<ModelSource> | undefined;
+    private resolveProperty;
+    get name(): string;
+    get capabilities(): Capability[];
+    get context(): ModelContext | undefined;
+    get license(): string | undefined;
+    get languages(): string[] | undefined;
+    get aliases(): string[] | undefined;
+    get providerIds(): string[];
+    get providers(): Provider[];
+    get creatorId(): string | undefined;
+    get creator(): Organization | undefined;
+    /**
+     * Check if model has all specified capabilities
+     * Uses same API pattern as ModelCollection.can() but returns boolean
+     */
+    can(...capabilities: Capability[]): boolean;
+    canChat(): boolean;
+    canReason(): boolean;
+    canRead(): boolean;
+    canWrite(): boolean;
+    canSee(): boolean;
+    canGenerateImages(): boolean;
+    canHear(): boolean;
+    canSpeak(): boolean;
+    canOutputJSON(): boolean;
+    canCallFunctions(): boolean;
+    canGenerateEmbeddings(): boolean;
 }
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+declare class ModelCollection extends Array<Model> {
+    static providersData: Record<string, ProviderSource>;
+    static orgsData: Record<string, Organization>;
+    static modelSources: Record<string, ModelSource>;
+    /** Create a new ModelCollection from an array of models */
+    constructor(models?: Model[]);
+    /** Set the shared providers data */
+    static setProviders(providers: Record<string, Provider>): void;
+    /** Set the shared creators data */
+    static setOrgs(orgs: Record<string, Organization>): void;
+    /** Filter models by one or more capabilities (all must be present) */
+    can(...capabilities: Capability[]): ModelCollection;
+    /**
+     * Fluent capability filters for better readability
+     * Each method filters models by a specific capability
+     */
+    canChat(): ModelCollection;
+    canReason(): ModelCollection;
+    canRead(): ModelCollection;
+    canWrite(): ModelCollection;
+    canSee(): ModelCollection;
+    canGenerateImages(): ModelCollection;
+    canHear(): ModelCollection;
+    canSpeak(): ModelCollection;
+    canOutputJSON(): ModelCollection;
+    canCallFunctions(): ModelCollection;
+    canGenerateEmbeddings(): ModelCollection;
+    /** Filter models by one or more languages (all must be supported) */
+    know(...languages: string[]): ModelCollection;
+    /** Override array filter to return ModelCollection */
+    filter(predicate: (value: Model, index: number, array: Model[]) => boolean): ModelCollection;
+    /** Override array slice to return ModelCollection */
+    slice(start?: number, end?: number): ModelCollection;
+    /** Find a model by its ID or alias */
+    id(modelId: string): Model | undefined;
+    /** Get models available from a specific provider */
+    fromProvider(provider: string): ModelCollection;
+    /** Get models available from a specific creator */
+    fromCreator(creator: string): ModelCollection;
+    /** Filter models by minimum context window size */
+    withMinContext(tokens: number): ModelCollection;
+    /** Get all providers from all models in the collection deduplicated */
+    get providers(): Provider[];
+    /** Get all orgs from all models in the collection deduplicated */
+    get orgs(): Organization[];
+    /** Get a specific provider by ID */
+    getProvider(id: string): Provider | undefined;
+    /** Get a specific creator by ID */
+    getCreator(id: string): Organization | undefined;
+    /** Get providers for a specific model */
+    getProvidersForModel(modelId: string): Provider[];
+    /** Get creator for a specific model */
+    getCreatorForModel(modelId: string): Organization | undefined;
 }
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-var creators$1 = {
-    creators: creators
-};
-
+/**
+ * AIModels is a collection of AI models with associated metadata.
+ *
+ * This class follows the singleton pattern with a private constructor and a static getter.
+ * IMPORTANT: Do not instantiate this class directly. Instead, import the pre-configured
+ * singleton instance from the package:
+ *
+ * ```typescript
+ * import { models } from 'aimodels';
+ * ```
+ *
+ * The singleton instance contains all the model data and is the recommended way to use this package.
+ * If you need to access the class for type references, you can also import it:
+ *
+ * ```typescript
+ * import { AIModels, models } from 'aimodels';
+ * ```
+ */
 declare class AIModels extends ModelCollection {
-
-
-
-
-
-
-
-
+    private static _instance;
+    /**
+     * @private
+     * Private constructor used only by the static instance getter.
+     * Users should import the pre-configured instance from the package.
+     */
+    private constructor();
+    /**
+     * Add data to the static data containers
+     * @param data Object containing model sources, providers, and organizations to add
+     */
+    static addStaticData({ models, providers, orgs }: {
+        models?: Record<string, ModelSource>;
+        providers?: Record<string, ProviderSource>;
+        orgs?: Record<string, Organization>;
+    }): void;
+    static get instance(): AIModels;
+    /**
+     * Override to return all providers directly without filtering through models.
+     * We want to return all known providers here.
+     */
+    get providers(): Provider[];
+    /**
+     * Override to return all creators directly without filtering through models.
+     * We want to return all known creators here.
+     */
+    get orgs(): Organization[];
 }
 declare const models: AIModels;
 
-export { AIModels, type Capability, type Model, ModelCollection, type ModelContext, type
+export { AIModels, type AudioInputContext, type AudioOutputContext, type BaseContext, type Capability, type CharacterContext, type EmbeddingContext, type ImageContext, Model, ModelCollection, type ModelContext, type ModelSource, type Organization, type Provider, type ProviderSource, type TokenContext, models };
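Taken together, the declaration changes show what consumers adapt to when moving from 0.3.12 to 0.4.1: `Model` changes from a plain interface with a `can: Capability[]` array and `providers: string[]` to a class with boolean capability methods and object-returning getters, and `ModelCollection.getProviders(): string[]` gives way to a `providers: Provider[]` getter. A before/after sketch; the 0.3.12 lines are inferred from the removed declarations rather than taken from documented examples.

```typescript
import { models } from 'aimodels';

const model = models.id('gpt-4o');

// 0.3.12 (inferred from the removed declarations): capability and provider
// checks were plain array lookups.
//   model.can.includes('img-in');
//   model.providers.includes('openai');
//   models.canChat().getProviders(); // string[]

// 0.4.1: Model is a class with boolean capability methods, and providers and
// creators resolve to full Provider/Organization objects.
if (model?.can('img-in', 'chat')) {
  console.log(`${model.name} is a vision-capable chat model from ${model.creator?.name}`);
}

for (const provider of models.canChat().providers) {
  console.log(provider.id, provider.apiUrl);
}
```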