aimodels 0.5.1 → 0.5.2
This diff compares the contents of publicly released package versions as they appear in their respective public registries and is provided for informational purposes only.
- package/README.md +1 -55
- package/dist/index.d.ts +1 -1
- package/dist/index.js +104 -0
- package/package.json +4 -4
package/README.md
CHANGED

@@ -9,7 +9,7 @@ aimodels is useful when you need to programmatically access info about AI models
 aimodels powers:
 - [aimodels.dev](https://aimodels.dev) - a website about AI models
 - [aiwrapper](https://github.com/mitkury/aiwrapper) - an AI wrapper for running AI models
-- [
+- [Sila](https://github.com/silaorg/sila) - an open alternative to ChatGPT

 ## Installation

@@ -55,60 +55,6 @@ function renderModelControls(model) {
   };
 }

-// 4. Make decisions based on context window size
-function selectModelBasedOnInputLength(inputTokens) {
-  // Find models that can handle your content's size
-  const suitableModels = models.canChat().filter(model =>
-    (model.context.total || 0) >= inputTokens
-  );
-
-  // Sort by context window size (smallest suitable model first)
-  return suitableModels.sort((a, b) =>
-    (a.context.total || 0) - (b.context.total || 0)
-  )[0];
-}
-
-const contentLength = 10000; // tokens
-const recommendedModel = selectModelBasedOnInputLength(contentLength);
-console.log(`Recommended model: ${recommendedModel?.name}`);
-
-// 5. Utility function to trim chat messages to fit a model's context window
-function trimChatHistory(messages, model, reserveTokens = 500) {
-  // Only proceed if we have a valid model with a context window
-  if (!model || !model.context?.total) {
-    console.warn('Invalid model or missing context window information');
-    return messages;
-  }
-
-  const contextWindow = model.context.total;
-  let totalTokens = 0;
-  const availableTokens = contextWindow - reserveTokens;
-  const trimmedMessages = [];
-
-  // This is a simplified token counting approach
-  // In production, you may use a proper tokenizer for your model
-  for (const msg of messages.reverse()) {
-    // If the model can't process images, remove any image attachments
-    if (!model.canSee() && msg.attachments?.some(a => a.type === 'image')) {
-      msg.attachments = msg.attachments.filter(a => a.type !== 'image');
-    }
-
-    const estimatedTokens = JSON.stringify(msg).length / 4;
-    if (totalTokens + estimatedTokens <= availableTokens) {
-      trimmedMessages.unshift(msg);
-      totalTokens += estimatedTokens;
-    } else {
-      break;
-    }
-  }
-
-  return trimmedMessages;
-}
-
-// Example usage
-const chatHistory = [/* array of message objects */];
-const gpt5 = models.id('gpt-5.1');
-const fittedMessages = trimChatHistory(chatHistory, gpt5);
 ```

 ### Available API Methods
package/dist/index.d.ts
CHANGED

@@ -91,7 +91,7 @@ interface TokenContext extends BaseContext {
    * This is a flexible object that can contain any properties or nested objects
    * related to model-specific extensions (e.g., reasoning, experimental features).
    */
-  extended?: Record<string,
+  extended?: Record<string, unknown>;
 }
 interface CharacterContext extends BaseContext {
   type: "character";

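The `extended` field on `TokenContext` is now typed as `Record<string, unknown>`, so values read from it must be narrowed before use. Below is a minimal consumer-side sketch; the `models` import and `.id()` lookup mirror the package's README examples (the exact export shape is not part of this diff), and the `"reasoning"` key is purely hypothetical.

```typescript
// Sketch only: assumes the `models` collection and `.id()` helper shown in the
// README examples, and a hypothetical "reasoning" key inside `extended`.
import { models } from "aimodels";

const model = models.id("claude-sonnet-4-5-20250929");
const context = model?.context;

if (context?.type === "token" && context.extended) {
  // `extended` is Record<string, unknown>, so each value has to be narrowed
  // explicitly instead of being treated as an implicit any.
  const reasoning = context.extended["reasoning"];
  if (typeof reasoning === "boolean") {
    console.log(`reasoning flag: ${reasoning}`);
  }
}
```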
package/dist/index.js
CHANGED

@@ -1,5 +1,97 @@
 // src/data.js
 var models = {
+  "claude-sonnet-4-5-20250929": {
+    "id": "claude-sonnet-4-5-20250929",
+    "name": "Claude Sonnet 4.5",
+    "license": "proprietary",
+    "capabilities": [
+      "chat",
+      "txt-in",
+      "txt-out",
+      "img-in",
+      "fn-out",
+      "reason"
+    ],
+    "context": {
+      "type": "token",
+      "total": 2e5,
+      "maxOutput": 64e3,
+      "outputIsFixed": 1
+    },
+    "aliases": [
+      "claude-sonnet-4-5"
+    ],
+    "creatorId": "anthropic"
+  },
+  "claude-haiku-4-5-20251001": {
+    "id": "claude-haiku-4-5-20251001",
+    "name": "Claude Haiku 4.5",
+    "license": "proprietary",
+    "capabilities": [
+      "chat",
+      "txt-in",
+      "txt-out",
+      "img-in",
+      "fn-out",
+      "reason"
+    ],
+    "context": {
+      "type": "token",
+      "total": 2e5,
+      "maxOutput": 64e3,
+      "outputIsFixed": 1
+    },
+    "aliases": [
+      "claude-haiku-4-5"
+    ],
+    "creatorId": "anthropic"
+  },
+  "claude-opus-4-5-20251101": {
+    "id": "claude-opus-4-5-20251101",
+    "name": "Claude Opus 4.5",
+    "license": "proprietary",
+    "capabilities": [
+      "chat",
+      "txt-in",
+      "txt-out",
+      "img-in",
+      "fn-out",
+      "reason"
+    ],
+    "context": {
+      "type": "token",
+      "total": 2e5,
+      "maxOutput": 64e3,
+      "outputIsFixed": 1
+    },
+    "aliases": [
+      "claude-opus-4-5"
+    ],
+    "creatorId": "anthropic"
+  },
+  "claude-opus-4-1-20250805": {
+    "id": "claude-opus-4-1-20250805",
+    "name": "Claude Opus 4.1",
+    "license": "proprietary",
+    "capabilities": [
+      "chat",
+      "txt-in",
+      "txt-out",
+      "img-in",
+      "fn-out",
+      "reason"
+    ],
+    "context": {
+      "type": "token",
+      "total": 2e5,
+      "maxOutput": 32e3,
+      "outputIsFixed": 1
+    },
+    "aliases": [
+      "claude-opus-4-1"
+    ],
+    "creatorId": "anthropic"
+  },
   "claude-opus-4-20250514": {
     "id": "claude-opus-4-20250514",
     "name": "Claude Opus 4",
@@ -1212,6 +1304,18 @@ var models = {
     ],
     "creatorId": "openai"
   },
+  "gpt-5.2": {
+    "id": "gpt-5.2",
+    "extends": "gpt-5.1",
+    "overrides": {
+      "name": "GPT-5.2",
+      "aliases": [
+        "gpt-5.2-latest",
+        "gpt-5.2-chat-latest"
+      ]
+    },
+    "creatorId": "openai"
+  },
   "gpt-4o": {
     "id": "gpt-4o",
     "name": "GPT-4o",

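Version 0.5.2 bundles data for Claude Sonnet 4.5, Claude Haiku 4.5, Claude Opus 4.5, and Claude Opus 4.1, plus a `gpt-5.2` entry declared via `extends`/`overrides` on top of `gpt-5.1`. The sketch below reads the new Anthropic entries using the `models.id()` lookup from the README examples; how aliases such as `claude-opus-4-5` or the `extends` mechanism are resolved is not visible in this diff, so only the canonical ids are used.

```typescript
// Sketch only: assumes the `models` collection and `.id()` lookup from the
// README examples; the import shape is not shown in this diff.
import { models } from "aimodels";

const newAnthropicIds = [
  "claude-sonnet-4-5-20250929",
  "claude-haiku-4-5-20251001",
  "claude-opus-4-5-20251101",
  "claude-opus-4-1-20250805",
];

for (const id of newAnthropicIds) {
  const model = models.id(id);
  if (!model || model.context?.type !== "token") continue;

  // All four entries advertise a 200k-token window (total: 2e5); Opus 4.1 caps
  // output at 32k tokens, while the 4.5 models allow 64k.
  console.log(
    `${model.name}: context ${model.context.total}, max output ${model.context.maxOutput}`
  );
}
```

Since each new entry lists `img-in` among its capabilities, the `canSee()` check used in the README's trimChatHistory example would presumably also pass for these models.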
package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "aimodels",
-  "version": "0.5.1",
+  "version": "0.5.2",
   "description": "A collection of AI model specifications across different providers",
   "type": "module",
   "main": "dist/index.js",
@@ -23,18 +23,18 @@
     "validate:data": "tsx src/schemas/validate-data.ts",
     "prebuild": "npm run validate:data && npm run clean && npm run gen-data-js-file",
     "build:ts": "tsup",
-    "build": "npm run build:ts
+    "build": "npm run build:ts",
     "test": "vitest run",
     "test:watch": "vitest watch",
     "test:coverage": "vitest run --coverage",
     "typecheck": "tsc --noEmit",
     "lint": "eslint src --ext .ts",
     "clean": "rm -rf dist",
-    "prepare": "
+    "prepare": "npm run build",
     "preversion": "npm run typecheck && npm run lint && npm test",
     "version": "git add -A",
     "postversion": "git push && git push --tags",
-    "prepublishOnly": "
+    "prepublishOnly": "npm run typecheck && npm run lint && npm run build",
     "postpublish": "npm run clean",
     "rules": "airul generate",
     "rules:comment": "# Generate AI rules from documentation",