@funkai/models 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.generated/entries.json +23 -0
- package/.generated/req.txt +1 -0
- package/.turbo/turbo-build.log +145 -0
- package/.turbo/turbo-typecheck.log +4 -0
- package/CHANGELOG.md +23 -0
- package/README.md +95 -0
- package/dist/alibaba-B6q4Ng1R.mjs +957 -0
- package/dist/alibaba-B6q4Ng1R.mjs.map +1 -0
- package/dist/amazon-bedrock-Cv9AHQBH.mjs +2070 -0
- package/dist/amazon-bedrock-Cv9AHQBH.mjs.map +1 -0
- package/dist/anthropic-yB7ST97_.mjs +651 -0
- package/dist/anthropic-yB7ST97_.mjs.map +1 -0
- package/dist/cerebras-COfl7XM-.mjs +95 -0
- package/dist/cerebras-COfl7XM-.mjs.map +1 -0
- package/dist/cohere-B7TgO0hT.mjs +271 -0
- package/dist/cohere-B7TgO0hT.mjs.map +1 -0
- package/dist/deepinfra-B0GxUwCG.mjs +636 -0
- package/dist/deepinfra-B0GxUwCG.mjs.map +1 -0
- package/dist/deepseek-D64ZEsvS.mjs +50 -0
- package/dist/deepseek-D64ZEsvS.mjs.map +1 -0
- package/dist/fireworks-ai-DJYvdAi_.mjs +304 -0
- package/dist/fireworks-ai-DJYvdAi_.mjs.map +1 -0
- package/dist/google-BypRl349.mjs +833 -0
- package/dist/google-BypRl349.mjs.map +1 -0
- package/dist/google-vertex-DbS-zTGD.mjs +730 -0
- package/dist/google-vertex-DbS-zTGD.mjs.map +1 -0
- package/dist/groq-ei_PerYi.mjs +381 -0
- package/dist/groq-ei_PerYi.mjs.map +1 -0
- package/dist/huggingface-DaM1EeLP.mjs +456 -0
- package/dist/huggingface-DaM1EeLP.mjs.map +1 -0
- package/dist/inception-CspEzqNV.mjs +101 -0
- package/dist/inception-CspEzqNV.mjs.map +1 -0
- package/dist/index.d.mts +30314 -0
- package/dist/index.d.mts.map +1 -0
- package/dist/index.mjs +271 -0
- package/dist/index.mjs.map +1 -0
- package/dist/llama-Cf3-koap.mjs +161 -0
- package/dist/llama-Cf3-koap.mjs.map +1 -0
- package/dist/mistral-BI9MdAO4.mjs +579 -0
- package/dist/mistral-BI9MdAO4.mjs.map +1 -0
- package/dist/nvidia-COHacuoa.mjs +1625 -0
- package/dist/nvidia-COHacuoa.mjs.map +1 -0
- package/dist/openai-C0nCfZUq.mjs +1023 -0
- package/dist/openai-C0nCfZUq.mjs.map +1 -0
- package/dist/openrouter-DSFzxKQb.mjs +4608 -0
- package/dist/openrouter-DSFzxKQb.mjs.map +1 -0
- package/dist/perplexity-zeZ2WlBU.mjs +96 -0
- package/dist/perplexity-zeZ2WlBU.mjs.map +1 -0
- package/dist/providers/alibaba.d.mts +1795 -0
- package/dist/providers/alibaba.d.mts.map +1 -0
- package/dist/providers/alibaba.mjs +39 -0
- package/dist/providers/alibaba.mjs.map +1 -0
- package/dist/providers/amazon-bedrock.d.mts +3713 -0
- package/dist/providers/amazon-bedrock.d.mts.map +1 -0
- package/dist/providers/amazon-bedrock.mjs +39 -0
- package/dist/providers/amazon-bedrock.mjs.map +1 -0
- package/dist/providers/anthropic.d.mts +1109 -0
- package/dist/providers/anthropic.d.mts.map +1 -0
- package/dist/providers/anthropic.mjs +39 -0
- package/dist/providers/anthropic.mjs.map +1 -0
- package/dist/providers/cerebras.d.mts +219 -0
- package/dist/providers/cerebras.d.mts.map +1 -0
- package/dist/providers/cerebras.mjs +39 -0
- package/dist/providers/cerebras.mjs.map +1 -0
- package/dist/providers/cohere.d.mts +555 -0
- package/dist/providers/cohere.d.mts.map +1 -0
- package/dist/providers/cohere.mjs +39 -0
- package/dist/providers/cohere.mjs.map +1 -0
- package/dist/providers/deepinfra.d.mts +1245 -0
- package/dist/providers/deepinfra.d.mts.map +1 -0
- package/dist/providers/deepinfra.mjs +39 -0
- package/dist/providers/deepinfra.mjs.map +1 -0
- package/dist/providers/deepseek.d.mts +139 -0
- package/dist/providers/deepseek.d.mts.map +1 -0
- package/dist/providers/deepseek.mjs +39 -0
- package/dist/providers/deepseek.mjs.map +1 -0
- package/dist/providers/fireworks-ai.d.mts +611 -0
- package/dist/providers/fireworks-ai.d.mts.map +1 -0
- package/dist/providers/fireworks-ai.mjs +39 -0
- package/dist/providers/fireworks-ai.mjs.map +1 -0
- package/dist/providers/google-vertex.d.mts +1227 -0
- package/dist/providers/google-vertex.d.mts.map +1 -0
- package/dist/providers/google-vertex.mjs +39 -0
- package/dist/providers/google-vertex.mjs.map +1 -0
- package/dist/providers/google.d.mts +1359 -0
- package/dist/providers/google.d.mts.map +1 -0
- package/dist/providers/google.mjs +39 -0
- package/dist/providers/google.mjs.map +1 -0
- package/dist/providers/groq.d.mts +765 -0
- package/dist/providers/groq.d.mts.map +1 -0
- package/dist/providers/groq.mjs +39 -0
- package/dist/providers/groq.mjs.map +1 -0
- package/dist/providers/huggingface.d.mts +901 -0
- package/dist/providers/huggingface.d.mts.map +1 -0
- package/dist/providers/huggingface.mjs +39 -0
- package/dist/providers/huggingface.mjs.map +1 -0
- package/dist/providers/inception.d.mts +231 -0
- package/dist/providers/inception.d.mts.map +1 -0
- package/dist/providers/inception.mjs +39 -0
- package/dist/providers/inception.mjs.map +1 -0
- package/dist/providers/llama.d.mts +345 -0
- package/dist/providers/llama.d.mts.map +1 -0
- package/dist/providers/llama.mjs +39 -0
- package/dist/providers/llama.mjs.map +1 -0
- package/dist/providers/mistral.d.mts +1143 -0
- package/dist/providers/mistral.d.mts.map +1 -0
- package/dist/providers/mistral.mjs +39 -0
- package/dist/providers/mistral.mjs.map +1 -0
- package/dist/providers/nvidia.d.mts +3117 -0
- package/dist/providers/nvidia.d.mts.map +1 -0
- package/dist/providers/nvidia.mjs +39 -0
- package/dist/providers/nvidia.mjs.map +1 -0
- package/dist/providers/openai.d.mts +1963 -0
- package/dist/providers/openai.d.mts.map +1 -0
- package/dist/providers/openai.mjs +39 -0
- package/dist/providers/openai.mjs.map +1 -0
- package/dist/providers/openrouter.d.mts +8531 -0
- package/dist/providers/openrouter.d.mts.map +1 -0
- package/dist/providers/openrouter.mjs +39 -0
- package/dist/providers/openrouter.mjs.map +1 -0
- package/dist/providers/perplexity.d.mts +221 -0
- package/dist/providers/perplexity.d.mts.map +1 -0
- package/dist/providers/perplexity.mjs +39 -0
- package/dist/providers/perplexity.mjs.map +1 -0
- package/dist/providers/togetherai.d.mts +767 -0
- package/dist/providers/togetherai.d.mts.map +1 -0
- package/dist/providers/togetherai.mjs +39 -0
- package/dist/providers/togetherai.mjs.map +1 -0
- package/dist/providers/xai.d.mts +1161 -0
- package/dist/providers/xai.d.mts.map +1 -0
- package/dist/providers/xai.mjs +39 -0
- package/dist/providers/xai.mjs.map +1 -0
- package/dist/togetherai-BvcxUfPE.mjs +382 -0
- package/dist/togetherai-BvcxUfPE.mjs.map +1 -0
- package/dist/types-DjdaZckF.d.mts +71 -0
- package/dist/types-DjdaZckF.d.mts.map +1 -0
- package/dist/xai-fSuAkQJo.mjs +587 -0
- package/dist/xai-fSuAkQJo.mjs.map +1 -0
- package/docs/catalog/filtering.md +102 -0
- package/docs/catalog/overview.md +168 -0
- package/docs/catalog/providers.md +73 -0
- package/docs/cost/overview.md +125 -0
- package/docs/guides/filter-models.md +113 -0
- package/docs/guides/setup-resolver.md +106 -0
- package/docs/guides/track-costs.md +133 -0
- package/docs/overview.md +139 -0
- package/docs/provider/configuration.md +100 -0
- package/docs/provider/openrouter.md +105 -0
- package/docs/provider/overview.md +131 -0
- package/docs/troubleshooting.md +100 -0
- package/package.json +142 -0
- package/providers.json +39 -0
- package/scripts/generate-models.ts +392 -0
- package/src/catalog/index.test.ts +124 -0
- package/src/catalog/index.ts +65 -0
- package/src/catalog/providers/alibaba.ts +468 -0
- package/src/catalog/providers/amazon-bedrock.ts +941 -0
- package/src/catalog/providers/anthropic.ts +270 -0
- package/src/catalog/providers/cerebras.ts +61 -0
- package/src/catalog/providers/cohere.ts +149 -0
- package/src/catalog/providers/deepinfra.ts +325 -0
- package/src/catalog/providers/deepseek.ts +39 -0
- package/src/catalog/providers/fireworks-ai.ts +160 -0
- package/src/catalog/providers/google-vertex.ts +314 -0
- package/src/catalog/providers/google.ts +347 -0
- package/src/catalog/providers/groq.ts +204 -0
- package/src/catalog/providers/huggingface.ts +237 -0
- package/src/catalog/providers/inception.ts +61 -0
- package/src/catalog/providers/index.ts +59 -0
- package/src/catalog/providers/llama.ts +94 -0
- package/src/catalog/providers/mistral.ts +303 -0
- package/src/catalog/providers/nvidia.ts +820 -0
- package/src/catalog/providers/openai.ts +501 -0
- package/src/catalog/providers/openrouter.ts +2201 -0
- package/src/catalog/providers/perplexity.ts +61 -0
- package/src/catalog/providers/togetherai.ts +204 -0
- package/src/catalog/providers/xai.ts +292 -0
- package/src/catalog/types.ts +86 -0
- package/src/cost/calculate.test.ts +157 -0
- package/src/cost/calculate.ts +43 -0
- package/src/cost/index.ts +2 -0
- package/src/cost/types.ts +25 -0
- package/src/index.ts +25 -0
- package/src/provider/index.ts +9 -0
- package/src/provider/openrouter.test.ts +125 -0
- package/src/provider/openrouter.ts +110 -0
- package/src/provider/resolver.test.ts +138 -0
- package/src/provider/resolver.ts +125 -0
- package/src/provider/types.ts +39 -0
- package/src/providers/alibaba.ts +65 -0
- package/src/providers/amazon-bedrock.ts +67 -0
- package/src/providers/anthropic.ts +65 -0
- package/src/providers/cerebras.ts +65 -0
- package/src/providers/cohere.ts +65 -0
- package/src/providers/deepinfra.ts +65 -0
- package/src/providers/deepseek.ts +65 -0
- package/src/providers/fireworks-ai.ts +65 -0
- package/src/providers/google-vertex.ts +67 -0
- package/src/providers/google.ts +65 -0
- package/src/providers/groq.ts +65 -0
- package/src/providers/huggingface.ts +67 -0
- package/src/providers/inception.ts +65 -0
- package/src/providers/llama.ts +65 -0
- package/src/providers/mistral.ts +65 -0
- package/src/providers/nvidia.ts +65 -0
- package/src/providers/openai.ts +65 -0
- package/src/providers/openrouter.ts +67 -0
- package/src/providers/perplexity.ts +67 -0
- package/src/providers/togetherai.ts +65 -0
- package/src/providers/xai.ts +65 -0
- package/tsconfig.json +25 -0
- package/tsdown.config.ts +23 -0
- package/vitest.config.ts +29 -0
|
@@ -0,0 +1,102 @@
|
|
|
1
|
+
# Filtering Models
|
|
2
|
+
|
|
3
|
+
Advanced patterns for filtering the model catalog using `models()` predicates.
|
|
4
|
+
|
|
5
|
+
## Key Concepts
|
|
6
|
+
|
|
7
|
+
### Predicate-Based Filtering
|
|
8
|
+
|
|
9
|
+
`models()` accepts an optional predicate function `(m: ModelDefinition) => boolean`. When provided, only models where the predicate returns `true` are included. When omitted, the full catalog is returned.
|
|
10
|
+
|
|
11
|
+
```ts
|
|
12
|
+
const filtered = models((m) => m.capabilities.reasoning);
|
|
13
|
+
```
|
|
14
|
+
|
|
15
|
+
## Usage
|
|
16
|
+
|
|
17
|
+
### Filter by Capability
|
|
18
|
+
|
|
19
|
+
```ts
|
|
20
|
+
const reasoning = models((m) => m.capabilities.reasoning);
|
|
21
|
+
const withTools = models((m) => m.capabilities.toolCall);
|
|
22
|
+
const structured = models((m) => m.capabilities.structuredOutput);
|
|
23
|
+
```
|
|
24
|
+
|
|
25
|
+
### Filter by Provider
|
|
26
|
+
|
|
27
|
+
```ts
|
|
28
|
+
const openai = models((m) => m.provider === "openai");
|
|
29
|
+
const anthropic = models((m) => m.provider === "anthropic");
|
|
30
|
+
```
|
|
31
|
+
|
|
32
|
+
### Filter by Modality
|
|
33
|
+
|
|
34
|
+
```ts
|
|
35
|
+
const vision = models((m) => m.modalities.input.includes("image"));
|
|
36
|
+
const audio = models((m) => m.modalities.input.includes("audio"));
|
|
37
|
+
const multimodal = models((m) => m.modalities.input.length > 1);
|
|
38
|
+
```
|
|
39
|
+
|
|
40
|
+
### Filter by Context Window
|
|
41
|
+
|
|
42
|
+
```ts
|
|
43
|
+
const largeContext = models((m) => m.contextWindow >= 128_000);
|
|
44
|
+
const longOutput = models((m) => m.maxOutput >= 16_000);
|
|
45
|
+
```
|
|
46
|
+
|
|
47
|
+
### Filter by Pricing
|
|
48
|
+
|
|
49
|
+
```ts
|
|
50
|
+
const cheapInput = models((m) => m.pricing.input < 0.000001);
|
|
51
|
+
const withCache = models((m) => m.pricing.cacheRead != null);
|
|
52
|
+
```
|
|
53
|
+
|
|
54
|
+
### Filter by Family
|
|
55
|
+
|
|
56
|
+
```ts
|
|
57
|
+
const gpt = models((m) => m.family === "gpt");
|
|
58
|
+
const claude = models((m) => m.family.startsWith("claude"));
|
|
59
|
+
```
|
|
60
|
+
|
|
61
|
+
### Combine Multiple Conditions
|
|
62
|
+
|
|
63
|
+
```ts
|
|
64
|
+
const ideal = models(
|
|
65
|
+
(m) => m.capabilities.reasoning && m.capabilities.toolCall && m.contextWindow >= 128_000,
|
|
66
|
+
);
|
|
67
|
+
```
|
|
68
|
+
|
|
69
|
+
### Chain Filters with Array Methods
|
|
70
|
+
|
|
71
|
+
Since `models()` returns `readonly ModelDefinition[]`, standard array methods work (note: `toSorted` requires ES2023 / Node 20+ — on older runtimes use `[...arr].sort(...)`):
|
|
72
|
+
|
|
73
|
+
```ts
|
|
74
|
+
const sorted = models((m) => m.capabilities.reasoning).toSorted(
|
|
75
|
+
(a, b) => a.pricing.input - b.pricing.input,
|
|
76
|
+
);
|
|
77
|
+
|
|
78
|
+
const cheapest = sorted[0];
|
|
79
|
+
```
|
|
80
|
+
|
|
81
|
+
### Extract Unique Values
|
|
82
|
+
|
|
83
|
+
```ts
|
|
84
|
+
const providers = [...new Set(models().map((m) => m.provider))];
|
|
85
|
+
const families = [...new Set(models().map((m) => m.family))];
|
|
86
|
+
```
|
|
87
|
+
|
|
88
|
+
### Per-Provider Filtering
|
|
89
|
+
|
|
90
|
+
Use subpath exports for provider-scoped operations:
|
|
91
|
+
|
|
92
|
+
```ts
|
|
93
|
+
import { openAIModels } from "@funkai/models/openai";
|
|
94
|
+
|
|
95
|
+
const reasoningGpt = openAIModels.filter((m) => m.capabilities.reasoning);
|
|
96
|
+
```
|
|
97
|
+
|
|
98
|
+
## References
|
|
99
|
+
|
|
100
|
+
- [Model Catalog](overview.md)
|
|
101
|
+
- [Providers](providers.md)
|
|
102
|
+
- [Filter Models Guide](../guides/filter-models.md)
|
|
@@ -0,0 +1,168 @@
|
|
|
1
|
+
# Model Catalog
|
|
2
|
+
|
|
3
|
+
The model catalog is an auto-generated, readonly collection of `ModelDefinition` objects sourced from [models.dev](https://models.dev). It provides lookup functions, type-safe IDs with autocomplete, and per-provider subpath exports.
|
|
4
|
+
|
|
5
|
+
## Architecture
|
|
6
|
+
|
|
7
|
+
```mermaid
|
|
8
|
+
%%{init: {
|
|
9
|
+
'theme': 'base',
|
|
10
|
+
'themeVariables': {
|
|
11
|
+
'primaryColor': '#313244',
|
|
12
|
+
'primaryTextColor': '#cdd6f4',
|
|
13
|
+
'primaryBorderColor': '#6c7086',
|
|
14
|
+
'lineColor': '#89b4fa',
|
|
15
|
+
'secondaryColor': '#45475a',
|
|
16
|
+
'tertiaryColor': '#1e1e2e',
|
|
17
|
+
'background': '#1e1e2e',
|
|
18
|
+
'mainBkg': '#313244',
|
|
19
|
+
'clusterBkg': '#1e1e2e',
|
|
20
|
+
'clusterBorder': '#45475a'
|
|
21
|
+
},
|
|
22
|
+
'flowchart': { 'curve': 'basis', 'padding': 15 }
|
|
23
|
+
}}%%
|
|
24
|
+
|
|
25
|
+
flowchart LR
|
|
26
|
+
source["models.dev API"]:::external
|
|
27
|
+
|
|
28
|
+
subgraph generation [" "]
|
|
29
|
+
script["generate:models script"]:::core
|
|
30
|
+
providers["Per-provider .ts files"]:::core
|
|
31
|
+
end
|
|
32
|
+
|
|
33
|
+
subgraph catalog [" "]
|
|
34
|
+
MODELS["MODELS constant"]:::core
|
|
35
|
+
modelFn["model(id)"]:::core
|
|
36
|
+
modelsFn["models(filter?)"]:::core
|
|
37
|
+
end
|
|
38
|
+
|
|
39
|
+
source --> script
|
|
40
|
+
script --> providers
|
|
41
|
+
providers --> MODELS
|
|
42
|
+
MODELS --> modelFn
|
|
43
|
+
MODELS --> modelsFn
|
|
44
|
+
|
|
45
|
+
classDef external fill:#313244,stroke:#f5c2e7,stroke-width:2px,color:#cdd6f4
|
|
46
|
+
classDef core fill:#313244,stroke:#89b4fa,stroke-width:2px,color:#cdd6f4
|
|
47
|
+
|
|
48
|
+
style generation fill:#181825,stroke:#fab387,stroke-width:2px
|
|
49
|
+
style catalog fill:#181825,stroke:#89b4fa,stroke-width:2px
|
|
50
|
+
```
|
|
51
|
+
|
|
52
|
+
## Key Concepts
|
|
53
|
+
|
|
54
|
+
### ModelDefinition
|
|
55
|
+
|
|
56
|
+
Each model has the following fields:
|
|
57
|
+
|
|
58
|
+
| Field | Type | Description |
|
|
59
|
+
| --------------- | ------------------- | ---------------------------------------------- |
|
|
60
|
+
| `id` | `string` | Provider-native identifier (e.g. `"gpt-4.1"`) |
|
|
61
|
+
| `name` | `string` | Human-readable display name |
|
|
62
|
+
| `provider` | `string` | Provider slug (e.g. `"openai"`) |
|
|
63
|
+
| `family` | `string` | Model family (e.g. `"gpt"`, `"claude-sonnet"`) |
|
|
64
|
+
| `pricing` | `ModelPricing` | Per-token pricing rates in USD |
|
|
65
|
+
| `contextWindow` | `number` | Maximum context window in tokens |
|
|
66
|
+
| `maxOutput` | `number` | Maximum output tokens |
|
|
67
|
+
| `modalities` | `ModelModalities` | Supported input/output modalities |
|
|
68
|
+
| `capabilities` | `ModelCapabilities` | Boolean capability flags |
|
|
69
|
+
|
|
70
|
+
### ModelPricing
|
|
71
|
+
|
|
72
|
+
| Field | Type | Description |
|
|
73
|
+
| ------------ | --------------------- | ----------------------------------- |
|
|
74
|
+
| `input` | `number` | Cost per input token |
|
|
75
|
+
| `output` | `number` | Cost per output token |
|
|
76
|
+
| `cacheRead` | `number \| undefined` | Cost per cached input token (read) |
|
|
77
|
+
| `cacheWrite` | `number \| undefined` | Cost per cached input token (write) |
|
|
78
|
+
|
|
79
|
+
### ModelCapabilities
|
|
80
|
+
|
|
81
|
+
| Field | Type | Description |
|
|
82
|
+
| ------------------ | --------- | -------------------------------- |
|
|
83
|
+
| `reasoning` | `boolean` | Supports chain-of-thought |
|
|
84
|
+
| `toolCall` | `boolean` | Supports tool (function) calling |
|
|
85
|
+
| `attachment` | `boolean` | Supports file/image attachments |
|
|
86
|
+
| `structuredOutput` | `boolean` | Supports structured JSON output |
|
|
87
|
+
|
|
88
|
+
### ModelModalities
|
|
89
|
+
|
|
90
|
+
| Field | Type | Description |
|
|
91
|
+
| -------- | ------------------- | ---------------------------------------------------- |
|
|
92
|
+
| `input` | `readonly string[]` | Accepted input modalities (e.g. `"text"`, `"image"`) |
|
|
93
|
+
| `output` | `readonly string[]` | Produced output modalities |
|
|
94
|
+
|
|
95
|
+
## Usage
|
|
96
|
+
|
|
97
|
+
### Look Up a Single Model
|
|
98
|
+
|
|
99
|
+
`model(id)` returns the matching `ModelDefinition` or `null`:
|
|
100
|
+
|
|
101
|
+
```ts
|
|
102
|
+
const m = model("openai/gpt-4.1");
|
|
103
|
+
if (m) {
|
|
104
|
+
console.log(m.name);
|
|
105
|
+
console.log(m.pricing.input);
|
|
106
|
+
console.log(m.capabilities.reasoning);
|
|
107
|
+
}
|
|
108
|
+
```
|
|
109
|
+
|
|
110
|
+
### Get All Models
|
|
111
|
+
|
|
112
|
+
`models()` returns the full catalog. Pass a predicate to filter:
|
|
113
|
+
|
|
114
|
+
```ts
|
|
115
|
+
const all = models();
|
|
116
|
+
const withTools = models((m) => m.capabilities.toolCall);
|
|
117
|
+
```
|
|
118
|
+
|
|
119
|
+
### Access the Raw Catalog
|
|
120
|
+
|
|
121
|
+
`MODELS` is the complete readonly array, useful when you need direct iteration:
|
|
122
|
+
|
|
123
|
+
```ts
|
|
124
|
+
const providers = new Set(MODELS.map((m) => m.provider));
|
|
125
|
+
```
|
|
126
|
+
|
|
127
|
+
### Type-Safe Model IDs
|
|
128
|
+
|
|
129
|
+
`ModelId` provides autocomplete for known model IDs while accepting arbitrary strings for new or custom models:
|
|
130
|
+
|
|
131
|
+
```ts
|
|
132
|
+
import type { ModelId } from "@funkai/models";
|
|
133
|
+
|
|
134
|
+
const id: ModelId = "openai/gpt-4.1";
|
|
135
|
+
```
|
|
136
|
+
|
|
137
|
+
### Per-Provider Subpath Exports
|
|
138
|
+
|
|
139
|
+
Each provider has a dedicated subpath with a filtered model list, lookup function, and typed ID:
|
|
140
|
+
|
|
141
|
+
```ts
|
|
142
|
+
import { openAIModels, openAIModel } from "@funkai/models/openai";
|
|
143
|
+
|
|
144
|
+
const m = openAIModel("gpt-4o-2024-11-20");
|
|
145
|
+
```
|
|
146
|
+
|
|
147
|
+
## Updating the Catalog
|
|
148
|
+
|
|
149
|
+
Regenerate the catalog from models.dev:
|
|
150
|
+
|
|
151
|
+
```bash
|
|
152
|
+
pnpm --filter=@funkai/models generate:models
|
|
153
|
+
```
|
|
154
|
+
|
|
155
|
+
Force-regenerate (ignoring staleness cache):
|
|
156
|
+
|
|
157
|
+
```bash
|
|
158
|
+
pnpm --filter=@funkai/models generate:models --force
|
|
159
|
+
```
|
|
160
|
+
|
|
161
|
+
This requires `OPENROUTER_API_KEY` to be set in the environment.
|
|
162
|
+
|
|
163
|
+
## References
|
|
164
|
+
|
|
165
|
+
- [Filtering](filtering.md)
|
|
166
|
+
- [Providers](providers.md)
|
|
167
|
+
- [Provider Resolution](../provider/overview.md)
|
|
168
|
+
- [Cost Calculation](../cost/overview.md)
|
|
@@ -0,0 +1,73 @@
|
|
|
1
|
+
# Supported Providers
|
|
2
|
+
|
|
3
|
+
The model catalog includes models from 21 providers. Each provider has a dedicated subpath export and a prefix used in model IDs.
|
|
4
|
+
|
|
5
|
+
## Provider List
|
|
6
|
+
|
|
7
|
+
| Provider | Prefix | Subpath Import |
|
|
8
|
+
| -------------- | ---------------- | ------------------------------- |
|
|
9
|
+
| OpenAI | `openai` | `@funkai/models/openai` |
|
|
10
|
+
| Anthropic | `anthropic` | `@funkai/models/anthropic` |
|
|
11
|
+
| Google | `google` | `@funkai/models/google` |
|
|
12
|
+
| Google Vertex | `google-vertex` | `@funkai/models/google-vertex` |
|
|
13
|
+
| Mistral | `mistral` | `@funkai/models/mistral` |
|
|
14
|
+
| Amazon Bedrock | `amazon-bedrock` | `@funkai/models/amazon-bedrock` |
|
|
15
|
+
| Groq | `groq` | `@funkai/models/groq` |
|
|
16
|
+
| DeepSeek | `deepseek` | `@funkai/models/deepseek` |
|
|
17
|
+
| xAI | `xai` | `@funkai/models/xai` |
|
|
18
|
+
| Cohere | `cohere` | `@funkai/models/cohere` |
|
|
19
|
+
| Fireworks AI | `fireworks-ai` | `@funkai/models/fireworks-ai` |
|
|
20
|
+
| Together AI | `togetherai` | `@funkai/models/togetherai` |
|
|
21
|
+
| DeepInfra | `deepinfra` | `@funkai/models/deepinfra` |
|
|
22
|
+
| Cerebras | `cerebras` | `@funkai/models/cerebras` |
|
|
23
|
+
| Perplexity | `perplexity` | `@funkai/models/perplexity` |
|
|
24
|
+
| OpenRouter | `openrouter` | `@funkai/models/openrouter` |
|
|
25
|
+
| Llama | `llama` | `@funkai/models/llama` |
|
|
26
|
+
| Alibaba | `alibaba` | `@funkai/models/alibaba` |
|
|
27
|
+
| NVIDIA | `nvidia` | `@funkai/models/nvidia` |
|
|
28
|
+
| Hugging Face | `huggingface` | `@funkai/models/huggingface` |
|
|
29
|
+
| Inception | `inception` | `@funkai/models/inception` |
|
|
30
|
+
|
|
31
|
+
## Subpath Export API
|
|
32
|
+
|
|
33
|
+
Each provider subpath exports three members following a consistent naming pattern:
|
|
34
|
+
|
|
35
|
+
| Export | Type | Description |
|
|
36
|
+
| ------------------- | ---------- | ------------------------------------------------ |
|
|
37
|
+
| `<provider>Models` | `const` | Readonly array of `ModelDefinition` for provider |
|
|
38
|
+
| `<provider>Model` | `function` | Look up a model by ID, returns `null` if missing |
|
|
39
|
+
| `<Provider>ModelId` | `type` | Union type of known model IDs for the provider |
|
|
40
|
+
|
|
41
|
+
### Example
|
|
42
|
+
|
|
43
|
+
```ts
|
|
44
|
+
import { anthropicModels, anthropicModel } from "@funkai/models/anthropic";
|
|
45
|
+
import type { AnthropicModelId } from "@funkai/models/anthropic";
|
|
46
|
+
|
|
47
|
+
const id: AnthropicModelId = "claude-sonnet-4-20250514";
|
|
48
|
+
|
|
49
|
+
const m = anthropicModel(id);
|
|
50
|
+
if (m) {
|
|
51
|
+
console.log(m.name, m.pricing.input);
|
|
52
|
+
}
|
|
53
|
+
|
|
54
|
+
const withReasoning = anthropicModels.filter((m) => m.capabilities.reasoning);
|
|
55
|
+
```
|
|
56
|
+
|
|
57
|
+
## Model ID Format
|
|
58
|
+
|
|
59
|
+
Model IDs in the catalog use the provider-native format `<provider-native-id>` (e.g. `"gpt-4.1"`, `"claude-sonnet-4-20250514"`), which is what the per-provider lookups such as `openAIModel()` accept. When used with the top-level `model()` lookup or with `createModelResolver()`, prefix them with the provider slug: `"openai/gpt-4.1"`, `"anthropic/claude-sonnet-4-20250514"`.
|
|
60
|
+
|
|
61
|
+
## Data Source
|
|
62
|
+
|
|
63
|
+
All provider data is auto-generated from [models.dev](https://models.dev) via the `generate:models` script. To update:
|
|
64
|
+
|
|
65
|
+
```bash
|
|
66
|
+
pnpm --filter=@funkai/models generate:models
|
|
67
|
+
```
|
|
68
|
+
|
|
69
|
+
## References
|
|
70
|
+
|
|
71
|
+
- [Model Catalog](overview.md)
|
|
72
|
+
- [Filtering](filtering.md)
|
|
73
|
+
- [Provider Resolution](../provider/overview.md)
|
|
@@ -0,0 +1,125 @@
|
|
|
1
|
+
# Cost Calculation
|
|
2
|
+
|
|
3
|
+
`calculateCost()` computes the USD cost of a model invocation by multiplying token counts against per-token pricing rates from the catalog.
|
|
4
|
+
|
|
5
|
+
## Key Concepts
|
|
6
|
+
|
|
7
|
+
### TokenUsage
|
|
8
|
+
|
|
9
|
+
Token counts from a model invocation:
|
|
10
|
+
|
|
11
|
+
| Field | Type | Description |
|
|
12
|
+
| ------------------ | -------- | ------------------------------------- |
|
|
13
|
+
| `inputTokens` | `number` | Number of input (prompt) tokens |
|
|
14
|
+
| `outputTokens` | `number` | Number of output (completion) tokens |
|
|
15
|
+
| `totalTokens` | `number` | Total tokens (input + output) |
|
|
16
|
+
| `cacheReadTokens` | `number` | Tokens served from prompt cache |
|
|
17
|
+
| `cacheWriteTokens` | `number` | Tokens written into prompt cache |
|
|
18
|
+
| `reasoningTokens` | `number` | Tokens consumed by internal reasoning |
|
|
19
|
+
|
|
20
|
+
### ModelPricing
|
|
21
|
+
|
|
22
|
+
Per-token pricing rates from the model catalog:
|
|
23
|
+
|
|
24
|
+
| Field | Type | Description |
|
|
25
|
+
| ------------ | --------------------- | --------------------------------- |
|
|
26
|
+
| `input` | `number` | Cost per input token (USD) |
|
|
27
|
+
| `output` | `number` | Cost per output token (USD) |
|
|
28
|
+
| `cacheRead` | `number \| undefined` | Cost per cached read token (USD) |
|
|
29
|
+
| `cacheWrite` | `number \| undefined` | Cost per cached write token (USD) |
|
|
30
|
+
|
|
31
|
+
Pricing rates are stored per-token in the catalog (converted from per-million at generation time). No runtime conversion is needed.
|
|
32
|
+
|
|
33
|
+
### UsageCost
|
|
34
|
+
|
|
35
|
+
The output of `calculateCost()`:
|
|
36
|
+
|
|
37
|
+
| Field | Type | Description |
|
|
38
|
+
| ------------ | -------- | ---------------------------- |
|
|
39
|
+
| `input` | `number` | Cost for input tokens |
|
|
40
|
+
| `output` | `number` | Cost for output tokens |
|
|
41
|
+
| `cacheRead` | `number` | Cost for cached read tokens |
|
|
42
|
+
| `cacheWrite` | `number` | Cost for cached write tokens |
|
|
43
|
+
| `total` | `number` | Sum of all cost fields |
|
|
44
|
+
|
|
45
|
+
All fields are non-negative. Fields that don't apply are `0`.
|
|
46
|
+
|
|
47
|
+
## Usage
|
|
48
|
+
|
|
49
|
+
### Basic Cost Calculation
|
|
50
|
+
|
|
51
|
+
```ts
|
|
52
|
+
const m = model("openai/gpt-4.1");
|
|
53
|
+
if (m) {
|
|
54
|
+
const usage: TokenUsage = {
|
|
55
|
+
inputTokens: 1000,
|
|
56
|
+
outputTokens: 500,
|
|
57
|
+
totalTokens: 1500,
|
|
58
|
+
cacheReadTokens: 200,
|
|
59
|
+
cacheWriteTokens: 0,
|
|
60
|
+
reasoningTokens: 0,
|
|
61
|
+
};
|
|
62
|
+
const cost = calculateCost(usage, m.pricing);
|
|
63
|
+
console.log(`Total: $${cost.total.toFixed(6)}`);
|
|
64
|
+
}
|
|
65
|
+
```
|
|
66
|
+
|
|
67
|
+
### Cost Breakdown
|
|
68
|
+
|
|
69
|
+
```ts
|
|
70
|
+
const cost = calculateCost(usage, m.pricing);
|
|
71
|
+
|
|
72
|
+
console.log(`Input: $${cost.input.toFixed(6)}`);
|
|
73
|
+
console.log(`Output: $${cost.output.toFixed(6)}`);
|
|
74
|
+
console.log(`Cache read: $${cost.cacheRead.toFixed(6)}`);
|
|
75
|
+
console.log(`Cache write: $${cost.cacheWrite.toFixed(6)}`);
|
|
76
|
+
console.log(`Total: $${cost.total.toFixed(6)}`);
|
|
77
|
+
```
|
|
78
|
+
|
|
79
|
+
### Compare Costs Across Models
|
|
80
|
+
|
|
81
|
+
```ts
|
|
82
|
+
const candidates = models((m) => m.capabilities.reasoning);
|
|
83
|
+
|
|
84
|
+
const usage: TokenUsage = {
|
|
85
|
+
inputTokens: 10_000,
|
|
86
|
+
outputTokens: 2_000,
|
|
87
|
+
totalTokens: 12_000,
|
|
88
|
+
cacheReadTokens: 0,
|
|
89
|
+
cacheWriteTokens: 0,
|
|
90
|
+
reasoningTokens: 0,
|
|
91
|
+
};
|
|
92
|
+
|
|
93
|
+
const costs = candidates.map((m) => ({
|
|
94
|
+
id: m.id,
|
|
95
|
+
total: calculateCost(usage, m.pricing).total,
|
|
96
|
+
}));
|
|
97
|
+
|
|
98
|
+
const sorted = costs.toSorted((a, b) => a.total - b.total);
|
|
99
|
+
```
|
|
100
|
+
|
|
101
|
+
### Accumulate Costs
|
|
102
|
+
|
|
103
|
+
```ts
|
|
104
|
+
const totalCost = runs.reduce((sum, run) => {
|
|
105
|
+
const cost = calculateCost(run.usage, run.model.pricing);
|
|
106
|
+
return sum + cost.total;
|
|
107
|
+
}, 0);
|
|
108
|
+
```
|
|
109
|
+
|
|
110
|
+
## Calculation Formula
|
|
111
|
+
|
|
112
|
+
```text
|
|
113
|
+
input = inputTokens * pricing.input
|
|
114
|
+
output = outputTokens * pricing.output
|
|
115
|
+
cacheRead = cacheReadTokens * (pricing.cacheRead ?? 0)
|
|
116
|
+
cacheWrite = cacheWriteTokens * (pricing.cacheWrite ?? 0)
|
|
117
|
+
total = input + output + cacheRead + cacheWrite
|
|
118
|
+
```
|
|
119
|
+
|
|
120
|
+
Optional pricing fields (`cacheRead`, `cacheWrite`) default to `0` when absent.
|
|
121
|
+
|
|
122
|
+
## References
|
|
123
|
+
|
|
124
|
+
- [Model Catalog](../catalog/overview.md)
|
|
125
|
+
- [Track Costs Guide](../guides/track-costs.md)
|
|
@@ -0,0 +1,113 @@
|
|
|
1
|
+
# Filter Models
|
|
2
|
+
|
|
3
|
+
Common patterns for finding the right model from the catalog using `models()` predicates.
|
|
4
|
+
|
|
5
|
+
## Prerequisites
|
|
6
|
+
|
|
7
|
+
- `@funkai/models` installed
|
|
8
|
+
|
|
9
|
+
## Steps
|
|
10
|
+
|
|
11
|
+
### 1. Import the Catalog Functions
|
|
12
|
+
|
|
13
|
+
```ts
|
|
14
|
+
import { models, model } from "@funkai/models";
|
|
15
|
+
```
|
|
16
|
+
|
|
17
|
+
### 2. Filter by Capability
|
|
18
|
+
|
|
19
|
+
Find models with specific capabilities:
|
|
20
|
+
|
|
21
|
+
```ts
|
|
22
|
+
const reasoning = models((m) => m.capabilities.reasoning);
|
|
23
|
+
const withTools = models((m) => m.capabilities.toolCall);
|
|
24
|
+
const structured = models((m) => m.capabilities.structuredOutput);
|
|
25
|
+
```
|
|
26
|
+
|
|
27
|
+
### 3. Filter by Provider
|
|
28
|
+
|
|
29
|
+
Narrow to a specific provider:
|
|
30
|
+
|
|
31
|
+
```ts
|
|
32
|
+
const openai = models((m) => m.provider === "openai");
|
|
33
|
+
```
|
|
34
|
+
|
|
35
|
+
Or use the subpath export:
|
|
36
|
+
|
|
37
|
+
```ts
|
|
38
|
+
import { openAIModels } from "@funkai/models/openai";
|
|
39
|
+
```
|
|
40
|
+
|
|
41
|
+
### 4. Combine Conditions
|
|
42
|
+
|
|
43
|
+
Chain multiple requirements:
|
|
44
|
+
|
|
45
|
+
```ts
|
|
46
|
+
const ideal = models(
|
|
47
|
+
(m) =>
|
|
48
|
+
m.capabilities.reasoning &&
|
|
49
|
+
m.capabilities.toolCall &&
|
|
50
|
+
m.contextWindow >= 128_000 &&
|
|
51
|
+
m.pricing.input < 0.00001,
|
|
52
|
+
);
|
|
53
|
+
```
|
|
54
|
+
|
|
55
|
+
### 5. Sort by Price
|
|
56
|
+
|
|
57
|
+
Find the cheapest model matching your criteria:
|
|
58
|
+
|
|
59
|
+
```ts
|
|
60
|
+
const cheapest = models((m) => m.capabilities.reasoning).toSorted(
|
|
61
|
+
(a, b) => a.pricing.input - b.pricing.input,
|
|
62
|
+
);
|
|
63
|
+
|
|
64
|
+
const pick = cheapest[0];
|
|
65
|
+
```
|
|
66
|
+
|
|
67
|
+
### 6. Find Multimodal Models
|
|
68
|
+
|
|
69
|
+
```ts
|
|
70
|
+
const vision = models((m) => m.modalities.input.includes("image"));
|
|
71
|
+
const audio = models((m) => m.modalities.input.includes("audio"));
|
|
72
|
+
```
|
|
73
|
+
|
|
74
|
+
## Verification
|
|
75
|
+
|
|
76
|
+
Verify your filter returns expected results:
|
|
77
|
+
|
|
78
|
+
```ts
|
|
79
|
+
const results = models((m) => m.capabilities.reasoning && m.provider === "openai");
|
|
80
|
+
|
|
81
|
+
for (const m of results) {
|
|
82
|
+
console.log(m.id, m.name, m.pricing.input);
|
|
83
|
+
}
|
|
84
|
+
```
|
|
85
|
+
|
|
86
|
+
## Troubleshooting
|
|
87
|
+
|
|
88
|
+
### Filter returns empty array
|
|
89
|
+
|
|
90
|
+
**Issue:** No models match your predicate.
|
|
91
|
+
|
|
92
|
+
**Fix:** Relax your filter conditions. Check which values exist in the catalog:
|
|
93
|
+
|
|
94
|
+
```ts
|
|
95
|
+
const providers = [...new Set(models().map((m) => m.provider))];
|
|
96
|
+
console.log(providers);
|
|
97
|
+
```
|
|
98
|
+
|
|
99
|
+
### Model not found in catalog
|
|
100
|
+
|
|
101
|
+
**Issue:** `model(id)` returns `null` for a model you expect to exist.
|
|
102
|
+
|
|
103
|
+
**Fix:** The catalog is generated periodically. Regenerate it to include new models:
|
|
104
|
+
|
|
105
|
+
```bash
|
|
106
|
+
pnpm --filter=@funkai/models generate:models
|
|
107
|
+
```
|
|
108
|
+
|
|
109
|
+
## References
|
|
110
|
+
|
|
111
|
+
- [Model Catalog](../catalog/overview.md)
|
|
112
|
+
- [Filtering](../catalog/filtering.md)
|
|
113
|
+
- [Providers](../catalog/providers.md)
|
|
@@ -0,0 +1,106 @@
|
|
|
1
|
+
# Set Up a Model Resolver
|
|
2
|
+
|
|
3
|
+
Configure `createModelResolver()` with multiple providers and an OpenRouter fallback.
|
|
4
|
+
|
|
5
|
+
## Prerequisites
|
|
6
|
+
|
|
7
|
+
- `@funkai/models` installed
|
|
8
|
+
- API keys for your providers (OpenAI, Anthropic, etc.)
|
|
9
|
+
- `OPENROUTER_API_KEY` set in the environment (for fallback)
|
|
10
|
+
|
|
11
|
+
## Steps
|
|
12
|
+
|
|
13
|
+
### 1. Install Provider SDKs
|
|
14
|
+
|
|
15
|
+
Install the AI SDK providers you want to use directly:
|
|
16
|
+
|
|
17
|
+
```bash
|
|
18
|
+
pnpm add @ai-sdk/openai @ai-sdk/anthropic
|
|
19
|
+
```
|
|
20
|
+
|
|
21
|
+
### 2. Create the Resolver
|
|
22
|
+
|
|
23
|
+
```ts
|
|
24
|
+
import { createModelResolver, openrouter } from "@funkai/models";
|
|
25
|
+
import { createOpenAI } from "@ai-sdk/openai";
|
|
26
|
+
import { createAnthropic } from "@ai-sdk/anthropic";
|
|
27
|
+
|
|
28
|
+
const resolve = createModelResolver({
|
|
29
|
+
providers: {
|
|
30
|
+
openai: createOpenAI({ apiKey: process.env.OPENAI_API_KEY }),
|
|
31
|
+
anthropic: createAnthropic({ apiKey: process.env.ANTHROPIC_API_KEY }),
|
|
32
|
+
},
|
|
33
|
+
fallback: openrouter,
|
|
34
|
+
});
|
|
35
|
+
```
|
|
36
|
+
|
|
37
|
+
### 3. Resolve Models
|
|
38
|
+
|
|
39
|
+
```ts
|
|
40
|
+
const gpt = resolve("openai/gpt-4.1");
|
|
41
|
+
const claude = resolve("anthropic/claude-sonnet-4");
|
|
42
|
+
const mistral = resolve("mistral/mistral-large-latest");
|
|
43
|
+
```
|
|
44
|
+
|
|
45
|
+
- `"openai/gpt-4.1"` routes through `@ai-sdk/openai` directly
|
|
46
|
+
- `"anthropic/claude-sonnet-4"` routes through `@ai-sdk/anthropic` directly
|
|
47
|
+
- `"mistral/mistral-large-latest"` has no mapped provider, so it routes through OpenRouter
|
|
48
|
+
|
|
49
|
+
### 4. Use with Agents
|
|
50
|
+
|
|
51
|
+
Use the resolver with `@funkai/agents` by resolving the model first and passing the result when creating the agent:
|
|
52
|
+
|
|
53
|
+
```ts
|
|
54
|
+
import { agent } from "@funkai/agents";
|
|
55
|
+
|
|
56
|
+
const summarizer = agent({
|
|
57
|
+
name: "summarizer",
|
|
58
|
+
model: resolve("openai/gpt-4.1"),
|
|
59
|
+
prompt: ({ input }) => `Summarize:\n\n${input.text}`,
|
|
60
|
+
});
|
|
61
|
+
```
|
|
62
|
+
|
|
63
|
+
## Verification
|
|
64
|
+
|
|
65
|
+
Verify the resolver works by resolving each configured provider:
|
|
66
|
+
|
|
67
|
+
```ts
|
|
68
|
+
const gpt = resolve("openai/gpt-4.1");
|
|
69
|
+
const claude = resolve("anthropic/claude-sonnet-4");
|
|
70
|
+
|
|
71
|
+
console.log(gpt.modelId);
|
|
72
|
+
console.log(claude.modelId);
|
|
73
|
+
```
|
|
74
|
+
|
|
75
|
+
## Troubleshooting
|
|
76
|
+
|
|
77
|
+
### Cannot resolve model: no provider mapped
|
|
78
|
+
|
|
79
|
+
**Issue:** The model ID prefix does not match any key in `providers` and no `fallback` is configured.
|
|
80
|
+
|
|
81
|
+
**Fix:** Add the provider to the `providers` map or configure a `fallback`:
|
|
82
|
+
|
|
83
|
+
```ts
|
|
84
|
+
const resolve = createModelResolver({
|
|
85
|
+
providers: {
|
|
86
|
+
openai: createOpenAI({ apiKey: process.env.OPENAI_API_KEY }),
|
|
87
|
+
},
|
|
88
|
+
fallback: openrouter,
|
|
89
|
+
});
|
|
90
|
+
```
|
|
91
|
+
|
|
92
|
+
### OPENROUTER_API_KEY environment variable is required
|
|
93
|
+
|
|
94
|
+
**Issue:** `openrouter` is configured as the fallback, but `OPENROUTER_API_KEY` is not set in the environment.
|
|
95
|
+
|
|
96
|
+
**Fix:** Set the environment variable:
|
|
97
|
+
|
|
98
|
+
```bash
|
|
99
|
+
export OPENROUTER_API_KEY=sk-or-...
|
|
100
|
+
```
|
|
101
|
+
|
|
102
|
+
## References
|
|
103
|
+
|
|
104
|
+
- [Provider Resolution](../provider/overview.md)
|
|
105
|
+
- [Configuration](../provider/configuration.md)
|
|
106
|
+
- [OpenRouter](../provider/openrouter.md)
|