@anolilab/ai-model-registry 1.3.0 → 2.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (149)
  1. package/CHANGELOG.md +62 -0
  2. package/LICENSE.md +8 -13
  3. package/MIGRATION.md +474 -0
  4. package/README.md +275 -200
  5. package/dist/icons-sprite.d.ts +1 -1
  6. package/dist/icons-sprite.js +11 -11
  7. package/dist/index.d.ts +16 -16
  8. package/dist/index.js +1 -16
  9. package/dist/packem_chunks/alibaba.js +1 -0
  10. package/dist/packem_chunks/amazon-bedrock.js +1 -0
  11. package/dist/packem_chunks/anthropic.js +1 -0
  12. package/dist/packem_chunks/api.js +75 -0
  13. package/dist/packem_chunks/azure-open-ai.js +1 -0
  14. package/dist/packem_chunks/cerebras.js +1 -0
  15. package/dist/packem_chunks/chutes.js +1 -0
  16. package/dist/packem_chunks/cloudflare.js +3 -0
  17. package/dist/packem_chunks/deep-infra.js +1 -0
  18. package/dist/packem_chunks/deep-seek.js +1 -0
  19. package/dist/packem_chunks/fireworks-ai.js +1 -0
  20. package/dist/packem_chunks/git-hub-copilot.js +1 -0
  21. package/dist/packem_chunks/git-hub-models.js +1 -0
  22. package/dist/packem_chunks/google-partner.js +1 -0
  23. package/dist/packem_chunks/google-vertex.js +1 -0
  24. package/dist/packem_chunks/google.js +1 -0
  25. package/dist/packem_chunks/groq.js +1 -0
  26. package/dist/packem_chunks/hugging-face.js +1 -0
  27. package/dist/packem_chunks/inception.js +1 -0
  28. package/dist/packem_chunks/inference.js +1 -0
  29. package/dist/packem_chunks/meta.js +1 -0
  30. package/dist/packem_chunks/mistral.js +1 -0
  31. package/dist/packem_chunks/model-scope.js +1 -0
  32. package/dist/packem_chunks/morph.js +1 -0
  33. package/dist/packem_chunks/open-ai.js +1 -0
  34. package/dist/packem_chunks/open-router.js +1 -0
  35. package/dist/packem_chunks/providers.js +1 -0
  36. package/dist/packem_chunks/requesty.js +73 -0
  37. package/dist/packem_chunks/together-ai.js +1 -0
  38. package/dist/packem_chunks/upstage.js +1 -0
  39. package/dist/packem_chunks/v0.js +1 -0
  40. package/dist/packem_chunks/venice.js +1 -0
  41. package/dist/packem_chunks/vercel.js +1 -0
  42. package/dist/packem_chunks/weights-_-biases.js +1 -0
  43. package/dist/packem_chunks/xai.js +1 -0
  44. package/dist/schema.d.ts +1 -1
  45. package/dist/schema.js +1 -1
  46. package/dist/types/alibaba.d.ts +6 -0
  47. package/dist/types/alibaba.js +0 -0
  48. package/dist/types/amazon-bedrock.d.ts +6 -0
  49. package/dist/types/amazon-bedrock.js +0 -0
  50. package/dist/types/anthropic.d.ts +6 -0
  51. package/dist/types/anthropic.js +0 -0
  52. package/dist/types/azure-open-ai.d.ts +6 -0
  53. package/dist/types/azure-open-ai.js +0 -0
  54. package/dist/types/cerebras.d.ts +6 -0
  55. package/dist/types/cerebras.js +0 -0
  56. package/dist/types/chutes.d.ts +6 -0
  57. package/dist/types/chutes.js +0 -0
  58. package/dist/types/cloudflare.d.ts +6 -0
  59. package/dist/types/cloudflare.js +0 -0
  60. package/dist/types/deep-infra.d.ts +6 -0
  61. package/dist/types/deep-infra.js +0 -0
  62. package/dist/types/deep-seek.d.ts +6 -0
  63. package/dist/types/deep-seek.js +0 -0
  64. package/dist/types/fireworks-ai.d.ts +6 -0
  65. package/dist/types/fireworks-ai.js +0 -0
  66. package/dist/types/git-hub-copilot.d.ts +6 -0
  67. package/dist/types/git-hub-copilot.js +0 -0
  68. package/dist/types/git-hub-models.d.ts +6 -0
  69. package/dist/types/git-hub-models.js +0 -0
  70. package/dist/types/google-partner.d.ts +6 -0
  71. package/dist/types/google-partner.js +0 -0
  72. package/dist/types/google-vertex.d.ts +6 -0
  73. package/dist/types/google-vertex.js +0 -0
  74. package/dist/types/google.d.ts +6 -0
  75. package/dist/types/google.js +0 -0
  76. package/dist/types/groq.d.ts +6 -0
  77. package/dist/types/groq.js +0 -0
  78. package/dist/types/hugging-face.d.ts +6 -0
  79. package/dist/types/hugging-face.js +0 -0
  80. package/dist/types/inception.d.ts +6 -0
  81. package/dist/types/inception.js +0 -0
  82. package/dist/types/inference.d.ts +6 -0
  83. package/dist/types/inference.js +0 -0
  84. package/dist/types/meta.d.ts +6 -0
  85. package/dist/types/meta.js +0 -0
  86. package/dist/types/mistral.d.ts +6 -0
  87. package/dist/types/mistral.js +0 -0
  88. package/dist/types/model-scope.d.ts +6 -0
  89. package/dist/types/model-scope.js +0 -0
  90. package/dist/types/morph.d.ts +6 -0
  91. package/dist/types/morph.js +0 -0
  92. package/dist/types/open-ai.d.ts +6 -0
  93. package/dist/types/open-ai.js +0 -0
  94. package/dist/types/open-router.d.ts +6 -0
  95. package/dist/types/open-router.js +0 -0
  96. package/dist/types/providers.d.ts +11 -0
  97. package/dist/types/providers.js +1 -0
  98. package/dist/types/requesty.d.ts +6 -0
  99. package/dist/types/requesty.js +0 -0
  100. package/dist/types/together-ai.d.ts +6 -0
  101. package/dist/types/together-ai.js +0 -0
  102. package/dist/types/upstage.d.ts +6 -0
  103. package/dist/types/upstage.js +0 -0
  104. package/dist/types/v0.d.ts +6 -0
  105. package/dist/types/v0.js +0 -0
  106. package/dist/types/venice.d.ts +6 -0
  107. package/dist/types/venice.js +0 -0
  108. package/dist/types/vercel.d.ts +6 -0
  109. package/dist/types/vercel.js +0 -0
  110. package/dist/types/weights-_-biases.d.ts +6 -0
  111. package/dist/types/weights-_-biases.js +0 -0
  112. package/dist/types/xai.d.ts +6 -0
  113. package/dist/types/xai.js +0 -0
  114. package/package.json +7 -2
  115. package/public/alibaba.json +326 -0
  116. package/public/amazon-bedrock.json +2141 -0
  117. package/public/anthropic.json +396 -0
  118. package/public/api.json +66961 -19318
  119. package/public/azure-open-ai.json +332 -0
  120. package/public/cerebras.json +288 -0
  121. package/public/chutes.json +3220 -0
  122. package/public/cloudflare.json +3094 -0
  123. package/public/deep-infra.json +956 -0
  124. package/public/deep-seek.json +46 -0
  125. package/public/fireworks-ai.json +14486 -0
  126. package/public/git-hub-copilot.json +676 -0
  127. package/public/git-hub-models.json +256 -0
  128. package/public/google-partner.json +536 -0
  129. package/public/google-vertex.json +1376 -0
  130. package/public/google.json +536 -0
  131. package/public/groq.json +882 -0
  132. package/public/hugging-face.json +1096 -0
  133. package/public/inception.json +81 -0
  134. package/public/inference.json +291 -0
  135. package/public/meta.json +151 -0
  136. package/public/mistral.json +2181 -0
  137. package/public/model-scope.json +37839 -0
  138. package/public/morph.json +46 -0
  139. package/public/open-ai.json +606 -0
  140. package/public/open-router.json +15341 -0
  141. package/public/providers.json +43 -0
  142. package/public/requesty.json +13757 -0
  143. package/public/together-ai.json +641 -0
  144. package/public/upstage.json +606 -0
  145. package/public/v0.json +431 -0
  146. package/public/venice.json +501 -0
  147. package/public/vercel.json +4071 -0
  148. package/public/weights-&-biases.json +551 -0
  149. package/public/xai.json +256 -0
package/public/inception.json
@@ -0,0 +1,81 @@
+ {
+   "metadata": {
+     "description": "AI Models API - Models from inception",
+     "lastUpdated": "2026-01-10T23:24:47.804Z",
+     "provider": "inception",
+     "totalModels": 2,
+     "version": "0.0.0-development"
+   },
+   "models": [
+     {
+       "attachment": false,
+       "cost": {
+         "input": 0.25,
+         "inputCacheHit": 0.25,
+         "output": 1
+       },
+       "extendedThinking": false,
+       "id": "mercury",
+       "knowledge": "2023-10",
+       "lastUpdated": "2025-07-31",
+       "limit": {
+         "context": 128000,
+         "output": 16384
+       },
+       "modalities": {
+         "input": ["text"],
+         "output": ["text"]
+       },
+       "name": "Mercury",
+       "openWeights": false,
+       "provider": "inception",
+       "providerDoc": "https://platform.inceptionlabs.ai/docs",
+       "providerEnv": ["INCEPTION_API_KEY"],
+       "providerId": "inception",
+       "providerModelsDevId": "inception",
+       "providerNpm": "@ai-sdk/openai-compatible",
+       "reasoning": false,
+       "releaseDate": "2025-06-26",
+       "streamingSupported": true,
+       "temperature": true,
+       "toolCall": true,
+       "vision": false,
+       "icon": "inception"
+     },
+     {
+       "attachment": false,
+       "cost": {
+         "input": 0.25,
+         "inputCacheHit": 0.25,
+         "output": 1
+       },
+       "extendedThinking": false,
+       "id": "mercury-coder",
+       "knowledge": "2023-10",
+       "lastUpdated": "2025-07-31",
+       "limit": {
+         "context": 128000,
+         "output": 16384
+       },
+       "modalities": {
+         "input": ["text"],
+         "output": ["text"]
+       },
+       "name": "Mercury Coder",
+       "openWeights": false,
+       "provider": "inception",
+       "providerDoc": "https://platform.inceptionlabs.ai/docs",
+       "providerEnv": ["INCEPTION_API_KEY"],
+       "providerId": "inception",
+       "providerModelsDevId": "inception",
+       "providerNpm": "@ai-sdk/openai-compatible",
+       "reasoning": false,
+       "releaseDate": "2025-02-26",
+       "streamingSupported": true,
+       "temperature": true,
+       "toolCall": true,
+       "vision": false,
+       "icon": "inception"
+     }
+   ]
+ }
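
Every provider catalog added in this release uses the same per-model record seen in the inception.json hunk above. A minimal TypeScript sketch of that shape, inferred from the JSON in this diff (the interface names here are illustrative; the package's actual exported types live in dist/index.d.ts and dist/types/ and may differ):

// Shape inferred from the provider JSON files in this diff; `ProviderModel`
// and `ProviderFile` are illustrative names, not the package's exported types.
interface ModelCost {
  input: number | null;
  inputCacheHit: number | null;
  output: number | null;
}

interface ProviderModel {
  attachment: boolean;
  cost: ModelCost;
  extendedThinking: boolean;
  id: string;
  knowledge: string | null;
  lastUpdated: string | null;
  limit: { context: number | null; output: number | null };
  modalities: { input: string[]; output: string[] };
  name: string;
  openWeights: boolean;
  provider: string;
  providerDoc: string;
  providerEnv: string[];
  providerId: string;
  providerModelsDevId: string;
  providerNpm: string;
  reasoning: boolean;
  releaseDate: string | null;
  streamingSupported: boolean;
  temperature: boolean;
  toolCall: boolean;
  vision: boolean;
  icon: string;
}

interface ProviderFile {
  metadata: {
    description: string;
    lastUpdated: string;
    provider: string;
    totalModels: number;
    version: string;
  };
  models: ProviderModel[];
}
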
package/public/inference.json
@@ -0,0 +1,291 @@
+ {
+   "metadata": {
+     "description": "AI Models API - Models from Inference",
+     "lastUpdated": "2026-01-10T23:24:47.804Z",
+     "provider": "Inference",
+     "totalModels": 8,
+     "version": "0.0.0-development"
+   },
+   "models": [
+     {
+       "attachment": false,
+       "cost": {
+         "input": 0.002,
+         "inputCacheHit": null,
+         "output": 0.01
+       },
+       "extendedThinking": false,
+       "id": "2-vision",
+       "knowledge": null,
+       "lastUpdated": null,
+       "limit": {
+         "context": null,
+         "output": null
+       },
+       "modalities": {
+         "input": ["text", "image"],
+         "output": ["text"]
+       },
+       "name": "2-Vision",
+       "openWeights": false,
+       "provider": "Inference",
+       "providerDoc": "https://inference.net/models",
+       "providerEnv": ["INFERENCE_API_KEY"],
+       "providerId": "inference",
+       "providerModelsDevId": "inference",
+       "providerNpm": "@ai-sdk/openai-compatible",
+       "reasoning": false,
+       "releaseDate": null,
+       "streamingSupported": true,
+       "temperature": true,
+       "toolCall": false,
+       "vision": true,
+       "icon": "inference"
+     },
+     {
+       "attachment": false,
+       "cost": {
+         "input": null,
+         "inputCacheHit": null,
+         "output": null
+       },
+       "extendedThinking": false,
+       "id": "3-gemma",
+       "knowledge": null,
+       "lastUpdated": null,
+       "limit": {
+         "context": null,
+         "output": null
+       },
+       "modalities": {
+         "input": ["text"],
+         "output": ["text"]
+       },
+       "name": "3Gemma",
+       "openWeights": false,
+       "provider": "Inference",
+       "providerDoc": "https://inference.net/models",
+       "providerEnv": ["INFERENCE_API_KEY"],
+       "providerId": "inference",
+       "providerModelsDevId": "inference",
+       "providerNpm": "@ai-sdk/openai-compatible",
+       "reasoning": false,
+       "releaseDate": null,
+       "streamingSupported": true,
+       "temperature": true,
+       "toolCall": false,
+       "vision": false,
+       "icon": "inference"
+     },
+     {
+       "attachment": false,
+       "cost": {
+         "input": 0.0003,
+         "inputCacheHit": null,
+         "output": 0.0003
+       },
+       "extendedThinking": false,
+       "id": "id-meta-fp-16-llama",
+       "knowledge": null,
+       "lastUpdated": null,
+       "limit": {
+         "context": null,
+         "output": null
+       },
+       "modalities": {
+         "input": ["text"],
+         "output": ["text"]
+       },
+       "name": "IDMetaFP16Llama",
+       "openWeights": false,
+       "provider": "Inference",
+       "providerDoc": "https://inference.net/models",
+       "providerEnv": ["INFERENCE_API_KEY"],
+       "providerId": "inference",
+       "providerModelsDevId": "inference",
+       "providerNpm": "@ai-sdk/openai-compatible",
+       "reasoning": false,
+       "releaseDate": null,
+       "streamingSupported": true,
+       "temperature": true,
+       "toolCall": false,
+       "vision": false,
+       "icon": "inference"
+     },
+     {
+       "attachment": false,
+       "cost": {
+         "input": 0.0003,
+         "inputCacheHit": null,
+         "output": 0.0003
+       },
+       "extendedThinking": false,
+       "id": "id-meta-fp-8-fp-16-llama",
+       "knowledge": null,
+       "lastUpdated": null,
+       "limit": {
+         "context": null,
+         "output": null
+       },
+       "modalities": {
+         "input": ["text"],
+         "output": ["text"]
+       },
+       "name": "IDMetaFP8FP16Llama",
+       "openWeights": false,
+       "provider": "Inference",
+       "providerDoc": "https://inference.net/models",
+       "providerEnv": ["INFERENCE_API_KEY"],
+       "providerId": "inference",
+       "providerModelsDevId": "inference",
+       "providerNpm": "@ai-sdk/openai-compatible",
+       "reasoning": false,
+       "releaseDate": null,
+       "streamingSupported": true,
+       "temperature": true,
+       "toolCall": false,
+       "vision": false,
+       "icon": "inference"
+     },
+     {
+       "attachment": false,
+       "cost": {
+         "input": 0.0003,
+         "inputCacheHit": null,
+         "output": 0.0003
+       },
+       "extendedThinking": false,
+       "id": "id-meta-fp-8-llama",
+       "knowledge": null,
+       "lastUpdated": null,
+       "limit": {
+         "context": null,
+         "output": null
+       },
+       "modalities": {
+         "input": ["text"],
+         "output": ["text"]
+       },
+       "name": "IDMetaFP8Llama",
+       "openWeights": false,
+       "provider": "Inference",
+       "providerDoc": "https://inference.net/models",
+       "providerEnv": ["INFERENCE_API_KEY"],
+       "providerId": "inference",
+       "providerModelsDevId": "inference",
+       "providerNpm": "@ai-sdk/openai-compatible",
+       "reasoning": false,
+       "releaseDate": null,
+       "streamingSupported": true,
+       "temperature": true,
+       "toolCall": false,
+       "vision": false,
+       "icon": "inference"
+     },
+     {
+       "attachment": false,
+       "cost": {
+         "input": 0.0003,
+         "inputCacheHit": null,
+         "output": 0.0003
+       },
+       "extendedThinking": false,
+       "id": "id-mistral-fp-8-mistral",
+       "knowledge": null,
+       "lastUpdated": null,
+       "limit": {
+         "context": null,
+         "output": null
+       },
+       "modalities": {
+         "input": ["text"],
+         "output": ["text"]
+       },
+       "name": "IDMistralFP8Mistral",
+       "openWeights": false,
+       "provider": "Inference",
+       "providerDoc": "https://inference.net/models",
+       "providerEnv": ["INFERENCE_API_KEY"],
+       "providerId": "inference",
+       "providerModelsDevId": "inference",
+       "providerNpm": "@ai-sdk/openai-compatible",
+       "reasoning": false,
+       "releaseDate": null,
+       "streamingSupported": true,
+       "temperature": true,
+       "toolCall": false,
+       "vision": false,
+       "icon": "inference"
+     },
+     {
+       "attachment": false,
+       "cost": {
+         "input": 0.0003,
+         "inputCacheHit": null,
+         "output": 0.0003
+       },
+       "extendedThinking": false,
+       "id": "instruct-llama",
+       "knowledge": null,
+       "lastUpdated": null,
+       "limit": {
+         "context": null,
+         "output": null
+       },
+       "modalities": {
+         "input": ["text"],
+         "output": ["text"]
+       },
+       "name": "InstructLlama",
+       "openWeights": false,
+       "provider": "Inference",
+       "providerDoc": "https://inference.net/models",
+       "providerEnv": ["INFERENCE_API_KEY"],
+       "providerId": "inference",
+       "providerModelsDevId": "inference",
+       "providerNpm": "@ai-sdk/openai-compatible",
+       "reasoning": false,
+       "releaseDate": null,
+       "streamingSupported": true,
+       "temperature": true,
+       "toolCall": false,
+       "vision": false,
+       "icon": "inference"
+     },
+     {
+       "attachment": false,
+       "cost": {
+         "input": 0.0003,
+         "inputCacheHit": null,
+         "output": 0.0003
+       },
+       "extendedThinking": false,
+       "id": "instruct-mistral-ne-mo-12b-instruct",
+       "knowledge": null,
+       "lastUpdated": null,
+       "limit": {
+         "context": null,
+         "output": null
+       },
+       "modalities": {
+         "input": ["text"],
+         "output": ["text"]
+       },
+       "name": "InstructMistral-NeMo-12B-Instruct",
+       "openWeights": false,
+       "provider": "Inference",
+       "providerDoc": "https://inference.net/models",
+       "providerEnv": ["INFERENCE_API_KEY"],
+       "providerId": "inference",
+       "providerModelsDevId": "inference",
+       "providerNpm": "@ai-sdk/openai-compatible",
+       "reasoning": false,
+       "releaseDate": null,
+       "streamingSupported": true,
+       "temperature": true,
+       "toolCall": false,
+       "vision": false,
+       "icon": "inference"
+     }
+   ]
+ }
package/public/meta.json
@@ -0,0 +1,151 @@
+ {
+   "metadata": {
+     "description": "AI Models API - Models from Meta",
+     "lastUpdated": "2026-01-10T23:24:47.804Z",
+     "provider": "Meta",
+     "totalModels": 4,
+     "version": "0.0.0-development"
+   },
+   "models": [
+     {
+       "attachment": false,
+       "cost": {
+         "input": 0,
+         "inputCacheHit": null,
+         "output": 0
+       },
+       "extendedThinking": false,
+       "id": "llama-4-maverick-17b-128e-instruct-fp8",
+       "knowledge": null,
+       "lastUpdated": null,
+       "limit": {
+         "context": null,
+         "output": null
+       },
+       "modalities": {
+         "input": ["text"],
+         "output": ["text"]
+       },
+       "name": "Cerebras-Llama-4-Maverick-17B-128E-Instruct (Preview)",
+       "openWeights": true,
+       "provider": "Meta",
+       "providerDoc": "https://llama.meta.com/llama/",
+       "providerEnv": ["META_API_KEY"],
+       "providerId": "meta/cerebras",
+       "providerModelsDevId": "meta",
+       "providerNpm": "@ai-sdk/meta",
+       "reasoning": false,
+       "releaseDate": null,
+       "streamingSupported": true,
+       "temperature": true,
+       "toolCall": true,
+       "vision": false,
+       "icon": "meta"
+     },
+     {
+       "attachment": false,
+       "cost": {
+         "input": 0,
+         "inputCacheHit": null,
+         "output": 0
+       },
+       "extendedThinking": false,
+       "id": "llama-4-scout-17b-16e-instruct-fp8",
+       "knowledge": null,
+       "lastUpdated": null,
+       "limit": {
+         "context": null,
+         "output": null
+       },
+       "modalities": {
+         "input": ["text"],
+         "output": ["text"]
+       },
+       "name": "Cerebras-Llama-4-Scout-17B-16E-Instruct (Preview)",
+       "openWeights": true,
+       "provider": "Meta",
+       "providerDoc": "https://llama.meta.com/llama/",
+       "providerEnv": ["META_API_KEY"],
+       "providerId": "meta/cerebras",
+       "providerModelsDevId": "meta",
+       "providerNpm": "@ai-sdk/meta",
+       "reasoning": false,
+       "releaseDate": null,
+       "streamingSupported": true,
+       "temperature": true,
+       "toolCall": true,
+       "vision": false,
+       "icon": "meta"
+     },
+     {
+       "attachment": false,
+       "cost": {
+         "input": 0.00045,
+         "inputCacheHit": null,
+         "output": 0.00045
+       },
+       "extendedThinking": false,
+       "id": "llama-3_3-70b-instruct",
+       "knowledge": null,
+       "lastUpdated": null,
+       "limit": {
+         "context": null,
+         "output": null
+       },
+       "modalities": {
+         "input": ["text"],
+         "output": ["text"]
+       },
+       "name": "Llama-3.3-70B-Instruct",
+       "openWeights": true,
+       "provider": "Meta",
+       "providerDoc": "https://llama.meta.com/llama/",
+       "providerEnv": ["META_API_KEY"],
+       "providerId": "meta",
+       "providerModelsDevId": "meta",
+       "providerNpm": "@ai-sdk/meta",
+       "reasoning": false,
+       "releaseDate": null,
+       "streamingSupported": true,
+       "temperature": true,
+       "toolCall": true,
+       "vision": false,
+       "icon": "meta"
+     },
+     {
+       "attachment": false,
+       "cost": {
+         "input": 0,
+         "inputCacheHit": null,
+         "output": 0
+       },
+       "extendedThinking": false,
+       "id": "llama-3_3-8b-instruct",
+       "knowledge": null,
+       "lastUpdated": null,
+       "limit": {
+         "context": null,
+         "output": null
+       },
+       "modalities": {
+         "input": ["text"],
+         "output": ["text"]
+       },
+       "name": "Llama-3.3-8B-Instruct",
+       "openWeights": true,
+       "provider": "Meta",
+       "providerDoc": "https://llama.meta.com/llama/",
+       "providerEnv": ["META_API_KEY"],
+       "providerId": "meta",
+       "providerModelsDevId": "meta",
+       "providerNpm": "@ai-sdk/meta",
+       "reasoning": false,
+       "releaseDate": null,
+       "streamingSupported": true,
+       "temperature": true,
+       "toolCall": true,
+       "vision": false,
+       "icon": "meta"
+     }
+   ]
+ }
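
Because the catalogs ship as static JSON under package/public/, they can also be read straight from the installed package without going through the JS entry points. A sketch under that assumption, reusing the ProviderFile shape from the earlier sketch; the node_modules path and the filtering below are illustrative, not the package's documented API:

import { readFile } from "node:fs/promises";
import path from "node:path";

// Path assumes a standard node_modules layout; adjust to however your
// project resolves the installed package.
const metaJsonPath = path.join(
  "node_modules",
  "@anolilab/ai-model-registry",
  "public",
  "meta.json",
);

// ProviderFile is the illustrative interface sketched after the inception.json hunk.
const catalog: ProviderFile = JSON.parse(await readFile(metaJsonPath, "utf8"));

// Example: ids of Meta models in this catalog that advertise tool calling.
const toolCallIds = catalog.models.filter((m) => m.toolCall).map((m) => m.id);
console.log(toolCallIds);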