llmist 8.1.4 → 9.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +309 -28
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +8 -0
- package/dist/index.d.ts +8 -0
- package/dist/index.js +309 -28
- package/dist/index.js.map +1 -1
- package/package.json +1 -1
package/dist/index.cjs
CHANGED
@@ -277,12 +277,20 @@ var init_model_shortcuts = __esm({
   "src/core/model-shortcuts.ts"() {
     "use strict";
     MODEL_ALIASES = {
-      // OpenAI aliases
+      // OpenAI aliases - GPT-5.2 is the latest flagship
       gpt4: "openai:gpt-4o",
       gpt4o: "openai:gpt-4o",
-
+      "gpt4o-mini": "openai:gpt-4o-mini",
+      gpt5: "openai:gpt-5.2",
+      // Latest flagship
+      "gpt5.2": "openai:gpt-5.2",
+      "gpt5.1": "openai:gpt-5.1",
       "gpt5-mini": "openai:gpt-5-mini",
       "gpt5-nano": "openai:gpt-5-nano",
+      "gpt5-codex": "openai:gpt-5-codex",
+      o1: "openai:o1",
+      o3: "openai:o3",
+      "o4-mini": "openai:o4-mini",
       // Anthropic aliases
       sonnet: "anthropic:claude-sonnet-4-5",
       "claude-sonnet": "anthropic:claude-sonnet-4-5",
@@ -290,11 +298,12 @@ var init_model_shortcuts = __esm({
       "claude-haiku": "anthropic:claude-haiku-4-5",
       opus: "anthropic:claude-opus-4-5",
       "claude-opus": "anthropic:claude-opus-4-5",
-      // Gemini aliases
-      flash: "gemini:gemini-2.
-      "gemini-flash": "gemini:gemini-2.
-      "
-      pro: "gemini:gemini-
+      // Gemini aliases - 2.5 Flash is the recommended fast model
+      flash: "gemini:gemini-2.5-flash",
+      "gemini-flash": "gemini:gemini-2.5-flash",
+      "flash-lite": "gemini:gemini-2.5-flash-lite",
+      "gemini-pro": "gemini:gemini-3-pro-preview",
+      pro: "gemini:gemini-3-pro-preview"
     };
     KNOWN_MODEL_PATTERNS = [
       /^gpt-?\d/i,
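Note: the alias table above maps shorthand names to `provider:model` strings. A minimal sketch of how such a table could be resolved into a provider/model pair — `resolveModel` and its pass-through behavior for unknown specs are illustrative assumptions, not llmist's actual API:

```ts
// Sketch only: resolveModel is a hypothetical helper, not an llmist export.
const MODEL_ALIASES: Record<string, string> = {
  gpt5: "openai:gpt-5.2",
  flash: "gemini:gemini-2.5-flash",
  opus: "anthropic:claude-opus-4-5",
};

function resolveModel(spec: string): { provider: string; modelId: string } {
  // Expand a shorthand alias; pass explicit "provider:model" specs through unchanged.
  const expanded = MODEL_ALIASES[spec] ?? spec;
  const [provider, ...rest] = expanded.split(":");
  return { provider, modelId: rest.join(":") };
}

// resolveModel("gpt5") -> { provider: "openai", modelId: "gpt-5.2" }
```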
@@ -3556,10 +3565,10 @@ var init_anthropic_models = __esm({
       contextWindow: 2e5,
       maxOutputTokens: 64e3,
       pricing: {
-        input:
-        output:
-        cachedInput: 0.
-        cacheWriteInput: 1
+        input: 0.8,
+        output: 4,
+        cachedInput: 0.08,
+        cacheWriteInput: 1
       },
       knowledgeCutoff: "2025-02",
       features: {
@@ -3721,6 +3730,32 @@ var init_anthropic_models = __esm({
         notes: "Legacy model - upgrade to Haiku 4.5 for better performance"
       }
     },
+    // Dated Opus 4.5
+    {
+      provider: "anthropic",
+      modelId: "claude-opus-4-5-20251124",
+      displayName: "Claude Opus 4.5",
+      contextWindow: 2e5,
+      maxOutputTokens: 64e3,
+      pricing: {
+        input: 5,
+        output: 25,
+        cachedInput: 0.5,
+        cacheWriteInput: 6.25
+      },
+      knowledgeCutoff: "2025-03",
+      features: {
+        streaming: true,
+        functionCalling: true,
+        vision: true,
+        reasoning: true
+      },
+      metadata: {
+        family: "Claude 4",
+        releaseDate: "2025-11-24",
+        notes: "Most powerful model for coding and computer use. Extended thinking support."
+      }
+    },
     // Modern aliases (recommended by Anthropic)
     {
       provider: "anthropic",
@@ -3729,10 +3764,10 @@ var init_anthropic_models = __esm({
       contextWindow: 2e5,
       maxOutputTokens: 64e3,
       pricing: {
-        input:
-        output:
-        cachedInput: 0.
-        cacheWriteInput: 1
+        input: 0.8,
+        output: 4,
+        cachedInput: 0.08,
+        cacheWriteInput: 1
      },
       knowledgeCutoff: "2025-02",
       features: {
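Note: the `input`, `output`, and `cachedInput` fields in these entries read as USD per million tokens (the conventional unit). A minimal sketch of a cost estimate over that shape, assuming that unit — `estimateCost` is illustrative, not llmist's API:

```ts
// Sketch only: assumes pricing values are USD per 1M tokens.
interface TokenPricing {
  input: number;        // uncached input tokens
  output: number;       // output tokens
  cachedInput?: number; // cache-read input tokens
}

function estimateCost(
  pricing: TokenPricing,
  usage: { input: number; cachedInput?: number; output: number }
): number {
  const perToken = (ratePerMillion: number) => ratePerMillion / 1_000_000;
  return (
    usage.input * perToken(pricing.input) +
    (usage.cachedInput ?? 0) * perToken(pricing.cachedInput ?? pricing.input) +
    usage.output * perToken(pricing.output)
  );
}

// With the Haiku 4.5 numbers above (input 0.8, output 4):
// estimateCost({ input: 0.8, output: 4, cachedInput: 0.08 }, { input: 10_000, output: 2_000 })
// -> 0.016 (USD)
```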
@@ -4318,6 +4353,33 @@ var init_gemini_models = __esm({
         notes: "Best model for multimodal understanding, agentic and vibe-coding. Deep Think mode available."
       }
     },
+    // Gemini 3 Flash (Preview)
+    {
+      provider: "gemini",
+      modelId: "gemini-3-flash-preview",
+      displayName: "Gemini 3 Flash (Preview)",
+      contextWindow: 1048576,
+      maxOutputTokens: 65536,
+      pricing: {
+        input: 0.4,
+        // $0.40 for text/image/video
+        output: 3,
+        cachedInput: 0.04
+      },
+      knowledgeCutoff: "2025-01",
+      features: {
+        streaming: true,
+        functionCalling: true,
+        vision: true,
+        reasoning: true,
+        structuredOutputs: true
+      },
+      metadata: {
+        family: "Gemini 3",
+        releaseDate: "2025-12",
+        notes: "Fast, cost-effective model with Deep Think mode. Good for agentic tasks."
+      }
+    },
     // Gemini 2.5 Pro
     {
       provider: "gemini",
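Note: each record in these catalogs carries a `features` block, which makes capability-based selection straightforward. A minimal sketch over the fields visible in this diff — the `ModelEntry` shape and `modelsWith` helper are illustrative, not llmist's exports:

```ts
// Sketch only: mirrors the fields visible in the diff; names are illustrative.
interface ModelEntry {
  provider: string;
  modelId: string;
  contextWindow: number;
  features: { reasoning?: boolean; vision?: boolean; structuredOutputs?: boolean };
}

function modelsWith(catalog: ModelEntry[], feature: keyof ModelEntry["features"]): string[] {
  return catalog
    .filter((m) => m.features[feature])
    .map((m) => `${m.provider}:${m.modelId}`);
}

// Given the entry above, modelsWith(catalog, "reasoning") would include
// "gemini:gemini-3-flash-preview".
```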
@@ -5052,13 +5114,37 @@ var GPT_IMAGE_SIZES, GPT_IMAGE_QUALITIES, DALLE3_SIZES, DALLE3_QUALITIES, DALLE2
 var init_openai_image_models = __esm({
   "src/providers/openai-image-models.ts"() {
     "use strict";
-    GPT_IMAGE_SIZES = ["1024x1024", "1024x1536", "1536x1024"];
+    GPT_IMAGE_SIZES = ["1024x1024", "1024x1536", "1536x1024", "1920x1080", "auto"];
     GPT_IMAGE_QUALITIES = ["low", "medium", "high"];
     DALLE3_SIZES = ["1024x1024", "1024x1792", "1792x1024"];
     DALLE3_QUALITIES = ["standard", "hd"];
     DALLE2_SIZES = ["256x256", "512x512", "1024x1024"];
     openaiImageModels = [
-      // GPT Image 1 Family (flagship)
+      // GPT Image 1.5 Family (flagship)
+      {
+        provider: "openai",
+        modelId: "gpt-image-1.5",
+        displayName: "GPT Image 1.5",
+        pricing: {
+          bySize: {
+            "1024x1024": { low: 8e-3, medium: 0.03, high: 0.13 },
+            "1024x1536": { low: 0.012, medium: 0.045, high: 0.195 },
+            "1536x1024": { low: 0.012, medium: 0.045, high: 0.195 },
+            "1920x1080": { low: 0.016, medium: 0.06, high: 0.26 }
+          }
+        },
+        supportedSizes: [...GPT_IMAGE_SIZES],
+        supportedQualities: [...GPT_IMAGE_QUALITIES],
+        maxImages: 1,
+        defaultSize: "1024x1024",
+        defaultQuality: "medium",
+        features: {
+          textRendering: true,
+          transparency: true,
+          editing: true
+        }
+      },
+      // GPT Image 1 Family (previous gen)
       {
         provider: "openai",
         modelId: "gpt-image-1",
@@ -5070,7 +5156,7 @@ var init_openai_image_models = __esm({
            "1536x1024": { low: 0.016, medium: 0.06, high: 0.25 }
          }
        },
-       supportedSizes: [
+       supportedSizes: ["1024x1024", "1024x1536", "1536x1024"],
        supportedQualities: [...GPT_IMAGE_QUALITIES],
        maxImages: 1,
        defaultSize: "1024x1024",
@@ -5091,7 +5177,7 @@ var init_openai_image_models = __esm({
            "1536x1024": { low: 75e-4, medium: 0.03, high: 0.078 }
          }
        },
-       supportedSizes: [
+       supportedSizes: ["1024x1024", "1024x1536", "1536x1024"],
        supportedQualities: [...GPT_IMAGE_QUALITIES],
        maxImages: 1,
        defaultSize: "1024x1024",
@@ -5101,11 +5187,53 @@ var init_openai_image_models = __esm({
          transparency: true
        }
      },
-      //
+      // Sora Video Generation Models
+      {
+        provider: "openai",
+        modelId: "sora-2",
+        displayName: "Sora 2",
+        pricing: {
+          bySize: {
+            "1920x1080": { standard: 0.5, high: 1 },
+            "1080x1920": { standard: 0.5, high: 1 },
+            "1024x1024": { standard: 0.4, high: 0.8 }
+          }
+        },
+        supportedSizes: ["1920x1080", "1080x1920", "1024x1024"],
+        supportedQualities: ["standard", "high"],
+        maxImages: 1,
+        defaultSize: "1920x1080",
+        defaultQuality: "standard",
+        features: {
+          videoGeneration: true
+        }
+      },
+      {
+        provider: "openai",
+        modelId: "sora-2-pro",
+        displayName: "Sora 2 Pro",
+        pricing: {
+          bySize: {
+            "1920x1080": { standard: 1, high: 2 },
+            "1080x1920": { standard: 1, high: 2 },
+            "1024x1024": { standard: 0.8, high: 1.6 }
+          }
+        },
+        supportedSizes: ["1920x1080", "1080x1920", "1024x1024"],
+        supportedQualities: ["standard", "high"],
+        maxImages: 1,
+        defaultSize: "1920x1080",
+        defaultQuality: "standard",
+        features: {
+          videoGeneration: true,
+          extendedDuration: true
+        }
+      },
+      // DALL-E Family (deprecated - use GPT Image models instead)
       {
         provider: "openai",
         modelId: "dall-e-3",
-        displayName: "DALL-E 3",
+        displayName: "DALL-E 3 (Deprecated)",
        pricing: {
          bySize: {
            "1024x1024": { standard: 0.04, hd: 0.08 },
@@ -5126,7 +5254,7 @@ var init_openai_image_models = __esm({
      {
        provider: "openai",
        modelId: "dall-e-2",
-       displayName: "DALL-E 2 (
+       displayName: "DALL-E 2 (Deprecated)",
        pricing: {
          bySize: {
            "256x256": 0.016,
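Note: the image and video entries price per generated output via a `bySize` table keyed by resolution and quality. A minimal sketch of a lookup over that shape — the helper name and its error handling are assumptions:

```ts
// Sketch only: imagePrice is a hypothetical helper over the bySize shape shown above.
type BySizePricing = Record<string, Record<string, number>>;

function imagePrice(bySize: BySizePricing, size: string, quality: string): number {
  const price = bySize[size]?.[quality];
  if (price === undefined) {
    throw new Error(`No price listed for ${size} at "${quality}" quality`);
  }
  return price;
}

// Using the GPT Image 1.5 table from this diff:
const gptImage15: BySizePricing = {
  "1024x1024": { low: 8e-3, medium: 0.03, high: 0.13 },
  "1920x1080": { low: 0.016, medium: 0.06, high: 0.26 },
};
// imagePrice(gptImage15, "1920x1080", "high") -> 0.26 (USD per image)
```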
@@ -5148,12 +5276,65 @@ var init_openai_models = __esm({
   "src/providers/openai-models.ts"() {
     "use strict";
     OPENAI_MODELS = [
-      // GPT-5 Family
+      // GPT-5.2 Family (Latest flagship)
+      {
+        provider: "openai",
+        modelId: "gpt-5.2",
+        displayName: "GPT-5.2",
+        contextWindow: 1e6,
+        maxOutputTokens: 128e3,
+        pricing: {
+          input: 1.25,
+          output: 10,
+          cachedInput: 0.125
+        },
+        knowledgeCutoff: "2025-03-31",
+        features: {
+          streaming: true,
+          functionCalling: true,
+          vision: true,
+          reasoning: true,
+          structuredOutputs: true,
+          fineTuning: true
+        },
+        metadata: {
+          family: "GPT-5.2",
+          releaseDate: "2025-12-01",
+          notes: "Latest flagship model with 1M context window and enhanced reasoning.",
+          supportsTemperature: false
+        }
+      },
+      {
+        provider: "openai",
+        modelId: "gpt-5.2-pro",
+        displayName: "GPT-5.2 Pro",
+        contextWindow: 1e6,
+        maxOutputTokens: 128e3,
+        pricing: {
+          input: 15,
+          output: 120
+        },
+        knowledgeCutoff: "2025-03-31",
+        features: {
+          streaming: true,
+          functionCalling: true,
+          vision: true,
+          reasoning: true,
+          structuredOutputs: true
+        },
+        metadata: {
+          family: "GPT-5.2",
+          releaseDate: "2025-12-01",
+          notes: "Premium tier GPT-5.2 with enhanced reasoning. Does not support prompt caching.",
+          supportsTemperature: false
+        }
+      },
+      // GPT-5.1 Family
       {
         provider: "openai",
         modelId: "gpt-5.1",
         displayName: "GPT-5.1",
-        contextWindow:
+        contextWindow: 1e6,
         maxOutputTokens: 32768,
         pricing: {
           input: 1.25,
@@ -5170,17 +5351,68 @@ var init_openai_models = __esm({
          fineTuning: true
        },
        metadata: {
-         family: "GPT-5",
+         family: "GPT-5.1",
          releaseDate: "2025-11-12",
-         notes: "
+         notes: "GPT-5 variant with improved instruction following. 2-3x faster than GPT-5.",
+         supportsTemperature: false
+       }
+     },
+      {
+        provider: "openai",
+        modelId: "gpt-5.1-codex",
+        displayName: "GPT-5.1 Codex",
+        contextWindow: 1e6,
+        maxOutputTokens: 32768,
+        pricing: {
+          input: 1.25,
+          output: 10,
+          cachedInput: 0.125
+        },
+        knowledgeCutoff: "2024-09-30",
+        features: {
+          streaming: true,
+          functionCalling: true,
+          vision: true,
+          reasoning: true,
+          structuredOutputs: true
+        },
+        metadata: {
+          family: "GPT-5.1",
+          notes: "GPT-5.1 variant optimized for code generation and analysis.",
+          supportsTemperature: false
+        }
+      },
+      {
+        provider: "openai",
+        modelId: "gpt-5.1-codex-max",
+        displayName: "GPT-5.1 Codex Max",
+        contextWindow: 1e6,
+        maxOutputTokens: 32768,
+        pricing: {
+          input: 1.25,
+          output: 10,
+          cachedInput: 0.125
+        },
+        knowledgeCutoff: "2024-09-30",
+        features: {
+          streaming: true,
+          functionCalling: true,
+          vision: true,
+          reasoning: true,
+          structuredOutputs: true
+        },
+        metadata: {
+          family: "GPT-5.1",
+          notes: "Extended thinking variant of GPT-5.1 Codex for complex code tasks.",
          supportsTemperature: false
        }
      },
+      // GPT-5 Family
       {
         provider: "openai",
         modelId: "gpt-5",
         displayName: "GPT-5",
-        contextWindow:
+        contextWindow: 1e6,
         maxOutputTokens: 128e3,
         pricing: {
           input: 1.25,
@@ -5199,7 +5431,32 @@ var init_openai_models = __esm({
        metadata: {
          family: "GPT-5",
          releaseDate: "2025-08-07",
-         notes: "
+         notes: "High-capability model for coding and agentic tasks. 90% caching discount.",
+         supportsTemperature: false
+       }
+     },
+      {
+        provider: "openai",
+        modelId: "gpt-5-codex",
+        displayName: "GPT-5 Codex",
+        contextWindow: 1e6,
+        maxOutputTokens: 128e3,
+        pricing: {
+          input: 1.25,
+          output: 10,
+          cachedInput: 0.125
+        },
+        knowledgeCutoff: "2024-09-30",
+        features: {
+          streaming: true,
+          functionCalling: true,
+          vision: true,
+          reasoning: true,
+          structuredOutputs: true
+        },
+        metadata: {
+          family: "GPT-5",
+          notes: "GPT-5 variant optimized for code generation and analysis.",
          supportsTemperature: false
        }
      },
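Note: the "90% caching discount" mentioned in the GPT-5 notes lines up with the pricing fields in these entries: cached input is billed at 0.125 against 1.25 for uncached input (per 1M tokens). A quick arithmetic check:

```ts
// Sketch only: verifies the stated discount from the pricing fields in this diff.
const input = 1.25;        // USD per 1M uncached input tokens (gpt-5 / gpt-5.1 / gpt-5.2)
const cachedInput = 0.125; // USD per 1M cached input tokens

const discount = 1 - cachedInput / input; // 0.9 -> the 90% caching discount noted above
console.log(`${(discount * 100).toFixed(0)}% discount on cached input tokens`);
```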
@@ -5426,6 +5683,30 @@ var init_openai_models = __esm({
          supportsTemperature: false
        }
      },
+      {
+        provider: "openai",
+        modelId: "o1-pro",
+        displayName: "o1 Pro",
+        contextWindow: 2e5,
+        maxOutputTokens: 1e5,
+        pricing: {
+          input: 150,
+          output: 600
+        },
+        knowledgeCutoff: "2024-12-01",
+        features: {
+          streaming: true,
+          functionCalling: true,
+          vision: true,
+          reasoning: true,
+          structuredOutputs: true
+        },
+        metadata: {
+          family: "o-series",
+          notes: "Premium tier o1 with extended reasoning. Does not support prompt caching.",
+          supportsTemperature: false
+        }
+      },
       {
         provider: "openai",
         modelId: "o3",