llmist 8.1.3 → 9.0.0
This diff shows the contents of publicly released package versions as they appear in their respective public registries and is provided for informational purposes only.
- package/dist/index.cjs +309 -28
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +8 -0
- package/dist/index.d.ts +8 -0
- package/dist/index.js +309 -28
- package/dist/index.js.map +1 -1
- package/package.json +1 -1
package/dist/index.d.cts
CHANGED
@@ -206,6 +206,12 @@ interface ImageModelSpec {
         textRendering?: boolean;
         /** Supports transparency */
         transparency?: boolean;
+        /** Supports image editing/inpainting */
+        editing?: boolean;
+        /** Supports video generation (Sora) */
+        videoGeneration?: boolean;
+        /** Supports extended duration video */
+        extendedDuration?: boolean;
     };
 }
 /**
@@ -7399,6 +7405,8 @@ declare function isAbortError(error: unknown): boolean;
  */
 /**
  * Map of common model aliases to their full provider:model-id format.
+ *
+ * Updated: 2025-12-20
  */
 declare const MODEL_ALIASES: Record<string, string>;
 /**

package/dist/index.d.ts
CHANGED
@@ -206,6 +206,12 @@ interface ImageModelSpec {
         textRendering?: boolean;
         /** Supports transparency */
         transparency?: boolean;
+        /** Supports image editing/inpainting */
+        editing?: boolean;
+        /** Supports video generation (Sora) */
+        videoGeneration?: boolean;
+        /** Supports extended duration video */
+        extendedDuration?: boolean;
     };
 }
 /**
@@ -7399,6 +7405,8 @@ declare function isAbortError(error: unknown): boolean;
  */
 /**
  * Map of common model aliases to their full provider:model-id format.
+ *
+ * Updated: 2025-12-20
  */
 declare const MODEL_ALIASES: Record<string, string>;
 /**

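The new feature flags on ImageModelSpec are additive and optional, so existing consumers of the interface keep compiling. A minimal sketch of how calling code might branch on them, using a structural stand-in for the relevant fields (only the flag names come from the declarations above; the helper is hypothetical):

```ts
// Structural stand-in mirroring the feature flags shown in the diff above;
// the real ImageModelSpec in llmist carries additional members.
type ImageFeatureFlags = {
  textRendering?: boolean;
  transparency?: boolean;
  editing?: boolean;
  videoGeneration?: boolean;
  extendedDuration?: boolean;
};

// Hypothetical helper: summarize what a model's feature flags allow.
function describeCapabilities(features: ImageFeatureFlags): string[] {
  const caps: string[] = [];
  if (features.editing) caps.push("image editing/inpainting");
  if (features.videoGeneration) caps.push("video generation");
  if (features.extendedDuration) caps.push("extended-duration video");
  return caps;
}

// For the sora-2-pro entry added in index.js below:
// ["video generation", "extended-duration video"]
describeCapabilities({ videoGeneration: true, extendedDuration: true });
```
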
package/dist/index.js
CHANGED
@@ -266,12 +266,20 @@ var init_model_shortcuts = __esm({
   "src/core/model-shortcuts.ts"() {
     "use strict";
     MODEL_ALIASES = {
-      // OpenAI aliases
+      // OpenAI aliases - GPT-5.2 is the latest flagship
       gpt4: "openai:gpt-4o",
       gpt4o: "openai:gpt-4o",
-
+      "gpt4o-mini": "openai:gpt-4o-mini",
+      gpt5: "openai:gpt-5.2",
+      // Latest flagship
+      "gpt5.2": "openai:gpt-5.2",
+      "gpt5.1": "openai:gpt-5.1",
       "gpt5-mini": "openai:gpt-5-mini",
       "gpt5-nano": "openai:gpt-5-nano",
+      "gpt5-codex": "openai:gpt-5-codex",
+      o1: "openai:o1",
+      o3: "openai:o3",
+      "o4-mini": "openai:o4-mini",
       // Anthropic aliases
       sonnet: "anthropic:claude-sonnet-4-5",
       "claude-sonnet": "anthropic:claude-sonnet-4-5",
@@ -279,11 +287,12 @@ var init_model_shortcuts = __esm({
       "claude-haiku": "anthropic:claude-haiku-4-5",
       opus: "anthropic:claude-opus-4-5",
       "claude-opus": "anthropic:claude-opus-4-5",
-      // Gemini aliases
-      flash: "gemini:gemini-2.
-      "gemini-flash": "gemini:gemini-2.
-      "
-      pro: "gemini:gemini-
+      // Gemini aliases - 2.5 Flash is the recommended fast model
+      flash: "gemini:gemini-2.5-flash",
+      "gemini-flash": "gemini:gemini-2.5-flash",
+      "flash-lite": "gemini:gemini-2.5-flash-lite",
+      "gemini-pro": "gemini:gemini-3-pro-preview",
+      pro: "gemini:gemini-3-pro-preview"
     };
     KNOWN_MODEL_PATTERNS = [
       /^gpt-?\d/i,
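MODEL_ALIASES is typed as Record<string, string> and maps shorthand names to `provider:model-id` strings. The resolution logic itself is not part of this diff; a plausible sketch, assuming unrecognized names pass through unchanged:

```ts
// Subset of the alias table added in this release (values copied from the hunks above).
const MODEL_ALIASES: Record<string, string> = {
  gpt5: "openai:gpt-5.2",
  "gpt5.1": "openai:gpt-5.1",
  flash: "gemini:gemini-2.5-flash",
  pro: "gemini:gemini-3-pro-preview",
};

// Hypothetical resolver: known aliases expand, anything else is returned as-is.
function resolveModel(name: string): string {
  return MODEL_ALIASES[name] ?? name;
}

resolveModel("gpt5"); // "openai:gpt-5.2"
resolveModel("anthropic:claude-opus-4-5"); // unchanged
```
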
@@ -3543,10 +3552,10 @@ var init_anthropic_models = __esm({
         contextWindow: 2e5,
         maxOutputTokens: 64e3,
         pricing: {
-          input:
-          output:
-          cachedInput: 0.
-          cacheWriteInput: 1
+          input: 0.8,
+          output: 4,
+          cachedInput: 0.08,
+          cacheWriteInput: 1
         },
         knowledgeCutoff: "2025-02",
         features: {
@@ -3708,6 +3717,32 @@ var init_anthropic_models = __esm({
           notes: "Legacy model - upgrade to Haiku 4.5 for better performance"
         }
       },
+      // Dated Opus 4.5
+      {
+        provider: "anthropic",
+        modelId: "claude-opus-4-5-20251124",
+        displayName: "Claude Opus 4.5",
+        contextWindow: 2e5,
+        maxOutputTokens: 64e3,
+        pricing: {
+          input: 5,
+          output: 25,
+          cachedInput: 0.5,
+          cacheWriteInput: 6.25
+        },
+        knowledgeCutoff: "2025-03",
+        features: {
+          streaming: true,
+          functionCalling: true,
+          vision: true,
+          reasoning: true
+        },
+        metadata: {
+          family: "Claude 4",
+          releaseDate: "2025-11-24",
+          notes: "Most powerful model for coding and computer use. Extended thinking support."
+        }
+      },
       // Modern aliases (recommended by Anthropic)
       {
         provider: "anthropic",
@@ -3716,10 +3751,10 @@ var init_anthropic_models = __esm({
         contextWindow: 2e5,
         maxOutputTokens: 64e3,
         pricing: {
-          input:
-          output:
-          cachedInput: 0.
-          cacheWriteInput: 1
+          input: 0.8,
+          output: 4,
+          cachedInput: 0.08,
+          cacheWriteInput: 1
         },
         knowledgeCutoff: "2025-02",
         features: {
@@ -4305,6 +4340,33 @@ var init_gemini_models = __esm({
           notes: "Best model for multimodal understanding, agentic and vibe-coding. Deep Think mode available."
         }
       },
+      // Gemini 3 Flash (Preview)
+      {
+        provider: "gemini",
+        modelId: "gemini-3-flash-preview",
+        displayName: "Gemini 3 Flash (Preview)",
+        contextWindow: 1048576,
+        maxOutputTokens: 65536,
+        pricing: {
+          input: 0.4,
+          // $0.40 for text/image/video
+          output: 3,
+          cachedInput: 0.04
+        },
+        knowledgeCutoff: "2025-01",
+        features: {
+          streaming: true,
+          functionCalling: true,
+          vision: true,
+          reasoning: true,
+          structuredOutputs: true
+        },
+        metadata: {
+          family: "Gemini 3",
+          releaseDate: "2025-12",
+          notes: "Fast, cost-effective model with Deep Think mode. Good for agentic tasks."
+        }
+      },
       // Gemini 2.5 Pro
       {
         provider: "gemini",
@@ -5039,13 +5101,37 @@ var GPT_IMAGE_SIZES, GPT_IMAGE_QUALITIES, DALLE3_SIZES, DALLE3_QUALITIES, DALLE2
 var init_openai_image_models = __esm({
   "src/providers/openai-image-models.ts"() {
     "use strict";
-    GPT_IMAGE_SIZES = ["1024x1024", "1024x1536", "1536x1024"];
+    GPT_IMAGE_SIZES = ["1024x1024", "1024x1536", "1536x1024", "1920x1080", "auto"];
     GPT_IMAGE_QUALITIES = ["low", "medium", "high"];
     DALLE3_SIZES = ["1024x1024", "1024x1792", "1792x1024"];
     DALLE3_QUALITIES = ["standard", "hd"];
     DALLE2_SIZES = ["256x256", "512x512", "1024x1024"];
     openaiImageModels = [
-      // GPT Image 1 Family (flagship)
+      // GPT Image 1.5 Family (flagship)
+      {
+        provider: "openai",
+        modelId: "gpt-image-1.5",
+        displayName: "GPT Image 1.5",
+        pricing: {
+          bySize: {
+            "1024x1024": { low: 8e-3, medium: 0.03, high: 0.13 },
+            "1024x1536": { low: 0.012, medium: 0.045, high: 0.195 },
+            "1536x1024": { low: 0.012, medium: 0.045, high: 0.195 },
+            "1920x1080": { low: 0.016, medium: 0.06, high: 0.26 }
+          }
+        },
+        supportedSizes: [...GPT_IMAGE_SIZES],
+        supportedQualities: [...GPT_IMAGE_QUALITIES],
+        maxImages: 1,
+        defaultSize: "1024x1024",
+        defaultQuality: "medium",
+        features: {
+          textRendering: true,
+          transparency: true,
+          editing: true
+        }
+      },
+      // GPT Image 1 Family (previous gen)
       {
         provider: "openai",
         modelId: "gpt-image-1",
@@ -5057,7 +5143,7 @@ var init_openai_image_models = __esm({
             "1536x1024": { low: 0.016, medium: 0.06, high: 0.25 }
           }
         },
-        supportedSizes: [
+        supportedSizes: ["1024x1024", "1024x1536", "1536x1024"],
         supportedQualities: [...GPT_IMAGE_QUALITIES],
         maxImages: 1,
         defaultSize: "1024x1024",
@@ -5078,7 +5164,7 @@ var init_openai_image_models = __esm({
             "1536x1024": { low: 75e-4, medium: 0.03, high: 0.078 }
           }
         },
-        supportedSizes: [
+        supportedSizes: ["1024x1024", "1024x1536", "1536x1024"],
         supportedQualities: [...GPT_IMAGE_QUALITIES],
         maxImages: 1,
         defaultSize: "1024x1024",
@@ -5088,11 +5174,53 @@ var init_openai_image_models = __esm({
           transparency: true
         }
       },
-      //
+      // Sora Video Generation Models
+      {
+        provider: "openai",
+        modelId: "sora-2",
+        displayName: "Sora 2",
+        pricing: {
+          bySize: {
+            "1920x1080": { standard: 0.5, high: 1 },
+            "1080x1920": { standard: 0.5, high: 1 },
+            "1024x1024": { standard: 0.4, high: 0.8 }
+          }
+        },
+        supportedSizes: ["1920x1080", "1080x1920", "1024x1024"],
+        supportedQualities: ["standard", "high"],
+        maxImages: 1,
+        defaultSize: "1920x1080",
+        defaultQuality: "standard",
+        features: {
+          videoGeneration: true
+        }
+      },
+      {
+        provider: "openai",
+        modelId: "sora-2-pro",
+        displayName: "Sora 2 Pro",
+        pricing: {
+          bySize: {
+            "1920x1080": { standard: 1, high: 2 },
+            "1080x1920": { standard: 1, high: 2 },
+            "1024x1024": { standard: 0.8, high: 1.6 }
+          }
+        },
+        supportedSizes: ["1920x1080", "1080x1920", "1024x1024"],
+        supportedQualities: ["standard", "high"],
+        maxImages: 1,
+        defaultSize: "1920x1080",
+        defaultQuality: "standard",
+        features: {
+          videoGeneration: true,
+          extendedDuration: true
+        }
+      },
+      // DALL-E Family (deprecated - use GPT Image models instead)
       {
         provider: "openai",
         modelId: "dall-e-3",
-        displayName: "DALL-E 3",
+        displayName: "DALL-E 3 (Deprecated)",
         pricing: {
           bySize: {
             "1024x1024": { standard: 0.04, hd: 0.08 },
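The image and Sora entries price per generation through a nested bySize table keyed by size, then quality. The diff does not state the unit (the figures read like USD per generated output); a lookup sketch under that assumption:

```ts
// Shape of the bySize pricing used by the gpt-image and sora entries above.
type BySizePricing = Record<string, Record<string, number>>;

// Values copied from the sora-2 entry added in this hunk.
const sora2Pricing: BySizePricing = {
  "1920x1080": { standard: 0.5, high: 1 },
  "1080x1920": { standard: 0.5, high: 1 },
  "1024x1024": { standard: 0.4, high: 0.8 },
};

// Hypothetical helper: look up a price, rejecting unsupported size/quality combinations.
function priceFor(pricing: BySizePricing, size: string, quality: string): number {
  const price = pricing[size]?.[quality];
  if (price === undefined) throw new Error(`unsupported ${size}/${quality}`);
  return price;
}

priceFor(sora2Pricing, "1920x1080", "standard"); // 0.5
```
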
@@ -5113,7 +5241,7 @@ var init_openai_image_models = __esm({
       {
         provider: "openai",
         modelId: "dall-e-2",
-        displayName: "DALL-E 2 (
+        displayName: "DALL-E 2 (Deprecated)",
         pricing: {
           bySize: {
             "256x256": 0.016,
@@ -5135,12 +5263,65 @@ var init_openai_models = __esm({
   "src/providers/openai-models.ts"() {
     "use strict";
     OPENAI_MODELS = [
-      // GPT-5 Family
+      // GPT-5.2 Family (Latest flagship)
+      {
+        provider: "openai",
+        modelId: "gpt-5.2",
+        displayName: "GPT-5.2",
+        contextWindow: 1e6,
+        maxOutputTokens: 128e3,
+        pricing: {
+          input: 1.25,
+          output: 10,
+          cachedInput: 0.125
+        },
+        knowledgeCutoff: "2025-03-31",
+        features: {
+          streaming: true,
+          functionCalling: true,
+          vision: true,
+          reasoning: true,
+          structuredOutputs: true,
+          fineTuning: true
+        },
+        metadata: {
+          family: "GPT-5.2",
+          releaseDate: "2025-12-01",
+          notes: "Latest flagship model with 1M context window and enhanced reasoning.",
+          supportsTemperature: false
+        }
+      },
+      {
+        provider: "openai",
+        modelId: "gpt-5.2-pro",
+        displayName: "GPT-5.2 Pro",
+        contextWindow: 1e6,
+        maxOutputTokens: 128e3,
+        pricing: {
+          input: 15,
+          output: 120
+        },
+        knowledgeCutoff: "2025-03-31",
+        features: {
+          streaming: true,
+          functionCalling: true,
+          vision: true,
+          reasoning: true,
+          structuredOutputs: true
+        },
+        metadata: {
+          family: "GPT-5.2",
+          releaseDate: "2025-12-01",
+          notes: "Premium tier GPT-5.2 with enhanced reasoning. Does not support prompt caching.",
+          supportsTemperature: false
+        }
+      },
+      // GPT-5.1 Family
       {
         provider: "openai",
         modelId: "gpt-5.1",
         displayName: "GPT-5.1",
-        contextWindow:
+        contextWindow: 1e6,
         maxOutputTokens: 32768,
         pricing: {
           input: 1.25,
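The chat-model pricing fields (input, output, cachedInput) appear to be USD per million tokens, consistent with the rest of the file, though the diff does not state the unit. A cost-estimate sketch under that assumption, using the GPT-5.2 figures added above (the estimator itself is hypothetical):

```ts
// Figures copied from the gpt-5.2 entry: 1.25 input, 10 output, 0.125 cached input (assumed USD per 1M tokens).
const gpt52 = { input: 1.25, output: 10, cachedInput: 0.125 };

// Hypothetical estimator: convert token counts to millions and price each bucket.
function estimateCostUSD(
  rates: { input: number; output: number; cachedInput?: number },
  tokens: { input: number; output: number; cachedInput?: number },
): number {
  const M = 1_000_000;
  return (
    (tokens.input / M) * rates.input +
    (tokens.output / M) * rates.output +
    ((tokens.cachedInput ?? 0) / M) * (rates.cachedInput ?? rates.input)
  );
}

// 100k fresh input + 20k output ≈ $0.325 at these rates.
estimateCostUSD(gpt52, { input: 100_000, output: 20_000 });
```
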
@@ -5157,17 +5338,68 @@ var init_openai_models = __esm({
           fineTuning: true
         },
         metadata: {
-          family: "GPT-5",
+          family: "GPT-5.1",
           releaseDate: "2025-11-12",
-          notes: "
+          notes: "GPT-5 variant with improved instruction following. 2-3x faster than GPT-5.",
+          supportsTemperature: false
+        }
+      },
+      {
+        provider: "openai",
+        modelId: "gpt-5.1-codex",
+        displayName: "GPT-5.1 Codex",
+        contextWindow: 1e6,
+        maxOutputTokens: 32768,
+        pricing: {
+          input: 1.25,
+          output: 10,
+          cachedInput: 0.125
+        },
+        knowledgeCutoff: "2024-09-30",
+        features: {
+          streaming: true,
+          functionCalling: true,
+          vision: true,
+          reasoning: true,
+          structuredOutputs: true
+        },
+        metadata: {
+          family: "GPT-5.1",
+          notes: "GPT-5.1 variant optimized for code generation and analysis.",
+          supportsTemperature: false
+        }
+      },
+      {
+        provider: "openai",
+        modelId: "gpt-5.1-codex-max",
+        displayName: "GPT-5.1 Codex Max",
+        contextWindow: 1e6,
+        maxOutputTokens: 32768,
+        pricing: {
+          input: 1.25,
+          output: 10,
+          cachedInput: 0.125
+        },
+        knowledgeCutoff: "2024-09-30",
+        features: {
+          streaming: true,
+          functionCalling: true,
+          vision: true,
+          reasoning: true,
+          structuredOutputs: true
+        },
+        metadata: {
+          family: "GPT-5.1",
+          notes: "Extended thinking variant of GPT-5.1 Codex for complex code tasks.",
           supportsTemperature: false
         }
       },
+      // GPT-5 Family
       {
         provider: "openai",
         modelId: "gpt-5",
         displayName: "GPT-5",
-        contextWindow:
+        contextWindow: 1e6,
         maxOutputTokens: 128e3,
         pricing: {
           input: 1.25,
@@ -5186,7 +5418,32 @@ var init_openai_models = __esm({
         metadata: {
           family: "GPT-5",
           releaseDate: "2025-08-07",
-          notes: "
+          notes: "High-capability model for coding and agentic tasks. 90% caching discount.",
+          supportsTemperature: false
+        }
+      },
+      {
+        provider: "openai",
+        modelId: "gpt-5-codex",
+        displayName: "GPT-5 Codex",
+        contextWindow: 1e6,
+        maxOutputTokens: 128e3,
+        pricing: {
+          input: 1.25,
+          output: 10,
+          cachedInput: 0.125
+        },
+        knowledgeCutoff: "2024-09-30",
+        features: {
+          streaming: true,
+          functionCalling: true,
+          vision: true,
+          reasoning: true,
+          structuredOutputs: true
+        },
+        metadata: {
+          family: "GPT-5",
+          notes: "GPT-5 variant optimized for code generation and analysis.",
           supportsTemperature: false
         }
       },
@@ -5413,6 +5670,30 @@ var init_openai_models = __esm({
           supportsTemperature: false
         }
       },
+      {
+        provider: "openai",
+        modelId: "o1-pro",
+        displayName: "o1 Pro",
+        contextWindow: 2e5,
+        maxOutputTokens: 1e5,
+        pricing: {
+          input: 150,
+          output: 600
+        },
+        knowledgeCutoff: "2024-12-01",
+        features: {
+          streaming: true,
+          functionCalling: true,
+          vision: true,
+          reasoning: true,
+          structuredOutputs: true
+        },
+        metadata: {
+          family: "o-series",
+          notes: "Premium tier o1 with extended reasoning. Does not support prompt caching.",
+          supportsTemperature: false
+        }
+      },
       {
         provider: "openai",
         modelId: "o3",