@roo-code/types 1.85.0 → 1.87.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +343 -284
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +1197 -3079
- package/dist/index.d.ts +1197 -3079
- package/dist/index.js +332 -283
- package/dist/index.js.map +1 -1
- package/package.json +1 -1
package/dist/index.js
CHANGED
@@ -21,10 +21,6 @@ var clineAsks = [
   "auto_approval_max_req_reached"
 ];
 var clineAskSchema = z.enum(clineAsks);
-var nonBlockingAsks = ["command_output"];
-function isNonBlockingAsk(ask) {
-  return nonBlockingAsks.includes(ask);
-}
 var idleAsks = [
   "completion_result",
   "api_req_failed",
@@ -49,6 +45,10 @@ var interactiveAsks = [
 function isInteractiveAsk(ask) {
   return interactiveAsks.includes(ask);
 }
+var nonBlockingAsks = ["command_output"];
+function isNonBlockingAsk(ask) {
+  return nonBlockingAsks.includes(ask);
+}
 var clineSays = [
   "error",
   "api_req_started",
@@ -103,12 +103,7 @@ var clineMessageSchema = z.object({
   contextCondense: contextCondenseSchema.optional(),
   isProtected: z.boolean().optional(),
   apiProtocol: z.union([z.literal("openai"), z.literal("anthropic")]).optional(),
-  isAnswered: z.boolean().optional(),
-  metadata: z.object({
-    gpt5: z.object({
-      previous_response_id: z.string().optional()
-    }).optional()
-  }).optional()
+  isAnswered: z.boolean().optional()
 });
 var tokenUsageSchema = z.object({
   totalTokensIn: z.number(),
@@ -159,6 +154,16 @@ var toolUsageSchema = z2.record(
     failures: z2.number()
   })
 );
+var TOOL_PROTOCOL = {
+  XML: "xml",
+  NATIVE: "native"
+};
+function isNativeProtocol(protocol) {
+  return protocol === TOOL_PROTOCOL.NATIVE;
+}
+function getEffectiveProtocol(toolProtocol) {
+  return toolProtocol || TOOL_PROTOCOL.XML;
+}
 
 // src/events.ts
 var RooCodeEventName = /* @__PURE__ */ ((RooCodeEventName2) => {
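
Note: the new tool-protocol helpers treat a missing value as XML. A minimal TypeScript usage sketch, assuming the helpers are imported from "@roo-code/types":

  import { TOOL_PROTOCOL, getEffectiveProtocol, isNativeProtocol } from "@roo-code/types";

  // An unset per-profile protocol falls back to the XML protocol.
  const effective = getEffectiveProtocol(undefined);   // "xml"
  isNativeProtocol(effective);                         // false
  isNativeProtocol(TOOL_PROTOCOL.NATIVE);              // true
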
@@ -358,6 +363,10 @@ import { z as z5 } from "zod";
 var reasoningEfforts = ["low", "medium", "high"];
 var reasoningEffortsSchema = z5.enum(reasoningEfforts);
 var reasoningEffortWithMinimalSchema = z5.union([reasoningEffortsSchema, z5.literal("minimal")]);
+var reasoningEffortsExtended = ["none", "minimal", "low", "medium", "high"];
+var reasoningEffortExtendedSchema = z5.enum(reasoningEffortsExtended);
+var reasoningEffortSettingValues = ["disable", "none", "minimal", "low", "medium", "high"];
+var reasoningEffortSettingSchema = z5.enum(reasoningEffortSettingValues);
 var verbosityLevels = ["low", "medium", "high"];
 var verbosityLevelsSchema = z5.enum(verbosityLevels);
 var serviceTiers = ["default", "flex", "priority"];
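
Note: the setting-level enum adds "disable" on top of the extended model-level values ("none", "minimal", "low", "medium", "high"), so a profile can explicitly turn reasoning off. A small validation sketch, assuming both schemas are imported from "@roo-code/types":

  import { reasoningEffortExtendedSchema, reasoningEffortSettingSchema } from "@roo-code/types";

  reasoningEffortSettingSchema.parse("disable");               // accepted: setting-only value
  reasoningEffortExtendedSchema.safeParse("disable").success;  // false: not a model-level effort
  reasoningEffortExtendedSchema.parse("none");                 // accepted
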
@@ -371,6 +380,10 @@ var modelInfoSchema = z5.object({
   contextWindow: z5.number(),
   supportsImages: z5.boolean().optional(),
   supportsPromptCache: z5.boolean(),
+  // Optional default prompt cache retention policy for providers that support it.
+  // When set to "24h", extended prompt caching will be requested; when omitted
+  // or set to "in_memory", the default in-memory cache is used.
+  promptCacheRetention: z5.enum(["in_memory", "24h"]).optional(),
   // Capability flag to indicate whether the model supports an output verbosity parameter
   supportsVerbosity: z5.boolean().optional(),
   supportsReasoningBudget: z5.boolean().optional(),
@@ -380,7 +393,7 @@ var modelInfoSchema = z5.object({
   supportsTemperature: z5.boolean().optional(),
   defaultTemperature: z5.number().optional(),
   requiredReasoningBudget: z5.boolean().optional(),
-  supportsReasoningEffort: z5.boolean().optional(),
+  supportsReasoningEffort: z5.union([z5.boolean(), z5.array(z5.enum(["disable", "none", "minimal", "low", "medium", "high"]))]).optional(),
   requiredReasoningEffort: z5.boolean().optional(),
   preserveReasoning: z5.boolean().optional(),
   supportedParameters: z5.array(modelParametersSchema).optional(),
@@ -389,7 +402,8 @@ var modelInfoSchema = z5.object({
   cacheWritesPrice: z5.number().optional(),
   cacheReadsPrice: z5.number().optional(),
   description: z5.string().optional(),
-
+  // Default effort value for models that support reasoning effort
+  reasoningEffort: reasoningEffortExtendedSchema.optional(),
   minTokensPerCachePoint: z5.number().optional(),
   maxCachePoints: z5.number().optional(),
   cachableFields: z5.array(z5.string()).optional(),
@@ -397,6 +411,10 @@ var modelInfoSchema = z5.object({
   deprecated: z5.boolean().optional(),
   // Flag to indicate if the model is free (no cost)
   isFree: z5.boolean().optional(),
+  // Flag to indicate if the model supports native tool calling (OpenAI-style function calling)
+  supportsNativeTools: z5.boolean().optional(),
+  // Default tool protocol preferred by this model (if not specified, falls back to capability/provider defaults)
+  defaultToolProtocol: z5.enum(["xml", "native"]).optional(),
   /**
    * Service tiers with pricing information.
    * Each tier can have a name (for OpenAI service tiers) and pricing overrides.
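
Note: with these fields a model entry can declare native tool-calling support, a prompt cache retention policy, and either a boolean or an explicit list of allowed efforts. A shape-check sketch that only exercises the new optional fields; the modelInfoSchema import path is an assumption:

  import { modelInfoSchema } from "@roo-code/types"; // assumed export

  const result = modelInfoSchema.safeParse({
    contextWindow: 4e5,
    supportsPromptCache: true,
    promptCacheRetention: "24h",                         // new optional field
    supportsNativeTools: true,                           // new optional flag
    defaultToolProtocol: "native",                       // new optional default
    supportsReasoningEffort: ["low", "medium", "high"],  // boolean or array is now accepted
    reasoningEffort: "medium"
  });
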
@@ -1909,133 +1927,39 @@ var fireworksModels = {
 };
 
 // src/providers/gemini.ts
-var geminiDefaultModelId = "gemini-2.
+var geminiDefaultModelId = "gemini-2.5-pro";
 var geminiModels = {
-
-  "gemini-flash-latest": {
-    maxTokens: 65536,
-    contextWindow: 1048576,
-    supportsImages: true,
-    supportsPromptCache: true,
-    inputPrice: 0.3,
-    outputPrice: 2.5,
-    cacheReadsPrice: 0.075,
-    cacheWritesPrice: 1,
-    maxThinkingTokens: 24576,
-    supportsReasoningBudget: true
-  },
-  "gemini-flash-lite-latest": {
-    maxTokens: 65536,
-    contextWindow: 1048576,
-    supportsImages: true,
-    supportsPromptCache: true,
-    inputPrice: 0.1,
-    outputPrice: 0.4,
-    cacheReadsPrice: 0.025,
-    cacheWritesPrice: 1,
-    supportsReasoningBudget: true,
-    maxThinkingTokens: 24576
-  },
-  // 2.5 Flash models (09-2025 versions - most recent)
-  "gemini-2.5-flash-preview-09-2025": {
-    maxTokens: 65536,
-    contextWindow: 1048576,
-    supportsImages: true,
-    supportsPromptCache: true,
-    inputPrice: 0.3,
-    outputPrice: 2.5,
-    cacheReadsPrice: 0.075,
-    cacheWritesPrice: 1,
-    maxThinkingTokens: 24576,
-    supportsReasoningBudget: true
-  },
-  "gemini-2.5-flash-lite-preview-09-2025": {
+  "gemini-3-pro-preview": {
     maxTokens: 65536,
     contextWindow: 1048576,
     supportsImages: true,
+    supportsNativeTools: true,
     supportsPromptCache: true,
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    maxThinkingTokens: 24576
-  },
-  // 2.5 Flash models (05-20 versions)
-  "gemini-2.5-flash-preview-05-20:thinking": {
-    maxTokens: 65535,
-    contextWindow: 1048576,
-    supportsImages: true,
-    supportsPromptCache: true,
-    inputPrice: 0.15,
-    outputPrice: 3.5,
-    cacheReadsPrice: 0.0375,
-    cacheWritesPrice: 1,
-    maxThinkingTokens: 24576,
-    supportsReasoningBudget: true,
-    requiredReasoningBudget: true
-  },
-  "gemini-2.5-flash-preview-05-20": {
-    maxTokens: 65535,
-    contextWindow: 1048576,
-    supportsImages: true,
-    supportsPromptCache: true,
-    inputPrice: 0.15,
-    outputPrice: 0.6,
-    cacheReadsPrice: 0.0375,
-    cacheWritesPrice: 1
-  },
-  // 2.5 Flash models (04-17 versions)
-  "gemini-2.5-flash-preview-04-17:thinking": {
-    maxTokens: 65535,
-    contextWindow: 1048576,
-    supportsImages: true,
-    supportsPromptCache: false,
-    inputPrice: 0.15,
-    outputPrice: 3.5,
-    maxThinkingTokens: 24576,
-    supportsReasoningBudget: true,
-    requiredReasoningBudget: true
-  },
-  "gemini-2.5-flash-preview-04-17": {
-    maxTokens: 65535,
-    contextWindow: 1048576,
-    supportsImages: true,
-    supportsPromptCache: false,
-    inputPrice: 0.15,
-    outputPrice: 0.6
-  },
-  // 2.5 Flash stable
-  "gemini-2.5-flash": {
-    maxTokens: 64e3,
-    contextWindow: 1048576,
-    supportsImages: true,
-    supportsPromptCache: true,
-    inputPrice: 0.3,
-    outputPrice: 2.5,
-    cacheReadsPrice: 0.075,
-    cacheWritesPrice: 1,
-    maxThinkingTokens: 24576,
-    supportsReasoningBudget: true
+    supportsReasoningEffort: ["low", "high"],
+    reasoningEffort: "low",
+    supportsTemperature: true,
+    defaultTemperature: 1,
+    inputPrice: 4,
+    outputPrice: 18,
+    tiers: [
+      {
+        contextWindow: 2e5,
+        inputPrice: 2,
+        outputPrice: 12
+      },
+      {
+        contextWindow: Infinity,
+        inputPrice: 4,
+        outputPrice: 18
+      }
+    ]
   },
   // 2.5 Pro models
-  "gemini-2.5-pro
-    maxTokens:
+  "gemini-2.5-pro": {
+    maxTokens: 64e3,
     contextWindow: 1048576,
     supportsImages: true,
+    supportsNativeTools: true,
     supportsPromptCache: true,
     inputPrice: 2.5,
     // This is the pricing for prompts above 200k tokens.
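
Note: gemini-3-pro-preview is priced in tiers — inputPrice 2 / outputPrice 12 up to a 200k-token prompt, 4 / 18 above that. A sketch of picking the applicable tier; the priceFor helper is hypothetical, not part of the package:

  import { geminiModels } from "@roo-code/types";

  // Hypothetical helper: pick the first tier whose contextWindow covers the prompt size.
  function priceFor(modelId: keyof typeof geminiModels, promptTokens: number) {
    const info = geminiModels[modelId];
    const tier = info.tiers?.find((t) => promptTokens <= t.contextWindow) ?? info;
    return { inputPrice: tier.inputPrice, outputPrice: tier.outputPrice };
  }

  priceFor("gemini-3-pro-preview", 150e3); // { inputPrice: 2, outputPrice: 12 }
  priceFor("gemini-3-pro-preview", 500e3); // { inputPrice: 4, outputPrice: 18 }
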
@@ -2044,6 +1968,7 @@ var geminiModels = {
     cacheWritesPrice: 4.5,
     maxThinkingTokens: 32768,
     supportsReasoningBudget: true,
+    requiredReasoningBudget: true,
     tiers: [
       {
         contextWindow: 2e5,
@@ -2059,16 +1984,19 @@ var geminiModels = {
       }
     ]
   },
-  "gemini-2.5-pro-preview-05
+  "gemini-2.5-pro-preview-06-05": {
     maxTokens: 65535,
     contextWindow: 1048576,
     supportsImages: true,
+    supportsNativeTools: true,
     supportsPromptCache: true,
     inputPrice: 2.5,
     // This is the pricing for prompts above 200k tokens.
     outputPrice: 15,
     cacheReadsPrice: 0.625,
     cacheWritesPrice: 4.5,
+    maxThinkingTokens: 32768,
+    supportsReasoningBudget: true,
     tiers: [
       {
         contextWindow: 2e5,
@@ -2084,18 +2012,17 @@ var geminiModels = {
       }
     ]
   },
-  "gemini-2.5-pro-preview-
+  "gemini-2.5-pro-preview-05-06": {
     maxTokens: 65535,
     contextWindow: 1048576,
     supportsImages: true,
+    supportsNativeTools: true,
     supportsPromptCache: true,
     inputPrice: 2.5,
     // This is the pricing for prompts above 200k tokens.
     outputPrice: 15,
     cacheReadsPrice: 0.625,
     cacheWritesPrice: 4.5,
-    maxThinkingTokens: 32768,
-    supportsReasoningBudget: true,
     tiers: [
       {
         contextWindow: 2e5,
@@ -2111,18 +2038,11 @@ var geminiModels = {
       }
     ]
   },
-  "gemini-2.5-pro-
+  "gemini-2.5-pro-preview-03-25": {
     maxTokens: 65535,
     contextWindow: 1048576,
     supportsImages: true,
-
-    inputPrice: 0,
-    outputPrice: 0
-  },
-  "gemini-2.5-pro": {
-    maxTokens: 64e3,
-    contextWindow: 1048576,
-    supportsImages: true,
+    supportsNativeTools: true,
     supportsPromptCache: true,
     inputPrice: 2.5,
     // This is the pricing for prompts above 200k tokens.
@@ -2131,7 +2051,6 @@ var geminiModels = {
     cacheWritesPrice: 4.5,
     maxThinkingTokens: 32768,
     supportsReasoningBudget: true,
-    requiredReasoningBudget: true,
     tiers: [
       {
         contextWindow: 2e5,
@@ -2147,125 +2066,72 @@ var geminiModels = {
       }
     ]
   },
-  // 2.
-  "gemini-
-    maxTokens:
+  // 2.5 Flash models
+  "gemini-flash-latest": {
+    maxTokens: 65536,
     contextWindow: 1048576,
     supportsImages: true,
-
-
-
+    supportsNativeTools: true,
+    supportsPromptCache: true,
+    inputPrice: 0.3,
+    outputPrice: 2.5,
+    cacheReadsPrice: 0.075,
+    cacheWritesPrice: 1,
+    maxThinkingTokens: 24576,
+    supportsReasoningBudget: true
   },
-  "gemini-2.
+  "gemini-2.5-flash-preview-09-2025": {
     maxTokens: 65536,
     contextWindow: 1048576,
     supportsImages: true,
-
-
-
-
-
-
-
-
-    supportsPromptCache: false,
-    inputPrice: 0,
-    outputPrice: 0
+    supportsNativeTools: true,
+    supportsPromptCache: true,
+    inputPrice: 0.3,
+    outputPrice: 2.5,
+    cacheReadsPrice: 0.075,
+    cacheWritesPrice: 1,
+    maxThinkingTokens: 24576,
+    supportsReasoningBudget: true
   },
-  "gemini-2.
-    maxTokens:
+  "gemini-2.5-flash": {
+    maxTokens: 64e3,
     contextWindow: 1048576,
     supportsImages: true,
-
-
-
+    supportsNativeTools: true,
+    supportsPromptCache: true,
+    inputPrice: 0.3,
+    outputPrice: 2.5,
+    cacheReadsPrice: 0.075,
+    cacheWritesPrice: 1,
+    maxThinkingTokens: 24576,
+    supportsReasoningBudget: true
   },
-
-
+  // 2.5 Flash Lite models
+  "gemini-flash-lite-latest": {
+    maxTokens: 65536,
     contextWindow: 1048576,
     supportsImages: true,
+    supportsNativeTools: true,
     supportsPromptCache: true,
     inputPrice: 0.1,
     outputPrice: 0.4,
     cacheReadsPrice: 0.025,
-    cacheWritesPrice: 1
-
-
-  "gemini-2.0-pro-exp-02-05": {
-    maxTokens: 8192,
-    contextWindow: 2097152,
-    supportsImages: true,
-    supportsPromptCache: false,
-    inputPrice: 0,
-    outputPrice: 0
+    cacheWritesPrice: 1,
+    supportsReasoningBudget: true,
+    maxThinkingTokens: 24576
   },
-
-
-    maxTokens: 8192,
+  "gemini-2.5-flash-lite-preview-09-2025": {
+    maxTokens: 65536,
     contextWindow: 1048576,
     supportsImages: true,
+    supportsNativeTools: true,
     supportsPromptCache: true,
-    inputPrice: 0.
-
-
-    cacheReadsPrice: 0.0375,
+    inputPrice: 0.1,
+    outputPrice: 0.4,
+    cacheReadsPrice: 0.025,
     cacheWritesPrice: 1,
-
-
-        contextWindow: 128e3,
-        inputPrice: 0.075,
-        outputPrice: 0.3,
-        cacheReadsPrice: 0.01875
-      },
-      {
-        contextWindow: Infinity,
-        inputPrice: 0.15,
-        outputPrice: 0.6,
-        cacheReadsPrice: 0.0375
-      }
-    ]
-  },
-  "gemini-1.5-flash-exp-0827": {
-    maxTokens: 8192,
-    contextWindow: 1048576,
-    supportsImages: true,
-    supportsPromptCache: false,
-    inputPrice: 0,
-    outputPrice: 0
-  },
-  "gemini-1.5-flash-8b-exp-0827": {
-    maxTokens: 8192,
-    contextWindow: 1048576,
-    supportsImages: true,
-    supportsPromptCache: false,
-    inputPrice: 0,
-    outputPrice: 0
-  },
-  // 1.5 Pro models
-  "gemini-1.5-pro-002": {
-    maxTokens: 8192,
-    contextWindow: 2097152,
-    supportsImages: true,
-    supportsPromptCache: false,
-    inputPrice: 0,
-    outputPrice: 0
-  },
-  "gemini-1.5-pro-exp-0827": {
-    maxTokens: 8192,
-    contextWindow: 2097152,
-    supportsImages: true,
-    supportsPromptCache: false,
-    inputPrice: 0,
-    outputPrice: 0
-  },
-  // Experimental models
-  "gemini-exp-1206": {
-    maxTokens: 8192,
-    contextWindow: 2097152,
-    supportsImages: true,
-    supportsPromptCache: false,
-    inputPrice: 0,
-    outputPrice: 0
+    supportsReasoningBudget: true,
+    maxThinkingTokens: 24576
   }
 };
 
@@ -2635,89 +2501,143 @@ var ollamaDefaultModelInfo = {
 };
 
 // src/providers/openai.ts
-var openAiNativeDefaultModelId = "gpt-5
+var openAiNativeDefaultModelId = "gpt-5.1";
 var openAiNativeModels = {
-  "gpt-5
+  "gpt-5.1": {
     maxTokens: 128e3,
     contextWindow: 4e5,
+    supportsNativeTools: true,
     supportsImages: true,
     supportsPromptCache: true,
-
+    promptCacheRetention: "24h",
+    supportsReasoningEffort: ["none", "low", "medium", "high"],
+    reasoningEffort: "medium",
     inputPrice: 1.25,
     outputPrice: 10,
-    cacheReadsPrice: 0.
-
-
+    cacheReadsPrice: 0.125,
+    supportsVerbosity: true,
+    supportsTemperature: false,
+    tiers: [
+      { name: "flex", contextWindow: 4e5, inputPrice: 0.625, outputPrice: 5, cacheReadsPrice: 0.0625 },
+      { name: "priority", contextWindow: 4e5, inputPrice: 2.5, outputPrice: 20, cacheReadsPrice: 0.25 }
+    ],
+    description: "GPT-5.1: The best model for coding and agentic tasks across domains"
   },
-  "gpt-5-
+  "gpt-5.1-codex": {
     maxTokens: 128e3,
     contextWindow: 4e5,
+    supportsNativeTools: true,
     supportsImages: true,
     supportsPromptCache: true,
-
+    promptCacheRetention: "24h",
+    supportsReasoningEffort: ["low", "medium", "high"],
     reasoningEffort: "medium",
     inputPrice: 1.25,
     outputPrice: 10,
-    cacheReadsPrice: 0.
-
-
+    cacheReadsPrice: 0.125,
+    supportsTemperature: false,
+    tiers: [{ name: "priority", contextWindow: 4e5, inputPrice: 2.5, outputPrice: 20, cacheReadsPrice: 0.25 }],
+    description: "GPT-5.1 Codex: A version of GPT-5.1 optimized for agentic coding in Codex"
+  },
+  "gpt-5.1-codex-mini": {
+    maxTokens: 128e3,
+    contextWindow: 4e5,
+    supportsNativeTools: true,
+    supportsImages: true,
+    supportsPromptCache: true,
+    promptCacheRetention: "24h",
+    supportsReasoningEffort: ["low", "medium", "high"],
+    reasoningEffort: "medium",
+    inputPrice: 0.25,
+    outputPrice: 2,
+    cacheReadsPrice: 0.025,
+    supportsTemperature: false,
+    description: "GPT-5.1 Codex mini: A version of GPT-5.1 optimized for agentic coding in Codex"
+  },
+  "gpt-5": {
+    maxTokens: 128e3,
+    contextWindow: 4e5,
+    supportsNativeTools: true,
+    supportsImages: true,
+    supportsPromptCache: true,
+    supportsReasoningEffort: ["minimal", "low", "medium", "high"],
+    reasoningEffort: "medium",
+    inputPrice: 1.25,
+    outputPrice: 10,
+    cacheReadsPrice: 0.125,
     supportsVerbosity: true,
     supportsTemperature: false,
     tiers: [
       { name: "flex", contextWindow: 4e5, inputPrice: 0.625, outputPrice: 5, cacheReadsPrice: 0.0625 },
       { name: "priority", contextWindow: 4e5, inputPrice: 2.5, outputPrice: 20, cacheReadsPrice: 0.25 }
-    ]
+    ],
+    description: "GPT-5: The best model for coding and agentic tasks across domains"
   },
-  "gpt-5-mini
+  "gpt-5-mini": {
     maxTokens: 128e3,
     contextWindow: 4e5,
+    supportsNativeTools: true,
     supportsImages: true,
     supportsPromptCache: true,
-    supportsReasoningEffort:
+    supportsReasoningEffort: ["minimal", "low", "medium", "high"],
     reasoningEffort: "medium",
     inputPrice: 0.25,
     outputPrice: 2,
-    cacheReadsPrice: 0.
-    description: "GPT-5 Mini: A faster, more cost-efficient version of GPT-5 for well-defined tasks",
+    cacheReadsPrice: 0.025,
     supportsVerbosity: true,
     supportsTemperature: false,
     tiers: [
       { name: "flex", contextWindow: 4e5, inputPrice: 0.125, outputPrice: 1, cacheReadsPrice: 0.0125 },
       { name: "priority", contextWindow: 4e5, inputPrice: 0.45, outputPrice: 3.6, cacheReadsPrice: 0.045 }
-    ]
+    ],
+    description: "GPT-5 Mini: A faster, more cost-efficient version of GPT-5 for well-defined tasks"
   },
-  "gpt-5-
+  "gpt-5-codex": {
+    maxTokens: 128e3,
+    contextWindow: 4e5,
+    supportsNativeTools: true,
+    supportsImages: true,
+    supportsPromptCache: true,
+    supportsReasoningEffort: ["low", "medium", "high"],
+    reasoningEffort: "medium",
+    inputPrice: 1.25,
+    outputPrice: 10,
+    cacheReadsPrice: 0.125,
+    supportsTemperature: false,
+    tiers: [{ name: "priority", contextWindow: 4e5, inputPrice: 2.5, outputPrice: 20, cacheReadsPrice: 0.25 }],
+    description: "GPT-5-Codex: A version of GPT-5 optimized for agentic coding in Codex"
+  },
+  "gpt-5-nano": {
     maxTokens: 128e3,
     contextWindow: 4e5,
+    supportsNativeTools: true,
     supportsImages: true,
     supportsPromptCache: true,
-    supportsReasoningEffort:
+    supportsReasoningEffort: ["minimal", "low", "medium", "high"],
     reasoningEffort: "medium",
     inputPrice: 0.05,
     outputPrice: 0.4,
-    cacheReadsPrice:
-    description: "GPT-5 Nano: Fastest, most cost-efficient version of GPT-5",
+    cacheReadsPrice: 5e-3,
     supportsVerbosity: true,
     supportsTemperature: false,
-    tiers: [{ name: "flex", contextWindow: 4e5, inputPrice: 0.025, outputPrice: 0.2, cacheReadsPrice: 25e-4 }]
+    tiers: [{ name: "flex", contextWindow: 4e5, inputPrice: 0.025, outputPrice: 0.2, cacheReadsPrice: 25e-4 }],
+    description: "GPT-5 Nano: Fastest, most cost-efficient version of GPT-5"
   },
-  "gpt-5-
+  "gpt-5-chat-latest": {
     maxTokens: 128e3,
     contextWindow: 4e5,
+    supportsNativeTools: true,
     supportsImages: true,
     supportsPromptCache: true,
-    supportsReasoningEffort: true,
-    reasoningEffort: "medium",
     inputPrice: 1.25,
     outputPrice: 10,
-    cacheReadsPrice: 0.
-    description: "GPT-5
-    supportsVerbosity: true,
-    supportsTemperature: false
+    cacheReadsPrice: 0.125,
+    description: "GPT-5 Chat: Optimized for conversational AI and non-reasoning tasks"
   },
   "gpt-4.1": {
     maxTokens: 32768,
     contextWindow: 1047576,
+    supportsNativeTools: true,
     supportsImages: true,
     supportsPromptCache: true,
     inputPrice: 2,
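
Note: supportsReasoningEffort is now either a boolean or an explicit list of allowed efforts, so consumers have to handle both shapes. A normalization sketch; the helper, the fallback list, and the openAiNativeModels import are assumptions:

  import { openAiNativeModels, reasoningEfforts } from "@roo-code/types"; // assumed exports

  // Hypothetical helper: turn the boolean-or-array capability into a concrete list.
  function allowedEfforts(supports: boolean | string[] | undefined): string[] {
    if (Array.isArray(supports)) return supports;
    return supports ? [...reasoningEfforts] : [];  // a plain true keeps the classic low/medium/high set
  }

  allowedEfforts(openAiNativeModels["gpt-5.1"].supportsReasoningEffort); // ["none", "low", "medium", "high"]
  allowedEfforts(true);                                                  // ["low", "medium", "high"]
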
@@ -2731,6 +2651,7 @@ var openAiNativeModels = {
   "gpt-4.1-mini": {
     maxTokens: 32768,
     contextWindow: 1047576,
+    supportsNativeTools: true,
     supportsImages: true,
     supportsPromptCache: true,
     inputPrice: 0.4,
@@ -2744,6 +2665,7 @@ var openAiNativeModels = {
   "gpt-4.1-nano": {
     maxTokens: 32768,
     contextWindow: 1047576,
+    supportsNativeTools: true,
     supportsImages: true,
     supportsPromptCache: true,
     inputPrice: 0.1,
@@ -2757,12 +2679,13 @@ var openAiNativeModels = {
   o3: {
     maxTokens: 1e5,
     contextWindow: 2e5,
+    supportsNativeTools: true,
     supportsImages: true,
     supportsPromptCache: true,
     inputPrice: 2,
     outputPrice: 8,
     cacheReadsPrice: 0.5,
-    supportsReasoningEffort:
+    supportsReasoningEffort: ["low", "medium", "high"],
     reasoningEffort: "medium",
     supportsTemperature: false,
     tiers: [
@@ -2773,6 +2696,7 @@ var openAiNativeModels = {
   "o3-high": {
     maxTokens: 1e5,
     contextWindow: 2e5,
+    supportsNativeTools: true,
     supportsImages: true,
     supportsPromptCache: true,
     inputPrice: 2,
@@ -2784,6 +2708,7 @@ var openAiNativeModels = {
   "o3-low": {
     maxTokens: 1e5,
     contextWindow: 2e5,
+    supportsNativeTools: true,
     supportsImages: true,
     supportsPromptCache: true,
     inputPrice: 2,
@@ -2795,12 +2720,13 @@ var openAiNativeModels = {
   "o4-mini": {
     maxTokens: 1e5,
     contextWindow: 2e5,
+    supportsNativeTools: true,
     supportsImages: true,
     supportsPromptCache: true,
     inputPrice: 1.1,
     outputPrice: 4.4,
     cacheReadsPrice: 0.275,
-    supportsReasoningEffort:
+    supportsReasoningEffort: ["low", "medium", "high"],
     reasoningEffort: "medium",
     supportsTemperature: false,
     tiers: [
@@ -2811,6 +2737,7 @@ var openAiNativeModels = {
   "o4-mini-high": {
     maxTokens: 1e5,
     contextWindow: 2e5,
+    supportsNativeTools: true,
     supportsImages: true,
     supportsPromptCache: true,
     inputPrice: 1.1,
@@ -2822,6 +2749,7 @@ var openAiNativeModels = {
   "o4-mini-low": {
     maxTokens: 1e5,
     contextWindow: 2e5,
+    supportsNativeTools: true,
     supportsImages: true,
     supportsPromptCache: true,
     inputPrice: 1.1,
@@ -2833,18 +2761,20 @@ var openAiNativeModels = {
   "o3-mini": {
     maxTokens: 1e5,
     contextWindow: 2e5,
+    supportsNativeTools: true,
     supportsImages: false,
     supportsPromptCache: true,
     inputPrice: 1.1,
     outputPrice: 4.4,
     cacheReadsPrice: 0.55,
-    supportsReasoningEffort:
+    supportsReasoningEffort: ["low", "medium", "high"],
     reasoningEffort: "medium",
     supportsTemperature: false
   },
   "o3-mini-high": {
     maxTokens: 1e5,
     contextWindow: 2e5,
+    supportsNativeTools: true,
     supportsImages: false,
     supportsPromptCache: true,
     inputPrice: 1.1,
@@ -2856,6 +2786,7 @@ var openAiNativeModels = {
   "o3-mini-low": {
     maxTokens: 1e5,
     contextWindow: 2e5,
+    supportsNativeTools: true,
     supportsImages: false,
     supportsPromptCache: true,
     inputPrice: 1.1,
@@ -2867,6 +2798,7 @@ var openAiNativeModels = {
   o1: {
     maxTokens: 1e5,
     contextWindow: 2e5,
+    supportsNativeTools: true,
     supportsImages: true,
     supportsPromptCache: true,
     inputPrice: 15,
@@ -2877,6 +2809,7 @@ var openAiNativeModels = {
   "o1-preview": {
     maxTokens: 32768,
     contextWindow: 128e3,
+    supportsNativeTools: true,
     supportsImages: true,
     supportsPromptCache: true,
     inputPrice: 15,
@@ -2887,6 +2820,7 @@ var openAiNativeModels = {
   "o1-mini": {
     maxTokens: 65536,
     contextWindow: 128e3,
+    supportsNativeTools: true,
     supportsImages: true,
     supportsPromptCache: true,
     inputPrice: 1.1,
@@ -2897,6 +2831,7 @@ var openAiNativeModels = {
   "gpt-4o": {
     maxTokens: 16384,
     contextWindow: 128e3,
+    supportsNativeTools: true,
     supportsImages: true,
     supportsPromptCache: true,
     inputPrice: 2.5,
@@ -2910,6 +2845,7 @@ var openAiNativeModels = {
   "gpt-4o-mini": {
     maxTokens: 16384,
     contextWindow: 128e3,
+    supportsNativeTools: true,
     supportsImages: true,
     supportsPromptCache: true,
     inputPrice: 0.15,
@@ -2923,13 +2859,69 @@ var openAiNativeModels = {
   "codex-mini-latest": {
     maxTokens: 16384,
     contextWindow: 2e5,
+    supportsNativeTools: true,
     supportsImages: false,
     supportsPromptCache: false,
     inputPrice: 1.5,
     outputPrice: 6,
-    cacheReadsPrice: 0,
+    cacheReadsPrice: 0.375,
     supportsTemperature: false,
     description: "Codex Mini: Cloud-based software engineering agent powered by codex-1, a version of o3 optimized for coding tasks. Trained with reinforcement learning to generate human-style code, adhere to instructions, and iteratively run tests."
+  },
+  // Dated clones (snapshots) preserved for backward compatibility
+  "gpt-5-2025-08-07": {
+    maxTokens: 128e3,
+    contextWindow: 4e5,
+    supportsNativeTools: true,
+    supportsImages: true,
+    supportsPromptCache: true,
+    supportsReasoningEffort: ["minimal", "low", "medium", "high"],
+    reasoningEffort: "medium",
+    inputPrice: 1.25,
+    outputPrice: 10,
+    cacheReadsPrice: 0.125,
+    supportsVerbosity: true,
+    supportsTemperature: false,
+    tiers: [
+      { name: "flex", contextWindow: 4e5, inputPrice: 0.625, outputPrice: 5, cacheReadsPrice: 0.0625 },
+      { name: "priority", contextWindow: 4e5, inputPrice: 2.5, outputPrice: 20, cacheReadsPrice: 0.25 }
+    ],
+    description: "GPT-5: The best model for coding and agentic tasks across domains"
+  },
+  "gpt-5-mini-2025-08-07": {
+    maxTokens: 128e3,
+    contextWindow: 4e5,
+    supportsNativeTools: true,
+    supportsImages: true,
+    supportsPromptCache: true,
+    supportsReasoningEffort: ["minimal", "low", "medium", "high"],
+    reasoningEffort: "medium",
+    inputPrice: 0.25,
+    outputPrice: 2,
+    cacheReadsPrice: 0.025,
+    supportsVerbosity: true,
+    supportsTemperature: false,
+    tiers: [
+      { name: "flex", contextWindow: 4e5, inputPrice: 0.125, outputPrice: 1, cacheReadsPrice: 0.0125 },
+      { name: "priority", contextWindow: 4e5, inputPrice: 0.45, outputPrice: 3.6, cacheReadsPrice: 0.045 }
+    ],
+    description: "GPT-5 Mini: A faster, more cost-efficient version of GPT-5 for well-defined tasks"
+  },
+  "gpt-5-nano-2025-08-07": {
+    maxTokens: 128e3,
+    contextWindow: 4e5,
+    supportsNativeTools: true,
+    supportsImages: true,
+    supportsPromptCache: true,
+    supportsReasoningEffort: ["minimal", "low", "medium", "high"],
+    reasoningEffort: "medium",
+    inputPrice: 0.05,
+    outputPrice: 0.4,
+    cacheReadsPrice: 5e-3,
+    supportsVerbosity: true,
+    supportsTemperature: false,
+    tiers: [{ name: "flex", contextWindow: 4e5, inputPrice: 0.025, outputPrice: 0.2, cacheReadsPrice: 25e-4 }],
+    description: "GPT-5 Nano: Fastest, most cost-efficient version of GPT-5"
   }
 };
 var openAiModelInfoSaneDefaults = {
@@ -2938,11 +2930,11 @@ var openAiModelInfoSaneDefaults = {
   supportsImages: true,
   supportsPromptCache: false,
   inputPrice: 0,
-  outputPrice: 0
+  outputPrice: 0,
+  supportsNativeTools: true
 };
 var azureOpenAiDefaultApiVersion = "2024-08-01-preview";
 var OPENAI_NATIVE_DEFAULT_TEMPERATURE = 0;
-var GPT5_DEFAULT_TEMPERATURE = 1;
 var OPENAI_AZURE_AI_INFERENCE_PATH = "/models/chat/completions";
 
 // src/providers/openrouter.ts
@@ -2952,6 +2944,7 @@ var openRouterDefaultModelInfo = {
   contextWindow: 2e5,
   supportsImages: true,
   supportsPromptCache: true,
+  supportsNativeTools: true,
   inputPrice: 3,
   outputPrice: 15,
   cacheWritesPrice: 3.75,
@@ -3197,6 +3190,30 @@ var unboundDefaultModelInfo = {
 // src/providers/vertex.ts
 var vertexDefaultModelId = "claude-sonnet-4-5@20250929";
 var vertexModels = {
+  "gemini-3-pro-preview": {
+    maxTokens: 65536,
+    contextWindow: 1048576,
+    supportsImages: true,
+    supportsPromptCache: true,
+    supportsReasoningEffort: ["low", "high"],
+    reasoningEffort: "low",
+    supportsTemperature: true,
+    defaultTemperature: 1,
+    inputPrice: 4,
+    outputPrice: 18,
+    tiers: [
+      {
+        contextWindow: 2e5,
+        inputPrice: 2,
+        outputPrice: 12
+      },
+      {
+        contextWindow: Infinity,
+        inputPrice: 4,
+        outputPrice: 18
+      }
+    ]
+  },
   "gemini-2.5-flash-preview-05-20:thinking": {
     maxTokens: 65535,
     contextWindow: 1048576,
@@ -4172,11 +4189,12 @@ var minimaxModels = {
     contextWindow: 192e3,
     supportsImages: false,
     supportsPromptCache: true,
+    supportsNativeTools: true,
+    preserveReasoning: true,
     inputPrice: 0.3,
     outputPrice: 1.2,
     cacheWritesPrice: 0.375,
     cacheReadsPrice: 0.03,
-    preserveReasoning: true,
     description: "MiniMax M2, a model born for Agents and code, featuring Top-tier Coding Capabilities, Powerful Agentic Performance, and Ultimate Cost-Effectiveness & Speed."
   },
   "MiniMax-M2-Stable": {
@@ -4184,14 +4202,17 @@ var minimaxModels = {
     contextWindow: 192e3,
     supportsImages: false,
     supportsPromptCache: true,
+    supportsNativeTools: true,
+    preserveReasoning: true,
     inputPrice: 0.3,
     outputPrice: 1.2,
     cacheWritesPrice: 0.375,
     cacheReadsPrice: 0.03,
-    preserveReasoning: true,
     description: "MiniMax M2 Stable (High Concurrency, Commercial Use), a model born for Agents and code, featuring Top-tier Coding Capabilities, Powerful Agentic Performance, and Ultimate Cost-Effectiveness & Speed."
   }
 };
+var minimaxDefaultModelInfo = minimaxModels[minimaxDefaultModelId];
+var MINIMAX_DEFAULT_MAX_TOKENS = 16384;
 var MINIMAX_DEFAULT_TEMPERATURE = 1;
 
 // src/providers/index.ts
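
Note: minimaxDefaultModelInfo is simply the entry for minimaxDefaultModelId, and MINIMAX_DEFAULT_MAX_TOKENS gives a fallback output budget. Usage sketch, assuming the names are imported from "@roo-code/types":

  import {
    minimaxDefaultModelId,
    minimaxDefaultModelInfo,
    MINIMAX_DEFAULT_MAX_TOKENS
  } from "@roo-code/types";

  // Prefer the model's own maxTokens, fall back to the package-level default.
  const maxTokens = minimaxDefaultModelInfo.maxTokens ?? MINIMAX_DEFAULT_MAX_TOKENS;
  console.log(minimaxDefaultModelId, maxTokens);
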
@@ -4346,11 +4367,13 @@ var baseProviderSettingsSchema = z8.object({
   consecutiveMistakeLimit: z8.number().min(0).optional(),
   // Model reasoning.
   enableReasoningEffort: z8.boolean().optional(),
-  reasoningEffort:
+  reasoningEffort: reasoningEffortSettingSchema.optional(),
   modelMaxTokens: z8.number().optional(),
   modelMaxThinkingTokens: z8.number().optional(),
   // Model verbosity.
-  verbosity: verbosityLevelsSchema.optional()
+  verbosity: verbosityLevelsSchema.optional(),
+  // Tool protocol override for this profile.
+  toolProtocol: z8.enum(["xml", "native"]).optional()
 });
 var apiModelIdProviderModelSchema = baseProviderSettingsSchema.extend({
   apiModelId: z8.string().optional()
@@ -4690,7 +4713,7 @@ var modelIdKeysByProvider = {
   roo: "apiModelId",
   "vercel-ai-gateway": "vercelAiGatewayModelId"
 };
-var ANTHROPIC_STYLE_PROVIDERS = ["anthropic", "claude-code", "bedrock"];
+var ANTHROPIC_STYLE_PROVIDERS = ["anthropic", "claude-code", "bedrock", "minimax"];
 var getApiProtocol = (provider, modelId) => {
   if (provider && ANTHROPIC_STYLE_PROVIDERS.includes(provider)) {
     return "anthropic";
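
Note: with "minimax" added to ANTHROPIC_STYLE_PROVIDERS, MiniMax requests are now classified under the Anthropic protocol; other providers keep the behavior of the unchanged fallback branch, which is not shown in this hunk. Sketch, assuming getApiProtocol is imported from "@roo-code/types":

  import { getApiProtocol } from "@roo-code/types";

  getApiProtocol("minimax", "MiniMax-M2");    // "anthropic"
  getApiProtocol("openai-native", "gpt-5.1"); // "openai", via the fallback branch
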
@@ -5227,6 +5250,12 @@ var globalSettingsSchema = z14.object({
    * @default true
    */
   includeCurrentCost: z14.boolean().optional(),
+  /**
+   * Maximum number of git status file entries to include in the environment details.
+   * Set to 0 to disable git status. The header (branch, commits) is always included when > 0.
+   * @default 0
+   */
+  maxGitStatusFiles: z14.number().optional(),
   /**
    * Whether to include diagnostic messages (errors, warnings) in tool outputs
    * @default true
@@ -5394,6 +5423,7 @@ var EVALS_SETTINGS = {
   rateLimitSeconds: 0,
   maxOpenTabsContext: 20,
   maxWorkspaceFiles: 200,
+  maxGitStatusFiles: 20,
   showRooIgnoredFiles: true,
   maxReadFileLine: -1,
   // -1 to enable full file reading.
@@ -5858,6 +5888,15 @@ var followUpDataSchema = z17.object({
   suggest: z17.array(suggestionItemSchema).optional()
 });
 
+// src/image-generation.ts
+var IMAGE_GENERATION_MODELS = [
+  { value: "google/gemini-2.5-flash-image", label: "Gemini 2.5 Flash Image" },
+  { value: "google/gemini-3-pro-image-preview", label: "Gemini 3 Pro Image Preview" },
+  { value: "openai/gpt-5-image", label: "GPT-5 Image" },
+  { value: "openai/gpt-5-image-mini", label: "GPT-5 Image Mini" }
+];
+var IMAGE_GENERATION_MODEL_IDS = IMAGE_GENERATION_MODELS.map((m) => m.value);
+
 // src/ipc.ts
 import { z as z18 } from "zod";
 var IpcMessageType = /* @__PURE__ */ ((IpcMessageType2) => {
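
Note: IMAGE_GENERATION_MODELS pairs each selectable image model id with a display label, and IMAGE_GENERATION_MODEL_IDS is the derived id list. A lookup sketch, assuming both are imported from "@roo-code/types" (labelFor is a hypothetical helper):

  import { IMAGE_GENERATION_MODELS, IMAGE_GENERATION_MODEL_IDS } from "@roo-code/types";

  function labelFor(id: string): string | undefined {
    return IMAGE_GENERATION_MODELS.find((m) => m.value === id)?.label;
  }

  IMAGE_GENERATION_MODEL_IDS.includes("openai/gpt-5-image"); // true
  labelFor("google/gemini-3-pro-image-preview");             // "Gemini 3 Pro Image Preview"
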
@@ -6036,7 +6075,6 @@ export {
   GLOBAL_SECRET_KEYS,
   GLOBAL_SETTINGS_KEYS,
   GLOBAL_STATE_KEYS,
-  GPT5_DEFAULT_TEMPERATURE,
   HEARTBEAT_INTERVAL_MS,
   HUGGINGFACE_API_URL,
   HUGGINGFACE_CACHE_DURATION,
@@ -6046,12 +6084,15 @@ export {
   HUGGINGFACE_SLIDER_MIN,
   HUGGINGFACE_SLIDER_STEP,
   HUGGINGFACE_TEMPERATURE_MAX_VALUE,
+  IMAGE_GENERATION_MODELS,
+  IMAGE_GENERATION_MODEL_IDS,
   INSTANCE_TTL_SECONDS,
   IO_INTELLIGENCE_CACHE_DURATION,
   IpcMessageType,
   IpcOrigin,
   LMSTUDIO_DEFAULT_TEMPERATURE,
   MAX_CHECKPOINT_TIMEOUT_SECONDS,
+  MINIMAX_DEFAULT_MAX_TOKENS,
   MINIMAX_DEFAULT_TEMPERATURE,
   MIN_CHECKPOINT_TIMEOUT_SECONDS,
   MISTRAL_DEFAULT_TEMPERATURE,
@@ -6071,6 +6112,7 @@ export {
   RooModelsResponseSchema,
   RooPricingSchema,
   SECRET_STATE_KEYS,
+  TOOL_PROTOCOL,
   TaskBridgeCommandName,
   TaskBridgeEventName,
   TaskCommandName,
@@ -6142,6 +6184,7 @@ export {
   geminiModels,
   getApiProtocol,
   getClaudeCodeModelId,
+  getEffectiveProtocol,
   getModelId,
   getProviderDefaultModelId,
   gitPropertiesSchema,
@@ -6173,6 +6216,7 @@ export {
   isLanguage,
   isLocalProvider,
   isModelParameter,
+  isNativeProtocol,
   isNonBlockingAsk,
   isProviderName,
   isResumableAsk,
@@ -6194,6 +6238,7 @@ export {
   mcpMarketplaceItemSchema,
   mcpParameterSchema,
   minimaxDefaultModelId,
+  minimaxDefaultModelInfo,
   minimaxModels,
   mistralDefaultModelId,
   mistralModels,
@@ -6229,8 +6274,12 @@ export {
   queuedMessageSchema,
   qwenCodeDefaultModelId,
   qwenCodeModels,
+  reasoningEffortExtendedSchema,
+  reasoningEffortSettingSchema,
+  reasoningEffortSettingValues,
   reasoningEffortWithMinimalSchema,
   reasoningEfforts,
+  reasoningEffortsExtended,
   reasoningEffortsSchema,
   requestyDefaultModelId,
   requestyDefaultModelInfo,