rhachet-brains-openai 0.1.8 → 0.2.0
@@ -0,0 +1,25 @@
+import type { BrainSpec } from 'rhachet';
+/**
+ * .what = supported openai brain atom slugs
+ * .why = enables type-safe slug specification with model variants
+ */
+export type OpenaiBrainAtomSlug = 'openai/gpt/4o' | 'openai/gpt/4o-mini' | 'openai/gpt/4-turbo' | 'openai/gpt/4.1' | 'openai/gpt/4.1-mini' | 'openai/gpt/4.1-nano' | 'openai/o/1' | 'openai/o/1-mini' | 'openai/o/1-preview' | 'openai/o/3' | 'openai/o/3-mini' | 'openai/o/3-pro' | 'openai/o/4-mini' | 'openai/gpt/5' | 'openai/gpt/5-mini' | 'openai/gpt/5-pro' | 'openai/gpt/5-thoughtful' | 'openai/gpt/5-thoughtful-mini' | 'openai/gpt/5.1-instant' | 'openai/gpt/5.1-thoughtful' | 'openai/gpt/5.2-instant' | 'openai/gpt/5.2-pro' | 'openai/gpt/5.2-thoughtful' | 'openai/gpt/codex/5.1-max' | 'openai/gpt/codex/5.1-mini' | 'openai/gpt/codex/5.2';
+/**
+ * .what = atom config type
+ * .why = shared type for model configs
+ */
+export type BrainAtomConfig = {
+    model: string;
+    description: string;
+    spec: BrainSpec;
+};
+/**
+ * .what = brain spec configuration by atom slug
+ * .why = maps slugs to api model names, descriptions, and specs
+ *
+ * .refs
+ *   - https://openai.com/api/pricing/
+ *   - https://platform.openai.com/docs/models
+ *   - https://platform.openai.com/docs/api-reference/responses
+ */
+export declare const CONFIG_BY_ATOM_SLUG: Record<OpenaiBrainAtomSlug, BrainAtomConfig>;
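For orientation, a minimal sketch of how these declarations might be consumed. It assumes the package re-exports `CONFIG_BY_ATOM_SLUG` from its root; the import path and the logging are illustrative:

```ts
import { CONFIG_BY_ATOM_SLUG, type OpenaiBrainAtomSlug } from 'rhachet-brains-openai';

// the union type catches slug typos at compile time
const slug: OpenaiBrainAtomSlug = 'openai/gpt/4o-mini';

// look up the api model name, description, and brain spec for that slug
const config = CONFIG_BY_ATOM_SLUG[slug];
console.log(config.model);              // 'gpt-4o-mini'
console.log(config.description);        // human-readable summary of the model
console.log(config.spec.cost.cash.per); // 'token'
```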
@@ -1,19 +1,14 @@
 import { BrainAtom } from 'rhachet';
-/**
- * .what = supported openai atom slugs
- * .why = enables type-safe slug specification with model variants
- */
-type OpenAIAtomSlug = 'openai/gpt-4o' | 'openai/gpt-4o-mini' | 'openai/gpt-4-turbo' | 'openai/o1' | 'openai/o1-mini' | 'openai/o1-preview';
+import { type OpenaiBrainAtomSlug } from '../../domain.objects/BrainAtom.config';
 /**
  * .what = factory to generate openai brain atom instances
  * .why = enables model variant selection via slug
  *
  * .example
- * genBrainAtom({ slug: 'openai/gpt
- * genBrainAtom({ slug: 'openai/gpt
- * genBrainAtom({ slug: 'openai/
+ * genBrainAtom({ slug: 'openai/gpt/4o' })
+ * genBrainAtom({ slug: 'openai/gpt/4o-mini' }) // fast + cheap
+ * genBrainAtom({ slug: 'openai/o/1' }) // advanced reason
  */
 export declare const genBrainAtom: (input: {
-    slug:
+    slug: OpenaiBrainAtomSlug;
 }) => BrainAtom;
-export {};
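A short usage sketch of the factory above, mirroring the readme's usage section; the prompt and zod schema are illustrative:

```ts
import { genBrainAtom } from 'rhachet-brains-openai';
import { z } from 'zod';

// instantiate an atom for a specific model variant via its slug
const brainAtom = genBrainAtom({ slug: 'openai/gpt/4o-mini' });

// ask for structured output; the zod schema is enforced via the responses api json_schema format
const { output } = await brainAtom.ask({
  role: { briefs: [] },
  prompt: 'summarize the tradeoffs of gpt-4o-mini vs o1',
  schema: { output: z.object({ summary: z.string() }) },
});
console.log(output.summary);
```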
@@ -1,19 +1,27 @@
 import { BrainRepl } from 'rhachet';
 /**
- * .what = supported openai
+ * .what = supported openai brain repl slugs
  * .why = enables type-safe slug specification with model variants
+ *
+ * .structure
+ *   openai/codex                 → default (5.1-max)
+ *   openai/codex/{5.1,5.2}       → version (defaults to max tier)
+ *   openai/codex/{mini,max}      → capability tier (defaults to 5.1)
+ *   openai/codex/{mini,max}/5.1  → explicit version + tier
+ *
+ * .note = 5.2 has only one variant (gpt-5.2-codex); mini/max/5.2 slugs will be added when available
  */
-type
+export type OpenaiBrainReplSlug = 'openai/codex' | 'openai/codex/5.1' | 'openai/codex/5.2' | 'openai/codex/mini' | 'openai/codex/max' | 'openai/codex/mini/5.1' | 'openai/codex/max/5.1';
 /**
  * .what = factory to generate openai codex brain repl instances
  * .why = enables model variant selection via slug
  *
  * .example
- * genBrainRepl({ slug: 'openai/codex' })
- * genBrainRepl({ slug: 'openai/codex/
- * genBrainRepl({ slug: 'openai/codex/
+ * genBrainRepl({ slug: 'openai/codex' })         // default (5.1-max)
+ * genBrainRepl({ slug: 'openai/codex/5.2' })     // version only (5.2)
+ * genBrainRepl({ slug: 'openai/codex/mini' })    // tier only (5.1-mini)
+ * genBrainRepl({ slug: 'openai/codex/max/5.2' }) // tier + version
  */
 export declare const genBrainRepl: (input: {
-    slug:
+    slug: OpenaiBrainReplSlug;
 }) => BrainRepl;
-export {};
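A hedged sketch of repl usage based on the slug structure above and the readme; the prompt and schema are illustrative:

```ts
import { genBrainRepl } from 'rhachet-brains-openai';
import { z } from 'zod';

// 'openai/codex/mini' resolves to the 5.1 mini tier (gpt-5.1-codex-mini) per the slug structure above
const brainRepl = genBrainRepl({ slug: 'openai/codex/mini' });

// ask() runs read-only analysis; act() (not shown) may also write to the workspace
const { output } = await brainRepl.ask({
  role: { briefs: [] },
  prompt: 'list the exported factories in this package',
  schema: { output: z.object({ exports: z.array(z.string()) }) },
});
console.log(output.exports);
```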
package/dist/index.js
CHANGED
@@ -40,60 +40,799 @@ module.exports = __toCommonJS(index_exports);
 // src/domain.operations/atoms/genBrainAtom.ts
 var import_openai = __toESM(require("openai"));
 var import_rhachet = require("rhachet");
+var import_calcBrainOutputCost = require("rhachet/dist/domain.operations/brainCost/calcBrainOutputCost");
 var import_zod = require("zod");
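The per-model config entries that follow price tokens with `iso-price`'s `dividePrice`: a quoted per-1M-token list price is divided down to a per-token price. A minimal sketch of that pattern, with dollar figures copied from the gpt-4o entry below (the variable name and comments are illustrative):

```ts
import { dividePrice } from 'iso-price';

// per-token prices derived from openai's per-1M-token list prices
const gpt4oCash = {
  per: 'token',
  cache: {
    get: dividePrice({ of: '$1.25', by: 1e6 }),  // cache read
    set: dividePrice({ of: '$2.50', by: 1e6 }),  // cache write
  },
  input: dividePrice({ of: '$2.50', by: 1e6 }),
  output: dividePrice({ of: '$10.00', by: 1e6 }),
};
```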
|
|
44
|
-
|
|
45
|
-
|
|
45
|
+
|
|
46
|
+
// src/domain.objects/BrainAtom.config.ts
|
|
47
|
+
var import_iso_price = require("iso-price");
|
|
48
|
+
var CONFIG_BY_ATOM_SLUG = {
|
|
49
|
+
// =========================================================================
|
|
50
|
+
// gpt-4 family
|
|
51
|
+
// =========================================================================
|
|
52
|
+
"openai/gpt/4o": {
|
|
46
53
|
model: "gpt-4o",
|
|
47
|
-
description: "gpt-4o - multimodal model for
|
|
54
|
+
description: "gpt-4o - multimodal model for reason and vision",
|
|
55
|
+
spec: {
|
|
56
|
+
cost: {
|
|
57
|
+
time: {
|
|
58
|
+
speed: { tokens: 100, per: { seconds: 1 } },
|
|
59
|
+
latency: { milliseconds: 500 }
|
|
60
|
+
},
|
|
61
|
+
cash: {
|
|
62
|
+
per: "token",
|
|
63
|
+
cache: {
|
|
64
|
+
get: (0, import_iso_price.dividePrice)({ of: "$1.25", by: 1e6 }),
|
|
65
|
+
set: (0, import_iso_price.dividePrice)({ of: "$2.50", by: 1e6 })
|
|
66
|
+
},
|
|
67
|
+
input: (0, import_iso_price.dividePrice)({ of: "$2.50", by: 1e6 }),
|
|
68
|
+
output: (0, import_iso_price.dividePrice)({ of: "$10.00", by: 1e6 })
|
|
69
|
+
}
|
|
70
|
+
},
|
|
71
|
+
gain: {
|
|
72
|
+
size: { context: { tokens: 128e3 } },
|
|
73
|
+
grades: { swe: 33.2, mmlu: 88.7, humaneval: 90.2 },
|
|
74
|
+
cutoff: "2023-10-01",
|
|
75
|
+
domain: "ALL",
|
|
76
|
+
skills: { tooluse: true, vision: true }
|
|
77
|
+
}
|
|
78
|
+
}
|
|
48
79
|
},
|
|
49
|
-
"openai/gpt
|
|
80
|
+
"openai/gpt/4o-mini": {
|
|
50
81
|
model: "gpt-4o-mini",
|
|
51
|
-
description: "gpt-4o-mini - fast and cost-effective multimodal model"
|
|
82
|
+
description: "gpt-4o-mini - fast and cost-effective multimodal model",
|
|
83
|
+
spec: {
|
|
84
|
+
cost: {
|
|
85
|
+
time: {
|
|
86
|
+
speed: { tokens: 150, per: { seconds: 1 } },
|
|
87
|
+
latency: { milliseconds: 300 }
|
|
88
|
+
},
|
|
89
|
+
cash: {
|
|
90
|
+
per: "token",
|
|
91
|
+
cache: {
|
|
92
|
+
get: (0, import_iso_price.dividePrice)({ of: "$0.075", by: 1e6 }),
|
|
93
|
+
set: (0, import_iso_price.dividePrice)({ of: "$0.15", by: 1e6 })
|
|
94
|
+
},
|
|
95
|
+
input: (0, import_iso_price.dividePrice)({ of: "$0.15", by: 1e6 }),
|
|
96
|
+
output: (0, import_iso_price.dividePrice)({ of: "$0.60", by: 1e6 })
|
|
97
|
+
}
|
|
98
|
+
},
|
|
99
|
+
gain: {
|
|
100
|
+
size: { context: { tokens: 128e3 } },
|
|
101
|
+
grades: { mmlu: 82, humaneval: 87 },
|
|
102
|
+
cutoff: "2023-10-01",
|
|
103
|
+
domain: "ALL",
|
|
104
|
+
skills: { tooluse: true, vision: true }
|
|
105
|
+
}
|
|
106
|
+
}
|
|
52
107
|
},
|
|
53
|
-
"openai/gpt
|
|
108
|
+
"openai/gpt/4-turbo": {
|
|
54
109
|
model: "gpt-4-turbo",
|
|
55
|
-
description: "gpt-4-turbo - high capability with vision support"
|
|
110
|
+
description: "gpt-4-turbo - high capability with vision support",
|
|
111
|
+
spec: {
|
|
112
|
+
cost: {
|
|
113
|
+
time: {
|
|
114
|
+
speed: { tokens: 80, per: { seconds: 1 } },
|
|
115
|
+
latency: { milliseconds: 600 }
|
|
116
|
+
},
|
|
117
|
+
cash: {
|
|
118
|
+
per: "token",
|
|
119
|
+
cache: {
|
|
120
|
+
get: (0, import_iso_price.dividePrice)({ of: "$5.00", by: 1e6 }),
|
|
121
|
+
set: (0, import_iso_price.dividePrice)({ of: "$10.00", by: 1e6 })
|
|
122
|
+
},
|
|
123
|
+
input: (0, import_iso_price.dividePrice)({ of: "$10.00", by: 1e6 }),
|
|
124
|
+
output: (0, import_iso_price.dividePrice)({ of: "$30.00", by: 1e6 })
|
|
125
|
+
}
|
|
126
|
+
},
|
|
127
|
+
gain: {
|
|
128
|
+
size: { context: { tokens: 128e3 } },
|
|
129
|
+
grades: { mmlu: 86.4, humaneval: 87.1 },
|
|
130
|
+
cutoff: "2023-12-01",
|
|
131
|
+
domain: "ALL",
|
|
132
|
+
skills: { tooluse: true, vision: true }
|
|
133
|
+
}
|
|
134
|
+
}
|
|
135
|
+
},
|
|
136
|
+
// =========================================================================
|
|
137
|
+
// gpt-4.1 family (april 2025)
|
|
138
|
+
// =========================================================================
|
|
139
|
+
"openai/gpt/4.1": {
|
|
140
|
+
model: "gpt-4.1",
|
|
141
|
+
description: "gpt-4.1 - 1M context with improved instruction follow",
|
|
142
|
+
spec: {
|
|
143
|
+
cost: {
|
|
144
|
+
time: {
|
|
145
|
+
speed: { tokens: 100, per: { seconds: 1 } },
|
|
146
|
+
latency: { milliseconds: 400 }
|
|
147
|
+
},
|
|
148
|
+
cash: {
|
|
149
|
+
per: "token",
|
|
150
|
+
cache: {
|
|
151
|
+
get: (0, import_iso_price.dividePrice)({ of: "$0.50", by: 1e6 }),
|
|
152
|
+
set: (0, import_iso_price.dividePrice)({ of: "$2.00", by: 1e6 })
|
|
153
|
+
},
|
|
154
|
+
input: (0, import_iso_price.dividePrice)({ of: "$2.00", by: 1e6 }),
|
|
155
|
+
output: (0, import_iso_price.dividePrice)({ of: "$8.00", by: 1e6 })
|
|
156
|
+
}
|
|
157
|
+
},
|
|
158
|
+
gain: {
|
|
159
|
+
size: { context: { tokens: 1e6 } },
|
|
160
|
+
grades: {},
|
|
161
|
+
cutoff: "2024-06-01",
|
|
162
|
+
domain: "ALL",
|
|
163
|
+
skills: { tooluse: true, vision: true }
|
|
164
|
+
}
|
|
165
|
+
}
|
|
56
166
|
},
|
|
57
|
-
"openai/
|
|
167
|
+
"openai/gpt/4.1-mini": {
|
|
168
|
+
model: "gpt-4.1-mini",
|
|
169
|
+
description: "gpt-4.1-mini - fast and cost-effective",
|
|
170
|
+
spec: {
|
|
171
|
+
cost: {
|
|
172
|
+
time: {
|
|
173
|
+
speed: { tokens: 150, per: { seconds: 1 } },
|
|
174
|
+
latency: { milliseconds: 250 }
|
|
175
|
+
},
|
|
176
|
+
cash: {
|
|
177
|
+
per: "token",
|
|
178
|
+
cache: {
|
|
179
|
+
get: (0, import_iso_price.dividePrice)({ of: "$0.10", by: 1e6 }),
|
|
180
|
+
set: (0, import_iso_price.dividePrice)({ of: "$0.40", by: 1e6 })
|
|
181
|
+
},
|
|
182
|
+
input: (0, import_iso_price.dividePrice)({ of: "$0.40", by: 1e6 }),
|
|
183
|
+
output: (0, import_iso_price.dividePrice)({ of: "$1.60", by: 1e6 })
|
|
184
|
+
}
|
|
185
|
+
},
|
|
186
|
+
gain: {
|
|
187
|
+
size: { context: { tokens: 128e3 } },
|
|
188
|
+
grades: {},
|
|
189
|
+
cutoff: "2024-06-01",
|
|
190
|
+
domain: "ALL",
|
|
191
|
+
skills: { tooluse: true, vision: true }
|
|
192
|
+
}
|
|
193
|
+
}
|
|
194
|
+
},
|
|
195
|
+
"openai/gpt/4.1-nano": {
|
|
196
|
+
model: "gpt-4.1-nano",
|
|
197
|
+
description: "gpt-4.1-nano - ultra low cost for simple tasks",
|
|
198
|
+
spec: {
|
|
199
|
+
cost: {
|
|
200
|
+
time: {
|
|
201
|
+
speed: { tokens: 200, per: { seconds: 1 } },
|
|
202
|
+
latency: { milliseconds: 150 }
|
|
203
|
+
},
|
|
204
|
+
cash: {
|
|
205
|
+
per: "token",
|
|
206
|
+
cache: {
|
|
207
|
+
get: (0, import_iso_price.dividePrice)({ of: "$0.025", by: 1e6 }),
|
|
208
|
+
set: (0, import_iso_price.dividePrice)({ of: "$0.10", by: 1e6 })
|
|
209
|
+
},
|
|
210
|
+
input: (0, import_iso_price.dividePrice)({ of: "$0.10", by: 1e6 }),
|
|
211
|
+
output: (0, import_iso_price.dividePrice)({ of: "$0.40", by: 1e6 })
|
|
212
|
+
}
|
|
213
|
+
},
|
|
214
|
+
gain: {
|
|
215
|
+
size: { context: { tokens: 1e6 } },
|
|
216
|
+
grades: {},
|
|
217
|
+
cutoff: "2024-06-01",
|
|
218
|
+
domain: "ALL",
|
|
219
|
+
skills: { tooluse: true }
|
|
220
|
+
}
|
|
221
|
+
}
|
|
222
|
+
},
|
|
223
|
+
// =========================================================================
|
|
224
|
+
// o1 family
|
|
225
|
+
// =========================================================================
|
|
226
|
+
"openai/o/1": {
|
|
58
227
|
model: "o1",
|
|
59
|
-
description: "o1 - advanced
|
|
228
|
+
description: "o1 - advanced reason model for complex problems",
|
|
229
|
+
spec: {
|
|
230
|
+
cost: {
|
|
231
|
+
time: {
|
|
232
|
+
speed: { tokens: 50, per: { seconds: 1 } },
|
|
233
|
+
latency: { seconds: 2 }
|
|
234
|
+
},
|
|
235
|
+
cash: {
|
|
236
|
+
per: "token",
|
|
237
|
+
cache: {
|
|
238
|
+
get: (0, import_iso_price.dividePrice)({ of: "$7.50", by: 1e6 }),
|
|
239
|
+
set: (0, import_iso_price.dividePrice)({ of: "$15.00", by: 1e6 })
|
|
240
|
+
},
|
|
241
|
+
input: (0, import_iso_price.dividePrice)({ of: "$15.00", by: 1e6 }),
|
|
242
|
+
output: (0, import_iso_price.dividePrice)({ of: "$60.00", by: 1e6 })
|
|
243
|
+
}
|
|
244
|
+
},
|
|
245
|
+
gain: {
|
|
246
|
+
size: { context: { tokens: 2e5 } },
|
|
247
|
+
grades: { swe: 48.9, mmlu: 92.3, humaneval: 94.8 },
|
|
248
|
+
cutoff: "2023-10-01",
|
|
249
|
+
domain: "ALL",
|
|
250
|
+
skills: { tooluse: true, vision: true }
|
|
251
|
+
}
|
|
252
|
+
}
|
|
60
253
|
},
|
|
61
|
-
"openai/
|
|
254
|
+
"openai/o/1-mini": {
|
|
62
255
|
model: "o1-mini",
|
|
63
|
-
description: "o1-mini - fast
|
|
256
|
+
description: "o1-mini - fast reason model for code and math",
|
|
257
|
+
spec: {
|
|
258
|
+
cost: {
|
|
259
|
+
time: {
|
|
260
|
+
speed: { tokens: 80, per: { seconds: 1 } },
|
|
261
|
+
latency: { seconds: 1 }
|
|
262
|
+
},
|
|
263
|
+
cash: {
|
|
264
|
+
per: "token",
|
|
265
|
+
cache: {
|
|
266
|
+
get: (0, import_iso_price.dividePrice)({ of: "$1.50", by: 1e6 }),
|
|
267
|
+
set: (0, import_iso_price.dividePrice)({ of: "$3.00", by: 1e6 })
|
|
268
|
+
},
|
|
269
|
+
input: (0, import_iso_price.dividePrice)({ of: "$3.00", by: 1e6 }),
|
|
270
|
+
output: (0, import_iso_price.dividePrice)({ of: "$12.00", by: 1e6 })
|
|
271
|
+
}
|
|
272
|
+
},
|
|
273
|
+
gain: {
|
|
274
|
+
size: { context: { tokens: 128e3 } },
|
|
275
|
+
grades: { humaneval: 92.4 },
|
|
276
|
+
cutoff: "2023-10-01",
|
|
277
|
+
domain: "ALL",
|
|
278
|
+
skills: { tooluse: true }
|
|
279
|
+
}
|
|
280
|
+
}
|
|
64
281
|
},
|
|
65
|
-
"openai/
|
|
282
|
+
"openai/o/1-preview": {
|
|
66
283
|
model: "o1-preview",
|
|
67
|
-
description: "o1-preview - preview of advanced
|
|
284
|
+
description: "o1-preview - preview of advanced reason capabilities",
|
|
285
|
+
spec: {
|
|
286
|
+
cost: {
|
|
287
|
+
time: {
|
|
288
|
+
speed: { tokens: 40, per: { seconds: 1 } },
|
|
289
|
+
latency: { seconds: 3 }
|
|
290
|
+
},
|
|
291
|
+
cash: {
|
|
292
|
+
per: "token",
|
|
293
|
+
cache: {
|
|
294
|
+
get: (0, import_iso_price.dividePrice)({ of: "$7.50", by: 1e6 }),
|
|
295
|
+
set: (0, import_iso_price.dividePrice)({ of: "$15.00", by: 1e6 })
|
|
296
|
+
},
|
|
297
|
+
input: (0, import_iso_price.dividePrice)({ of: "$15.00", by: 1e6 }),
|
|
298
|
+
output: (0, import_iso_price.dividePrice)({ of: "$60.00", by: 1e6 })
|
|
299
|
+
}
|
|
300
|
+
},
|
|
301
|
+
gain: {
|
|
302
|
+
size: { context: { tokens: 128e3 } },
|
|
303
|
+
grades: { mmlu: 90.8 },
|
|
304
|
+
cutoff: "2023-10-01",
|
|
305
|
+
domain: "ALL",
|
|
306
|
+
skills: { tooluse: true }
|
|
307
|
+
}
|
|
308
|
+
}
|
|
309
|
+
},
|
|
310
|
+
// =========================================================================
|
|
311
|
+
// o3 family (april 2025)
|
|
312
|
+
// =========================================================================
|
|
313
|
+
"openai/o/3": {
|
|
314
|
+
model: "o3",
|
|
315
|
+
description: "o3 - frontier reason model for code, math, science",
|
|
316
|
+
spec: {
|
|
317
|
+
cost: {
|
|
318
|
+
time: {
|
|
319
|
+
speed: { tokens: 60, per: { seconds: 1 } },
|
|
320
|
+
latency: { seconds: 2 }
|
|
321
|
+
},
|
|
322
|
+
cash: {
|
|
323
|
+
per: "token",
|
|
324
|
+
cache: {
|
|
325
|
+
get: (0, import_iso_price.dividePrice)({ of: "$0.10", by: 1e6 }),
|
|
326
|
+
set: (0, import_iso_price.dividePrice)({ of: "$0.40", by: 1e6 })
|
|
327
|
+
},
|
|
328
|
+
input: (0, import_iso_price.dividePrice)({ of: "$0.40", by: 1e6 }),
|
|
329
|
+
output: (0, import_iso_price.dividePrice)({ of: "$1.60", by: 1e6 })
|
|
330
|
+
}
|
|
331
|
+
},
|
|
332
|
+
gain: {
|
|
333
|
+
size: { context: { tokens: 2e5 } },
|
|
334
|
+
grades: {},
|
|
335
|
+
cutoff: "2024-06-01",
|
|
336
|
+
domain: "ALL",
|
|
337
|
+
skills: { tooluse: true, vision: true }
|
|
338
|
+
}
|
|
339
|
+
}
|
|
340
|
+
},
|
|
341
|
+
"openai/o/3-mini": {
|
|
342
|
+
model: "o3-mini",
|
|
343
|
+
description: "o3-mini - fast reason for code and math",
|
|
344
|
+
spec: {
|
|
345
|
+
cost: {
|
|
346
|
+
time: {
|
|
347
|
+
speed: { tokens: 100, per: { seconds: 1 } },
|
|
348
|
+
latency: { seconds: 1 }
|
|
349
|
+
},
|
|
350
|
+
cash: {
|
|
351
|
+
per: "token",
|
|
352
|
+
cache: {
|
|
353
|
+
get: (0, import_iso_price.dividePrice)({ of: "$0.275", by: 1e6 }),
|
|
354
|
+
set: (0, import_iso_price.dividePrice)({ of: "$1.10", by: 1e6 })
|
|
355
|
+
},
|
|
356
|
+
input: (0, import_iso_price.dividePrice)({ of: "$1.10", by: 1e6 }),
|
|
357
|
+
output: (0, import_iso_price.dividePrice)({ of: "$4.40", by: 1e6 })
|
|
358
|
+
}
|
|
359
|
+
},
|
|
360
|
+
gain: {
|
|
361
|
+
size: { context: { tokens: 2e5 } },
|
|
362
|
+
grades: {},
|
|
363
|
+
cutoff: "2024-06-01",
|
|
364
|
+
domain: "ALL",
|
|
365
|
+
skills: { tooluse: true }
|
|
366
|
+
}
|
|
367
|
+
}
|
|
368
|
+
},
|
|
369
|
+
"openai/o/3-pro": {
|
|
370
|
+
model: "o3-pro",
|
|
371
|
+
description: "o3-pro - highest tier with extended compute",
|
|
372
|
+
spec: {
|
|
373
|
+
cost: {
|
|
374
|
+
time: {
|
|
375
|
+
speed: { tokens: 30, per: { seconds: 1 } },
|
|
376
|
+
latency: { seconds: 5 }
|
|
377
|
+
},
|
|
378
|
+
cash: {
|
|
379
|
+
per: "token",
|
|
380
|
+
cache: {
|
|
381
|
+
get: (0, import_iso_price.dividePrice)({ of: "$5.00", by: 1e6 }),
|
|
382
|
+
set: (0, import_iso_price.dividePrice)({ of: "$20.00", by: 1e6 })
|
|
383
|
+
},
|
|
384
|
+
input: (0, import_iso_price.dividePrice)({ of: "$20.00", by: 1e6 }),
|
|
385
|
+
output: (0, import_iso_price.dividePrice)({ of: "$80.00", by: 1e6 })
|
|
386
|
+
}
|
|
387
|
+
},
|
|
388
|
+
gain: {
|
|
389
|
+
size: { context: { tokens: 2e5 } },
|
|
390
|
+
grades: {},
|
|
391
|
+
cutoff: "2024-06-01",
|
|
392
|
+
domain: "ALL",
|
|
393
|
+
skills: { tooluse: true, vision: true }
|
|
394
|
+
}
|
|
395
|
+
}
|
|
396
|
+
},
|
|
397
|
+
// =========================================================================
|
|
398
|
+
// o4 family (april 2025)
|
|
399
|
+
// =========================================================================
|
|
400
|
+
"openai/o/4-mini": {
|
|
401
|
+
model: "o4-mini",
|
|
402
|
+
description: "o4-mini - fast efficient reason model",
|
|
403
|
+
spec: {
|
|
404
|
+
cost: {
|
|
405
|
+
time: {
|
|
406
|
+
speed: { tokens: 100, per: { seconds: 1 } },
|
|
407
|
+
latency: { seconds: 1 }
|
|
408
|
+
},
|
|
409
|
+
cash: {
|
|
410
|
+
per: "token",
|
|
411
|
+
cache: {
|
|
412
|
+
get: (0, import_iso_price.dividePrice)({ of: "$0.275", by: 1e6 }),
|
|
413
|
+
set: (0, import_iso_price.dividePrice)({ of: "$1.10", by: 1e6 })
|
|
414
|
+
},
|
|
415
|
+
input: (0, import_iso_price.dividePrice)({ of: "$1.10", by: 1e6 }),
|
|
416
|
+
output: (0, import_iso_price.dividePrice)({ of: "$4.40", by: 1e6 })
|
|
417
|
+
}
|
|
418
|
+
},
|
|
419
|
+
gain: {
|
|
420
|
+
size: { context: { tokens: 2e5 } },
|
|
421
|
+
grades: {},
|
|
422
|
+
cutoff: "2024-06-01",
|
|
423
|
+
domain: "ALL",
|
|
424
|
+
skills: { tooluse: true, vision: true }
|
|
425
|
+
}
|
|
426
|
+
}
|
|
427
|
+
},
|
|
428
|
+
// =========================================================================
|
|
429
|
+
// gpt-5 family (august 2025)
|
|
430
|
+
// =========================================================================
|
|
431
|
+
"openai/gpt/5": {
|
|
432
|
+
model: "gpt-5",
|
|
433
|
+
description: "gpt-5 - frontier multimodal model",
|
|
434
|
+
spec: {
|
|
435
|
+
cost: {
|
|
436
|
+
time: {
|
|
437
|
+
speed: { tokens: 120, per: { seconds: 1 } },
|
|
438
|
+
latency: { milliseconds: 400 }
|
|
439
|
+
},
|
|
440
|
+
cash: {
|
|
441
|
+
per: "token",
|
|
442
|
+
cache: {
|
|
443
|
+
get: (0, import_iso_price.dividePrice)({ of: "$0.125", by: 1e6 }),
|
|
444
|
+
set: (0, import_iso_price.dividePrice)({ of: "$1.25", by: 1e6 })
|
|
445
|
+
},
|
|
446
|
+
input: (0, import_iso_price.dividePrice)({ of: "$1.25", by: 1e6 }),
|
|
447
|
+
output: (0, import_iso_price.dividePrice)({ of: "$10.00", by: 1e6 })
|
|
448
|
+
}
|
|
449
|
+
},
|
|
450
|
+
gain: {
|
|
451
|
+
size: { context: { tokens: 4e5 } },
|
|
452
|
+
grades: {},
|
|
453
|
+
cutoff: "2024-09-01",
|
|
454
|
+
domain: "ALL",
|
|
455
|
+
skills: { tooluse: true, vision: true }
|
|
456
|
+
}
|
|
457
|
+
}
|
|
458
|
+
},
|
|
459
|
+
"openai/gpt/5-mini": {
|
|
460
|
+
model: "gpt-5-mini",
|
|
461
|
+
description: "gpt-5-mini - fast and cost-effective",
|
|
462
|
+
spec: {
|
|
463
|
+
cost: {
|
|
464
|
+
time: {
|
|
465
|
+
speed: { tokens: 180, per: { seconds: 1 } },
|
|
466
|
+
latency: { milliseconds: 200 }
|
|
467
|
+
},
|
|
468
|
+
cash: {
|
|
469
|
+
per: "token",
|
|
470
|
+
cache: {
|
|
471
|
+
get: (0, import_iso_price.dividePrice)({ of: "$0.025", by: 1e6 }),
|
|
472
|
+
set: (0, import_iso_price.dividePrice)({ of: "$0.25", by: 1e6 })
|
|
473
|
+
},
|
|
474
|
+
input: (0, import_iso_price.dividePrice)({ of: "$0.25", by: 1e6 }),
|
|
475
|
+
output: (0, import_iso_price.dividePrice)({ of: "$2.00", by: 1e6 })
|
|
476
|
+
}
|
|
477
|
+
},
|
|
478
|
+
gain: {
|
|
479
|
+
size: { context: { tokens: 4e5 } },
|
|
480
|
+
grades: {},
|
|
481
|
+
cutoff: "2024-05-01",
|
|
482
|
+
domain: "ALL",
|
|
483
|
+
skills: { tooluse: true, vision: true }
|
|
484
|
+
}
|
|
485
|
+
}
|
|
486
|
+
},
|
|
487
|
+
"openai/gpt/5-pro": {
|
|
488
|
+
model: "gpt-5-pro",
|
|
489
|
+
description: "gpt-5-pro - highest tier with xhigh reason support",
|
|
490
|
+
spec: {
|
|
491
|
+
cost: {
|
|
492
|
+
time: {
|
|
493
|
+
speed: { tokens: 40, per: { seconds: 1 } },
|
|
494
|
+
latency: { seconds: 3 }
|
|
495
|
+
},
|
|
496
|
+
cash: {
|
|
497
|
+
per: "token",
|
|
498
|
+
cache: {
|
|
499
|
+
get: (0, import_iso_price.dividePrice)({ of: "$1.50", by: 1e6 }),
|
|
500
|
+
set: (0, import_iso_price.dividePrice)({ of: "$15.00", by: 1e6 })
|
|
501
|
+
},
|
|
502
|
+
input: (0, import_iso_price.dividePrice)({ of: "$15.00", by: 1e6 }),
|
|
503
|
+
output: (0, import_iso_price.dividePrice)({ of: "$120.00", by: 1e6 })
|
|
504
|
+
}
|
|
505
|
+
},
|
|
506
|
+
gain: {
|
|
507
|
+
size: { context: { tokens: 272e3 } },
|
|
508
|
+
grades: {},
|
|
509
|
+
cutoff: "2024-09-01",
|
|
510
|
+
domain: "ALL",
|
|
511
|
+
skills: { tooluse: true, vision: true }
|
|
512
|
+
}
|
|
513
|
+
}
|
|
514
|
+
},
|
|
515
|
+
"openai/gpt/5-thoughtful": {
|
|
516
|
+
model: "gpt-5-thinking",
|
|
517
|
+
description: "gpt-5-thinking - deep reason for complex problems",
|
|
518
|
+
spec: {
|
|
519
|
+
cost: {
|
|
520
|
+
time: {
|
|
521
|
+
speed: { tokens: 60, per: { seconds: 1 } },
|
|
522
|
+
latency: { seconds: 2 }
|
|
523
|
+
},
|
|
524
|
+
cash: {
|
|
525
|
+
per: "token",
|
|
526
|
+
cache: {
|
|
527
|
+
get: (0, import_iso_price.dividePrice)({ of: "$0.125", by: 1e6 }),
|
|
528
|
+
set: (0, import_iso_price.dividePrice)({ of: "$1.25", by: 1e6 })
|
|
529
|
+
},
|
|
530
|
+
input: (0, import_iso_price.dividePrice)({ of: "$1.25", by: 1e6 }),
|
|
531
|
+
output: (0, import_iso_price.dividePrice)({ of: "$10.00", by: 1e6 })
|
|
532
|
+
}
|
|
533
|
+
},
|
|
534
|
+
gain: {
|
|
535
|
+
size: { context: { tokens: 4e5 } },
|
|
536
|
+
grades: {},
|
|
537
|
+
cutoff: "2024-09-01",
|
|
538
|
+
domain: "ALL",
|
|
539
|
+
skills: { tooluse: true, vision: true }
|
|
540
|
+
}
|
|
541
|
+
}
|
|
542
|
+
},
|
|
543
|
+
"openai/gpt/5-thoughtful-mini": {
|
|
544
|
+
model: "gpt-5-thinking-mini",
|
|
545
|
+
description: "gpt-5-thinking-mini - fast reason model",
|
|
546
|
+
spec: {
|
|
547
|
+
cost: {
|
|
548
|
+
time: {
|
|
549
|
+
speed: { tokens: 100, per: { seconds: 1 } },
|
|
550
|
+
latency: { seconds: 1 }
|
|
551
|
+
},
|
|
552
|
+
cash: {
|
|
553
|
+
per: "token",
|
|
554
|
+
cache: {
|
|
555
|
+
get: (0, import_iso_price.dividePrice)({ of: "$0.025", by: 1e6 }),
|
|
556
|
+
set: (0, import_iso_price.dividePrice)({ of: "$0.25", by: 1e6 })
|
|
557
|
+
},
|
|
558
|
+
input: (0, import_iso_price.dividePrice)({ of: "$0.25", by: 1e6 }),
|
|
559
|
+
output: (0, import_iso_price.dividePrice)({ of: "$2.00", by: 1e6 })
|
|
560
|
+
}
|
|
561
|
+
},
|
|
562
|
+
gain: {
|
|
563
|
+
size: { context: { tokens: 4e5 } },
|
|
564
|
+
grades: {},
|
|
565
|
+
cutoff: "2024-09-01",
|
|
566
|
+
domain: "ALL",
|
|
567
|
+
skills: { tooluse: true }
|
|
568
|
+
}
|
|
569
|
+
}
|
|
570
|
+
},
|
|
571
|
+
// =========================================================================
|
|
572
|
+
// gpt-5.1 family (november 2025)
|
|
573
|
+
// =========================================================================
|
|
574
|
+
"openai/gpt/5.1-instant": {
|
|
575
|
+
model: "gpt-5.1-chat-latest",
|
|
576
|
+
description: "gpt-5.1-instant - fast model for everyday tasks",
|
|
577
|
+
spec: {
|
|
578
|
+
cost: {
|
|
579
|
+
time: {
|
|
580
|
+
speed: { tokens: 150, per: { seconds: 1 } },
|
|
581
|
+
latency: { milliseconds: 300 }
|
|
582
|
+
},
|
|
583
|
+
cash: {
|
|
584
|
+
per: "token",
|
|
585
|
+
cache: {
|
|
586
|
+
get: (0, import_iso_price.dividePrice)({ of: "$0.125", by: 1e6 }),
|
|
587
|
+
set: (0, import_iso_price.dividePrice)({ of: "$1.25", by: 1e6 })
|
|
588
|
+
},
|
|
589
|
+
input: (0, import_iso_price.dividePrice)({ of: "$1.25", by: 1e6 }),
|
|
590
|
+
output: (0, import_iso_price.dividePrice)({ of: "$10.00", by: 1e6 })
|
|
591
|
+
}
|
|
592
|
+
},
|
|
593
|
+
gain: {
|
|
594
|
+
size: { context: { tokens: 4e5 } },
|
|
595
|
+
grades: {},
|
|
596
|
+
cutoff: "2024-09-01",
|
|
597
|
+
domain: "ALL",
|
|
598
|
+
skills: { tooluse: true, vision: true }
|
|
599
|
+
}
|
|
600
|
+
}
|
|
601
|
+
},
|
|
602
|
+
"openai/gpt/5.1-thoughtful": {
|
|
603
|
+
model: "gpt-5.1",
|
|
604
|
+
description: "gpt-5.1 - deep reason with adaptive thought",
|
|
605
|
+
spec: {
|
|
606
|
+
cost: {
|
|
607
|
+
time: {
|
|
608
|
+
speed: { tokens: 80, per: { seconds: 1 } },
|
|
609
|
+
latency: { seconds: 1 }
|
|
610
|
+
},
|
|
611
|
+
cash: {
|
|
612
|
+
per: "token",
|
|
613
|
+
cache: {
|
|
614
|
+
get: (0, import_iso_price.dividePrice)({ of: "$0.125", by: 1e6 }),
|
|
615
|
+
set: (0, import_iso_price.dividePrice)({ of: "$1.25", by: 1e6 })
|
|
616
|
+
},
|
|
617
|
+
input: (0, import_iso_price.dividePrice)({ of: "$1.25", by: 1e6 }),
|
|
618
|
+
output: (0, import_iso_price.dividePrice)({ of: "$10.00", by: 1e6 })
|
|
619
|
+
}
|
|
620
|
+
},
|
|
621
|
+
gain: {
|
|
622
|
+
size: { context: { tokens: 4e5 } },
|
|
623
|
+
grades: {},
|
|
624
|
+
cutoff: "2024-09-01",
|
|
625
|
+
domain: "ALL",
|
|
626
|
+
skills: { tooluse: true, vision: true }
|
|
627
|
+
}
|
|
628
|
+
}
|
|
629
|
+
},
|
|
630
|
+
// =========================================================================
|
|
631
|
+
// gpt-5.2 family (december 2025)
|
|
632
|
+
// =========================================================================
|
|
633
|
+
"openai/gpt/5.2-instant": {
|
|
634
|
+
model: "gpt-5.2-instant",
|
|
635
|
+
description: "gpt-5.2-instant - low latency for daily tasks",
|
|
636
|
+
spec: {
|
|
637
|
+
cost: {
|
|
638
|
+
time: {
|
|
639
|
+
speed: { tokens: 150, per: { seconds: 1 } },
|
|
640
|
+
latency: { milliseconds: 300 }
|
|
641
|
+
},
|
|
642
|
+
cash: {
|
|
643
|
+
per: "token",
|
|
644
|
+
cache: {
|
|
645
|
+
get: (0, import_iso_price.dividePrice)({ of: "$0.175", by: 1e6 }),
|
|
646
|
+
set: (0, import_iso_price.dividePrice)({ of: "$1.75", by: 1e6 })
|
|
647
|
+
},
|
|
648
|
+
input: (0, import_iso_price.dividePrice)({ of: "$1.75", by: 1e6 }),
|
|
649
|
+
output: (0, import_iso_price.dividePrice)({ of: "$14.00", by: 1e6 })
|
|
650
|
+
}
|
|
651
|
+
},
|
|
652
|
+
gain: {
|
|
653
|
+
size: { context: { tokens: 4e5 } },
|
|
654
|
+
grades: {},
|
|
655
|
+
cutoff: "2025-08-01",
|
|
656
|
+
domain: "ALL",
|
|
657
|
+
skills: { tooluse: true, vision: true }
|
|
658
|
+
}
|
|
659
|
+
}
|
|
660
|
+
},
|
|
661
|
+
"openai/gpt/5.2-pro": {
|
|
662
|
+
model: "gpt-5.2-pro",
|
|
663
|
+
description: "gpt-5.2-pro - highest tier with xhigh reason support",
|
|
664
|
+
spec: {
|
|
665
|
+
cost: {
|
|
666
|
+
time: {
|
|
667
|
+
speed: { tokens: 40, per: { seconds: 1 } },
|
|
668
|
+
latency: { seconds: 3 }
|
|
669
|
+
},
|
|
670
|
+
cash: {
|
|
671
|
+
per: "token",
|
|
672
|
+
cache: {
|
|
673
|
+
get: (0, import_iso_price.dividePrice)({ of: "$2.10", by: 1e6 }),
|
|
674
|
+
set: (0, import_iso_price.dividePrice)({ of: "$21.00", by: 1e6 })
|
|
675
|
+
},
|
|
676
|
+
input: (0, import_iso_price.dividePrice)({ of: "$21.00", by: 1e6 }),
|
|
677
|
+
output: (0, import_iso_price.dividePrice)({ of: "$168.00", by: 1e6 })
|
|
678
|
+
}
|
|
679
|
+
},
|
|
680
|
+
gain: {
|
|
681
|
+
size: { context: { tokens: 4e5 } },
|
|
682
|
+
grades: {},
|
|
683
|
+
cutoff: "2025-08-01",
|
|
684
|
+
domain: "ALL",
|
|
685
|
+
skills: { tooluse: true, vision: true }
|
|
686
|
+
}
|
|
687
|
+
}
|
|
688
|
+
},
|
|
689
|
+
"openai/gpt/5.2-thoughtful": {
|
|
690
|
+
model: "gpt-5.2",
|
|
691
|
+
description: "gpt-5.2 - most advanced frontier model for deep reason",
|
|
692
|
+
spec: {
|
|
693
|
+
cost: {
|
|
694
|
+
time: {
|
|
695
|
+
speed: { tokens: 60, per: { seconds: 1 } },
|
|
696
|
+
latency: { seconds: 2 }
|
|
697
|
+
},
|
|
698
|
+
cash: {
|
|
699
|
+
per: "token",
|
|
700
|
+
cache: {
|
|
701
|
+
get: (0, import_iso_price.dividePrice)({ of: "$0.175", by: 1e6 }),
|
|
702
|
+
set: (0, import_iso_price.dividePrice)({ of: "$1.75", by: 1e6 })
|
|
703
|
+
},
|
|
704
|
+
input: (0, import_iso_price.dividePrice)({ of: "$1.75", by: 1e6 }),
|
|
705
|
+
output: (0, import_iso_price.dividePrice)({ of: "$14.00", by: 1e6 })
|
|
706
|
+
}
|
|
707
|
+
},
|
|
708
|
+
gain: {
|
|
709
|
+
size: { context: { tokens: 4e5 } },
|
|
710
|
+
grades: {},
|
|
711
|
+
cutoff: "2025-08-01",
|
|
712
|
+
domain: "ALL",
|
|
713
|
+
skills: { tooluse: true, vision: true }
|
|
714
|
+
}
|
|
715
|
+
}
|
|
716
|
+
},
|
|
717
|
+
// =========================================================================
|
|
718
|
+
// codex family (agentic code via codex sdk)
|
|
719
|
+
// =========================================================================
|
|
720
|
+
"openai/gpt/codex/5.1-max": {
|
|
721
|
+
model: "gpt-5.1-codex-max",
|
|
722
|
+
description: "gpt-5.1-codex-max - optimized for long-horizon agentic code",
|
|
723
|
+
spec: {
|
|
724
|
+
cost: {
|
|
725
|
+
time: {
|
|
726
|
+
speed: { tokens: 80, per: { seconds: 1 } },
|
|
727
|
+
latency: { seconds: 2 }
|
|
728
|
+
},
|
|
729
|
+
cash: {
|
|
730
|
+
per: "token",
|
|
731
|
+
cache: {
|
|
732
|
+
get: (0, import_iso_price.dividePrice)({ of: "$3.75", by: 1e6 }),
|
|
733
|
+
set: (0, import_iso_price.dividePrice)({ of: "$7.50", by: 1e6 })
|
|
734
|
+
},
|
|
735
|
+
input: (0, import_iso_price.dividePrice)({ of: "$7.50", by: 1e6 }),
|
|
736
|
+
output: (0, import_iso_price.dividePrice)({ of: "$30.00", by: 1e6 })
|
|
737
|
+
}
|
|
738
|
+
},
|
|
739
|
+
gain: {
|
|
740
|
+
size: { context: { tokens: 4e5 } },
|
|
741
|
+
grades: { swe: 72 },
|
|
742
|
+
cutoff: "2024-09-01",
|
|
743
|
+
domain: "SOFTWARE",
|
|
744
|
+
skills: { tooluse: true }
|
|
745
|
+
}
|
|
746
|
+
}
|
|
747
|
+
},
|
|
748
|
+
"openai/gpt/codex/5.1-mini": {
|
|
749
|
+
model: "gpt-5.1-codex-mini",
|
|
750
|
+
description: "gpt-5.1-codex-mini - fast and cost-effective",
|
|
751
|
+
spec: {
|
|
752
|
+
cost: {
|
|
753
|
+
time: {
|
|
754
|
+
speed: { tokens: 120, per: { seconds: 1 } },
|
|
755
|
+
latency: { seconds: 1 }
|
|
756
|
+
},
|
|
757
|
+
cash: {
|
|
758
|
+
per: "token",
|
|
759
|
+
cache: {
|
|
760
|
+
get: (0, import_iso_price.dividePrice)({ of: "$1.50", by: 1e6 }),
|
|
761
|
+
set: (0, import_iso_price.dividePrice)({ of: "$3.00", by: 1e6 })
|
|
762
|
+
},
|
|
763
|
+
input: (0, import_iso_price.dividePrice)({ of: "$3.00", by: 1e6 }),
|
|
764
|
+
output: (0, import_iso_price.dividePrice)({ of: "$12.00", by: 1e6 })
|
|
765
|
+
}
|
|
766
|
+
},
|
|
767
|
+
gain: {
|
|
768
|
+
size: { context: { tokens: 4e5 } },
|
|
769
|
+
grades: { swe: 55 },
|
|
770
|
+
cutoff: "2024-09-01",
|
|
771
|
+
domain: "SOFTWARE",
|
|
772
|
+
skills: { tooluse: true }
|
|
773
|
+
}
|
|
774
|
+
}
|
|
775
|
+
},
|
|
776
|
+
"openai/gpt/codex/5.2": {
|
|
777
|
+
model: "gpt-5.2-codex",
|
|
778
|
+
description: "gpt-5.2-codex - most advanced agentic code model",
|
|
779
|
+
spec: {
|
|
780
|
+
cost: {
|
|
781
|
+
time: {
|
|
782
|
+
speed: { tokens: 60, per: { seconds: 1 } },
|
|
783
|
+
latency: { seconds: 3 }
|
|
784
|
+
},
|
|
785
|
+
cash: {
|
|
786
|
+
per: "token",
|
|
787
|
+
cache: {
|
|
788
|
+
get: (0, import_iso_price.dividePrice)({ of: "$5.00", by: 1e6 }),
|
|
789
|
+
set: (0, import_iso_price.dividePrice)({ of: "$10.00", by: 1e6 })
|
|
790
|
+
},
|
|
791
|
+
input: (0, import_iso_price.dividePrice)({ of: "$10.00", by: 1e6 }),
|
|
792
|
+
output: (0, import_iso_price.dividePrice)({ of: "$40.00", by: 1e6 })
|
|
793
|
+
}
|
|
794
|
+
},
|
|
795
|
+
gain: {
|
|
796
|
+
size: { context: { tokens: 4e5 } },
|
|
797
|
+
grades: { swe: 78 },
|
|
798
|
+
cutoff: "2025-08-01",
|
|
799
|
+
domain: "SOFTWARE",
|
|
800
|
+
skills: { tooluse: true }
|
|
801
|
+
}
|
|
802
|
+
}
|
|
68
803
|
}
|
|
69
804
|
};
|
|
805
|
+
|
|
806
|
+
// src/domain.operations/atoms/genBrainAtom.ts
|
|
70
807
|
var genBrainAtom = (input) => {
|
|
71
|
-
const config =
|
|
808
|
+
const config = CONFIG_BY_ATOM_SLUG[input.slug];
|
|
72
809
|
return new import_rhachet.BrainAtom({
|
|
73
810
|
repo: "openai",
|
|
74
811
|
slug: input.slug,
|
|
75
812
|
description: config.description,
|
|
813
|
+
spec: config.spec,
|
|
76
814
|
/**
|
|
77
815
|
* .what = stateless inference (no tool use)
|
|
78
|
-
* .why = provides direct model access for
|
|
816
|
+
* .why = provides direct model access for reason tasks
|
|
79
817
|
*/
|
|
80
818
|
ask: async (askInput, context) => {
|
|
819
|
+
const startTime = Date.now();
|
|
81
820
|
const systemPrompt = askInput.role.briefs ? await (0, import_rhachet.castBriefsToPrompt)({ briefs: askInput.role.briefs }) : void 0;
|
|
82
821
|
const openai = context?.openai ?? new import_openai.default({ apiKey: process.env.OPENAI_API_KEY });
|
|
83
|
-
const messages = [];
|
|
84
|
-
if (systemPrompt) {
|
|
85
|
-
messages.push({ role: "system", content: systemPrompt });
|
|
86
|
-
}
|
|
87
|
-
messages.push({ role: "user", content: askInput.prompt });
|
|
88
822
|
const jsonSchema = import_zod.z.toJSONSchema(askInput.schema.output);
|
|
89
823
|
const isObjectSchema = typeof jsonSchema === "object" && jsonSchema !== null && "type" in jsonSchema && jsonSchema.type === "object";
|
|
90
|
-
const
|
|
824
|
+
const fullPrompt = systemPrompt ? `${systemPrompt}
|
|
825
|
+
|
|
826
|
+
---
|
|
827
|
+
|
|
828
|
+
${askInput.prompt}` : askInput.prompt;
|
|
829
|
+
const response = await openai.responses.create({
|
|
91
830
|
model: config.model,
|
|
92
|
-
|
|
831
|
+
input: fullPrompt,
|
|
93
832
|
...isObjectSchema && {
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
|
|
833
|
+
text: {
|
|
834
|
+
format: {
|
|
835
|
+
type: "json_schema",
|
|
97
836
|
name: "response",
|
|
98
837
|
strict: true,
|
|
99
838
|
schema: jsonSchema
|
|
@@ -101,12 +840,42 @@ var genBrainAtom = (input) => {
         }
       }
     });
-    const
-
-
-
-
-
+    const outputItem = response.output.find(
+      (item) => item.type === "message"
+    );
+    const textContent = outputItem?.type === "message" ? outputItem.content.find((c) => c.type === "output_text") : void 0;
+    const content = textContent?.type === "output_text" ? textContent.text : "";
+    const tokensInput = response.usage?.input_tokens ?? 0;
+    const tokensOutput = response.usage?.output_tokens ?? 0;
+    const tokensCacheGet = response.usage?.input_tokens_details?.cached_tokens ?? 0;
+    const elapsedMs = Date.now() - startTime;
+    const charsInput = fullPrompt.length;
+    const charsOutput = content.length;
+    const size = {
+      tokens: {
+        input: tokensInput,
+        output: tokensOutput,
+        cache: { get: tokensCacheGet, set: 0 }
+      },
+      chars: {
+        input: charsInput,
+        output: charsOutput,
+        cache: { get: 0, set: 0 }
+      }
+    };
+    const { cash } = (0, import_calcBrainOutputCost.calcBrainOutputCost)({
+      for: { tokens: size.tokens },
+      with: { cost: { cash: config.spec.cost.cash } }
+    });
+    const metrics = new import_rhachet.BrainOutputMetrics({
+      size,
+      cost: {
+        time: { milliseconds: elapsedMs },
+        cash
+      }
+    });
+    const output = isObjectSchema ? askInput.schema.output.parse(JSON.parse(content)) : askInput.schema.output.parse(content);
+    return new import_rhachet.BrainOutput({ output, metrics });
     }
   });
 };
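Callers therefore receive a `BrainOutput` carrying both the parsed output and the metrics assembled above. A hedged sketch of reading it (field names follow the construction above; that `BrainOutputMetrics` exposes them as plain properties is an assumption):

```ts
import { genBrainAtom } from 'rhachet-brains-openai';
import { z } from 'zod';

const result = await genBrainAtom({ slug: 'openai/gpt/4o-mini' }).ask({
  role: { briefs: [] },
  prompt: 'say hello',
  schema: { output: z.string() },
});

// parsed output plus the metrics built from response.usage
console.log(result.output);
console.log(result.metrics.size.tokens.input);       // prompt tokens
console.log(result.metrics.size.tokens.output);       // completion tokens
console.log(result.metrics.cost.time.milliseconds);   // wall-clock latency
console.log(result.metrics.cost.cash);                // cash cost via calcBrainOutputCost
```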
@@ -122,7 +891,7 @@ var import_url = require("url");
 var import_meta = { get url() {
   var path = require('path');
   var fs = require('fs');
-  var dir =
+  var dir = __dirname;
   while (dir !== path.dirname(dir)) {
     var candidate = path.join(dir, 'node_modules', '@openai', 'codex-sdk', 'dist', 'index.js');
     if (fs.existsSync(candidate)) return require('url').pathToFileURL(candidate).href;
@@ -467,6 +1236,7 @@ var Codex = class {
 
 // src/domain.operations/repls/genBrainRepl.ts
 var import_rhachet2 = require("rhachet");
+var import_calcBrainOutputCost2 = require("rhachet/dist/domain.operations/brainCost/calcBrainOutputCost");
 var import_wrapper_fns = require("wrapper-fns");
 
 // src/infra/schema/asJsonSchema.ts
@@ -476,24 +1246,18 @@ var asJsonSchema = (input) => {
 };
 
 // src/domain.operations/repls/genBrainRepl.ts
-var
-
-
-
-
-
-
-
-
-
-  "openai/codex/mini":
-
-    description: "codex mini - fast and cost-effective"
-  },
-  "openai/codex/5.2": {
-    model: "gpt-5.2-codex",
-    description: "codex 5.2 - most advanced agentic coding model"
-  }
+var CONFIG_BY_REPL_SLUG = {
+  // default
+  "openai/codex": CONFIG_BY_ATOM_SLUG["openai/gpt/codex/5.1-max"],
+  // version only (defaults to max tier)
+  "openai/codex/5.1": CONFIG_BY_ATOM_SLUG["openai/gpt/codex/5.1-max"],
+  "openai/codex/5.2": CONFIG_BY_ATOM_SLUG["openai/gpt/codex/5.2"],
+  // capability tier (defaults to 5.1)
+  "openai/codex/mini": CONFIG_BY_ATOM_SLUG["openai/gpt/codex/5.1-mini"],
+  "openai/codex/max": CONFIG_BY_ATOM_SLUG["openai/gpt/codex/5.1-max"],
+  // explicit 5.1 versions
+  "openai/codex/mini/5.1": CONFIG_BY_ATOM_SLUG["openai/gpt/codex/5.1-mini"],
+  "openai/codex/max/5.1": CONFIG_BY_ATOM_SLUG["openai/gpt/codex/5.1-max"]
 };
 var composePromptWithSystem = (userPrompt, systemPrompt) => {
   if (!systemPrompt) return userPrompt;
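Since each repl slug is a thin alias onto a codex atom config, tier/version selection is a plain table lookup. A hedged sketch of what that resolution means for a caller (model names per the mapping above; reading `.description` off the repl instance is an assumption):

```ts
import { genBrainRepl } from 'rhachet-brains-openai';

// each repl slug aliases one codex atom config
const repl = genBrainRepl({ slug: 'openai/codex' });      // -> gpt-5.1-codex-max
const mini = genBrainRepl({ slug: 'openai/codex/mini' }); // -> gpt-5.1-codex-mini
const v52 = genBrainRepl({ slug: 'openai/codex/5.2' });   // -> gpt-5.2-codex

// the chosen config's description is carried onto the repl instance
console.log(repl.description);
```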
@@ -504,6 +1268,7 @@ var composePromptWithSystem = (userPrompt, systemPrompt) => {
 ${userPrompt}`;
 };
 var invokeCodex = async (input) => {
+  const startTime = Date.now();
   const systemPrompt = input.role.briefs ? await (0, import_rhachet2.castBriefsToPrompt)({ briefs: input.role.briefs }) : void 0;
   const outputSchema = asJsonSchema({
     schema: input.schema.output
@@ -522,31 +1287,78 @@ var invokeCodex = async (input) => {
       threshold: { seconds: 60 }
     })
   )();
-
+  const elapsedMs = Date.now() - startTime;
+  const content = response.finalResponse;
+  const output = input.schema.output.parse(JSON.parse(content));
+  const usage = response.usage ?? {
+    input_tokens: 0,
+    output_tokens: 0,
+    cached_input_tokens: 0
+  };
+  const tokensInput = usage.input_tokens ?? 0;
+  const tokensOutput = usage.output_tokens ?? 0;
+  const tokensCacheGet = usage.cached_input_tokens ?? 0;
+  const tokensCacheSet = 0;
+  const charsInput = (systemPrompt?.length ?? 0) + input.prompt.length;
+  const charsOutput = content.length;
+  const size = {
+    tokens: {
+      input: tokensInput,
+      output: tokensOutput,
+      cache: { get: tokensCacheGet, set: tokensCacheSet }
+    },
+    chars: {
+      input: charsInput,
+      output: charsOutput,
+      cache: { get: 0, set: 0 }
+    }
+  };
+  const { cash } = (0, import_calcBrainOutputCost2.calcBrainOutputCost)({
+    for: { tokens: size.tokens },
+    with: { cost: { cash: input.spec.cost.cash } }
+  });
+  const metrics = new import_rhachet2.BrainOutputMetrics({
+    size,
+    cost: {
+      time: { milliseconds: elapsedMs },
+      cash
+    }
+  });
+  return new import_rhachet2.BrainOutput({ output, metrics });
 };
 var genBrainRepl = (input) => {
-  const config =
-  const modelSlug = input.slug.replace(/^openai\//, "");
+  const config = CONFIG_BY_REPL_SLUG[input.slug];
   return new import_rhachet2.BrainRepl({
     repo: "openai",
-    slug:
+    slug: input.slug,
     description: config.description,
+    spec: config.spec,
     /**
      * .what = readonly analysis (research, queries, code review)
-     * .why = provides safe, non-
+     * .why = provides safe, non-mutate agent interactions via read-only sandbox
      */
-    ask: async (askInput, _context) => invokeCodex({
+    ask: async (askInput, _context) => invokeCodex({
+      mode: "ask",
+      model: config.model,
+      spec: config.spec,
+      ...askInput
+    }),
     /**
      * .what = read+write actions (code changes, file edits)
      * .why = provides full agentic capabilities via workspace-write sandbox
      */
-    act: async (actInput, _context) => invokeCodex({
+    act: async (actInput, _context) => invokeCodex({
+      mode: "act",
+      model: config.model,
+      spec: config.spec,
+      ...actInput
+    })
   });
 };
 
 // src/contract/sdk/index.ts
 var getBrainAtomsByOpenAI = () => {
-  return [genBrainAtom({ slug: "openai/gpt
+  return [genBrainAtom({ slug: "openai/gpt/4o" })];
 };
 var getBrainReplsByOpenAI = () => {
   return [genBrainRepl({ slug: "openai/codex" })];
package/package.json
CHANGED
@@ -2,7 +2,7 @@
   "name": "rhachet-brains-openai",
   "author": "ehmpathy",
   "description": "rhachet brain.atom and brain.repl adapter for openai",
-  "version": "0.1.8",
+  "version": "0.2.0",
   "repository": "ehmpathy/rhachet-brains-openai",
   "homepage": "https://github.com/ehmpathy/rhachet-brains-openai",
   "keywords": [
@@ -60,6 +60,7 @@
   "dependencies": {
     "domain-objects": "0.31.9",
     "helpful-errors": "1.5.3",
+    "iso-price": "1.1.1",
     "openai": "5.8.2",
     "rhachet-artifact": "1.0.1",
     "rhachet-artifact-git": "1.1.5",
@@ -87,12 +88,13 @@
     "esbuild": "0.27.2",
     "esbuild-register": "3.6.0",
     "husky": "8.0.3",
+    "iso-time": "1.11.1",
     "jest": "30.2.0",
-    "rhachet": "1.
+    "rhachet": "1.26.0",
     "rhachet-roles-bhrain": "0.5.9",
     "rhachet-roles-bhuild": "0.6.3",
     "rhachet-roles-ehmpathy": "1.17.15",
-    "test-fns": "1.
+    "test-fns": "1.10.0",
     "tsc-alias": "1.8.10",
     "tsx": "4.20.6",
     "typescript": "5.4.5",
package/readme.md
CHANGED
@@ -8,6 +8,8 @@ rhachet brain.atom and brain.repl adapter for openai
 npm install rhachet-brains-openai
 ```
 
+note: this package bundles `@openai/codex-sdk` js for seamless cjs (e.g., jest) compatibility. vendor binaries come from the peer dep.
+
 ## usage
 
 ```ts
@@ -15,17 +17,17 @@ import { genBrainAtom, genBrainRepl } from 'rhachet-brains-openai';
 import { z } from 'zod';
 
 // create a brain atom for direct model inference
-const brainAtom = genBrainAtom({ slug: 'openai/gpt
+const brainAtom = genBrainAtom({ slug: 'openai/gpt/4o-mini' });
 
 // simple string output
-const explanation = await brainAtom.ask({
+const { output: explanation } = await brainAtom.ask({
   role: { briefs: [] },
   prompt: 'explain this code',
   schema: { output: z.string() },
 });
 
 // structured object output
-const { summary, issues } = await brainAtom.ask({
+const { output: { summary, issues } } = await brainAtom.ask({
   role: { briefs: [] },
   prompt: 'analyze this code',
   schema: { output: z.object({ summary: z.string(), issues: z.array(z.string()) }) },
@@ -35,14 +37,14 @@ const { summary, issues } = await brainAtom.ask({
 const brainRepl = genBrainRepl({ slug: 'openai/codex' });
 
 // use ask() for read-only operations
-const { analysis } = await brainRepl.ask({
+const { output: { analysis } } = await brainRepl.ask({
   role: { briefs: [] },
   prompt: 'analyze this codebase',
   schema: { output: z.object({ analysis: z.string() }) },
 });
 
 // use act() for read+write operations
-const { proposal } = await brainRepl.act({
+const { output: { proposal } } = await brainRepl.act({
   role: { briefs: [] },
   prompt: 'refactor this module',
   schema: { output: z.object({ proposal: z.string() }) },
@@ -55,27 +57,51 @@ const { proposal } = await brainRepl.act({
 
 stateless inference without tool use.
 
-| slug
-|
-| `openai/gpt-
-| `openai/gpt-
-| `openai/gpt-
-| `openai/
-| `openai/
-| `openai/
+| slug                           | model               | context | cost (input) | cost (output) | cutoff  |
+| ------------------------------ | ------------------- | ------- | ------------ | ------------- | ------- |
+| `openai/gpt/5.2-instant`       | gpt-5.2-instant     | 400K    | $1.75/1M     | $14/1M        | 2025-08 |
+| `openai/gpt/5.2-pro`           | gpt-5.2-pro         | 400K    | $21/1M       | $168/1M       | 2025-08 |
+| `openai/gpt/5.2-thoughtful`    | gpt-5.2             | 400K    | $1.75/1M     | $14/1M        | 2025-08 |
+| `openai/gpt/codex/5.2`         | gpt-5.2-codex       | 400K    | $10/1M       | $40/1M        | 2025-08 |
+| `openai/gpt/5`                 | gpt-5               | 400K    | $1.25/1M     | $10/1M        | 2024-09 |
+| `openai/gpt/5-pro`             | gpt-5-pro           | 272K    | $15/1M       | $120/1M       | 2024-09 |
+| `openai/gpt/5-thoughtful`      | gpt-5-thinking      | 400K    | $1.25/1M     | $10/1M        | 2024-09 |
+| `openai/gpt/5-thoughtful-mini` | gpt-5-thinking-mini | 400K    | $0.25/1M     | $2/1M         | 2024-09 |
+| `openai/gpt/5.1-instant`       | gpt-5.1-chat-latest | 400K    | $1.25/1M     | $10/1M        | 2024-09 |
+| `openai/gpt/5.1-thoughtful`    | gpt-5.1             | 400K    | $1.25/1M     | $10/1M        | 2024-09 |
+| `openai/gpt/codex/5.1-max`     | gpt-5.1-codex-max   | 400K    | $7.50/1M     | $30/1M        | 2024-09 |
+| `openai/gpt/codex/5.1-mini`    | gpt-5.1-codex-mini  | 400K    | $3/1M        | $12/1M        | 2024-09 |
+| `openai/gpt/4.1`               | gpt-4.1             | 1M      | $2/1M        | $8/1M         | 2024-06 |
+| `openai/gpt/4.1-mini`          | gpt-4.1-mini        | 128K    | $0.40/1M     | $1.60/1M      | 2024-06 |
+| `openai/gpt/4.1-nano`          | gpt-4.1-nano        | 1M      | $0.10/1M     | $0.40/1M      | 2024-06 |
+| `openai/o/3`                   | o3                  | 200K    | $0.40/1M     | $1.60/1M      | 2024-06 |
+| `openai/o/3-mini`              | o3-mini             | 200K    | $1.10/1M     | $4.40/1M      | 2024-06 |
+| `openai/o/3-pro`               | o3-pro              | 200K    | $20/1M       | $80/1M        | 2024-06 |
+| `openai/o/4-mini`              | o4-mini             | 200K    | $1.10/1M     | $4.40/1M      | 2024-06 |
+| `openai/gpt/5-mini`            | gpt-5-mini          | 400K    | $0.25/1M     | $2/1M         | 2024-05 |
+| `openai/gpt/4-turbo`           | gpt-4-turbo         | 128K    | $10/1M       | $30/1M        | 2023-12 |
+| `openai/gpt/4o`                | gpt-4o              | 128K    | $2.50/1M     | $10/1M        | 2023-10 |
+| `openai/gpt/4o-mini`           | gpt-4o-mini         | 128K    | $0.15/1M     | $0.60/1M      | 2023-10 |
+| `openai/o/1`                   | o1                  | 200K    | $15/1M       | $60/1M        | 2023-10 |
+| `openai/o/1-mini`              | o1-mini             | 128K    | $3/1M        | $12/1M        | 2023-10 |
+| `openai/o/1-preview`           | o1-preview          | 128K    | $15/1M       | $60/1M        | 2023-10 |
 
 ### repls (via genBrainRepl)
 
-agentic
+agentic code assistant with tool use via codex-sdk.
 
-| slug
-|
-| `openai/codex`
-| `openai/codex/
-| `openai/codex/
-| `openai/codex/5.
+| slug                     | model              | context | cost (input) | cost (output) | cutoff  |
+| ------------------------ | ------------------ | ------- | ------------ | ------------- | ------- |
+| `openai/codex`           | gpt-5.1-codex-max  | 400K    | $7.50/1M     | $30/1M        | 2024-09 |
+| `openai/codex/mini`      | gpt-5.1-codex-mini | 400K    | $3/1M        | $12/1M        | 2024-09 |
+| `openai/codex/max`       | gpt-5.1-codex-max  | 400K    | $7.50/1M     | $30/1M        | 2024-09 |
+| `openai/codex/5.1`       | gpt-5.1-codex-max  | 400K    | $7.50/1M     | $30/1M        | 2024-09 |
+| `openai/codex/5.2`       | gpt-5.2-codex      | 400K    | $10/1M       | $40/1M        | 2025-08 |
+| `openai/codex/mini/5.1`  | gpt-5.1-codex-mini | 400K    | $3/1M        | $12/1M        | 2024-09 |
+| `openai/codex/max/5.1`   | gpt-5.1-codex-max  | 400K    | $7.50/1M     | $30/1M        | 2024-09 |
 
 ## sources
 
-- [
-- [
+- [openai api costs](https://openai.com/api/pricing/)
+- [codex models documentation](https://developers.openai.com/codex/models/)
+- [codex sdk documentation](https://developers.openai.com/codex/sdk/)