@promptbook/openai 0.50.0 → 0.52.0-0
This diff compares the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in their public registries.
- package/README.md +6 -2
- package/esm/index.es.js +360 -247
- package/esm/index.es.js.map +1 -1
- package/esm/typings/_packages/execute-javascript.index.d.ts +44 -1
- package/esm/typings/_packages/openai.index.d.ts +2 -1
- package/esm/typings/_packages/types.index.d.ts +3 -2
- package/esm/typings/_packages/utils.index.d.ts +9 -2
- package/esm/typings/conversion/utils/extractParametersFromPromptTemplate.d.ts +13 -0
- package/esm/typings/conversion/utils/extractParametersFromPromptTemplate.test.d.ts +1 -0
- package/esm/typings/conversion/utils/extractVariables.d.ts +4 -3
- package/esm/typings/execution/plugins/llm-execution-tools/openai/computeOpenaiUsage.d.ts +0 -3
- package/esm/typings/execution/plugins/llm-execution-tools/openai/computeUsage.d.ts +13 -0
- package/esm/typings/execution/plugins/llm-execution-tools/openai/computeUsage.test.d.ts +1 -0
- package/esm/typings/execution/plugins/llm-execution-tools/openai/models.d.ts +25 -0
- package/esm/typings/types/ModelRequirements.d.ts +1 -0
- package/esm/typings/types/PromptbookJson/PromptTemplateJson.d.ts +2 -1
- package/esm/typings/utils/extractParameters.d.ts +1 -3
- package/esm/typings/utils/sets/difference.d.ts +4 -0
- package/esm/typings/utils/sets/difference.test.d.ts +1 -0
- package/esm/typings/utils/sets/intersection.d.ts +4 -0
- package/esm/typings/utils/sets/intersection.test.d.ts +1 -0
- package/esm/typings/utils/sets/union.d.ts +4 -0
- package/esm/typings/utils/sets/union.test.d.ts +1 -0
- package/package.json +2 -2
- package/umd/index.umd.js +360 -246
- package/umd/index.umd.js.map +1 -1
- package/umd/typings/_packages/execute-javascript.index.d.ts +44 -1
- package/umd/typings/_packages/openai.index.d.ts +2 -1
- package/umd/typings/_packages/types.index.d.ts +3 -2
- package/umd/typings/_packages/utils.index.d.ts +9 -2
- package/umd/typings/conversion/utils/extractParametersFromPromptTemplate.d.ts +13 -0
- package/umd/typings/conversion/utils/extractParametersFromPromptTemplate.test.d.ts +1 -0
- package/umd/typings/conversion/utils/extractVariables.d.ts +4 -3
- package/umd/typings/execution/plugins/llm-execution-tools/openai/computeOpenaiUsage.d.ts +0 -3
- package/umd/typings/execution/plugins/llm-execution-tools/openai/computeUsage.d.ts +13 -0
- package/umd/typings/execution/plugins/llm-execution-tools/openai/computeUsage.test.d.ts +1 -0
- package/umd/typings/execution/plugins/llm-execution-tools/openai/models.d.ts +25 -0
- package/umd/typings/types/ModelRequirements.d.ts +1 -0
- package/umd/typings/types/PromptbookJson/PromptTemplateJson.d.ts +2 -1
- package/umd/typings/utils/extractParameters.d.ts +1 -3
- package/umd/typings/utils/sets/difference.d.ts +4 -0
- package/umd/typings/utils/sets/difference.test.d.ts +1 -0
- package/umd/typings/utils/sets/intersection.d.ts +4 -0
- package/umd/typings/utils/sets/intersection.test.d.ts +1 -0
- package/umd/typings/utils/sets/union.d.ts +4 -0
- package/umd/typings/utils/sets/union.test.d.ts +1 -0
package/README.md
CHANGED

@@ -332,6 +332,8 @@ Or you can install them separately:
 - _(Not finished)_ **[@promptbook/wizzard](https://www.npmjs.com/package/@promptbook/wizzard)** - Wizard for creating+running promptbooks in single line
 - **[@promptbook/execute-javascript](https://www.npmjs.com/package/@promptbook/execute-javascript)** - Execution tools for javascript inside promptbooks
 - **[@promptbook/openai](https://www.npmjs.com/package/@promptbook/openai)** - Execution tools for OpenAI API, wrapper around OpenAI SDK
+- **[@promptbook/anthropic-claude](https://www.npmjs.com/package/@promptbook/anthropic-claude)** - Execution tools for Anthropic Claude API, wrapper around Anthropic Claude SDK
+- **[@promptbook/azure-openai](https://www.npmjs.com/package/@promptbook/azure-openai)** - Execution tools for Azure OpenAI API
 - **[@promptbook/langtail](https://www.npmjs.com/package/@promptbook/langtail)** - Execution tools for Langtail API, wrapper around Langtail SDK
 - **[@promptbook/mock](https://www.npmjs.com/package/@promptbook/mock)** - Mocked execution tools for testing the library and saving the tokens
 - **[@promptbook/remote-client](https://www.npmjs.com/package/@promptbook/remote-client)** - Remote client for remote execution of promptbooks
@@ -339,6 +341,8 @@ Or you can install them separately:
 - **[@promptbook/types](https://www.npmjs.com/package/@promptbook/types)** - Just typescript types used in the library
 - **[@promptbook/cli](https://www.npmjs.com/package/@promptbook/cli)** - Command line interface utilities for promptbooks
 
+
+
 ## 📚 Dictionary
 
 The following glossary is used to clarify certain basic concepts:
@@ -481,8 +485,8 @@ Internally it calls OpenAI, Azure, GPU, proxy, cache, logging,...
 `LlmExecutionTools` an abstract interface that is implemented by concrete execution tools:
 
 - `OpenAiExecutionTools`
-- _(Not implemented yet)_ `AnthropicClaudeExecutionTools`
-- _(Not implemented yet)_ `AzureOpenAiExecutionTools`
+- _(Not implemented yet !!!!! )_ `AnthropicClaudeExecutionTools`
+- _(Not implemented yet !!!!! )_ `AzureOpenAiExecutionTools`
 - _(Not implemented yet)_ `BardExecutionTools`
 - _(Not implemented yet)_ `LamaExecutionTools`
 - _(Not implemented yet)_ `GpuExecutionTools`
package/esm/index.es.js
CHANGED

@@ -79,8 +79,363 @@ function __generator(thisArg, body) {
         } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }
         if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };
     }
+}
+
+function __read(o, n) {
+    var m = typeof Symbol === "function" && o[Symbol.iterator];
+    if (!m) return o;
+    var i = m.call(o), r, ar = [], e;
+    try {
+        while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);
+    }
+    catch (error) { e = { error: error }; }
+    finally {
+        try {
+            if (r && !r.done && (m = i["return"])) m.call(i);
+        }
+        finally { if (e) throw e.error; }
+    }
+    return ar;
+}
+
+/**
+ * Function computeUsage will create price per one token based on the string value found on openai page
+ *
+ * @private within the library, used only as internal helper for `OPENAI_MODELS`
+ */
+function computeUsage(value) {
+    var _a = __read(value.split(' / '), 2), price = _a[0], tokens = _a[1];
+    return parseFloat(price.replace('$', '')) / parseFloat(tokens.replace('M tokens', '')) / 1000000;
 }
 
+/**
+ * List of available OpenAI models with pricing
+ *
+ * Note: Done at 2024-05-20
+ *
+ * @see https://platform.openai.com/docs/models/
+ * @see https://openai.com/api/pricing/
+ */
+var OPENAI_MODELS = [
+    /*/
+    {
+        modelTitle: 'dall-e-3',
+        modelName: 'dall-e-3',
+    },
+    /**/
+    /*/
+    {
+        modelTitle: 'whisper-1',
+        modelName: 'whisper-1',
+    },
+    /**/
+    /**/
+    {
+        modelVariant: 'COMPLETION',
+        modelTitle: 'davinci-002',
+        modelName: 'davinci-002',
+        pricing: {
+            prompt: computeUsage("$2.00 / 1M tokens"),
+            output: computeUsage("$2.00 / 1M tokens"), // <- not sure
+        },
+    },
+    /**/
+    /*/
+    {
+        modelTitle: 'dall-e-2',
+        modelName: 'dall-e-2',
+    },
+    /**/
+    /**/
+    {
+        modelVariant: 'CHAT',
+        modelTitle: 'gpt-3.5-turbo-16k',
+        modelName: 'gpt-3.5-turbo-16k',
+        pricing: {
+            prompt: computeUsage("$3.00 / 1M tokens"),
+            output: computeUsage("$4.00 / 1M tokens"),
+        },
+    },
+    /**/
+    /*/
+    {
+        modelTitle: 'tts-1-hd-1106',
+        modelName: 'tts-1-hd-1106',
+    },
+    /**/
+    /*/
+    {
+        modelTitle: 'tts-1-hd',
+        modelName: 'tts-1-hd',
+    },
+    /**/
+    /**/
+    {
+        modelVariant: 'CHAT',
+        modelTitle: 'gpt-4',
+        modelName: 'gpt-4',
+        pricing: {
+            prompt: computeUsage("$30.00 / 1M tokens"),
+            output: computeUsage("$60.00 / 1M tokens"),
+        },
+    },
+    /**/
+    /**/
+    {
+        modelVariant: 'CHAT',
+        modelTitle: 'gpt-4-32k',
+        modelName: 'gpt-4-32k',
+        pricing: {
+            prompt: computeUsage("$60.00 / 1M tokens"),
+            output: computeUsage("$120.00 / 1M tokens"),
+        },
+    },
+    /**/
+    /*/
+    {
+        modelVariant: 'CHAT',
+        modelTitle: 'gpt-4-0613',
+        modelName: 'gpt-4-0613',
+        pricing: {
+            prompt: computeUsage(` / 1M tokens`),
+            output: computeUsage(` / 1M tokens`),
+        },
+    },
+    /**/
+    /**/
+    {
+        modelVariant: 'CHAT',
+        modelTitle: 'gpt-4-turbo-2024-04-09',
+        modelName: 'gpt-4-turbo-2024-04-09',
+        pricing: {
+            prompt: computeUsage("$10.00 / 1M tokens"),
+            output: computeUsage("$30.00 / 1M tokens"),
+        },
+    },
+    /**/
+    /**/
+    {
+        modelVariant: 'CHAT',
+        modelTitle: 'gpt-3.5-turbo-1106',
+        modelName: 'gpt-3.5-turbo-1106',
+        pricing: {
+            prompt: computeUsage("$1.00 / 1M tokens"),
+            output: computeUsage("$2.00 / 1M tokens"),
+        },
+    },
+    /**/
+    /**/
+    {
+        modelVariant: 'CHAT',
+        modelTitle: 'gpt-4-turbo',
+        modelName: 'gpt-4-turbo',
+        pricing: {
+            prompt: computeUsage("$10.00 / 1M tokens"),
+            output: computeUsage("$30.00 / 1M tokens"),
+        },
+    },
+    /**/
+    /**/
+    {
+        modelVariant: 'COMPLETION',
+        modelTitle: 'gpt-3.5-turbo-instruct-0914',
+        modelName: 'gpt-3.5-turbo-instruct-0914',
+        pricing: {
+            prompt: computeUsage("$1.50 / 1M tokens"),
+            output: computeUsage("$2.00 / 1M tokens"), // <- For gpt-3.5-turbo-instruct
+        },
+    },
+    /**/
+    /**/
+    {
+        modelVariant: 'COMPLETION',
+        modelTitle: 'gpt-3.5-turbo-instruct',
+        modelName: 'gpt-3.5-turbo-instruct',
+        pricing: {
+            prompt: computeUsage("$1.50 / 1M tokens"),
+            output: computeUsage("$2.00 / 1M tokens"),
+        },
+    },
+    /**/
+    /*/
+    {
+        modelTitle: 'tts-1',
+        modelName: 'tts-1',
+    },
+    /**/
+    /**/
+    {
+        modelVariant: 'CHAT',
+        modelTitle: 'gpt-3.5-turbo',
+        modelName: 'gpt-3.5-turbo',
+        pricing: {
+            prompt: computeUsage("$3.00 / 1M tokens"),
+            output: computeUsage("$6.00 / 1M tokens"), // <- Not sure, refer to gpt-3.5-turbo in Fine-tuning models
+        },
+    },
+    /**/
+    /**/
+    {
+        modelVariant: 'CHAT',
+        modelTitle: 'gpt-3.5-turbo-0301',
+        modelName: 'gpt-3.5-turbo-0301',
+        pricing: {
+            prompt: computeUsage("$1.50 / 1M tokens"),
+            output: computeUsage("$2.00 / 1M tokens"),
+        },
+    },
+    /**/
+    /**/
+    {
+        modelVariant: 'COMPLETION',
+        modelTitle: 'babbage-002',
+        modelName: 'babbage-002',
+        pricing: {
+            prompt: computeUsage("$0.40 / 1M tokens"),
+            output: computeUsage("$0.40 / 1M tokens"), // <- Not sure
+        },
+    },
+    /**/
+    /**/
+    {
+        modelVariant: 'CHAT',
+        modelTitle: 'gpt-4-1106-preview',
+        modelName: 'gpt-4-1106-preview',
+        pricing: {
+            prompt: computeUsage("$10.00 / 1M tokens"),
+            output: computeUsage("$30.00 / 1M tokens"),
+        },
+    },
+    /**/
+    /**/
+    {
+        modelVariant: 'CHAT',
+        modelTitle: 'gpt-4-0125-preview',
+        modelName: 'gpt-4-0125-preview',
+        pricing: {
+            prompt: computeUsage("$10.00 / 1M tokens"),
+            output: computeUsage("$30.00 / 1M tokens"),
+        },
+    },
+    /**/
+    /*/
+    {
+        modelTitle: 'tts-1-1106',
+        modelName: 'tts-1-1106',
+    },
+    /**/
+    /**/
+    {
+        modelVariant: 'CHAT',
+        modelTitle: 'gpt-3.5-turbo-0125',
+        modelName: 'gpt-3.5-turbo-0125',
+        pricing: {
+            prompt: computeUsage("$0.50 / 1M tokens"),
+            output: computeUsage("$1.50 / 1M tokens"),
+        },
+    },
+    /**/
+    /**/
+    {
+        modelVariant: 'CHAT',
+        modelTitle: 'gpt-4-turbo-preview',
+        modelName: 'gpt-4-turbo-preview',
+        pricing: {
+            prompt: computeUsage("$10.00 / 1M tokens"),
+            output: computeUsage("$30.00 / 1M tokens"), // <- Not sure, just for gpt-4-turbo
+        },
+    },
+    /**/
+    /*/
+    {
+        modelTitle: 'text-embedding-3-large',
+        modelName: 'text-embedding-3-large',
+    },
+    /**/
+    /*/
+    {
+        modelTitle: 'text-embedding-3-small',
+        modelName: 'text-embedding-3-small',
+    },
+    /**/
+    /**/
+    {
+        modelVariant: 'CHAT',
+        modelTitle: 'gpt-3.5-turbo-0613',
+        modelName: 'gpt-3.5-turbo-0613',
+        pricing: {
+            prompt: computeUsage("$1.50 / 1M tokens"),
+            output: computeUsage("$2.00 / 1M tokens"),
+        },
+    },
+    /**/
+    /*/
+    {
+        modelTitle: 'text-embedding-ada-002',
+        modelName: 'text-embedding-ada-002',
+    },
+    /**/
+    /*/
+    {
+        modelVariant: 'CHAT',
+        modelTitle: 'gpt-4-1106-vision-preview',
+        modelName: 'gpt-4-1106-vision-preview',
+    },
+    /**/
+    /*/
+    {
+        modelTitle: 'gpt-4-vision-preview',
+        modelName: 'gpt-4-vision-preview',
+        pricing: {
+            prompt: computeUsage(`$10.00 / 1M tokens`),
+            output: computeUsage(`$30.00 / 1M tokens`),
+        },
+    },
+    /**/
+    /**/
+    {
+        modelVariant: 'CHAT',
+        modelTitle: 'gpt-4o-2024-05-13',
+        modelName: 'gpt-4o-2024-05-13',
+        pricing: {
+            prompt: computeUsage("$5.00 / 1M tokens"),
+            output: computeUsage("$15.00 / 1M tokens"),
+        },
+    },
+    /**/
+    /**/
+    {
+        modelVariant: 'CHAT',
+        modelTitle: 'gpt-4o',
+        modelName: 'gpt-4o',
+        pricing: {
+            prompt: computeUsage("$5.00 / 1M tokens"),
+            output: computeUsage("$15.00 / 1M tokens"),
+        },
+    },
+    /**/
+    /**/
+    {
+        modelVariant: 'CHAT',
+        modelTitle: 'gpt-3.5-turbo-16k-0613',
+        modelName: 'gpt-3.5-turbo-16k-0613',
+        pricing: {
+            prompt: computeUsage("$3.00 / 1M tokens"),
+            output: computeUsage("$4.00 / 1M tokens"),
+        },
+    },
+    /**/
+];
+/**
+ * TODO: [🧠] Some mechanism to propagate unsureness
+ * TODO: [🕚] Make this list dynamic - dynamically can be listed modelNames but not modelVariant, legacy status, context length and pricing
+ * @see https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4
+ * @see https://openai.com/api/pricing/
+ * @see /other/playground/playground.ts
+ * TODO: [🍓] Make better
+ * TODO: Change model titles to human eg: "gpt-4-turbo-2024-04-09" -> "GPT-4 Turbo (2024-04-09)"
+ */
+
 /**
  * This error indicates errors during the execution of the promptbook
  */
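For orientation, here is a minimal standalone sketch (not taken from the package) of what the new `computeUsage` helper added above does: it turns an OpenAI pricing string such as `$10.00 / 1M tokens` into a USD price per single token. Plain array indexing stands in for the bundled `__read` helper.

```js
// Minimal sketch of the computeUsage logic shown in the hunk above
// (the published bundle uses the tslib __read helper instead of direct indexing).
function computeUsage(value) {
    var parts = value.split(' / ');   // e.g. ['$10.00', '1M tokens']
    var price = parts[0];
    var tokens = parts[1];
    return parseFloat(price.replace('$', '')) / parseFloat(tokens.replace('M tokens', '')) / 1000000;
}

console.log(computeUsage('$10.00 / 1M tokens')); // 0.00001 (USD per token)
console.log(computeUsage('$0.50 / 1M tokens'));  // 5e-7
```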
@@ -122,33 +477,13 @@ function computeOpenaiUsage(rawResponse) {
     }
     var inputTokens = rawResponse.usage.prompt_tokens;
     var outputTokens = rawResponse.usage.completion_tokens;
-
-    var pricePerThousandTokens = {
-        'gpt-3.5-turbo-0613': {
-            prompt: 0.0015,
-            completion: 0.002,
-        },
-        'gpt-4-0613': {
-            // TODO: Not sure if this is correct
-            prompt: 0.01,
-            completion: 0.03,
-        },
-        'gpt-3.5-turbo-instruct': {
-            prompt: 0.0015,
-            completion: 0.002,
-        },
-        'gpt-4-0125-preview': {
-            prompt: 0.01,
-            completion: 0.03,
-        },
-    }[rawResponse.model];
-    // TODO: !!! Retrieve dynamically
+    var modelInfo = OPENAI_MODELS.find(function (model) { return model.modelName === rawResponse.model; });
     var price;
-    if (
+    if (modelInfo === undefined || modelInfo.pricing === undefined) {
         price = 'UNKNOWN';
     }
     else {
-        price =
+        price = inputTokens * modelInfo.pricing.prompt + outputTokens * modelInfo.pricing.output;
     }
     return {
         price: price,
@@ -156,9 +491,6 @@ function computeOpenaiUsage(rawResponse) {
         outputTokens: outputTokens,
     };
 }
-/**
- * TODO: [🍓] Make better
- */
 
 /**
  * Execution Tools for calling OpenAI API.
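The net effect of the two hunks above is that `computeOpenaiUsage` no longer relies on a hardcoded per-thousand-token table: it looks the model up in `OPENAI_MODELS` by `modelName` and multiplies the reported token counts by the per-token prices, falling back to `'UNKNOWN'` when the model or its pricing is missing. Below is an illustrative sketch of that calculation; the function name `computePrice` and the trimmed model entry are assumptions for the example, only the entry shape and the formula mirror the diff.

```js
// Illustrative sketch of the new pricing lookup; only the shape of the
// OPENAI_MODELS entries and the price formula mirror the diff above.
var OPENAI_MODELS = [
    {
        modelVariant: 'CHAT',
        modelTitle: 'gpt-4o',
        modelName: 'gpt-4o',
        pricing: { prompt: 5.0 / 1000000, output: 15.0 / 1000000 }, // "$5.00 / 1M tokens", "$15.00 / 1M tokens"
    },
];

function computePrice(modelName, inputTokens, outputTokens) {
    var modelInfo = OPENAI_MODELS.find(function (model) { return model.modelName === modelName; });
    if (modelInfo === undefined || modelInfo.pricing === undefined) {
        return 'UNKNOWN'; // same fallback as computeOpenaiUsage
    }
    return inputTokens * modelInfo.pricing.prompt + outputTokens * modelInfo.pricing.output;
}

console.log(computePrice('gpt-4o', 1000, 500));   // 0.0125 (USD)
console.log(computePrice('no-such-model', 1, 1)); // 'UNKNOWN'
```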
@@ -339,226 +671,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
         console.log({ models });
         console.log(models.data);
         */
-        return
-        // Note: Done at 2024-05-15
-        // TODO: [🕚] Make this list dynamic - dynamically can be listed modelNames but not modelVariant, legacy status, context length and pricing
-        // @see https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4
-        // @see https://openai.com/api/pricing/
-        // @see /other/playground/playground.ts
-        /*/
-        {
-            modelTitle: 'dall-e-3',
-            modelName: 'dall-e-3',
-        },
-        /**/
-        /*/
-        {
-            modelTitle: 'whisper-1',
-            modelName: 'whisper-1',
-        },
-        /**/
-        /**/
-        {
-            modelVariant: 'COMPLETION',
-            modelTitle: 'davinci-002',
-            modelName: 'davinci-002',
-        },
-        /**/
-        /*/
-        {
-            modelTitle: 'dall-e-2',
-            modelName: 'dall-e-2',
-        },
-        /**/
-        /**/
-        {
-            modelVariant: 'CHAT',
-            modelTitle: 'gpt-3.5-turbo-16k',
-            modelName: 'gpt-3.5-turbo-16k',
-        },
-        /**/
-        /*/
-        {
-            modelTitle: 'tts-1-hd-1106',
-            modelName: 'tts-1-hd-1106',
-        },
-        /**/
-        /*/
-        {
-            modelTitle: 'tts-1-hd',
-            modelName: 'tts-1-hd',
-        },
-        /**/
-        /**/
-        {
-            modelVariant: 'CHAT',
-            modelTitle: 'gpt-4',
-            modelName: 'gpt-4',
-        },
-        /**/
-        /**/
-        {
-            modelVariant: 'CHAT',
-            modelTitle: 'gpt-4-0613',
-            modelName: 'gpt-4-0613',
-        },
-        /**/
-        /**/
-        {
-            modelVariant: 'CHAT',
-            modelTitle: 'gpt-4-turbo-2024-04-09',
-            modelName: 'gpt-4-turbo-2024-04-09',
-        },
-        /**/
-        /**/
-        {
-            modelVariant: 'CHAT',
-            modelTitle: 'gpt-3.5-turbo-1106',
-            modelName: 'gpt-3.5-turbo-1106',
-        },
-        /**/
-        /**/
-        {
-            modelVariant: 'CHAT',
-            modelTitle: 'gpt-4-turbo',
-            modelName: 'gpt-4-turbo',
-        },
-        /**/
-        /**/
-        {
-            modelVariant: 'COMPLETION',
-            modelTitle: 'gpt-3.5-turbo-instruct-0914',
-            modelName: 'gpt-3.5-turbo-instruct-0914',
-        },
-        /**/
-        /**/
-        {
-            modelVariant: 'COMPLETION',
-            modelTitle: 'gpt-3.5-turbo-instruct',
-            modelName: 'gpt-3.5-turbo-instruct',
-        },
-        /**/
-        /*/
-        {
-            modelTitle: 'tts-1',
-            modelName: 'tts-1',
-        },
-        /**/
-        /**/
-        {
-            modelVariant: 'CHAT',
-            modelTitle: 'gpt-3.5-turbo',
-            modelName: 'gpt-3.5-turbo',
-        },
-        /**/
-        /**/
-        {
-            modelVariant: 'CHAT',
-            modelTitle: 'gpt-3.5-turbo-0301',
-            modelName: 'gpt-3.5-turbo-0301',
-        },
-        /**/
-        /**/
-        {
-            modelVariant: 'COMPLETION',
-            modelTitle: 'babbage-002',
-            modelName: 'babbage-002',
-        },
-        /**/
-        /**/
-        {
-            modelVariant: 'CHAT',
-            modelTitle: 'gpt-4-1106-preview',
-            modelName: 'gpt-4-1106-preview',
-        },
-        /**/
-        /**/
-        {
-            modelVariant: 'CHAT',
-            modelTitle: 'gpt-4-0125-preview',
-            modelName: 'gpt-4-0125-preview',
-        },
-        /**/
-        /*/
-        {
-            modelTitle: 'tts-1-1106',
-            modelName: 'tts-1-1106',
-        },
-        /**/
-        /**/
-        {
-            modelVariant: 'CHAT',
-            modelTitle: 'gpt-3.5-turbo-0125',
-            modelName: 'gpt-3.5-turbo-0125',
-        },
-        /**/
-        /**/
-        {
-            modelVariant: 'CHAT',
-            modelTitle: 'gpt-4-turbo-preview',
-            modelName: 'gpt-4-turbo-preview',
-        },
-        /**/
-        /*/
-        {
-            modelTitle: 'text-embedding-3-large',
-            modelName: 'text-embedding-3-large',
-        },
-        /**/
-        /*/
-        {
-            modelTitle: 'text-embedding-3-small',
-            modelName: 'text-embedding-3-small',
-        },
-        /**/
-        /**/
-        {
-            modelVariant: 'CHAT',
-            modelTitle: 'gpt-3.5-turbo-0613',
-            modelName: 'gpt-3.5-turbo-0613',
-        },
-        /**/
-        /*/
-        {
-            modelTitle: 'text-embedding-ada-002',
-            modelName: 'text-embedding-ada-002',
-        },
-        /**/
-        /*/
-        {
-            modelVariant: 'CHAT',
-            modelTitle: 'gpt-4-1106-vision-preview',
-            modelName: 'gpt-4-1106-vision-preview',
-        },
-        /**/
-        /*/
-        {
-            modelTitle: 'gpt-4-vision-preview',
-            modelName: 'gpt-4-vision-preview',
-        },
-        /**/
-        /**/
-        {
-            modelVariant: 'CHAT',
-            modelTitle: 'gpt-4o-2024-05-13',
-            modelName: 'gpt-4o-2024-05-13',
-        },
-        /**/
-        /**/
-        {
-            modelVariant: 'CHAT',
-            modelTitle: 'gpt-4o',
-            modelName: 'gpt-4o',
-        },
-        /**/
-        /**/
-        {
-            modelVariant: 'CHAT',
-            modelTitle: 'gpt-3.5-turbo-16k-0613',
-            modelName: 'gpt-3.5-turbo-16k-0613',
-        },
-        /**/
-        ];
+        return OPENAI_MODELS;
     };
     return OpenAiExecutionTools;
 }());
@@ -568,5 +681,5 @@ var OpenAiExecutionTools = /** @class */ (function () {
  * TODO: Maybe make custom OpenaiError
  */
 
-export { OpenAiExecutionTools };
+export { OPENAI_MODELS, OpenAiExecutionTools };
 //# sourceMappingURL=index.es.js.map