@promptbook/openai 0.66.0-0 → 0.66.0-4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between these versions as they appear in their respective public registries.
Files changed (41)
  1. package/esm/index.es.js +604 -536
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/anthropic-claude.index.d.ts +4 -0
  4. package/esm/typings/src/_packages/core.index.d.ts +12 -2
  5. package/esm/typings/src/_packages/openai.index.d.ts +4 -0
  6. package/esm/typings/src/_packages/types.index.d.ts +6 -2
  7. package/esm/typings/src/config.d.ts +15 -0
  8. package/esm/typings/src/execution/AvailableModel.d.ts +20 -0
  9. package/esm/typings/src/execution/LlmExecutionTools.d.ts +12 -24
  10. package/esm/typings/src/execution/LlmExecutionToolsConstructor.d.ts +10 -0
  11. package/esm/typings/src/llm-providers/_common/$llmToolsConfigurationBoilerplatesRegister.d.ts +12 -0
  12. package/esm/typings/src/llm-providers/_common/$llmToolsRegister.d.ts +10 -0
  13. package/esm/typings/src/llm-providers/_common/LlmToolsConfiguration.d.ts +2 -10
  14. package/esm/typings/src/llm-providers/_common/config.d.ts +5 -6
  15. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +2 -2
  16. package/esm/typings/src/llm-providers/anthropic-claude/anthropic-claude-models.d.ts +1 -1
  17. package/esm/typings/src/llm-providers/anthropic-claude/computeAnthropicClaudeUsage.d.ts +18 -0
  18. package/esm/typings/src/llm-providers/anthropic-claude/computeAnthropicClaudeUsage.test.d.ts +4 -0
  19. package/esm/typings/src/llm-providers/anthropic-claude/createAnthropicClaudeExecutionTools.d.ts +7 -2
  20. package/esm/typings/src/llm-providers/anthropic-claude/register-configuration.d.ts +8 -0
  21. package/esm/typings/src/llm-providers/anthropic-claude/register-constructor.d.ts +11 -0
  22. package/esm/typings/src/llm-providers/anthropic-claude/register1.d.ts +4 -0
  23. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +2 -2
  24. package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +1 -1
  25. package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +1 -1
  26. package/esm/typings/src/llm-providers/multiple/MultipleLlmExecutionTools.d.ts +1 -1
  27. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +3 -3
  28. package/esm/typings/src/llm-providers/openai/{computeOpenaiUsage.d.ts → computeOpenAiUsage.d.ts} +6 -2
  29. package/esm/typings/src/llm-providers/openai/computeOpenAiUsage.test.d.ts +4 -0
  30. package/esm/typings/src/llm-providers/openai/createOpenAiExecutionTools.d.ts +15 -0
  31. package/esm/typings/src/llm-providers/openai/openai-models.d.ts +1 -1
  32. package/esm/typings/src/llm-providers/openai/register-configuration.d.ts +8 -0
  33. package/esm/typings/src/llm-providers/openai/register-constructor.d.ts +11 -0
  34. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +1 -1
  35. package/esm/typings/src/llm-providers/remote/interfaces/RemoteLlmExecutionToolsOptions.d.ts +5 -2
  36. package/esm/typings/src/utils/Register.d.ts +22 -0
  37. package/esm/typings/src/utils/environment/getGlobalScope.d.ts +3 -0
  38. package/package.json +2 -2
  39. package/umd/index.umd.js +605 -535
  40. package/umd/index.umd.js.map +1 -1
  41. package/esm/typings/src/llm-providers/openai/computeOpenaiUsage.test.d.ts +0 -1
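
Beyond the mechanical rename of computeOpenaiUsage to computeOpenAiUsage, the headline addition in this range is the createOpenAiExecutionTools factory together with the new register-configuration and register-constructor modules. A minimal usage sketch, inferred only from the UMD bundle diffed below (the apiKey option is an assumption: options are spread into the underlying OpenAI client, minus isVerbose and user, which the constructor deletes):

    // Sketch only — based on what this diff exposes, not on package documentation.
    import { createOpenAiExecutionTools, OPENAI_MODELS } from '@promptbook/openai';

    const openAiTools = createOpenAiExecutionTools({
        apiKey: process.env.OPENAI_API_KEY, // <- assumption: forwarded to the OpenAI client
    });

    console.log(openAiTools.title); // <- getter defined on OpenAiExecutionTools.prototype
    console.log(OPENAI_MODELS.map(({ modelName }) => modelName)); // <- pricing table exported by this package
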
package/umd/index.umd.js CHANGED
@@ -14,7 +14,7 @@
  /**
  * The version of the Promptbook library
  */
- var PROMPTBOOK_VERSION = '0.65.0';
+ var PROMPTBOOK_VERSION = '0.66.0-3';
  // TODO: !!!! List here all the versions and annotate + put into script

  /*! *****************************************************************************
@@ -136,368 +136,6 @@
136
136
  return to.concat(ar || Array.prototype.slice.call(from));
137
137
  }
138
138
 
139
- /**
140
- * Function computeUsage will create price per one token based on the string value found on openai page
141
- *
142
- * @private within the repository, used only as internal helper for `OPENAI_MODELS`
143
- */
144
- function computeUsage(value) {
145
- var _a = __read(value.split(' / '), 2), price = _a[0], tokens = _a[1];
146
- return parseFloat(price.replace('$', '')) / parseFloat(tokens.replace('M tokens', '')) / 1000000;
147
- }
148
-
149
- /**
150
- * List of available OpenAI models with pricing
151
- *
152
- * Note: Done at 2024-05-20
153
- *
154
- * @see https://platform.openai.com/docs/models/
155
- * @see https://openai.com/api/pricing/
156
- * @public exported from `@promptbook/openai`
157
- */
158
- var OPENAI_MODELS = [
159
- /*/
160
- {
161
- modelTitle: 'dall-e-3',
162
- modelName: 'dall-e-3',
163
- },
164
- /**/
165
- /*/
166
- {
167
- modelTitle: 'whisper-1',
168
- modelName: 'whisper-1',
169
- },
170
- /**/
171
- /**/
172
- {
173
- modelVariant: 'COMPLETION',
174
- modelTitle: 'davinci-002',
175
- modelName: 'davinci-002',
176
- pricing: {
177
- prompt: computeUsage("$2.00 / 1M tokens"),
178
- output: computeUsage("$2.00 / 1M tokens"), // <- not sure
179
- },
180
- },
181
- /**/
182
- /*/
183
- {
184
- modelTitle: 'dall-e-2',
185
- modelName: 'dall-e-2',
186
- },
187
- /**/
188
- /**/
189
- {
190
- modelVariant: 'CHAT',
191
- modelTitle: 'gpt-3.5-turbo-16k',
192
- modelName: 'gpt-3.5-turbo-16k',
193
- pricing: {
194
- prompt: computeUsage("$3.00 / 1M tokens"),
195
- output: computeUsage("$4.00 / 1M tokens"),
196
- },
197
- },
198
- /**/
199
- /*/
200
- {
201
- modelTitle: 'tts-1-hd-1106',
202
- modelName: 'tts-1-hd-1106',
203
- },
204
- /**/
205
- /*/
206
- {
207
- modelTitle: 'tts-1-hd',
208
- modelName: 'tts-1-hd',
209
- },
210
- /**/
211
- /**/
212
- {
213
- modelVariant: 'CHAT',
214
- modelTitle: 'gpt-4',
215
- modelName: 'gpt-4',
216
- pricing: {
217
- prompt: computeUsage("$30.00 / 1M tokens"),
218
- output: computeUsage("$60.00 / 1M tokens"),
219
- },
220
- },
221
- /**/
222
- /**/
223
- {
224
- modelVariant: 'CHAT',
225
- modelTitle: 'gpt-4-32k',
226
- modelName: 'gpt-4-32k',
227
- pricing: {
228
- prompt: computeUsage("$60.00 / 1M tokens"),
229
- output: computeUsage("$120.00 / 1M tokens"),
230
- },
231
- },
232
- /**/
233
- /*/
234
- {
235
- modelVariant: 'CHAT',
236
- modelTitle: 'gpt-4-0613',
237
- modelName: 'gpt-4-0613',
238
- pricing: {
239
- prompt: computeUsage(` / 1M tokens`),
240
- output: computeUsage(` / 1M tokens`),
241
- },
242
- },
243
- /**/
244
- /**/
245
- {
246
- modelVariant: 'CHAT',
247
- modelTitle: 'gpt-4-turbo-2024-04-09',
248
- modelName: 'gpt-4-turbo-2024-04-09',
249
- pricing: {
250
- prompt: computeUsage("$10.00 / 1M tokens"),
251
- output: computeUsage("$30.00 / 1M tokens"),
252
- },
253
- },
254
- /**/
255
- /**/
256
- {
257
- modelVariant: 'CHAT',
258
- modelTitle: 'gpt-3.5-turbo-1106',
259
- modelName: 'gpt-3.5-turbo-1106',
260
- pricing: {
261
- prompt: computeUsage("$1.00 / 1M tokens"),
262
- output: computeUsage("$2.00 / 1M tokens"),
263
- },
264
- },
265
- /**/
266
- /**/
267
- {
268
- modelVariant: 'CHAT',
269
- modelTitle: 'gpt-4-turbo',
270
- modelName: 'gpt-4-turbo',
271
- pricing: {
272
- prompt: computeUsage("$10.00 / 1M tokens"),
273
- output: computeUsage("$30.00 / 1M tokens"),
274
- },
275
- },
276
- /**/
277
- /**/
278
- {
279
- modelVariant: 'COMPLETION',
280
- modelTitle: 'gpt-3.5-turbo-instruct-0914',
281
- modelName: 'gpt-3.5-turbo-instruct-0914',
282
- pricing: {
283
- prompt: computeUsage("$1.50 / 1M tokens"),
284
- output: computeUsage("$2.00 / 1M tokens"), // <- For gpt-3.5-turbo-instruct
285
- },
286
- },
287
- /**/
288
- /**/
289
- {
290
- modelVariant: 'COMPLETION',
291
- modelTitle: 'gpt-3.5-turbo-instruct',
292
- modelName: 'gpt-3.5-turbo-instruct',
293
- pricing: {
294
- prompt: computeUsage("$1.50 / 1M tokens"),
295
- output: computeUsage("$2.00 / 1M tokens"),
296
- },
297
- },
298
- /**/
299
- /*/
300
- {
301
- modelTitle: 'tts-1',
302
- modelName: 'tts-1',
303
- },
304
- /**/
305
- /**/
306
- {
307
- modelVariant: 'CHAT',
308
- modelTitle: 'gpt-3.5-turbo',
309
- modelName: 'gpt-3.5-turbo',
310
- pricing: {
311
- prompt: computeUsage("$3.00 / 1M tokens"),
312
- output: computeUsage("$6.00 / 1M tokens"), // <- Not sure, refer to gpt-3.5-turbo in Fine-tuning models
313
- },
314
- },
315
- /**/
316
- /**/
317
- {
318
- modelVariant: 'CHAT',
319
- modelTitle: 'gpt-3.5-turbo-0301',
320
- modelName: 'gpt-3.5-turbo-0301',
321
- pricing: {
322
- prompt: computeUsage("$1.50 / 1M tokens"),
323
- output: computeUsage("$2.00 / 1M tokens"),
324
- },
325
- },
326
- /**/
327
- /**/
328
- {
329
- modelVariant: 'COMPLETION',
330
- modelTitle: 'babbage-002',
331
- modelName: 'babbage-002',
332
- pricing: {
333
- prompt: computeUsage("$0.40 / 1M tokens"),
334
- output: computeUsage("$0.40 / 1M tokens"), // <- Not sure
335
- },
336
- },
337
- /**/
338
- /**/
339
- {
340
- modelVariant: 'CHAT',
341
- modelTitle: 'gpt-4-1106-preview',
342
- modelName: 'gpt-4-1106-preview',
343
- pricing: {
344
- prompt: computeUsage("$10.00 / 1M tokens"),
345
- output: computeUsage("$30.00 / 1M tokens"),
346
- },
347
- },
348
- /**/
349
- /**/
350
- {
351
- modelVariant: 'CHAT',
352
- modelTitle: 'gpt-4-0125-preview',
353
- modelName: 'gpt-4-0125-preview',
354
- pricing: {
355
- prompt: computeUsage("$10.00 / 1M tokens"),
356
- output: computeUsage("$30.00 / 1M tokens"),
357
- },
358
- },
359
- /**/
360
- /*/
361
- {
362
- modelTitle: 'tts-1-1106',
363
- modelName: 'tts-1-1106',
364
- },
365
- /**/
366
- /**/
367
- {
368
- modelVariant: 'CHAT',
369
- modelTitle: 'gpt-3.5-turbo-0125',
370
- modelName: 'gpt-3.5-turbo-0125',
371
- pricing: {
372
- prompt: computeUsage("$0.50 / 1M tokens"),
373
- output: computeUsage("$1.50 / 1M tokens"),
374
- },
375
- },
376
- /**/
377
- /**/
378
- {
379
- modelVariant: 'CHAT',
380
- modelTitle: 'gpt-4-turbo-preview',
381
- modelName: 'gpt-4-turbo-preview',
382
- pricing: {
383
- prompt: computeUsage("$10.00 / 1M tokens"),
384
- output: computeUsage("$30.00 / 1M tokens"), // <- Not sure, just for gpt-4-turbo
385
- },
386
- },
387
- /**/
388
- /**/
389
- {
390
- modelVariant: 'EMBEDDING',
391
- modelTitle: 'text-embedding-3-large',
392
- modelName: 'text-embedding-3-large',
393
- pricing: {
394
- prompt: computeUsage("$0.13 / 1M tokens"),
395
- // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
396
- output: 0, // <- Note: [🆖] In Embedding models you dont pay for output
397
- },
398
- },
399
- /**/
400
- /**/
401
- {
402
- modelVariant: 'EMBEDDING',
403
- modelTitle: 'text-embedding-3-small',
404
- modelName: 'text-embedding-3-small',
405
- pricing: {
406
- prompt: computeUsage("$0.02 / 1M tokens"),
407
- // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
408
- output: 0, // <- Note: [🆖] In Embedding models you dont pay for output
409
- },
410
- },
411
- /**/
412
- /**/
413
- {
414
- modelVariant: 'CHAT',
415
- modelTitle: 'gpt-3.5-turbo-0613',
416
- modelName: 'gpt-3.5-turbo-0613',
417
- pricing: {
418
- prompt: computeUsage("$1.50 / 1M tokens"),
419
- output: computeUsage("$2.00 / 1M tokens"),
420
- },
421
- },
422
- /**/
423
- /**/
424
- {
425
- modelVariant: 'EMBEDDING',
426
- modelTitle: 'text-embedding-ada-002',
427
- modelName: 'text-embedding-ada-002',
428
- pricing: {
429
- prompt: computeUsage("$0.1 / 1M tokens"),
430
- // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
431
- output: 0, // <- Note: [🆖] In Embedding models you dont pay for output
432
- },
433
- },
434
- /**/
435
- /*/
436
- {
437
- modelVariant: 'CHAT',
438
- modelTitle: 'gpt-4-1106-vision-preview',
439
- modelName: 'gpt-4-1106-vision-preview',
440
- },
441
- /**/
442
- /*/
443
- {
444
- modelVariant: 'CHAT',
445
- modelTitle: 'gpt-4-vision-preview',
446
- modelName: 'gpt-4-vision-preview',
447
- pricing: {
448
- prompt: computeUsage(`$10.00 / 1M tokens`),
449
- output: computeUsage(`$30.00 / 1M tokens`),
450
- },
451
- },
452
- /**/
453
- /**/
454
- {
455
- modelVariant: 'CHAT',
456
- modelTitle: 'gpt-4o-2024-05-13',
457
- modelName: 'gpt-4o-2024-05-13',
458
- pricing: {
459
- prompt: computeUsage("$5.00 / 1M tokens"),
460
- output: computeUsage("$15.00 / 1M tokens"),
461
- },
462
- },
463
- /**/
464
- /**/
465
- {
466
- modelVariant: 'CHAT',
467
- modelTitle: 'gpt-4o',
468
- modelName: 'gpt-4o',
469
- pricing: {
470
- prompt: computeUsage("$5.00 / 1M tokens"),
471
- output: computeUsage("$15.00 / 1M tokens"),
472
- },
473
- },
474
- /**/
475
- /**/
476
- {
477
- modelVariant: 'CHAT',
478
- modelTitle: 'gpt-3.5-turbo-16k-0613',
479
- modelName: 'gpt-3.5-turbo-16k-0613',
480
- pricing: {
481
- prompt: computeUsage("$3.00 / 1M tokens"),
482
- output: computeUsage("$4.00 / 1M tokens"),
483
- },
484
- },
485
- /**/
486
- ];
487
- /**
488
- * Note: [🤖] Add models of new variant
489
- * TODO: [🧠] Some mechanism to propagate unsureness
490
- * TODO: [🎰] Some mechanism to auto-update available models
491
- * TODO: [🎰][👮‍♀️] Make this list dynamic - dynamically can be listed modelNames but not modelVariant, legacy status, context length and pricing
492
- * TODO: [🧠][👮‍♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,...
493
- * @see https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4
494
- * @see https://openai.com/api/pricing/
495
- * @see /other/playground/playground.ts
496
- * TODO: [🍓] Make better
497
- * TODO: Change model titles to human eg: "gpt-4-turbo-2024-04-09" -> "GPT-4 Turbo (2024-04-09)"
498
- * TODO: [🚸] Not all models are compatible with JSON mode, add this information here and use it
499
- */
500
-
501
139
  /**
502
140
  * This error indicates errors during the execution of the pipeline
503
141
  *
@@ -613,6 +251,9 @@
  * @private within the repository
  */
  var RESERVED_PARAMETER_RESTRICTED = 'RESTRICTED-' + REPLACING_NONCE;
+ /**
+ * TODO: [🧠][🧜‍♂️] Maybe join remoteUrl and path into single value
+ */

  /**
  * This error type indicates that some limit was reached
@@ -862,222 +503,584 @@
862
503
  },
863
504
  { base: 'TZ', letters: '\uA728' },
864
505
  {
865
- base: 'U',
866
- letters: '\u0055\u24CA\uFF35\u00D9\u00DA\u00DB\u0168\u1E78\u016A\u1E7A\u016C\u00DC\u01DB\u01D7\u01D5\u01D9\u1EE6\u016E\u0170\u01D3\u0214\u0216\u01AF\u1EEA\u1EE8\u1EEE\u1EEC\u1EF0\u1EE4\u1E72\u0172\u1E76\u1E74\u0244',
506
+ base: 'U',
507
+ letters: '\u0055\u24CA\uFF35\u00D9\u00DA\u00DB\u0168\u1E78\u016A\u1E7A\u016C\u00DC\u01DB\u01D7\u01D5\u01D9\u1EE6\u016E\u0170\u01D3\u0214\u0216\u01AF\u1EEA\u1EE8\u1EEE\u1EEC\u1EF0\u1EE4\u1E72\u0172\u1E76\u1E74\u0244',
508
+ },
509
+ { base: 'V', letters: '\u0056\u24CB\uFF36\u1E7C\u1E7E\u01B2\uA75E\u0245' },
510
+ { base: 'VY', letters: '\uA760' },
511
+ {
512
+ base: 'W',
513
+ letters: '\u0057\u24CC\uFF37\u1E80\u1E82\u0174\u1E86\u1E84\u1E88\u2C72',
514
+ },
515
+ { base: 'X', letters: '\u0058\u24CD\uFF38\u1E8A\u1E8C' },
516
+ {
517
+ base: 'Y',
518
+ letters: '\u0059\u24CE\uFF39\u1EF2\u00DD\u0176\u1EF8\u0232\u1E8E\u0178\u1EF6\u1EF4\u01B3\u024E\u1EFE',
519
+ },
520
+ {
521
+ base: 'Z',
522
+ letters: '\u005A\u24CF\uFF3A\u0179\u1E90\u017B\u017D\u1E92\u1E94\u01B5\u0224\u2C7F\u2C6B\uA762',
523
+ },
524
+ {
525
+ base: 'a',
526
+ letters: '\u0061\u24D0\uFF41\u1E9A\u00E0\u00E1\u00E2\u1EA7\u1EA5\u1EAB\u1EA9\u00E3\u0101\u0103\u1EB1\u1EAF\u1EB5\u1EB3\u0227\u01E1\u00E4\u01DF\u1EA3\u00E5\u01FB\u01CE\u0201\u0203\u1EA1\u1EAD\u1EB7\u1E01\u0105\u2C65\u0250',
527
+ },
528
+ { base: 'aa', letters: '\uA733' },
529
+ { base: 'ae', letters: '\u00E6\u01FD\u01E3' },
530
+ { base: 'ao', letters: '\uA735' },
531
+ { base: 'au', letters: '\uA737' },
532
+ { base: 'av', letters: '\uA739\uA73B' },
533
+ { base: 'ay', letters: '\uA73D' },
534
+ {
535
+ base: 'b',
536
+ letters: '\u0062\u24D1\uFF42\u1E03\u1E05\u1E07\u0180\u0183\u0253',
537
+ },
538
+ {
539
+ base: 'c',
540
+ letters: '\u0063\u24D2\uFF43\u0107\u0109\u010B\u010D\u00E7\u1E09\u0188\u023C\uA73F\u2184',
541
+ },
542
+ {
543
+ base: 'd',
544
+ letters: '\u0064\u24D3\uFF44\u1E0B\u010F\u1E0D\u1E11\u1E13\u1E0F\u0111\u018C\u0256\u0257\uA77A',
545
+ },
546
+ { base: 'dz', letters: '\u01F3\u01C6' },
547
+ {
548
+ base: 'e',
549
+ letters: '\u0065\u24D4\uFF45\u00E8\u00E9\u00EA\u1EC1\u1EBF\u1EC5\u1EC3\u1EBD\u0113\u1E15\u1E17\u0115\u0117\u00EB\u1EBB\u011B\u0205\u0207\u1EB9\u1EC7\u0229\u1E1D\u0119\u1E19\u1E1B\u0247\u025B\u01DD',
550
+ },
551
+ { base: 'f', letters: '\u0066\u24D5\uFF46\u1E1F\u0192\uA77C' },
552
+ {
553
+ base: 'g',
554
+ letters: '\u0067\u24D6\uFF47\u01F5\u011D\u1E21\u011F\u0121\u01E7\u0123\u01E5\u0260\uA7A1\u1D79\uA77F',
555
+ },
556
+ {
557
+ base: 'h',
558
+ letters: '\u0068\u24D7\uFF48\u0125\u1E23\u1E27\u021F\u1E25\u1E29\u1E2B\u1E96\u0127\u2C68\u2C76\u0265',
559
+ },
560
+ { base: 'hv', letters: '\u0195' },
561
+ {
562
+ base: 'i',
563
+ letters: '\u0069\u24D8\uFF49\u00EC\u00ED\u00EE\u0129\u012B\u012D\u00EF\u1E2F\u1EC9\u01D0\u0209\u020B\u1ECB\u012F\u1E2D\u0268\u0131',
564
+ },
565
+ { base: 'j', letters: '\u006A\u24D9\uFF4A\u0135\u01F0\u0249' },
566
+ {
567
+ base: 'k',
568
+ letters: '\u006B\u24DA\uFF4B\u1E31\u01E9\u1E33\u0137\u1E35\u0199\u2C6A\uA741\uA743\uA745\uA7A3',
569
+ },
570
+ {
571
+ base: 'l',
572
+ letters: '\u006C\u24DB\uFF4C\u0140\u013A\u013E\u1E37\u1E39\u013C\u1E3D\u1E3B\u017F\u0142\u019A\u026B\u2C61\uA749\uA781\uA747',
573
+ },
574
+ { base: 'lj', letters: '\u01C9' },
575
+ { base: 'm', letters: '\u006D\u24DC\uFF4D\u1E3F\u1E41\u1E43\u0271\u026F' },
576
+ {
577
+ base: 'n',
578
+ letters: '\u006E\u24DD\uFF4E\u01F9\u0144\u00F1\u1E45\u0148\u1E47\u0146\u1E4B\u1E49\u019E\u0272\u0149\uA791\uA7A5',
579
+ },
580
+ { base: 'nj', letters: '\u01CC' },
581
+ {
582
+ base: 'o',
583
+ letters: '\u006F\u24DE\uFF4F\u00F2\u00F3\u00F4\u1ED3\u1ED1\u1ED7\u1ED5\u00F5\u1E4D\u022D\u1E4F\u014D\u1E51\u1E53\u014F\u022F\u0231\u00F6\u022B\u1ECF\u0151\u01D2\u020D\u020F\u01A1\u1EDD\u1EDB\u1EE1\u1EDF\u1EE3\u1ECD\u1ED9\u01EB\u01ED\u00F8\u01FF\u0254\uA74B\uA74D\u0275',
584
+ },
585
+ { base: 'oi', letters: '\u01A3' },
586
+ { base: 'ou', letters: '\u0223' },
587
+ { base: 'oo', letters: '\uA74F' },
588
+ {
589
+ base: 'p',
590
+ letters: '\u0070\u24DF\uFF50\u1E55\u1E57\u01A5\u1D7D\uA751\uA753\uA755',
591
+ },
592
+ { base: 'q', letters: '\u0071\u24E0\uFF51\u024B\uA757\uA759' },
593
+ {
594
+ base: 'r',
595
+ letters: '\u0072\u24E1\uFF52\u0155\u1E59\u0159\u0211\u0213\u1E5B\u1E5D\u0157\u1E5F\u024D\u027D\uA75B\uA7A7\uA783',
596
+ },
597
+ {
598
+ base: 's',
599
+ letters: '\u0073\u24E2\uFF53\u00DF\u015B\u1E65\u015D\u1E61\u0161\u1E67\u1E63\u1E69\u0219\u015F\u023F\uA7A9\uA785\u1E9B',
600
+ },
601
+ {
602
+ base: 't',
603
+ letters: '\u0074\u24E3\uFF54\u1E6B\u1E97\u0165\u1E6D\u021B\u0163\u1E71\u1E6F\u0167\u01AD\u0288\u2C66\uA787',
604
+ },
605
+ { base: 'tz', letters: '\uA729' },
606
+ {
607
+ base: 'u',
608
+ letters: '\u0075\u24E4\uFF55\u00F9\u00FA\u00FB\u0169\u1E79\u016B\u1E7B\u016D\u00FC\u01DC\u01D8\u01D6\u01DA\u1EE7\u016F\u0171\u01D4\u0215\u0217\u01B0\u1EEB\u1EE9\u1EEF\u1EED\u1EF1\u1EE5\u1E73\u0173\u1E77\u1E75\u0289',
609
+ },
610
+ { base: 'v', letters: '\u0076\u24E5\uFF56\u1E7D\u1E7F\u028B\uA75F\u028C' },
611
+ { base: 'vy', letters: '\uA761' },
612
+ {
613
+ base: 'w',
614
+ letters: '\u0077\u24E6\uFF57\u1E81\u1E83\u0175\u1E87\u1E85\u1E98\u1E89\u2C73',
615
+ },
616
+ { base: 'x', letters: '\u0078\u24E7\uFF58\u1E8B\u1E8D' },
617
+ {
618
+ base: 'y',
619
+ letters: '\u0079\u24E8\uFF59\u1EF3\u00FD\u0177\u1EF9\u0233\u1E8F\u00FF\u1EF7\u1E99\u1EF5\u01B4\u024F\u1EFF',
620
+ },
621
+ {
622
+ base: 'z',
623
+ letters: '\u007A\u24E9\uFF5A\u017A\u1E91\u017C\u017E\u1E93\u1E95\u01B6\u0225\u0240\u2C6C\uA763',
624
+ },
625
+ ];
626
+ /**
627
+ * Map of letters from diacritic variant to diacritless variant
628
+ * Contains lowercase and uppercase separatelly
629
+ *
630
+ * > "á" => "a"
631
+ * > "ě" => "e"
632
+ * > "Ă" => "A"
633
+ * > ...
634
+ *
635
+ * @public exported from `@promptbook/utils`
636
+ */
637
+ var DIACRITIC_VARIANTS_LETTERS = {};
638
+ // tslint:disable-next-line: prefer-for-of
639
+ for (var i = 0; i < defaultDiacriticsRemovalMap.length; i++) {
640
+ var letters = defaultDiacriticsRemovalMap[i].letters;
641
+ // tslint:disable-next-line: prefer-for-of
642
+ for (var j = 0; j < letters.length; j++) {
643
+ DIACRITIC_VARIANTS_LETTERS[letters[j]] = defaultDiacriticsRemovalMap[i].base;
644
+ }
645
+ }
646
+ // <- TODO: [🍓] Put to maker function to save execution time if not needed
647
+ /*
648
+ @see https://stackoverflow.com/questions/990904/remove-accents-diacritics-in-a-string-in-javascript
649
+ Licensed under the Apache License, Version 2.0 (the "License");
650
+ you may not use this file except in compliance with the License.
651
+ You may obtain a copy of the License at
652
+
653
+ http://www.apache.org/licenses/LICENSE-2.0
654
+
655
+ Unless required by applicable law or agreed to in writing, software
656
+ distributed under the License is distributed on an "AS IS" BASIS,
657
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
658
+ See the License for the specific language governing permissions and
659
+ limitations under the License.
660
+ */
661
+
662
+ /**
663
+ * @@@
664
+ *
665
+ * @param input @@@
666
+ * @returns @@@
667
+ * @public exported from `@promptbook/utils`
668
+ */
669
+ function removeDiacritics(input) {
670
+ /*eslint no-control-regex: "off"*/
671
+ return input.replace(/[^\u0000-\u007E]/g, function (a) {
672
+ return DIACRITIC_VARIANTS_LETTERS[a] || a;
673
+ });
674
+ }
675
+ /**
676
+ * TODO: [Ж] Variant for cyrillic (and in general non-latin) letters
677
+ */
678
+
679
+ /**
680
+ * Counts number of words in the text
681
+ *
682
+ * @public exported from `@promptbook/utils`
683
+ */
684
+ function countWords(text) {
685
+ text = text.replace(/[\p{Extended_Pictographic}]/gu, 'a');
686
+ text = removeDiacritics(text);
687
+ return text.split(/[^a-zа-я0-9]+/i).filter(function (word) { return word.length > 0; }).length;
688
+ }
689
+
690
+ /**
691
+ * Helper of usage compute
692
+ *
693
+ * @param content the content of prompt or response
694
+ * @returns part of PromptResultUsageCounts
695
+ *
696
+ * @private internal utility of LlmExecutionTools
697
+ */
698
+ function computeUsageCounts(content) {
699
+ return {
700
+ charactersCount: { value: countCharacters(content) },
701
+ wordsCount: { value: countWords(content) },
702
+ sentencesCount: { value: countSentences(content) },
703
+ linesCount: { value: countLines(content) },
704
+ paragraphsCount: { value: countParagraphs(content) },
705
+ pagesCount: { value: countPages(content) },
706
+ };
707
+ }
708
+
709
+ /**
710
+ * Make UncertainNumber
711
+ *
712
+ * @param value
713
+ *
714
+ * @private utility for initializating UncertainNumber
715
+ */
716
+ function uncertainNumber(value) {
717
+ if (value === null || value === undefined || Number.isNaN(value)) {
718
+ return { value: 0, isUncertain: true };
719
+ }
720
+ return { value: value };
721
+ }
722
+
723
+ /**
724
+ * Function computeUsage will create price per one token based on the string value found on openai page
725
+ *
726
+ * @private within the repository, used only as internal helper for `OPENAI_MODELS`
727
+ */
728
+ function computeUsage(value) {
729
+ var _a = __read(value.split(' / '), 2), price = _a[0], tokens = _a[1];
730
+ return parseFloat(price.replace('$', '')) / parseFloat(tokens.replace('M tokens', '')) / 1000000;
731
+ }
732
+
733
+ /**
734
+ * List of available OpenAI models with pricing
735
+ *
736
+ * Note: Done at 2024-05-20
737
+ *
738
+ * @see https://platform.openai.com/docs/models/
739
+ * @see https://openai.com/api/pricing/
740
+ * @public exported from `@promptbook/openai`
741
+ */
742
+ var OPENAI_MODELS = [
743
+ /*/
744
+ {
745
+ modelTitle: 'dall-e-3',
746
+ modelName: 'dall-e-3',
747
+ },
748
+ /**/
749
+ /*/
750
+ {
751
+ modelTitle: 'whisper-1',
752
+ modelName: 'whisper-1',
753
+ },
754
+ /**/
755
+ /**/
756
+ {
757
+ modelVariant: 'COMPLETION',
758
+ modelTitle: 'davinci-002',
759
+ modelName: 'davinci-002',
760
+ pricing: {
761
+ prompt: computeUsage("$2.00 / 1M tokens"),
762
+ output: computeUsage("$2.00 / 1M tokens"), // <- not sure
763
+ },
867
764
  },
868
- { base: 'V', letters: '\u0056\u24CB\uFF36\u1E7C\u1E7E\u01B2\uA75E\u0245' },
869
- { base: 'VY', letters: '\uA760' },
765
+ /**/
766
+ /*/
767
+ {
768
+ modelTitle: 'dall-e-2',
769
+ modelName: 'dall-e-2',
770
+ },
771
+ /**/
772
+ /**/
870
773
  {
871
- base: 'W',
872
- letters: '\u0057\u24CC\uFF37\u1E80\u1E82\u0174\u1E86\u1E84\u1E88\u2C72',
774
+ modelVariant: 'CHAT',
775
+ modelTitle: 'gpt-3.5-turbo-16k',
776
+ modelName: 'gpt-3.5-turbo-16k',
777
+ pricing: {
778
+ prompt: computeUsage("$3.00 / 1M tokens"),
779
+ output: computeUsage("$4.00 / 1M tokens"),
780
+ },
873
781
  },
874
- { base: 'X', letters: '\u0058\u24CD\uFF38\u1E8A\u1E8C' },
782
+ /**/
783
+ /*/
784
+ {
785
+ modelTitle: 'tts-1-hd-1106',
786
+ modelName: 'tts-1-hd-1106',
787
+ },
788
+ /**/
789
+ /*/
790
+ {
791
+ modelTitle: 'tts-1-hd',
792
+ modelName: 'tts-1-hd',
793
+ },
794
+ /**/
795
+ /**/
875
796
  {
876
- base: 'Y',
877
- letters: '\u0059\u24CE\uFF39\u1EF2\u00DD\u0176\u1EF8\u0232\u1E8E\u0178\u1EF6\u1EF4\u01B3\u024E\u1EFE',
797
+ modelVariant: 'CHAT',
798
+ modelTitle: 'gpt-4',
799
+ modelName: 'gpt-4',
800
+ pricing: {
801
+ prompt: computeUsage("$30.00 / 1M tokens"),
802
+ output: computeUsage("$60.00 / 1M tokens"),
803
+ },
878
804
  },
805
+ /**/
806
+ /**/
879
807
  {
880
- base: 'Z',
881
- letters: '\u005A\u24CF\uFF3A\u0179\u1E90\u017B\u017D\u1E92\u1E94\u01B5\u0224\u2C7F\u2C6B\uA762',
808
+ modelVariant: 'CHAT',
809
+ modelTitle: 'gpt-4-32k',
810
+ modelName: 'gpt-4-32k',
811
+ pricing: {
812
+ prompt: computeUsage("$60.00 / 1M tokens"),
813
+ output: computeUsage("$120.00 / 1M tokens"),
814
+ },
882
815
  },
816
+ /**/
817
+ /*/
883
818
  {
884
- base: 'a',
885
- letters: '\u0061\u24D0\uFF41\u1E9A\u00E0\u00E1\u00E2\u1EA7\u1EA5\u1EAB\u1EA9\u00E3\u0101\u0103\u1EB1\u1EAF\u1EB5\u1EB3\u0227\u01E1\u00E4\u01DF\u1EA3\u00E5\u01FB\u01CE\u0201\u0203\u1EA1\u1EAD\u1EB7\u1E01\u0105\u2C65\u0250',
819
+ modelVariant: 'CHAT',
820
+ modelTitle: 'gpt-4-0613',
821
+ modelName: 'gpt-4-0613',
822
+ pricing: {
823
+ prompt: computeUsage(` / 1M tokens`),
824
+ output: computeUsage(` / 1M tokens`),
825
+ },
886
826
  },
887
- { base: 'aa', letters: '\uA733' },
888
- { base: 'ae', letters: '\u00E6\u01FD\u01E3' },
889
- { base: 'ao', letters: '\uA735' },
890
- { base: 'au', letters: '\uA737' },
891
- { base: 'av', letters: '\uA739\uA73B' },
892
- { base: 'ay', letters: '\uA73D' },
827
+ /**/
828
+ /**/
893
829
  {
894
- base: 'b',
895
- letters: '\u0062\u24D1\uFF42\u1E03\u1E05\u1E07\u0180\u0183\u0253',
830
+ modelVariant: 'CHAT',
831
+ modelTitle: 'gpt-4-turbo-2024-04-09',
832
+ modelName: 'gpt-4-turbo-2024-04-09',
833
+ pricing: {
834
+ prompt: computeUsage("$10.00 / 1M tokens"),
835
+ output: computeUsage("$30.00 / 1M tokens"),
836
+ },
896
837
  },
838
+ /**/
839
+ /**/
897
840
  {
898
- base: 'c',
899
- letters: '\u0063\u24D2\uFF43\u0107\u0109\u010B\u010D\u00E7\u1E09\u0188\u023C\uA73F\u2184',
841
+ modelVariant: 'CHAT',
842
+ modelTitle: 'gpt-3.5-turbo-1106',
843
+ modelName: 'gpt-3.5-turbo-1106',
844
+ pricing: {
845
+ prompt: computeUsage("$1.00 / 1M tokens"),
846
+ output: computeUsage("$2.00 / 1M tokens"),
847
+ },
900
848
  },
849
+ /**/
850
+ /**/
901
851
  {
902
- base: 'd',
903
- letters: '\u0064\u24D3\uFF44\u1E0B\u010F\u1E0D\u1E11\u1E13\u1E0F\u0111\u018C\u0256\u0257\uA77A',
852
+ modelVariant: 'CHAT',
853
+ modelTitle: 'gpt-4-turbo',
854
+ modelName: 'gpt-4-turbo',
855
+ pricing: {
856
+ prompt: computeUsage("$10.00 / 1M tokens"),
857
+ output: computeUsage("$30.00 / 1M tokens"),
858
+ },
904
859
  },
905
- { base: 'dz', letters: '\u01F3\u01C6' },
860
+ /**/
861
+ /**/
906
862
  {
907
- base: 'e',
908
- letters: '\u0065\u24D4\uFF45\u00E8\u00E9\u00EA\u1EC1\u1EBF\u1EC5\u1EC3\u1EBD\u0113\u1E15\u1E17\u0115\u0117\u00EB\u1EBB\u011B\u0205\u0207\u1EB9\u1EC7\u0229\u1E1D\u0119\u1E19\u1E1B\u0247\u025B\u01DD',
863
+ modelVariant: 'COMPLETION',
864
+ modelTitle: 'gpt-3.5-turbo-instruct-0914',
865
+ modelName: 'gpt-3.5-turbo-instruct-0914',
866
+ pricing: {
867
+ prompt: computeUsage("$1.50 / 1M tokens"),
868
+ output: computeUsage("$2.00 / 1M tokens"), // <- For gpt-3.5-turbo-instruct
869
+ },
909
870
  },
910
- { base: 'f', letters: '\u0066\u24D5\uFF46\u1E1F\u0192\uA77C' },
871
+ /**/
872
+ /**/
911
873
  {
912
- base: 'g',
913
- letters: '\u0067\u24D6\uFF47\u01F5\u011D\u1E21\u011F\u0121\u01E7\u0123\u01E5\u0260\uA7A1\u1D79\uA77F',
874
+ modelVariant: 'COMPLETION',
875
+ modelTitle: 'gpt-3.5-turbo-instruct',
876
+ modelName: 'gpt-3.5-turbo-instruct',
877
+ pricing: {
878
+ prompt: computeUsage("$1.50 / 1M tokens"),
879
+ output: computeUsage("$2.00 / 1M tokens"),
880
+ },
914
881
  },
882
+ /**/
883
+ /*/
884
+ {
885
+ modelTitle: 'tts-1',
886
+ modelName: 'tts-1',
887
+ },
888
+ /**/
889
+ /**/
915
890
  {
916
- base: 'h',
917
- letters: '\u0068\u24D7\uFF48\u0125\u1E23\u1E27\u021F\u1E25\u1E29\u1E2B\u1E96\u0127\u2C68\u2C76\u0265',
891
+ modelVariant: 'CHAT',
892
+ modelTitle: 'gpt-3.5-turbo',
893
+ modelName: 'gpt-3.5-turbo',
894
+ pricing: {
895
+ prompt: computeUsage("$3.00 / 1M tokens"),
896
+ output: computeUsage("$6.00 / 1M tokens"), // <- Not sure, refer to gpt-3.5-turbo in Fine-tuning models
897
+ },
918
898
  },
919
- { base: 'hv', letters: '\u0195' },
899
+ /**/
900
+ /**/
920
901
  {
921
- base: 'i',
922
- letters: '\u0069\u24D8\uFF49\u00EC\u00ED\u00EE\u0129\u012B\u012D\u00EF\u1E2F\u1EC9\u01D0\u0209\u020B\u1ECB\u012F\u1E2D\u0268\u0131',
902
+ modelVariant: 'CHAT',
903
+ modelTitle: 'gpt-3.5-turbo-0301',
904
+ modelName: 'gpt-3.5-turbo-0301',
905
+ pricing: {
906
+ prompt: computeUsage("$1.50 / 1M tokens"),
907
+ output: computeUsage("$2.00 / 1M tokens"),
908
+ },
923
909
  },
924
- { base: 'j', letters: '\u006A\u24D9\uFF4A\u0135\u01F0\u0249' },
910
+ /**/
911
+ /**/
925
912
  {
926
- base: 'k',
927
- letters: '\u006B\u24DA\uFF4B\u1E31\u01E9\u1E33\u0137\u1E35\u0199\u2C6A\uA741\uA743\uA745\uA7A3',
913
+ modelVariant: 'COMPLETION',
914
+ modelTitle: 'babbage-002',
915
+ modelName: 'babbage-002',
916
+ pricing: {
917
+ prompt: computeUsage("$0.40 / 1M tokens"),
918
+ output: computeUsage("$0.40 / 1M tokens"), // <- Not sure
919
+ },
928
920
  },
921
+ /**/
922
+ /**/
929
923
  {
930
- base: 'l',
931
- letters: '\u006C\u24DB\uFF4C\u0140\u013A\u013E\u1E37\u1E39\u013C\u1E3D\u1E3B\u017F\u0142\u019A\u026B\u2C61\uA749\uA781\uA747',
924
+ modelVariant: 'CHAT',
925
+ modelTitle: 'gpt-4-1106-preview',
926
+ modelName: 'gpt-4-1106-preview',
927
+ pricing: {
928
+ prompt: computeUsage("$10.00 / 1M tokens"),
929
+ output: computeUsage("$30.00 / 1M tokens"),
930
+ },
932
931
  },
933
- { base: 'lj', letters: '\u01C9' },
934
- { base: 'm', letters: '\u006D\u24DC\uFF4D\u1E3F\u1E41\u1E43\u0271\u026F' },
932
+ /**/
933
+ /**/
935
934
  {
936
- base: 'n',
937
- letters: '\u006E\u24DD\uFF4E\u01F9\u0144\u00F1\u1E45\u0148\u1E47\u0146\u1E4B\u1E49\u019E\u0272\u0149\uA791\uA7A5',
935
+ modelVariant: 'CHAT',
936
+ modelTitle: 'gpt-4-0125-preview',
937
+ modelName: 'gpt-4-0125-preview',
938
+ pricing: {
939
+ prompt: computeUsage("$10.00 / 1M tokens"),
940
+ output: computeUsage("$30.00 / 1M tokens"),
941
+ },
938
942
  },
939
- { base: 'nj', letters: '\u01CC' },
943
+ /**/
944
+ /*/
945
+ {
946
+ modelTitle: 'tts-1-1106',
947
+ modelName: 'tts-1-1106',
948
+ },
949
+ /**/
950
+ /**/
940
951
  {
941
- base: 'o',
942
- letters: '\u006F\u24DE\uFF4F\u00F2\u00F3\u00F4\u1ED3\u1ED1\u1ED7\u1ED5\u00F5\u1E4D\u022D\u1E4F\u014D\u1E51\u1E53\u014F\u022F\u0231\u00F6\u022B\u1ECF\u0151\u01D2\u020D\u020F\u01A1\u1EDD\u1EDB\u1EE1\u1EDF\u1EE3\u1ECD\u1ED9\u01EB\u01ED\u00F8\u01FF\u0254\uA74B\uA74D\u0275',
952
+ modelVariant: 'CHAT',
953
+ modelTitle: 'gpt-3.5-turbo-0125',
954
+ modelName: 'gpt-3.5-turbo-0125',
955
+ pricing: {
956
+ prompt: computeUsage("$0.50 / 1M tokens"),
957
+ output: computeUsage("$1.50 / 1M tokens"),
958
+ },
943
959
  },
944
- { base: 'oi', letters: '\u01A3' },
945
- { base: 'ou', letters: '\u0223' },
946
- { base: 'oo', letters: '\uA74F' },
960
+ /**/
961
+ /**/
947
962
  {
948
- base: 'p',
949
- letters: '\u0070\u24DF\uFF50\u1E55\u1E57\u01A5\u1D7D\uA751\uA753\uA755',
963
+ modelVariant: 'CHAT',
964
+ modelTitle: 'gpt-4-turbo-preview',
965
+ modelName: 'gpt-4-turbo-preview',
966
+ pricing: {
967
+ prompt: computeUsage("$10.00 / 1M tokens"),
968
+ output: computeUsage("$30.00 / 1M tokens"), // <- Not sure, just for gpt-4-turbo
969
+ },
950
970
  },
951
- { base: 'q', letters: '\u0071\u24E0\uFF51\u024B\uA757\uA759' },
971
+ /**/
972
+ /**/
952
973
  {
953
- base: 'r',
954
- letters: '\u0072\u24E1\uFF52\u0155\u1E59\u0159\u0211\u0213\u1E5B\u1E5D\u0157\u1E5F\u024D\u027D\uA75B\uA7A7\uA783',
974
+ modelVariant: 'EMBEDDING',
975
+ modelTitle: 'text-embedding-3-large',
976
+ modelName: 'text-embedding-3-large',
977
+ pricing: {
978
+ prompt: computeUsage("$0.13 / 1M tokens"),
979
+ // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
980
+ output: 0, // <- Note: [🆖] In Embedding models you dont pay for output
981
+ },
955
982
  },
983
+ /**/
984
+ /**/
956
985
  {
957
- base: 's',
958
- letters: '\u0073\u24E2\uFF53\u00DF\u015B\u1E65\u015D\u1E61\u0161\u1E67\u1E63\u1E69\u0219\u015F\u023F\uA7A9\uA785\u1E9B',
986
+ modelVariant: 'EMBEDDING',
987
+ modelTitle: 'text-embedding-3-small',
988
+ modelName: 'text-embedding-3-small',
989
+ pricing: {
990
+ prompt: computeUsage("$0.02 / 1M tokens"),
991
+ // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
992
+ output: 0, // <- Note: [🆖] In Embedding models you dont pay for output
993
+ },
959
994
  },
995
+ /**/
996
+ /**/
960
997
  {
961
- base: 't',
962
- letters: '\u0074\u24E3\uFF54\u1E6B\u1E97\u0165\u1E6D\u021B\u0163\u1E71\u1E6F\u0167\u01AD\u0288\u2C66\uA787',
998
+ modelVariant: 'CHAT',
999
+ modelTitle: 'gpt-3.5-turbo-0613',
1000
+ modelName: 'gpt-3.5-turbo-0613',
1001
+ pricing: {
1002
+ prompt: computeUsage("$1.50 / 1M tokens"),
1003
+ output: computeUsage("$2.00 / 1M tokens"),
1004
+ },
963
1005
  },
964
- { base: 'tz', letters: '\uA729' },
1006
+ /**/
1007
+ /**/
965
1008
  {
966
- base: 'u',
967
- letters: '\u0075\u24E4\uFF55\u00F9\u00FA\u00FB\u0169\u1E79\u016B\u1E7B\u016D\u00FC\u01DC\u01D8\u01D6\u01DA\u1EE7\u016F\u0171\u01D4\u0215\u0217\u01B0\u1EEB\u1EE9\u1EEF\u1EED\u1EF1\u1EE5\u1E73\u0173\u1E77\u1E75\u0289',
1009
+ modelVariant: 'EMBEDDING',
1010
+ modelTitle: 'text-embedding-ada-002',
1011
+ modelName: 'text-embedding-ada-002',
1012
+ pricing: {
1013
+ prompt: computeUsage("$0.1 / 1M tokens"),
1014
+ // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
1015
+ output: 0, // <- Note: [🆖] In Embedding models you dont pay for output
1016
+ },
968
1017
  },
969
- { base: 'v', letters: '\u0076\u24E5\uFF56\u1E7D\u1E7F\u028B\uA75F\u028C' },
970
- { base: 'vy', letters: '\uA761' },
1018
+ /**/
1019
+ /*/
1020
+ {
1021
+ modelVariant: 'CHAT',
1022
+ modelTitle: 'gpt-4-1106-vision-preview',
1023
+ modelName: 'gpt-4-1106-vision-preview',
1024
+ },
1025
+ /**/
1026
+ /*/
1027
+ {
1028
+ modelVariant: 'CHAT',
1029
+ modelTitle: 'gpt-4-vision-preview',
1030
+ modelName: 'gpt-4-vision-preview',
1031
+ pricing: {
1032
+ prompt: computeUsage(`$10.00 / 1M tokens`),
1033
+ output: computeUsage(`$30.00 / 1M tokens`),
1034
+ },
1035
+ },
1036
+ /**/
1037
+ /**/
971
1038
  {
972
- base: 'w',
973
- letters: '\u0077\u24E6\uFF57\u1E81\u1E83\u0175\u1E87\u1E85\u1E98\u1E89\u2C73',
1039
+ modelVariant: 'CHAT',
1040
+ modelTitle: 'gpt-4o-2024-05-13',
1041
+ modelName: 'gpt-4o-2024-05-13',
1042
+ pricing: {
1043
+ prompt: computeUsage("$5.00 / 1M tokens"),
1044
+ output: computeUsage("$15.00 / 1M tokens"),
1045
+ },
974
1046
  },
975
- { base: 'x', letters: '\u0078\u24E7\uFF58\u1E8B\u1E8D' },
1047
+ /**/
1048
+ /**/
976
1049
  {
977
- base: 'y',
978
- letters: '\u0079\u24E8\uFF59\u1EF3\u00FD\u0177\u1EF9\u0233\u1E8F\u00FF\u1EF7\u1E99\u1EF5\u01B4\u024F\u1EFF',
1050
+ modelVariant: 'CHAT',
1051
+ modelTitle: 'gpt-4o',
1052
+ modelName: 'gpt-4o',
1053
+ pricing: {
1054
+ prompt: computeUsage("$5.00 / 1M tokens"),
1055
+ output: computeUsage("$15.00 / 1M tokens"),
1056
+ },
979
1057
  },
1058
+ /**/
1059
+ /**/
980
1060
  {
981
- base: 'z',
982
- letters: '\u007A\u24E9\uFF5A\u017A\u1E91\u017C\u017E\u1E93\u1E95\u01B6\u0225\u0240\u2C6C\uA763',
1061
+ modelVariant: 'CHAT',
1062
+ modelTitle: 'gpt-3.5-turbo-16k-0613',
1063
+ modelName: 'gpt-3.5-turbo-16k-0613',
1064
+ pricing: {
1065
+ prompt: computeUsage("$3.00 / 1M tokens"),
1066
+ output: computeUsage("$4.00 / 1M tokens"),
1067
+ },
983
1068
  },
1069
+ /**/
984
1070
  ];
985
1071
  /**
986
- * Map of letters from diacritic variant to diacritless variant
987
- * Contains lowercase and uppercase separatelly
988
- *
989
- * > "á" => "a"
990
- * > "ě" => "e"
991
- * > "Ă" => "A"
992
- * > ...
993
- *
994
- * @public exported from `@promptbook/utils`
995
- */
996
- var DIACRITIC_VARIANTS_LETTERS = {};
997
- // tslint:disable-next-line: prefer-for-of
998
- for (var i = 0; i < defaultDiacriticsRemovalMap.length; i++) {
999
- var letters = defaultDiacriticsRemovalMap[i].letters;
1000
- // tslint:disable-next-line: prefer-for-of
1001
- for (var j = 0; j < letters.length; j++) {
1002
- DIACRITIC_VARIANTS_LETTERS[letters[j]] = defaultDiacriticsRemovalMap[i].base;
1003
- }
1004
- }
1005
- // <- TODO: [🍓] Put to maker function to save execution time if not needed
1006
- /*
1007
- @see https://stackoverflow.com/questions/990904/remove-accents-diacritics-in-a-string-in-javascript
1008
- Licensed under the Apache License, Version 2.0 (the "License");
1009
- you may not use this file except in compliance with the License.
1010
- You may obtain a copy of the License at
1011
-
1012
- http://www.apache.org/licenses/LICENSE-2.0
1013
-
1014
- Unless required by applicable law or agreed to in writing, software
1015
- distributed under the License is distributed on an "AS IS" BASIS,
1016
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1017
- See the License for the specific language governing permissions and
1018
- limitations under the License.
1019
- */
1020
-
1021
- /**
1022
- * @@@
1023
- *
1024
- * @param input @@@
1025
- * @returns @@@
1026
- * @public exported from `@promptbook/utils`
1027
- */
1028
- function removeDiacritics(input) {
1029
- /*eslint no-control-regex: "off"*/
1030
- return input.replace(/[^\u0000-\u007E]/g, function (a) {
1031
- return DIACRITIC_VARIANTS_LETTERS[a] || a;
1032
- });
1033
- }
1034
- /**
1035
- * TODO: [Ж] Variant for cyrillic (and in general non-latin) letters
1036
- */
1037
-
1038
- /**
1039
- * Counts number of words in the text
1040
- *
1041
- * @public exported from `@promptbook/utils`
1042
- */
1043
- function countWords(text) {
1044
- text = text.replace(/[\p{Extended_Pictographic}]/gu, 'a');
1045
- text = removeDiacritics(text);
1046
- return text.split(/[^a-zа-я0-9]+/i).filter(function (word) { return word.length > 0; }).length;
1047
- }
1048
-
1049
- /**
1050
- * Helper of usage compute
1051
- *
1052
- * @param content the content of prompt or response
1053
- * @returns part of PromptResultUsageCounts
1054
- *
1055
- * @private internal utility of LlmExecutionTools
1056
- */
1057
- function computeUsageCounts(content) {
1058
- return {
1059
- charactersCount: { value: countCharacters(content) },
1060
- wordsCount: { value: countWords(content) },
1061
- sentencesCount: { value: countSentences(content) },
1062
- linesCount: { value: countLines(content) },
1063
- paragraphsCount: { value: countParagraphs(content) },
1064
- pagesCount: { value: countPages(content) },
1065
- };
1066
- }
1067
-
1068
- /**
1069
- * Make UncertainNumber
1070
- *
1071
- * @param value
1072
- *
1073
- * @private utility for initializating UncertainNumber
1072
+ * Note: [🤖] Add models of new variant
1073
+ * TODO: [🧠] Some mechanism to propagate unsureness
1074
+ * TODO: [🎰] Some mechanism to auto-update available models
1075
+ * TODO: [🎰][👮‍♀️] Make this list dynamic - dynamically can be listed modelNames but not modelVariant, legacy status, context length and pricing
1076
+ * TODO: [🧠][👮‍♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,...
1077
+ * @see https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4
1078
+ * @see https://openai.com/api/pricing/
1079
+ * @see /other/playground/playground.ts
1080
+ * TODO: [🍓] Make better
1081
+ * TODO: Change model titles to human eg: "gpt-4-turbo-2024-04-09" -> "GPT-4 Turbo (2024-04-09)"
1082
+ * TODO: [🚸] Not all models are compatible with JSON mode, add this information here and use it
1074
1083
  */
1075
- function uncertainNumber(value) {
1076
- if (value === null || value === undefined || Number.isNaN(value)) {
1077
- return { value: 0, isUncertain: true };
1078
- }
1079
- return { value: value };
1080
- }
1081
1084
 
1082
1085
  /**
1083
1086
  * Computes the usage of the OpenAI API based on the response from OpenAI
@@ -1088,7 +1091,7 @@
  * @throws {PipelineExecutionError} If the usage is not defined in the response from OpenAI
  * @private internal utility of `OpenAiExecutionTools`
  */
- function computeOpenaiUsage(promptContent, // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
+ function computeOpenAiUsage(promptContent, // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
  resultContent, rawResponse) {
  var _a, _b;
  if (rawResponse.usage === undefined) {
@@ -1113,9 +1116,12 @@
  output: __assign({ tokensCount: uncertainNumber(outputTokens) }, computeUsageCounts(resultContent)),
  };
  }
+ /**
+ * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
+ */

  /**
- * Execution Tools for calling OpenAI API.
+ * Execution Tools for calling OpenAI API
  *
  * @public exported from `@promptbook/openai`
  */
@@ -1133,6 +1139,7 @@
  delete openAiOptions.isVerbose;
  delete openAiOptions.user;
  this.client = new OpenAI__default["default"](__assign({}, openAiOptions));
+ // <- TODO: !!!!!! Lazy-load client
  }
  Object.defineProperty(OpenAiExecutionTools.prototype, "title", {
  get: function () {
@@ -1213,7 +1220,7 @@
  resultContent = rawResponse.choices[0].message.content;
  // eslint-disable-next-line prefer-const
  complete = getCurrentIsoDate();
- usage = computeOpenaiUsage(content, resultContent || '', rawResponse);
+ usage = computeOpenAiUsage(content, resultContent || '', rawResponse);
  if (resultContent === null) {
  throw new PipelineExecutionError('No response message from OpenAI');
  }
@@ -1282,7 +1289,7 @@
  resultContent = rawResponse.choices[0].text;
  // eslint-disable-next-line prefer-const
  complete = getCurrentIsoDate();
- usage = computeOpenaiUsage(content, resultContent || '', rawResponse);
+ usage = computeOpenAiUsage(content, resultContent || '', rawResponse);
  return [2 /*return*/, {
  content: resultContent,
  modelName: rawResponse.model || modelName,
@@ -1339,7 +1346,7 @@
  resultContent = rawResponse.data[0].embedding;
  // eslint-disable-next-line prefer-const
  complete = getCurrentIsoDate();
- usage = computeOpenaiUsage(content, '', rawResponse);
+ usage = computeOpenAiUsage(content, '', rawResponse);
  return [2 /*return*/, {
  content: resultContent,
  modelName: rawResponse.model || modelName,
@@ -1413,14 +1420,77 @@
  /**
  * TODO: [🧠][🧙‍♂️] Maybe there can be some wizzard for thoose who want to use just OpenAI
  * TODO: Maybe Create some common util for callChatModel and callCompletionModel
- * TODO: Maybe make custom OpenaiError
+ * TODO: Maybe make custom OpenAiError
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
  * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
  */

+ /**
+ * Execution Tools for calling OpenAI API
+ *
+ * @public exported from `@promptbook/openai`
+ */
+ var createOpenAiExecutionTools = Object.assign(function (options) {
+ // TODO: !!!!!! If browser, auto add `dangerouslyAllowBrowser`
+ return new OpenAiExecutionTools(options);
+ }, {
+ packageName: '@promptbook/openai',
+ className: 'OpenAiExecutionTools',
+ });
+ /**
+ * TODO: [🦺] Is there some way how to put `packageName` and `className` on top and function definition on bottom?
+ * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
+ */
+
+ /**
+ * Register is @@@
+ *
+ * @private internal utility, exported are only signleton instances of this class
+ */
+ var Register = /** @class */ (function () {
+ function Register(storage) {
+ this.storage = storage;
+ }
+ Register.prototype.list = function () {
+ // <- TODO: ReadonlyDeep<Array<TRegistered>>
+ return this.storage;
+ };
+ Register.prototype.register = function (registered) {
+ // !!!!!! <- TODO: What to return here
+ // TODO: !!!!!! Compare if same is not already registered
+ this.storage.push(registered);
+ };
+ return Register;
+ }());
+
+ /**
+ * @@@
+ *
+ * Note: `$` is used to indicate that this interacts with the global scope
+ * @singleton Only one instance of each register is created per build, but thare can be more @@@
+ * @public exported from `@promptbook/core`
+ */
+ var $llmToolsRegister = new Register([
+ // TODO: !!!!!! Take from global scope
+ ]);
+
+ /**
+ * @@@ registration2
+ *
+ * Note: [🏐] Configurations registrations are done in @@@ BUT constructor @@@
+ *
+ * @public exported from `@promptbook/openai`
+ */
+ var _OpenAiRegistration = $llmToolsRegister.register(createOpenAiExecutionTools);
+ /**
+ * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
+ */
+
  exports.OPENAI_MODELS = OPENAI_MODELS;
  exports.OpenAiExecutionTools = OpenAiExecutionTools;
  exports.PROMPTBOOK_VERSION = PROMPTBOOK_VERSION;
+ exports._OpenAiRegistration = _OpenAiRegistration;
+ exports.createOpenAiExecutionTools = createOpenAiExecutionTools;

  Object.defineProperty(exports, '__esModule', { value: true });
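
For orientation, the registration flow appended at the end of the bundle is small enough to illustrate. The sketch below is not part of the diff; it assumes the register instance is shared across packages (the bundle still carries a "Take from global scope" TODO, so treat this as the intended shape rather than confirmed behaviour) and that importing @promptbook/openai runs the _OpenAiRegistration side effect.

    // Hypothetical illustration of the Register / $llmToolsRegister mechanism added above:
    // register() pushes a constructor into storage, list() returns that storage, and each
    // registered constructor carries packageName/className attached via Object.assign.
    import { $llmToolsRegister } from '@promptbook/core';
    import '@promptbook/openai'; // <- assumption: side-effect import triggers _OpenAiRegistration

    for (const constructorFn of $llmToolsRegister.list()) {
        console.log(constructorFn.packageName, constructorFn.className);
        // -> '@promptbook/openai' 'OpenAiExecutionTools'
    }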