@promptbook/ollama 0.94.0-1 → 0.94.0-3

This diff compares the contents of publicly available package versions that have been released to one of the supported registries. The information in this diff is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
package/umd/index.umd.js CHANGED
@@ -1,13 +1,14 @@
  (function (global, factory) {
- typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('bottleneck'), require('node-fetch'), require('crypto'), require('spacetrim')) :
- typeof define === 'function' && define.amd ? define(['exports', 'bottleneck', 'node-fetch', 'crypto', 'spacetrim'], factory) :
- (global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global["promptbook-ollama"] = {}, global.Bottleneck, global.fetch, global.crypto, global.spaceTrim));
- })(this, (function (exports, Bottleneck, fetch, crypto, spaceTrim) { 'use strict';
+ typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('bottleneck'), require('colors'), require('openai'), require('spacetrim'), require('crypto')) :
+ typeof define === 'function' && define.amd ? define(['exports', 'bottleneck', 'colors', 'openai', 'spacetrim', 'crypto'], factory) :
+ (global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global["promptbook-ollama"] = {}, global.Bottleneck, global.colors, global.OpenAI, global.spaceTrim, global.crypto));
+ })(this, (function (exports, Bottleneck, colors, OpenAI, spaceTrim, crypto) { 'use strict';

  function _interopDefaultLegacy (e) { return e && typeof e === 'object' && 'default' in e ? e : { 'default': e }; }

  var Bottleneck__default = /*#__PURE__*/_interopDefaultLegacy(Bottleneck);
- var fetch__default = /*#__PURE__*/_interopDefaultLegacy(fetch);
+ var colors__default = /*#__PURE__*/_interopDefaultLegacy(colors);
+ var OpenAI__default = /*#__PURE__*/_interopDefaultLegacy(OpenAI);
  var spaceTrim__default = /*#__PURE__*/_interopDefaultLegacy(spaceTrim);

  // ⚠️ WARNING: This code has been generated so that any manual changes will be overwritten
@@ -24,12 +25,52 @@
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
- const PROMPTBOOK_ENGINE_VERSION = '0.94.0-1';
+ const PROMPTBOOK_ENGINE_VERSION = '0.94.0-3';
  /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
  */

+ /**
+ * Detects if the code is running in a browser environment in main thread (Not in a web worker)
+ *
+ * Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
+ *
+ * @public exported from `@promptbook/utils`
+ */
+ const $isRunningInBrowser = new Function(`
+ try {
+ return this === window;
+ } catch (e) {
+ return false;
+ }
+ `);
+ /**
+ * TODO: [🎺]
+ */
+
+ /**
+ * Detects if the code is running in a web worker
+ *
+ * Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
+ *
+ * @public exported from `@promptbook/utils`
+ */
+ const $isRunningInWebWorker = new Function(`
+ try {
+ if (typeof WorkerGlobalScope !== 'undefined' && self instanceof WorkerGlobalScope) {
+ return true;
+ } else {
+ return false;
+ }
+ } catch (e) {
+ return false;
+ }
+ `);
+ /**
+ * TODO: [🎺]
+ */
+
  /**
  * Name for the Promptbook
  *
@@ -50,6 +91,34 @@
  * @public exported from `@promptbook/core`
  */
  const ADMIN_GITHUB_NAME = 'hejny';
+ // <- TODO: [🧠] Better system for generator warnings - not always "code" and "by `@promptbook/cli`"
+ /**
+ * The maximum number of iterations for a loops
+ *
+ * @private within the repository - too low-level in comparison with other `MAX_...`
+ */
+ const LOOP_LIMIT = 1000;
+ /**
+ * Strings to represent various values in the context of parameter values
+ *
+ * @public exported from `@promptbook/utils`
+ */
+ const VALUE_STRINGS = {
+ empty: '(nothing; empty string)',
+ null: '(no value; null)',
+ undefined: '(unknown value; undefined)',
+ nan: '(not a number; NaN)',
+ infinity: '(infinity; ∞)',
+ negativeInfinity: '(negative infinity; -∞)',
+ unserializable: '(unserializable value)',
+ circular: '(circular JSON)',
+ };
+ /**
+ * Small number limit
+ *
+ * @public exported from `@promptbook/utils`
+ */
+ const SMALL_NUMBER = 0.001;
  // <- TODO: [🧜‍♂️]
  /**
  * Default settings for parsing and generating CSV files in Promptbook.
@@ -75,40 +144,6 @@
  * TODO: [🧠][🧜‍♂️] Maybe join remoteServerUrl and path into single value
  */

- /**
- * Generates random token
- *
- * Note: This function is cryptographically secure (it uses crypto.randomBytes internally)
- *
- * @private internal helper function
- * @returns secure random token
- */
- function $randomToken(randomness) {
- return crypto.randomBytes(randomness).toString('hex');
- }
- /**
- * TODO: Maybe use nanoid instead https://github.com/ai/nanoid
- */
-
- /**
- * This error indicates errors during the execution of the pipeline
- *
- * @public exported from `@promptbook/core`
- */
- class PipelineExecutionError extends Error {
- constructor(message) {
- // Added id parameter
- super(message);
- this.name = 'PipelineExecutionError';
- // TODO: [🐙] DRY - Maybe $randomId
- this.id = `error-${$randomToken(8 /* <- TODO: To global config + Use Base58 to avoid similar char conflicts */)}`;
- Object.setPrototypeOf(this, PipelineExecutionError.prototype);
- }
- }
- /**
- * TODO: [🧠][🌂] Add id to all errors
- */
-
  /**
  * Make error report URL for the given error
  *
@@ -177,6 +212,88 @@
  }
  }

+ /**
+ * This error type indicates that somewhere in the code non-Error object was thrown and it was wrapped into the `WrappedError`
+ *
+ * @public exported from `@promptbook/core`
+ */
+ class WrappedError extends Error {
+ constructor(whatWasThrown) {
+ const tag = `[🤮]`;
+ console.error(tag, whatWasThrown);
+ super(spaceTrim.spaceTrim(`
+ Non-Error object was thrown
+
+ Note: Look for ${tag} in the console for more details
+ Please report issue on ${ADMIN_EMAIL}
+ `));
+ this.name = 'WrappedError';
+ Object.setPrototypeOf(this, WrappedError.prototype);
+ }
+ }
+
+ /**
+ * Helper used in catch blocks to assert that the error is an instance of `Error`
+ *
+ * @param whatWasThrown Any object that was thrown
+ * @returns Nothing if the error is an instance of `Error`
+ * @throws `WrappedError` or `UnexpectedError` if the error is not standard
+ *
+ * @private within the repository
+ */
+ function assertsError(whatWasThrown) {
+ // Case 1: Handle error which was rethrown as `WrappedError`
+ if (whatWasThrown instanceof WrappedError) {
+ const wrappedError = whatWasThrown;
+ throw wrappedError;
+ }
+ // Case 2: Handle unexpected errors
+ if (whatWasThrown instanceof UnexpectedError) {
+ const unexpectedError = whatWasThrown;
+ throw unexpectedError;
+ }
+ // Case 3: Handle standard errors - keep them up to consumer
+ if (whatWasThrown instanceof Error) {
+ return;
+ }
+ // Case 4: Handle non-standard errors - wrap them into `WrappedError` and throw
+ throw new WrappedError(whatWasThrown);
+ }
+
+ /**
+ * Generates random token
+ *
+ * Note: This function is cryptographically secure (it uses crypto.randomBytes internally)
+ *
+ * @private internal helper function
+ * @returns secure random token
+ */
+ function $randomToken(randomness) {
+ return crypto.randomBytes(randomness).toString('hex');
+ }
+ /**
+ * TODO: Maybe use nanoid instead https://github.com/ai/nanoid
+ */
+
+ /**
+ * This error indicates errors during the execution of the pipeline
+ *
+ * @public exported from `@promptbook/core`
+ */
+ class PipelineExecutionError extends Error {
+ constructor(message) {
+ // Added id parameter
+ super(message);
+ this.name = 'PipelineExecutionError';
+ // TODO: [🐙] DRY - Maybe $randomId
+ this.id = `error-${$randomToken(8 /* <- TODO: To global config + Use Base58 to avoid similar char conflicts */)}`;
+ Object.setPrototypeOf(this, PipelineExecutionError.prototype);
+ }
+ }
+ /**
+ * TODO: [🧠][🌂] Add id to all errors
+ */
+
  /**
  * Simple wrapper `new Date().toISOString()`
  *
@@ -231,54 +348,6 @@
  * TODO: [🧠] Is there a way how to meaningfully test this utility
  */

- /**
- * This error type indicates that somewhere in the code non-Error object was thrown and it was wrapped into the `WrappedError`
- *
- * @public exported from `@promptbook/core`
- */
- class WrappedError extends Error {
- constructor(whatWasThrown) {
- const tag = `[🤮]`;
- console.error(tag, whatWasThrown);
- super(spaceTrim.spaceTrim(`
- Non-Error object was thrown
-
- Note: Look for ${tag} in the console for more details
- Please report issue on ${ADMIN_EMAIL}
- `));
- this.name = 'WrappedError';
- Object.setPrototypeOf(this, WrappedError.prototype);
- }
- }
-
- /**
- * Helper used in catch blocks to assert that the error is an instance of `Error`
- *
- * @param whatWasThrown Any object that was thrown
- * @returns Nothing if the error is an instance of `Error`
- * @throws `WrappedError` or `UnexpectedError` if the error is not standard
- *
- * @private within the repository
- */
- function assertsError(whatWasThrown) {
- // Case 1: Handle error which was rethrown as `WrappedError`
- if (whatWasThrown instanceof WrappedError) {
- const wrappedError = whatWasThrown;
- throw wrappedError;
- }
- // Case 2: Handle unexpected errors
- if (whatWasThrown instanceof UnexpectedError) {
- const unexpectedError = whatWasThrown;
- throw unexpectedError;
- }
- // Case 3: Handle standard errors - keep them up to consumer
- if (whatWasThrown instanceof Error) {
- return;
- }
- // Case 4: Handle non-standard errors - wrap them into `WrappedError` and throw
- throw new WrappedError(whatWasThrown);
- }
-
  /**
  * Checks if the value is [🚉] serializable as JSON
  * If not, throws an UnexpectedError with a rich error message and tracking
@@ -479,89 +548,1637 @@
  */

  /**
- * Execution Tools for calling a local Ollama model via HTTP API
+ * Nonce which is used for replacing things in strings
  *
- * @public exported from `@promptbook/ollama`
+ * @private within the repository
  */
- class OllamaExecutionTools {
- constructor(options) {
- this.options = options;
- this.limiter = new Bottleneck__default["default"]({
- minTime: 60000 / (options.maxRequestsPerMinute || DEFAULT_MAX_REQUESTS_PER_MINUTE),
- });
+ const REPLACING_NONCE = 'ptbkauk42kV2dzao34faw7FudQUHYPtW';
+ /**
+ * Nonce which is used as string which is not occurring in normal text
+ *
+ * @private within the repository
+ */
+ const SALT_NONCE = 'ptbkghhewbvruets21t54et5';
+ /**
+ * Placeholder value indicating a parameter is missing its value.
+ *
+ * @private within the repository
+ */
+ const RESERVED_PARAMETER_MISSING_VALUE = 'MISSING-' + REPLACING_NONCE;
+ /**
+ * Placeholder value indicating a parameter is restricted and cannot be used directly.
+ *
+ * @private within the repository
+ */
+ const RESERVED_PARAMETER_RESTRICTED = 'RESTRICTED-' + REPLACING_NONCE;
+ /**
+ * The names of the parameters that are reserved for special purposes
+ *
+ * @public exported from `@promptbook/core`
+ */
+ exportJson({
+ name: 'RESERVED_PARAMETER_NAMES',
+ message: `The names of the parameters that are reserved for special purposes`,
+ value: [
+ 'content',
+ 'context',
+ 'knowledge',
+ 'examples',
+ 'modelName',
+ 'currentDate',
+ // <- TODO: list here all command names
+ // <- TODO: Add more like 'date', 'modelName',...
+ // <- TODO: Add [emoji] + instructions ACRY when adding new reserved parameter
+ ],
+ });
+ /**
+ * Note: [💞] Ignore a discrepancy between file name and entity name
+ */
+
+ /**
+ * This error type indicates that some limit was reached
+ *
+ * @public exported from `@promptbook/core`
+ */
+ class LimitReachedError extends Error {
+ constructor(message) {
+ super(message);
+ this.name = 'LimitReachedError';
+ Object.setPrototypeOf(this, LimitReachedError.prototype);
  }
- get title() {
- return 'Ollama';
+ }
+
+ /**
+ * Format either small or big number
+ *
+ * @public exported from `@promptbook/utils`
+ */
+ function numberToString(value) {
+ if (value === 0) {
+ return '0';
  }
- get description() {
- return 'Local Ollama LLM via HTTP';
+ else if (Number.isNaN(value)) {
+ return VALUE_STRINGS.nan;
  }
- async checkConfiguration() {
- const res = await fetch__default["default"](`${this.options.baseUrl}/models`);
- if (!res.ok)
- throw new UnexpectedError(`Failed to reach Ollama API at ${this.options.baseUrl}`);
+ else if (value === Infinity) {
+ return VALUE_STRINGS.infinity;
  }
- async listModels() {
- const res = await fetch__default["default"](`${this.options.baseUrl}/models`);
- if (!res.ok)
- throw new UnexpectedError(`Error listing Ollama models: ${res.statusText}`);
- const data = (await res.json());
- return data.map((m) => ({ modelName: m.name, modelVariant: 'CHAT' }));
+ else if (value === -Infinity) {
+ return VALUE_STRINGS.negativeInfinity;
  }
- async callChatModel(prompt) {
- const { content, parameters, modelRequirements } = prompt;
- if (modelRequirements.modelVariant !== 'CHAT') {
- throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
- }
- const modelName = modelRequirements.modelName || this.options.model;
- const body = {
- model: modelName,
- messages: [
- ...(modelRequirements.systemMessage
- ? [{ role: 'system', content: modelRequirements.systemMessage }]
- : []),
- { role: 'user', content: content },
- ],
- parameters: parameters,
- };
- const start = $getCurrentDate();
- const res = await this.limiter.schedule(() => fetch__default["default"](`${this.options.baseUrl}/chat/completions`, {
- method: 'POST',
- headers: { 'Content-Type': 'application/json' },
- body: JSON.stringify(body),
- }));
- if (!res.ok)
- throw new PipelineExecutionError(`Ollama API error: ${res.statusText}`);
- const json = await res.json();
- const complete = $getCurrentDate();
- if (!json.choices || !json.choices[0]) {
- throw new PipelineExecutionError('No choices from Ollama');
+ for (let exponent = 0; exponent < 15; exponent++) {
+ const factor = 10 ** exponent;
+ const valueRounded = Math.round(value * factor) / factor;
+ if (Math.abs(value - valueRounded) / value < SMALL_NUMBER) {
+ return valueRounded.toFixed(exponent);
  }
- const resultContent = json.choices[0].message.content;
- const usage = { price: { value: 0, isUncertain: true }, input: {}, output: {} }; /* <- !!! */
- return exportJson({
- name: 'promptResult',
- message: 'Result of Ollama',
- order: [],
- value: {
- content: resultContent,
- modelName,
- timing: { start, complete },
- usage,
- rawPromptContent: content,
- rawRequest: body,
- rawResponse: json,
- },
- });
  }
+ return value.toString();
  }

+ /**
+ * Function `valueToString` will convert the given value to string
+ * This is useful and used in the `templateParameters` function
+ *
+ * Note: This function is not just calling `toString` method
+ * It's more complex and can handle this conversion specifically for LLM models
+ * See `VALUE_STRINGS`
+ *
+ * Note: There are 2 similar functions
+ * - `valueToString` converts value to string for LLM models as human-readable string
+ * - `asSerializable` converts value to string to preserve full information to be able to convert it back
+ *
+ * @public exported from `@promptbook/utils`
+ */
+ function valueToString(value) {
+ try {
+ if (value === '') {
+ return VALUE_STRINGS.empty;
+ }
+ else if (value === null) {
+ return VALUE_STRINGS.null;
+ }
+ else if (value === undefined) {
+ return VALUE_STRINGS.undefined;
+ }
+ else if (typeof value === 'string') {
+ return value;
+ }
+ else if (typeof value === 'number') {
+ return numberToString(value);
+ }
+ else if (value instanceof Date) {
+ return value.toISOString();
+ }
+ else {
+ try {
+ return JSON.stringify(value);
+ }
+ catch (error) {
+ if (error instanceof TypeError && error.message.includes('circular structure')) {
+ return VALUE_STRINGS.circular;
+ }
+ throw error;
+ }
+ }
+ }
+ catch (error) {
+ assertsError(error);
+ console.error(error);
+ return VALUE_STRINGS.unserializable;
+ }
+ }
+
+ /**
+ * Replaces parameters in template with values from parameters object
+ *
+ * Note: This function is not places strings into string,
+ * It's more complex and can handle this operation specifically for LLM models
+ *
+ * @param template the template with parameters in {curly} braces
+ * @param parameters the object with parameters
+ * @returns the template with replaced parameters
+ * @throws {PipelineExecutionError} if parameter is not defined, not closed, or not opened
+ * @public exported from `@promptbook/utils`
+ */
+ function templateParameters(template, parameters) {
+ for (const [parameterName, parameterValue] of Object.entries(parameters)) {
+ if (parameterValue === RESERVED_PARAMETER_MISSING_VALUE) {
+ throw new UnexpectedError(`Parameter \`{${parameterName}}\` has missing value`);
+ }
+ else if (parameterValue === RESERVED_PARAMETER_RESTRICTED) {
+ // TODO: [🍵]
+ throw new UnexpectedError(`Parameter \`{${parameterName}}\` is restricted to use`);
+ }
+ }
+ let replacedTemplates = template;
+ let match;
+ let loopLimit = LOOP_LIMIT;
+ while ((match = /^(?<precol>.*){(?<parameterName>\w+)}(.*)/m /* <- Not global */
+ .exec(replacedTemplates))) {
+ if (loopLimit-- < 0) {
+ throw new LimitReachedError('Loop limit reached during parameters replacement in `templateParameters`');
+ }
+ const precol = match.groups.precol;
+ const parameterName = match.groups.parameterName;
+ if (parameterName === '') {
+ // Note: Skip empty placeholders. It's used to avoid confusion with JSON-like strings
+ continue;
+ }
+ if (parameterName.indexOf('{') !== -1 || parameterName.indexOf('}') !== -1) {
+ throw new PipelineExecutionError('Parameter is already opened or not closed');
+ }
+ if (parameters[parameterName] === undefined) {
+ throw new PipelineExecutionError(`Parameter \`{${parameterName}}\` is not defined`);
+ }
+ let parameterValue = parameters[parameterName];
+ if (parameterValue === undefined) {
+ throw new PipelineExecutionError(`Parameter \`{${parameterName}}\` is not defined`);
+ }
+ parameterValue = valueToString(parameterValue);
+ // Escape curly braces in parameter values to prevent prompt-injection
+ parameterValue = parameterValue.replace(/[{}]/g, '\\$&');
+ if (parameterValue.includes('\n') && /^\s*\W{0,3}\s*$/.test(precol)) {
+ parameterValue = parameterValue
+ .split('\n')
+ .map((line, index) => (index === 0 ? line : `${precol}${line}`))
+ .join('\n');
+ }
+ replacedTemplates =
+ replacedTemplates.substring(0, match.index + precol.length) +
+ parameterValue +
+ replacedTemplates.substring(match.index + precol.length + parameterName.length + 2);
+ }
+ // [💫] Check if there are parameters that are not closed properly
+ if (/{\w+$/.test(replacedTemplates)) {
+ throw new PipelineExecutionError('Parameter is not closed');
+ }
+ // [💫] Check if there are parameters that are not opened properly
+ if (/^\w+}/.test(replacedTemplates)) {
+ throw new PipelineExecutionError('Parameter is not opened');
+ }
+ return replacedTemplates;
+ }
+
+ /**
+ * Counts number of characters in the text
+ *
+ * @public exported from `@promptbook/utils`
+ */
+ function countCharacters(text) {
+ // Remove null characters
+ text = text.replace(/\0/g, '');
+ // Replace emojis (and also ZWJ sequence) with hyphens
+ text = text.replace(/(\p{Extended_Pictographic})\p{Modifier_Symbol}/gu, '$1');
+ text = text.replace(/(\p{Extended_Pictographic})[\u{FE00}-\u{FE0F}]/gu, '$1');
+ text = text.replace(/\p{Extended_Pictographic}(\u{200D}\p{Extended_Pictographic})*/gu, '-');
+ return text.length;
+ }
+ /**
+ * TODO: [🥴] Implement counting in formats - like JSON, CSV, XML,...
+ */
+
+ /**
+ * Number of characters per standard line with 11pt Arial font size.
+ *
+ * @public exported from `@promptbook/utils`
+ */
+ const CHARACTERS_PER_STANDARD_LINE = 63;
+ /**
+ * Number of lines per standard A4 page with 11pt Arial font size and standard margins and spacing.
+ *
+ * @public exported from `@promptbook/utils`
+ */
+ const LINES_PER_STANDARD_PAGE = 44;
+ /**
+ * TODO: [🧠] Should be this `constants.ts` or `config.ts`?
+ * Note: [💞] Ignore a discrepancy between file name and entity name
+ */
+
+ /**
+ * Counts number of lines in the text
+ *
+ * Note: This does not check only for the presence of newlines, but also for the length of the standard line.
+ *
+ * @public exported from `@promptbook/utils`
+ */
+ function countLines(text) {
+ text = text.replace('\r\n', '\n');
+ text = text.replace('\r', '\n');
+ const lines = text.split('\n');
+ return lines.reduce((count, line) => count + Math.ceil(line.length / CHARACTERS_PER_STANDARD_LINE), 0);
+ }
+ /**
+ * TODO: [🥴] Implement counting in formats - like JSON, CSV, XML,...
+ */
+
+ /**
+ * Counts number of pages in the text
+ *
+ * Note: This does not check only for the count of newlines, but also for the length of the standard line and length of the standard page.
+ *
+ * @public exported from `@promptbook/utils`
+ */
+ function countPages(text) {
+ return Math.ceil(countLines(text) / LINES_PER_STANDARD_PAGE);
+ }
+ /**
+ * TODO: [🥴] Implement counting in formats - like JSON, CSV, XML,...
+ */
+
+ /**
+ * Counts number of paragraphs in the text
+ *
+ * @public exported from `@promptbook/utils`
+ */
+ function countParagraphs(text) {
+ return text.split(/\n\s*\n/).filter((paragraph) => paragraph.trim() !== '').length;
+ }
+ /**
+ * TODO: [🥴] Implement counting in formats - like JSON, CSV, XML,...
+ */
+
+ /**
+ * Split text into sentences
+ *
+ * @public exported from `@promptbook/utils`
+ */
+ function splitIntoSentences(text) {
+ return text.split(/[.!?]+/).filter((sentence) => sentence.trim() !== '');
+ }
+ /**
+ * Counts number of sentences in the text
+ *
+ * @public exported from `@promptbook/utils`
+ */
+ function countSentences(text) {
+ return splitIntoSentences(text).length;
+ }
+ /**
+ * TODO: [🥴] Implement counting in formats - like JSON, CSV, XML,...
+ */
+
+ const defaultDiacriticsRemovalMap = [
+ {
+ base: 'A',
+ letters: '\u0041\u24B6\uFF21\u00C0\u00C1\u00C2\u1EA6\u1EA4\u1EAA\u1EA8\u00C3\u0100\u0102\u1EB0\u1EAE\u1EB4\u1EB2\u0226\u01E0\u00C4\u01DE\u1EA2\u00C5\u01FA\u01CD\u0200\u0202\u1EA0\u1EAC\u1EB6\u1E00\u0104\u023A\u2C6F',
+ },
+ { base: 'AA', letters: '\uA732' },
+ { base: 'AE', letters: '\u00C6\u01FC\u01E2' },
+ { base: 'AO', letters: '\uA734' },
+ { base: 'AU', letters: '\uA736' },
+ { base: 'AV', letters: '\uA738\uA73A' },
+ { base: 'AY', letters: '\uA73C' },
+ {
+ base: 'B',
+ letters: '\u0042\u24B7\uFF22\u1E02\u1E04\u1E06\u0243\u0182\u0181',
+ },
+ {
+ base: 'C',
+ letters: '\u0043\u24B8\uFF23\u0106\u0108\u010A\u010C\u00C7\u1E08\u0187\u023B\uA73E',
+ },
+ {
+ base: 'D',
+ letters: '\u0044\u24B9\uFF24\u1E0A\u010E\u1E0C\u1E10\u1E12\u1E0E\u0110\u018B\u018A\u0189\uA779\u00D0',
+ },
+ { base: 'DZ', letters: '\u01F1\u01C4' },
+ { base: 'Dz', letters: '\u01F2\u01C5' },
+ {
+ base: 'E',
+ letters: '\u0045\u24BA\uFF25\u00C8\u00C9\u00CA\u1EC0\u1EBE\u1EC4\u1EC2\u1EBC\u0112\u1E14\u1E16\u0114\u0116\u00CB\u1EBA\u011A\u0204\u0206\u1EB8\u1EC6\u0228\u1E1C\u0118\u1E18\u1E1A\u0190\u018E',
+ },
+ { base: 'F', letters: '\u0046\u24BB\uFF26\u1E1E\u0191\uA77B' },
+ {
+ base: 'G',
+ letters: '\u0047\u24BC\uFF27\u01F4\u011C\u1E20\u011E\u0120\u01E6\u0122\u01E4\u0193\uA7A0\uA77D\uA77E',
+ },
+ {
+ base: 'H',
+ letters: '\u0048\u24BD\uFF28\u0124\u1E22\u1E26\u021E\u1E24\u1E28\u1E2A\u0126\u2C67\u2C75\uA78D',
+ },
+ {
+ base: 'I',
+ letters: '\u0049\u24BE\uFF29\u00CC\u00CD\u00CE\u0128\u012A\u012C\u0130\u00CF\u1E2E\u1EC8\u01CF\u0208\u020A\u1ECA\u012E\u1E2C\u0197',
+ },
+ { base: 'J', letters: '\u004A\u24BF\uFF2A\u0134\u0248' },
+ {
+ base: 'K',
+ letters: '\u004B\u24C0\uFF2B\u1E30\u01E8\u1E32\u0136\u1E34\u0198\u2C69\uA740\uA742\uA744\uA7A2',
+ },
+ {
+ base: 'L',
+ letters: '\u004C\u24C1\uFF2C\u013F\u0139\u013D\u1E36\u1E38\u013B\u1E3C\u1E3A\u0141\u023D\u2C62\u2C60\uA748\uA746\uA780',
+ },
+ { base: 'LJ', letters: '\u01C7' },
+ { base: 'Lj', letters: '\u01C8' },
+ { base: 'M', letters: '\u004D\u24C2\uFF2D\u1E3E\u1E40\u1E42\u2C6E\u019C' },
+ {
+ base: 'N',
+ letters: '\u004E\u24C3\uFF2E\u01F8\u0143\u00D1\u1E44\u0147\u1E46\u0145\u1E4A\u1E48\u0220\u019D\uA790\uA7A4',
+ },
+ { base: 'NJ', letters: '\u01CA' },
+ { base: 'Nj', letters: '\u01CB' },
+ {
+ base: 'O',
+ letters: '\u004F\u24C4\uFF2F\u00D2\u00D3\u00D4\u1ED2\u1ED0\u1ED6\u1ED4\u00D5\u1E4C\u022C\u1E4E\u014C\u1E50\u1E52\u014E\u022E\u0230\u00D6\u022A\u1ECE\u0150\u01D1\u020C\u020E\u01A0\u1EDC\u1EDA\u1EE0\u1EDE\u1EE2\u1ECC\u1ED8\u01EA\u01EC\u00D8\u01FE\u0186\u019F\uA74A\uA74C',
+ },
+ { base: 'OI', letters: '\u01A2' },
+ { base: 'OO', letters: '\uA74E' },
+ { base: 'OU', letters: '\u0222' },
+ { base: 'OE', letters: '\u008C\u0152' },
+ { base: 'oe', letters: '\u009C\u0153' },
+ {
+ base: 'P',
+ letters: '\u0050\u24C5\uFF30\u1E54\u1E56\u01A4\u2C63\uA750\uA752\uA754',
+ },
+ { base: 'Q', letters: '\u0051\u24C6\uFF31\uA756\uA758\u024A' },
+ {
+ base: 'R',
+ letters: '\u0052\u24C7\uFF32\u0154\u1E58\u0158\u0210\u0212\u1E5A\u1E5C\u0156\u1E5E\u024C\u2C64\uA75A\uA7A6\uA782',
+ },
+ {
+ base: 'S',
+ letters: '\u0053\u24C8\uFF33\u1E9E\u015A\u1E64\u015C\u1E60\u0160\u1E66\u1E62\u1E68\u0218\u015E\u2C7E\uA7A8\uA784',
+ },
+ {
+ base: 'T',
+ letters: '\u0054\u24C9\uFF34\u1E6A\u0164\u1E6C\u021A\u0162\u1E70\u1E6E\u0166\u01AC\u01AE\u023E\uA786',
+ },
+ { base: 'TZ', letters: '\uA728' },
+ {
+ base: 'U',
+ letters: '\u0055\u24CA\uFF35\u00D9\u00DA\u00DB\u0168\u1E78\u016A\u1E7A\u016C\u00DC\u01DB\u01D7\u01D5\u01D9\u1EE6\u016E\u0170\u01D3\u0214\u0216\u01AF\u1EEA\u1EE8\u1EEE\u1EEC\u1EF0\u1EE4\u1E72\u0172\u1E76\u1E74\u0244',
+ },
+ { base: 'V', letters: '\u0056\u24CB\uFF36\u1E7C\u1E7E\u01B2\uA75E\u0245' },
+ { base: 'VY', letters: '\uA760' },
+ {
+ base: 'W',
+ letters: '\u0057\u24CC\uFF37\u1E80\u1E82\u0174\u1E86\u1E84\u1E88\u2C72',
+ },
+ { base: 'X', letters: '\u0058\u24CD\uFF38\u1E8A\u1E8C' },
+ {
+ base: 'Y',
+ letters: '\u0059\u24CE\uFF39\u1EF2\u00DD\u0176\u1EF8\u0232\u1E8E\u0178\u1EF6\u1EF4\u01B3\u024E\u1EFE',
+ },
+ {
+ base: 'Z',
+ letters: '\u005A\u24CF\uFF3A\u0179\u1E90\u017B\u017D\u1E92\u1E94\u01B5\u0224\u2C7F\u2C6B\uA762',
+ },
+ {
+ base: 'a',
+ letters: '\u0061\u24D0\uFF41\u1E9A\u00E0\u00E1\u00E2\u1EA7\u1EA5\u1EAB\u1EA9\u00E3\u0101\u0103\u1EB1\u1EAF\u1EB5\u1EB3\u0227\u01E1\u00E4\u01DF\u1EA3\u00E5\u01FB\u01CE\u0201\u0203\u1EA1\u1EAD\u1EB7\u1E01\u0105\u2C65\u0250',
+ },
+ { base: 'aa', letters: '\uA733' },
+ { base: 'ae', letters: '\u00E6\u01FD\u01E3' },
+ { base: 'ao', letters: '\uA735' },
+ { base: 'au', letters: '\uA737' },
+ { base: 'av', letters: '\uA739\uA73B' },
+ { base: 'ay', letters: '\uA73D' },
+ {
+ base: 'b',
+ letters: '\u0062\u24D1\uFF42\u1E03\u1E05\u1E07\u0180\u0183\u0253',
+ },
+ {
+ base: 'c',
+ letters: '\u0063\u24D2\uFF43\u0107\u0109\u010B\u010D\u00E7\u1E09\u0188\u023C\uA73F\u2184',
+ },
+ {
+ base: 'd',
+ letters: '\u0064\u24D3\uFF44\u1E0B\u010F\u1E0D\u1E11\u1E13\u1E0F\u0111\u018C\u0256\u0257\uA77A',
+ },
+ { base: 'dz', letters: '\u01F3\u01C6' },
+ {
+ base: 'e',
+ letters: '\u0065\u24D4\uFF45\u00E8\u00E9\u00EA\u1EC1\u1EBF\u1EC5\u1EC3\u1EBD\u0113\u1E15\u1E17\u0115\u0117\u00EB\u1EBB\u011B\u0205\u0207\u1EB9\u1EC7\u0229\u1E1D\u0119\u1E19\u1E1B\u0247\u025B\u01DD',
+ },
+ { base: 'f', letters: '\u0066\u24D5\uFF46\u1E1F\u0192\uA77C' },
+ {
+ base: 'g',
+ letters: '\u0067\u24D6\uFF47\u01F5\u011D\u1E21\u011F\u0121\u01E7\u0123\u01E5\u0260\uA7A1\u1D79\uA77F',
+ },
+ {
+ base: 'h',
+ letters: '\u0068\u24D7\uFF48\u0125\u1E23\u1E27\u021F\u1E25\u1E29\u1E2B\u1E96\u0127\u2C68\u2C76\u0265',
+ },
+ { base: 'hv', letters: '\u0195' },
+ {
+ base: 'i',
+ letters: '\u0069\u24D8\uFF49\u00EC\u00ED\u00EE\u0129\u012B\u012D\u00EF\u1E2F\u1EC9\u01D0\u0209\u020B\u1ECB\u012F\u1E2D\u0268\u0131',
+ },
+ { base: 'j', letters: '\u006A\u24D9\uFF4A\u0135\u01F0\u0249' },
+ {
+ base: 'k',
+ letters: '\u006B\u24DA\uFF4B\u1E31\u01E9\u1E33\u0137\u1E35\u0199\u2C6A\uA741\uA743\uA745\uA7A3',
+ },
+ {
+ base: 'l',
+ letters: '\u006C\u24DB\uFF4C\u0140\u013A\u013E\u1E37\u1E39\u013C\u1E3D\u1E3B\u017F\u0142\u019A\u026B\u2C61\uA749\uA781\uA747',
+ },
+ { base: 'lj', letters: '\u01C9' },
+ { base: 'm', letters: '\u006D\u24DC\uFF4D\u1E3F\u1E41\u1E43\u0271\u026F' },
+ {
+ base: 'n',
+ letters: '\u006E\u24DD\uFF4E\u01F9\u0144\u00F1\u1E45\u0148\u1E47\u0146\u1E4B\u1E49\u019E\u0272\u0149\uA791\uA7A5',
+ },
+ { base: 'nj', letters: '\u01CC' },
+ {
+ base: 'o',
+ letters: '\u006F\u24DE\uFF4F\u00F2\u00F3\u00F4\u1ED3\u1ED1\u1ED7\u1ED5\u00F5\u1E4D\u022D\u1E4F\u014D\u1E51\u1E53\u014F\u022F\u0231\u00F6\u022B\u1ECF\u0151\u01D2\u020D\u020F\u01A1\u1EDD\u1EDB\u1EE1\u1EDF\u1EE3\u1ECD\u1ED9\u01EB\u01ED\u00F8\u01FF\u0254\uA74B\uA74D\u0275',
+ },
+ { base: 'oi', letters: '\u01A3' },
+ { base: 'ou', letters: '\u0223' },
+ { base: 'oo', letters: '\uA74F' },
+ {
+ base: 'p',
+ letters: '\u0070\u24DF\uFF50\u1E55\u1E57\u01A5\u1D7D\uA751\uA753\uA755',
+ },
+ { base: 'q', letters: '\u0071\u24E0\uFF51\u024B\uA757\uA759' },
+ {
+ base: 'r',
+ letters: '\u0072\u24E1\uFF52\u0155\u1E59\u0159\u0211\u0213\u1E5B\u1E5D\u0157\u1E5F\u024D\u027D\uA75B\uA7A7\uA783',
+ },
+ {
+ base: 's',
+ letters: '\u0073\u24E2\uFF53\u00DF\u015B\u1E65\u015D\u1E61\u0161\u1E67\u1E63\u1E69\u0219\u015F\u023F\uA7A9\uA785\u1E9B',
+ },
+ {
+ base: 't',
+ letters: '\u0074\u24E3\uFF54\u1E6B\u1E97\u0165\u1E6D\u021B\u0163\u1E71\u1E6F\u0167\u01AD\u0288\u2C66\uA787',
+ },
+ { base: 'tz', letters: '\uA729' },
+ {
+ base: 'u',
+ letters: '\u0075\u24E4\uFF55\u00F9\u00FA\u00FB\u0169\u1E79\u016B\u1E7B\u016D\u00FC\u01DC\u01D8\u01D6\u01DA\u1EE7\u016F\u0171\u01D4\u0215\u0217\u01B0\u1EEB\u1EE9\u1EEF\u1EED\u1EF1\u1EE5\u1E73\u0173\u1E77\u1E75\u0289',
+ },
+ { base: 'v', letters: '\u0076\u24E5\uFF56\u1E7D\u1E7F\u028B\uA75F\u028C' },
+ { base: 'vy', letters: '\uA761' },
+ {
+ base: 'w',
+ letters: '\u0077\u24E6\uFF57\u1E81\u1E83\u0175\u1E87\u1E85\u1E98\u1E89\u2C73',
+ },
+ { base: 'x', letters: '\u0078\u24E7\uFF58\u1E8B\u1E8D' },
+ {
+ base: 'y',
+ letters: '\u0079\u24E8\uFF59\u1EF3\u00FD\u0177\u1EF9\u0233\u1E8F\u00FF\u1EF7\u1E99\u1EF5\u01B4\u024F\u1EFF',
+ },
+ {
+ base: 'z',
+ letters: '\u007A\u24E9\uFF5A\u017A\u1E91\u017C\u017E\u1E93\u1E95\u01B6\u0225\u0240\u2C6C\uA763',
+ },
+ ];
+ /**
+ * Map of letters from diacritic variant to diacritless variant
+ * Contains lowercase and uppercase separatelly
+ *
+ * > "á" => "a"
+ * > "ě" => "e"
+ * > "Ă" => "A"
+ * > ...
+ *
+ * @public exported from `@promptbook/utils`
+ */
+ const DIACRITIC_VARIANTS_LETTERS = {};
+ // tslint:disable-next-line: prefer-for-of
+ for (let i = 0; i < defaultDiacriticsRemovalMap.length; i++) {
+ const letters = defaultDiacriticsRemovalMap[i].letters;
+ // tslint:disable-next-line: prefer-for-of
+ for (let j = 0; j < letters.length; j++) {
+ DIACRITIC_VARIANTS_LETTERS[letters[j]] = defaultDiacriticsRemovalMap[i].base;
+ }
+ }
+ // <- TODO: [🍓] Put to maker function to save execution time if not needed
+ /*
+ @see https://stackoverflow.com/questions/990904/remove-accents-diacritics-in-a-string-in-javascript
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+ /**
+ * Removes diacritic marks (accents) from characters in a string.
+ *
+ * @param input The string containing diacritics to be normalized.
+ * @returns The string with diacritics removed or normalized.
+ * @public exported from `@promptbook/utils`
+ */
+ function removeDiacritics(input) {
+ /*eslint no-control-regex: "off"*/
+ return input.replace(/[^\u0000-\u007E]/g, (a) => {
+ return DIACRITIC_VARIANTS_LETTERS[a] || a;
+ });
+ }
+ /**
+ * TODO: [Ж] Variant for cyrillic (and in general non-latin) letters
+ */
+
+ /**
+ * Counts number of words in the text
+ *
+ * @public exported from `@promptbook/utils`
+ */
+ function countWords(text) {
+ text = text.replace(/[\p{Extended_Pictographic}]/gu, 'a');
+ text = removeDiacritics(text);
+ // Add spaces before uppercase letters preceded by lowercase letters (for camelCase)
+ text = text.replace(/([a-z])([A-Z])/g, '$1 $2');
+ return text.split(/[^a-zа-я0-9]+/i).filter((word) => word.length > 0).length;
+ }
+ /**
+ * TODO: [🥴] Implement counting in formats - like JSON, CSV, XML,...
+ */
+
+ /**
+ * Helper of usage compute
+ *
+ * @param content the content of prompt or response
+ * @returns part of UsageCounts
+ *
+ * @private internal utility of LlmExecutionTools
+ */
+ function computeUsageCounts(content) {
+ return {
+ charactersCount: { value: countCharacters(content) },
+ wordsCount: { value: countWords(content) },
+ sentencesCount: { value: countSentences(content) },
+ linesCount: { value: countLines(content) },
+ paragraphsCount: { value: countParagraphs(content) },
+ pagesCount: { value: countPages(content) },
+ };
+ }
+
+ /**
+ * Represents the uncertain value
+ *
+ * @public exported from `@promptbook/core`
+ */
+ const ZERO_VALUE = $deepFreeze({ value: 0 });
+ /**
+ * Represents the uncertain value
+ *
+ * @public exported from `@promptbook/core`
+ */
+ const UNCERTAIN_ZERO_VALUE = $deepFreeze({ value: 0, isUncertain: true });
+ /**
+ * Represents the usage with no resources consumed
+ *
+ * @public exported from `@promptbook/core`
+ */
+ $deepFreeze({
+ price: ZERO_VALUE,
+ input: {
+ tokensCount: ZERO_VALUE,
+ charactersCount: ZERO_VALUE,
+ wordsCount: ZERO_VALUE,
+ sentencesCount: ZERO_VALUE,
+ linesCount: ZERO_VALUE,
+ paragraphsCount: ZERO_VALUE,
+ pagesCount: ZERO_VALUE,
+ },
+ output: {
+ tokensCount: ZERO_VALUE,
+ charactersCount: ZERO_VALUE,
+ wordsCount: ZERO_VALUE,
+ sentencesCount: ZERO_VALUE,
+ linesCount: ZERO_VALUE,
+ paragraphsCount: ZERO_VALUE,
+ pagesCount: ZERO_VALUE,
+ },
+ });
+ /**
+ * Represents the usage with unknown resources consumed
+ *
+ * @public exported from `@promptbook/core`
+ */
+ $deepFreeze({
+ price: UNCERTAIN_ZERO_VALUE,
+ input: {
+ tokensCount: UNCERTAIN_ZERO_VALUE,
+ charactersCount: UNCERTAIN_ZERO_VALUE,
+ wordsCount: UNCERTAIN_ZERO_VALUE,
+ sentencesCount: UNCERTAIN_ZERO_VALUE,
+ linesCount: UNCERTAIN_ZERO_VALUE,
+ paragraphsCount: UNCERTAIN_ZERO_VALUE,
+ pagesCount: UNCERTAIN_ZERO_VALUE,
+ },
+ output: {
+ tokensCount: UNCERTAIN_ZERO_VALUE,
+ charactersCount: UNCERTAIN_ZERO_VALUE,
+ wordsCount: UNCERTAIN_ZERO_VALUE,
+ sentencesCount: UNCERTAIN_ZERO_VALUE,
+ linesCount: UNCERTAIN_ZERO_VALUE,
+ paragraphsCount: UNCERTAIN_ZERO_VALUE,
+ pagesCount: UNCERTAIN_ZERO_VALUE,
+ },
+ });
+ /**
+ * Note: [💞] Ignore a discrepancy between file name and entity name
+ */
+
+ /**
+ * Make UncertainNumber
+ *
+ * @param value value of the uncertain number, if `NaN` or `undefined`, it will be set to 0 and `isUncertain=true`
+ * @param isUncertain if `true`, the value is uncertain, otherwise depends on the value
+ *
+ * @private utility for initializating UncertainNumber
+ */
+ function uncertainNumber(value, isUncertain) {
+ if (value === null || value === undefined || Number.isNaN(value)) {
+ return UNCERTAIN_ZERO_VALUE;
+ }
+ if (isUncertain === true) {
+ return { value, isUncertain };
+ }
+ return { value };
+ }
+
+ /**
+ * Function computeUsage will create price per one token based on the string value found on openai page
+ *
+ * @private within the repository, used only as internal helper for `OPENAI_MODELS`
+ */
+ function computeUsage(value) {
+ const [price, tokens] = value.split(' / ');
+ return parseFloat(price.replace('$', '')) / parseFloat(tokens.replace('M tokens', '')) / 1000000;
+ }
+
+ /**
+ * List of available OpenAI models with pricing
+ *
+ * Note: Done at 2025-05-06
+ *
+ * @see https://platform.openai.com/docs/models/
+ * @see https://openai.com/api/pricing/
+ * @public exported from `@promptbook/openai`
+ */
+ const OPENAI_MODELS = exportJson({
+ name: 'OPENAI_MODELS',
+ value: [
+ /*/
+ {
+ modelTitle: 'dall-e-3',
+ modelName: 'dall-e-3',
+ },
+ /**/
+ /*/
+ {
+ modelTitle: 'whisper-1',
+ modelName: 'whisper-1',
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'COMPLETION',
+ modelTitle: 'davinci-002',
+ modelName: 'davinci-002',
+ modelDescription: 'Legacy completion model with strong performance on text generation tasks. Optimized for complex instructions and longer outputs.',
+ pricing: {
+ prompt: computeUsage(`$2.00 / 1M tokens`),
+ output: computeUsage(`$2.00 / 1M tokens`),
+ },
+ },
+ /**/
+ /*/
+ {
+ modelTitle: 'dall-e-2',
+ modelName: 'dall-e-2',
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-3.5-turbo-16k',
+ modelName: 'gpt-3.5-turbo-16k',
+ modelDescription: 'GPT-3.5 Turbo with extended 16k token context length for handling longer conversations and documents.',
+ pricing: {
+ prompt: computeUsage(`$3.00 / 1M tokens`),
+ output: computeUsage(`$4.00 / 1M tokens`),
+ },
+ },
+ /**/
+ /*/
+ {
+ modelTitle: 'tts-1-hd-1106',
+ modelName: 'tts-1-hd-1106',
+ },
+ /**/
+ /*/
+ {
+ modelTitle: 'tts-1-hd',
+ modelName: 'tts-1-hd',
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-4',
+ modelName: 'gpt-4',
+ modelDescription: 'GPT-4 is a powerful language model with enhanced reasoning, instruction-following capabilities, and 8K context window. Optimized for complex tasks requiring deep understanding.',
+ pricing: {
+ prompt: computeUsage(`$30.00 / 1M tokens`),
+ output: computeUsage(`$60.00 / 1M tokens`),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-4-32k',
+ modelName: 'gpt-4-32k',
+ modelDescription: 'Extended context version of GPT-4 with a 32K token window for processing very long inputs and generating comprehensive responses for complex tasks.',
+ pricing: {
+ prompt: computeUsage(`$60.00 / 1M tokens`),
+ output: computeUsage(`$120.00 / 1M tokens`),
+ },
+ },
+ /**/
+ /*/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-4-0613',
+ modelName: 'gpt-4-0613',
+ pricing: {
+ prompt: computeUsage(` / 1M tokens`),
+ output: computeUsage(` / 1M tokens`),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-4-turbo-2024-04-09',
+ modelName: 'gpt-4-turbo-2024-04-09',
+ modelDescription: 'Latest stable GPT-4 Turbo model from April 2024 with enhanced reasoning and context handling capabilities. Offers 128K context window and improved performance.',
+ pricing: {
+ prompt: computeUsage(`$10.00 / 1M tokens`),
+ output: computeUsage(`$30.00 / 1M tokens`),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-3.5-turbo-1106',
+ modelName: 'gpt-3.5-turbo-1106',
+ modelDescription: 'November 2023 version of GPT-3.5 Turbo with improved instruction following and a 16K token context window.',
+ pricing: {
+ prompt: computeUsage(`$1.00 / 1M tokens`),
+ output: computeUsage(`$2.00 / 1M tokens`),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-4-turbo',
+ modelName: 'gpt-4-turbo',
+ modelDescription: 'More capable model than GPT-4 with improved instruction following, function calling and a 128K token context window for handling very large documents.',
+ pricing: {
+ prompt: computeUsage(`$10.00 / 1M tokens`),
+ output: computeUsage(`$30.00 / 1M tokens`),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'COMPLETION',
+ modelTitle: 'gpt-3.5-turbo-instruct-0914',
+ modelName: 'gpt-3.5-turbo-instruct-0914',
+ modelDescription: 'September 2023 version of GPT-3.5 Turbo optimized for completion-style instruction following with a 4K context window.',
+ pricing: {
+ prompt: computeUsage(`$1.50 / 1M tokens`),
+ output: computeUsage(`$2.00 / 1M tokens`), // <- For gpt-3.5-turbo-instruct
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'COMPLETION',
+ modelTitle: 'gpt-3.5-turbo-instruct',
+ modelName: 'gpt-3.5-turbo-instruct',
+ modelDescription: 'Optimized version of GPT-3.5 for completion-style API with good instruction following and a 4K token context window.',
+ pricing: {
+ prompt: computeUsage(`$1.50 / 1M tokens`),
+ output: computeUsage(`$2.00 / 1M tokens`),
+ },
+ },
+ /**/
+ /*/
+ {
+ modelTitle: 'tts-1',
+ modelName: 'tts-1',
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-3.5-turbo',
+ modelName: 'gpt-3.5-turbo',
+ modelDescription: 'Latest version of GPT-3.5 Turbo with improved performance and instruction following capabilities. Default 4K context window with options for 16K.',
+ pricing: {
+ prompt: computeUsage(`$0.50 / 1M tokens`),
+ output: computeUsage(`$1.50 / 1M tokens`),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-3.5-turbo-0301',
+ modelName: 'gpt-3.5-turbo-0301',
+ modelDescription: 'March 2023 version of GPT-3.5 Turbo with a 4K token context window. Legacy model maintained for backward compatibility.',
+ pricing: {
+ prompt: computeUsage(`$1.50 / 1M tokens`),
+ output: computeUsage(`$2.00 / 1M tokens`),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'COMPLETION',
+ modelTitle: 'babbage-002',
+ modelName: 'babbage-002',
+ modelDescription: 'Efficient legacy completion model with a good balance of performance and speed. Suitable for straightforward text generation tasks.',
+ pricing: {
+ prompt: computeUsage(`$0.40 / 1M tokens`),
+ output: computeUsage(`$0.40 / 1M tokens`),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-4-1106-preview',
+ modelName: 'gpt-4-1106-preview',
+ modelDescription: 'November 2023 preview version of GPT-4 Turbo with improved instruction following and a 128K token context window.',
+ pricing: {
+ prompt: computeUsage(`$10.00 / 1M tokens`),
+ output: computeUsage(`$30.00 / 1M tokens`),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-4-0125-preview',
+ modelName: 'gpt-4-0125-preview',
+ modelDescription: 'January 2024 preview version of GPT-4 Turbo with improved reasoning capabilities and a 128K token context window.',
+ pricing: {
+ prompt: computeUsage(`$10.00 / 1M tokens`),
+ output: computeUsage(`$30.00 / 1M tokens`),
+ },
+ },
+ /**/
+ /*/
+ {
+ modelTitle: 'tts-1-1106',
+ modelName: 'tts-1-1106',
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-3.5-turbo-0125',
+ modelName: 'gpt-3.5-turbo-0125',
+ modelDescription: 'January 2024 version of GPT-3.5 Turbo with improved reasoning capabilities and a 16K token context window.',
+ pricing: {
+ prompt: computeUsage(`$0.50 / 1M tokens`),
+ output: computeUsage(`$1.50 / 1M tokens`),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-4-turbo-preview',
+ modelName: 'gpt-4-turbo-preview',
+ modelDescription: 'Preview version of GPT-4 Turbo that points to the latest model version. Features improved instruction following, 128K token context window and lower latency.',
+ pricing: {
+ prompt: computeUsage(`$10.00 / 1M tokens`),
+ output: computeUsage(`$30.00 / 1M tokens`),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'EMBEDDING',
+ modelTitle: 'text-embedding-3-large',
+ modelName: 'text-embedding-3-large',
+ modelDescription: "OpenAI's most capable text embedding model designed for high-quality embeddings for complex similarity tasks and information retrieval.",
+ pricing: {
+ prompt: computeUsage(`$0.13 / 1M tokens`),
+ // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
+ output: 0, // <- Note: [🆖] In Embedding models you dont pay for output
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'EMBEDDING',
+ modelTitle: 'text-embedding-3-small',
+ modelName: 'text-embedding-3-small',
+ modelDescription: 'Cost-effective embedding model with good performance for simpler tasks like text similarity and retrieval. Good balance of quality and efficiency.',
+ pricing: {
+ prompt: computeUsage(`$0.02 / 1M tokens`),
+ // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
+ output: 0, // <- Note: [🆖] In Embedding models you dont pay for output
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-3.5-turbo-0613',
+ modelName: 'gpt-3.5-turbo-0613',
+ modelDescription: 'June 2023 version of GPT-3.5 Turbo with function calling capabilities and a 4K token context window.',
+ pricing: {
+ prompt: computeUsage(`$1.50 / 1M tokens`),
+ output: computeUsage(`$2.00 / 1M tokens`),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'EMBEDDING',
+ modelTitle: 'text-embedding-ada-002',
+ modelName: 'text-embedding-ada-002',
+ modelDescription: 'Legacy text embedding model suitable for text similarity and retrieval augmented generation use cases. Replaced by newer embedding-3 models.',
+ pricing: {
+ prompt: computeUsage(`$0.1 / 1M tokens`),
+ // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
+ output: 0, // <- Note: [🆖] In Embedding models you dont pay for output
+ },
+ },
+ /**/
+ /*/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-4-1106-vision-preview',
+ modelName: 'gpt-4-1106-vision-preview',
+ },
+ /**/
+ /*/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-4-vision-preview',
+ modelName: 'gpt-4-vision-preview',
+ pricing: {
+ prompt: computeUsage(`$10.00 / 1M tokens`),
+ output: computeUsage(`$30.00 / 1M tokens`),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-4o-2024-05-13',
+ modelName: 'gpt-4o-2024-05-13',
+ modelDescription: 'May 2024 version of GPT-4o with enhanced multimodal capabilities, improved reasoning, and optimized for vision, audio and chat at lower latencies.',
+ pricing: {
+ prompt: computeUsage(`$5.00 / 1M tokens`),
+ output: computeUsage(`$15.00 / 1M tokens`),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-4o',
+ modelName: 'gpt-4o',
+ modelDescription: "OpenAI's most advanced multimodal model optimized for performance, speed, and cost. Capable of vision, reasoning, and high quality text generation.",
+ pricing: {
+ prompt: computeUsage(`$5.00 / 1M tokens`),
+ output: computeUsage(`$15.00 / 1M tokens`),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-4o-mini',
+ modelName: 'gpt-4o-mini',
+ modelDescription: 'Smaller, more cost-effective version of GPT-4o with good performance across text, vision, and audio tasks at reduced complexity.',
+ pricing: {
+ prompt: computeUsage(`$0.15 / 1M tokens`),
+ output: computeUsage(`$0.60 / 1M tokens`),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'o1-preview',
+ modelName: 'o1-preview',
+ modelDescription: 'Advanced reasoning model with exceptional performance on complex logical, mathematical, and analytical tasks. Built for deep reasoning and specialized professional tasks.',
+ pricing: {
+ prompt: computeUsage(`$15.00 / 1M tokens`),
+ output: computeUsage(`$60.00 / 1M tokens`),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'o1-preview-2024-09-12',
+ modelName: 'o1-preview-2024-09-12',
+ modelDescription: 'September 2024 version of O1 preview with specialized reasoning capabilities for complex tasks requiring precise analytical thinking.',
+ // <- TODO: [💩] Some better system to organize these date suffixes and versions
+ pricing: {
+ prompt: computeUsage(`$15.00 / 1M tokens`),
+ output: computeUsage(`$60.00 / 1M tokens`),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'o1-mini',
+ modelName: 'o1-mini',
+ modelDescription: 'Smaller, cost-effective version of the O1 model with good performance on reasoning tasks while maintaining efficiency for everyday analytical use.',
+ pricing: {
+ prompt: computeUsage(`$3.00 / 1M tokens`),
+ output: computeUsage(`$12.00 / 1M tokens`),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'o1',
+ modelName: 'o1',
+ modelDescription: "OpenAI's advanced reasoning model focused on logic and problem-solving. Designed for complex analytical tasks with rigorous step-by-step reasoning. 128K context window.",
+ pricing: {
+ prompt: computeUsage(`$15.00 / 1M tokens`),
+ output: computeUsage(`$60.00 / 1M tokens`),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'o3-mini',
+ modelName: 'o3-mini',
+ modelDescription: 'Cost-effective reasoning model optimized for academic and scientific problem-solving. Efficient performance on STEM tasks with deep mathematical and scientific knowledge. 128K context window.',
+ pricing: {
+ prompt: computeUsage(`$3.00 / 1M tokens`),
+ output: computeUsage(`$12.00 / 1M tokens`),
+ // <- TODO: !! Unsure, check the pricing
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'o1-mini-2024-09-12',
+ modelName: 'o1-mini-2024-09-12',
+ modelDescription: "September 2024 version of O1-mini with balanced reasoning capabilities and cost-efficiency. Good for analytical tasks that don't require the full O1 model.",
+ pricing: {
+ prompt: computeUsage(`$3.00 / 1M tokens`),
+ output: computeUsage(`$12.00 / 1M tokens`),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-3.5-turbo-16k-0613',
+ modelName: 'gpt-3.5-turbo-16k-0613',
+ modelDescription: 'June 2023 version of GPT-3.5 Turbo with extended 16k token context window for processing longer conversations and documents.',
+ pricing: {
+ prompt: computeUsage(`$3.00 / 1M tokens`),
+ output: computeUsage(`$4.00 / 1M tokens`),
+ },
+ },
+ /**/
+ // <- [🕕]
+ ],
+ });
1704
+ /**
1705
+ * Note: [🤖] Add models of new variant
1706
+ * TODO: [🧠] Some mechanism to propagate unsureness
1707
+ * TODO: [🎰] Some mechanism to auto-update available models
1708
+ * TODO: [🎰][👮‍♀️] Make this list dynamic - dynamically can be listed modelNames but not modelVariant, legacy status, context length and pricing
1709
+ * TODO: [🧠][👮‍♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,...
1710
+ * @see https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4
1711
+ * @see https://openai.com/api/pricing/
1712
+ * @see /other/playground/playground.ts
1713
+ * TODO: [🍓][💩] Make better
1714
+ * TODO: Change model titles to human-friendly form, e.g. "gpt-4-turbo-2024-04-09" -> "GPT-4 Turbo (2024-04-09)"
1715
+ * TODO: [🚸] Not all models are compatible with JSON mode, add this information here and use it
1716
+ * Note: [💞] Ignore a discrepancy between file name and entity name
1717
+ */
1718
+
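/*
 * A minimal usage sketch (illustrative, not part of the package): the pricing
 * entries above hold a per-token price, so `computeUsage('$15.00 / 1M tokens')`
 * yields 0.000015 — this matches how `computeOpenAiUsage` below multiplies raw
 * token counts by `pricing.prompt` and `pricing.output`. Token counts here are
 * hypothetical:
 *
 * const o1 = OPENAI_MODELS.find(({ modelName }) => modelName === 'o1');
 * const estimatedPrice = 1200 * o1.pricing.prompt + 400 * o1.pricing.output;
 * console.log(estimatedPrice.toFixed(4)); // -> '0.0420', i.e. ≈ $0.04
 */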
1719
+ /**
1720
+ * Computes the usage of the OpenAI API based on the response from OpenAI
1721
+ *
1722
+ * @param promptContent The content of the prompt
1723
+ * @param resultContent The content of the result (for embedding prompts or failed prompts pass an empty string)
1724
+ * @param rawResponse The raw response from OpenAI API
1725
+ * @throws {PipelineExecutionError} If the usage is not defined in the response from OpenAI
1726
+ * @private internal utility of `OpenAiExecutionTools`
1727
+ */
1728
+ function computeOpenAiUsage(promptContent, // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
1729
+ resultContent, rawResponse) {
1730
+ var _a, _b;
1731
+ if (rawResponse.usage === undefined) {
1732
+ throw new PipelineExecutionError('The usage is not defined in the response from OpenAI');
1733
+ }
1734
+ if (((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.prompt_tokens) === undefined) {
1735
+ throw new PipelineExecutionError('In OpenAI response `usage.prompt_tokens` not defined');
1736
+ }
1737
+ const inputTokens = rawResponse.usage.prompt_tokens;
1738
+ const outputTokens = ((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completion_tokens) || 0;
1739
+ let isUncertain = false;
1740
+ let modelInfo = OPENAI_MODELS.find((model) => model.modelName === rawResponse.model);
1741
+ if (modelInfo === undefined) {
1742
+ // Note: Model is not in the list of known models, fall back to the model family and mark the price as uncertain
1743
+ modelInfo = OPENAI_MODELS.find((model) => (rawResponse.model || SALT_NONCE).startsWith(model.modelName));
1744
+ if (modelInfo !== undefined) {
1745
+ isUncertain = true;
1746
+ }
1747
+ }
1748
+ let price;
1749
+ if (modelInfo === undefined || modelInfo.pricing === undefined) {
1750
+ price = uncertainNumber();
1751
+ }
1752
+ else {
1753
+ price = uncertainNumber(inputTokens * modelInfo.pricing.prompt + outputTokens * modelInfo.pricing.output, isUncertain);
1754
+ }
1755
+ return {
1756
+ price,
1757
+ input: {
1758
+ tokensCount: uncertainNumber(rawResponse.usage.prompt_tokens),
1759
+ ...computeUsageCounts(promptContent),
1760
+ },
1761
+ output: {
1762
+ tokensCount: uncertainNumber(outputTokens),
1763
+ ...computeUsageCounts(resultContent),
1764
+ },
1765
+ };
1766
+ }
1767
+ /**
1768
+ * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
1769
+ */
1770
+
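/*
 * A hedged sketch (not part of the package) of calling `computeOpenAiUsage`
 * with a mocked response; the field shapes follow the checks above, the
 * values are invented:
 *
 * const usage = computeOpenAiUsage('What is a prompt?', 'A prompt is ...', {
 *     model: 'gpt-4o',
 *     usage: { prompt_tokens: 12, completion_tokens: 34 },
 * });
 * // -> usage.price is wrapped by `uncertainNumber`, usage.input.tokensCount
 * //    reflects prompt_tokens (12), usage.output.tokensCount reflects
 * //    completion_tokens (34), plus the counts from `computeUsageCounts`
 */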
1771
+ /**
1772
+ * Execution Tools for calling OpenAI API
1773
+ *
1774
+ * @public exported from `@promptbook/openai`
1775
+ */
1776
+ class OpenAiExecutionTools {
1777
+ /**
1778
+ * Creates OpenAI Execution Tools.
1779
+ *
1780
+ * @param options Options; the OpenAI-relevant ones are passed directly to the OpenAI client
1781
+ */
1782
+ constructor(options) {
1783
+ this.options = options;
1784
+ /**
1785
+ * OpenAI API client.
1786
+ */
1787
+ this.client = null;
1788
+ // TODO: Allow configuring rate limits via options
1789
+ this.limiter = new Bottleneck__default["default"]({
1790
+ minTime: 60000 / (this.options.maxRequestsPerMinute || DEFAULT_MAX_REQUESTS_PER_MINUTE),
1791
+ });
1792
+ }
1793
+ get title() {
1794
+ return 'OpenAI';
1795
+ }
1796
+ get description() {
1797
+ return 'Use all models provided by OpenAI';
1798
+ }
1799
+ async getClient() {
1800
+ if (this.client === null) {
1801
+ // Note: Passing only OpenAI-relevant options to the OpenAI constructor
1802
+ const openAiOptions = { ...this.options };
1803
+ delete openAiOptions.isVerbose;
1804
+ delete openAiOptions.userId;
1805
+ this.client = new OpenAI__default["default"](openAiOptions);
1806
+ }
1807
+ return this.client;
1808
+ }
1809
+ /*
1810
+ Note: Commenting this out to avoid circular dependency
1811
+ /**
1812
+ * Create (sub)tools for calling OpenAI API Assistants
1813
+ *
1814
+ * @param assistantId Which assistant to use
1815
+ * @returns Tools for calling OpenAI API Assistants with same token
1816
+ * /
1817
+ public createAssistantSubtools(assistantId: string_token): OpenAiAssistantExecutionTools {
1818
+ return new OpenAiAssistantExecutionTools({ ...this.options, assistantId });
1819
+ }
1820
+ */
1821
+ /**
1822
+ * Check the `options` passed to `constructor`
1823
+ */
1824
+ async checkConfiguration() {
1825
+ await this.getClient();
1826
+ // TODO: [🎍] Do here a real check that API is online, working and API key is correct
1827
+ }
1828
+ /**
1829
+ * List all available OpenAI models that can be used
1830
+ */
1831
+ async listModels() {
1832
+ /*
1833
+ Note: Dynamic listing of the models
1834
+ const models = await this.openai.models.list({});
1835
+
1836
+ console.log({ models });
1837
+ console.log(models.data);
1838
+ */
1839
+ const client = await this.getClient();
1840
+ const rawModelsList = await client.models.list();
1841
+ const availableModels = rawModelsList.data
1842
+ .sort((a, b) => (a.created > b.created ? 1 : -1))
1843
+ .map((modelFromApi) => {
1844
+ // TODO: !!!! What about other model compatibilities?
1845
+ const modelFromList = OPENAI_MODELS.find(({ modelName }) => modelName === modelFromApi.id ||
1846
+ modelName.startsWith(modelFromApi.id) ||
1847
+ modelFromApi.id.startsWith(modelName));
1848
+ if (modelFromList !== undefined) {
1849
+ return modelFromList;
1850
+ }
1851
+ return {
1852
+ modelVariant: 'CHAT',
1853
+ modelTitle: modelFromApi.id,
1854
+ modelName: modelFromApi.id,
1855
+ modelDescription: '',
1856
+ };
1857
+ });
1858
+ return availableModels;
1859
+ }
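/*
 * Usage sketch (illustrative), given an instance `tools` of this class
 * (see the constructor sketch after the class):
 *
 * const models = await tools.listModels();
 * console.log(models.map(({ modelName }) => modelName));
 */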
1860
+ /**
1861
+ * Calls OpenAI API to use a chat model.
1862
+ */
1863
+ async callChatModel(prompt) {
1864
+ var _a;
1865
+ if (this.options.isVerbose) {
1866
+ console.info('💬 OpenAI callChatModel call', { prompt });
1867
+ }
1868
+ const { content, parameters, modelRequirements, format } = prompt;
1869
+ const client = await this.getClient();
1870
+ // TODO: [☂] Use here more modelRequirements
1871
+ if (modelRequirements.modelVariant !== 'CHAT') {
1872
+ throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
1873
+ }
1874
+ const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
1875
+ const modelSettings = {
1876
+ model: modelName,
1877
+ max_tokens: modelRequirements.maxTokens,
1878
+ // <- TODO: [🌾] Make some global max cap for maxTokens
1879
+ temperature: modelRequirements.temperature,
1880
+ // <- TODO: [🈁] Use `seed` here AND/OR use `isDeterministic` for the entire execution tools
1881
+ // <- Note: [🧆]
1882
+ }; // <- TODO: [💩] Guard here types better
1883
+ if (format === 'JSON') {
1884
+ modelSettings.response_format = {
1885
+ type: 'json_object',
1886
+ };
1887
+ }
1888
+ // <- TODO: [🚸] Not all models are compatible with JSON mode
1889
+ // > 'response_format' of type 'json_object' is not supported with this model.
1890
+ const rawPromptContent = templateParameters(content, { ...parameters, modelName });
1891
+ const rawRequest = {
1892
+ ...modelSettings,
1893
+ messages: [
1894
+ ...(modelRequirements.systemMessage === undefined
1895
+ ? []
1896
+ : [
1897
+ {
1898
+ role: 'system',
1899
+ content: modelRequirements.systemMessage,
1900
+ },
1901
+ ]),
1902
+ {
1903
+ role: 'user',
1904
+ content: rawPromptContent,
1905
+ },
1906
+ ],
1907
+ user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString(),
1908
+ };
1909
+ const start = $getCurrentDate();
1910
+ if (this.options.isVerbose) {
1911
+ console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
1912
+ }
1913
+ const rawResponse = await this.limiter
1914
+ .schedule(() => client.chat.completions.create(rawRequest))
1915
+ .catch((error) => {
1916
+ assertsError(error);
1917
+ if (this.options.isVerbose) {
1918
+ console.info(colors__default["default"].bgRed('error'), error);
1919
+ }
1920
+ throw error;
1921
+ });
1922
+ if (this.options.isVerbose) {
1923
+ console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
1924
+ }
1925
+ const complete = $getCurrentDate();
1926
+ if (!rawResponse.choices[0]) {
1927
+ throw new PipelineExecutionError('No choices from OpenAI');
1928
+ }
1929
+ if (rawResponse.choices.length > 1) {
1930
+ // TODO: Maybe this should be only a warning
1931
+ throw new PipelineExecutionError('More than one choice from OpenAI');
1932
+ }
1933
+ const resultContent = rawResponse.choices[0].message.content;
1934
+ const usage = computeOpenAiUsage(content || '', resultContent || '', rawResponse);
1935
+ if (resultContent === null) {
1936
+ throw new PipelineExecutionError('No response message from OpenAI');
1937
+ }
1938
+ return exportJson({
1939
+ name: 'promptResult',
1940
+ message: `Result of \`OpenAiExecutionTools.callChatModel\``,
1941
+ order: [],
1942
+ value: {
1943
+ content: resultContent,
1944
+ modelName: rawResponse.model || modelName,
1945
+ timing: {
1946
+ start,
1947
+ complete,
1948
+ },
1949
+ usage,
1950
+ rawPromptContent,
1951
+ rawRequest,
1952
+ rawResponse,
1953
+ // <- [🗯]
1954
+ },
1955
+ });
1956
+ }
1957
+ /**
1958
+ * Calls OpenAI API to use a complete model.
1959
+ */
1960
+ async callCompletionModel(prompt) {
1961
+ var _a;
1962
+ if (this.options.isVerbose) {
1963
+ console.info('🖋 OpenAI callCompletionModel call', { prompt });
1964
+ }
1965
+ const { content, parameters, modelRequirements } = prompt;
1966
+ const client = await this.getClient();
1967
+ // TODO: [☂] Use here more modelRequirements
1968
+ if (modelRequirements.modelVariant !== 'COMPLETION') {
1969
+ throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
1970
+ }
1971
+ const modelName = modelRequirements.modelName || this.getDefaultCompletionModel().modelName;
1972
+ const modelSettings = {
1973
+ model: modelName,
1974
+ max_tokens: modelRequirements.maxTokens || 2000,
1975
+ // <- TODO: [🌾] Make some global max cap for maxTokens
1976
+ temperature: modelRequirements.temperature,
1977
+ // <- TODO: [🈁] Use `seed` here AND/OR use `isDeterministic` for the entire execution tools
1978
+ // <- Note: [🧆]
1979
+ };
1980
+ const rawPromptContent = templateParameters(content, { ...parameters, modelName });
1981
+ const rawRequest = {
1982
+ ...modelSettings,
1983
+ prompt: rawPromptContent,
1984
+ user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString(),
1985
+ };
1986
+ const start = $getCurrentDate();
1987
+ if (this.options.isVerbose) {
1988
+ console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
1989
+ }
1990
+ const rawResponse = await this.limiter
1991
+ .schedule(() => client.completions.create(rawRequest))
1992
+ .catch((error) => {
1993
+ assertsError(error);
1994
+ if (this.options.isVerbose) {
1995
+ console.info(colors__default["default"].bgRed('error'), error);
1996
+ }
1997
+ throw error;
1998
+ });
1999
+ if (this.options.isVerbose) {
2000
+ console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
2001
+ }
2002
+ const complete = $getCurrentDate();
2003
+ if (!rawResponse.choices[0]) {
2004
+ throw new PipelineExecutionError('No choices from OpenAI');
2005
+ }
2006
+ if (rawResponse.choices.length > 1) {
2007
+ // TODO: Maybe this should be only a warning
2008
+ throw new PipelineExecutionError('More than one choice from OpenAI');
2009
+ }
2010
+ const resultContent = rawResponse.choices[0].text;
2011
+ const usage = computeOpenAiUsage(content || '', resultContent || '', rawResponse);
2012
+ return exportJson({
2013
+ name: 'promptResult',
2014
+ message: `Result of \`OpenAiExecutionTools.callCompletionModel\``,
2015
+ order: [],
2016
+ value: {
2017
+ content: resultContent,
2018
+ modelName: rawResponse.model || modelName,
2019
+ timing: {
2020
+ start,
2021
+ complete,
2022
+ },
2023
+ usage,
2024
+ rawPromptContent,
2025
+ rawRequest,
2026
+ rawResponse,
2027
+ // <- [🗯]
2028
+ },
2029
+ });
2030
+ }
2031
+ /**
2032
+ * Calls OpenAI API to use an embedding model
2033
+ */
2034
+ async callEmbeddingModel(prompt) {
2035
+ if (this.options.isVerbose) {
2036
+ console.info('🖋 OpenAI embedding call', { prompt });
2037
+ }
2038
+ const { content, parameters, modelRequirements } = prompt;
2039
+ const client = await this.getClient();
2040
+ // TODO: [☂] Use here more modelRequirements
2041
+ if (modelRequirements.modelVariant !== 'EMBEDDING') {
2042
+ throw new PipelineExecutionError('Use embed only for EMBEDDING variant');
2043
+ }
2044
+ const modelName = modelRequirements.modelName || this.getDefaultEmbeddingModel().modelName;
2045
+ const rawPromptContent = templateParameters(content, { ...parameters, modelName });
2046
+ const rawRequest = {
2047
+ input: rawPromptContent,
2048
+ model: modelName,
2049
+ };
2050
+ const start = $getCurrentDate();
2051
+ if (this.options.isVerbose) {
2052
+ console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
2053
+ }
2054
+ const rawResponse = await this.limiter
2055
+ .schedule(() => client.embeddings.create(rawRequest))
2056
+ .catch((error) => {
2057
+ assertsError(error);
2058
+ if (this.options.isVerbose) {
2059
+ console.info(colors__default["default"].bgRed('error'), error);
2060
+ }
2061
+ throw error;
2062
+ });
2063
+ if (this.options.isVerbose) {
2064
+ console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
2065
+ }
2066
+ const complete = $getCurrentDate();
2067
+ if (rawResponse.data.length !== 1) {
2068
+ throw new PipelineExecutionError(`Expected exactly 1 data item in response, got ${rawResponse.data.length}`);
2069
+ }
2070
+ const resultContent = rawResponse.data[0].embedding;
2071
+ const usage = computeOpenAiUsage(content || '', '',
2072
+ // <- Note: Embedding does not have result content
2073
+ rawResponse);
2074
+ return exportJson({
2075
+ name: 'promptResult',
2076
+ message: `Result of \`OpenAiExecutionTools.callEmbeddingModel\``,
2077
+ order: [],
2078
+ value: {
2079
+ content: resultContent,
2080
+ modelName: rawResponse.model || modelName,
2081
+ timing: {
2082
+ start,
2083
+ complete,
2084
+ },
2085
+ usage,
2086
+ rawPromptContent,
2087
+ rawRequest,
2088
+ rawResponse,
2089
+ // <- [🗯]
2090
+ },
2091
+ });
2092
+ }
2093
+ // <- Note: [🤖] callXxxModel
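/*
 * Usage sketch for the embedding variant (illustrative; any EMBEDDING model
 * from `OPENAI_MODELS` would do here):
 *
 * const { content: vector } = await tools.callEmbeddingModel({
 *     content: 'Hello',
 *     parameters: {},
 *     modelRequirements: { modelVariant: 'EMBEDDING', modelName: 'text-embedding-3-large' },
 * });
 * // `vector` is the raw embedding array (rawResponse.data[0].embedding)
 */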
2094
+ /**
2095
+ * Get the model that should be used as default
2096
+ */
2097
+ getDefaultModel(defaultModelName) {
2098
+ // Note: Match exact or prefix for model families
2099
+ const model = OPENAI_MODELS.find(({ modelName }) => modelName === defaultModelName || modelName.startsWith(defaultModelName));
2100
+ if (model === undefined) {
2101
+ throw new UnexpectedError(spaceTrim__default["default"]((block) => `
2102
+ Cannot find model "${defaultModelName}" in OpenAI models, which should be used as the default.
2103
+
2104
+ Available models:
2105
+ ${block(OPENAI_MODELS.map(({ modelName }) => `- "${modelName}"`).join('\n'))}
2106
+
2107
+ `));
2108
+ }
2109
+ return model;
2110
+ }
2111
+ /**
2112
+ * Default model for chat variant.
2113
+ */
2114
+ getDefaultChatModel() {
2115
+ return this.getDefaultModel('gpt-4o');
2116
+ }
2117
+ /**
2118
+ * Default model for completion variant.
2119
+ */
2120
+ getDefaultCompletionModel() {
2121
+ return this.getDefaultModel('gpt-3.5-turbo-instruct');
2122
+ }
2123
+ /**
2124
+ * Default model for embedding variant.
2125
+ */
2126
+ getDefaultEmbeddingModel() {
2127
+ return this.getDefaultModel('text-embedding-3-large');
2128
+ }
2129
+ }
2130
+ /**
2131
+ * TODO: [🧠][🧙‍♂️] Maybe there can be some wizard for those who want to use just OpenAI
2132
+ * TODO: Maybe create some common util for callChatModel and callCompletionModel
2133
+ * TODO: Maybe make custom OpenAiError
2134
+ * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
2135
+ * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
2136
+ */
2137
+
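/*
 * A usage sketch (illustrative, assuming a valid key in the OPENAI_API_KEY
 * environment variable and an async context; options other than the ones
 * consumed above are passed straight to the OpenAI client):
 *
 * const tools = new OpenAiExecutionTools({ apiKey: process.env.OPENAI_API_KEY });
 * await tools.checkConfiguration();
 * const result = await tools.callChatModel({
 *     content: 'Summarize {topic} in one sentence.',
 *     parameters: { topic: 'rate limiting' },
 *     modelRequirements: { modelVariant: 'CHAT', modelName: 'gpt-4o', temperature: 0.2 },
 * });
 * console.log(result.content);
 */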
2138
+ /**
2139
+ * Execution Tools for calling OpenAI API
2140
+ *
2141
+ * Note: This can be also used for other OpenAI compatible APIs, like Ollama
2142
+ *
2143
+ * @public exported from `@promptbook/openai`
2144
+ */
2145
+ const createOpenAiExecutionTools = Object.assign((options) => {
2146
+ // TODO: [🧠][main] !!4 If browser, auto add `dangerouslyAllowBrowser`
2147
+ if (($isRunningInBrowser() || $isRunningInWebWorker()) && !options.dangerouslyAllowBrowser) {
2148
+ options = { ...options, dangerouslyAllowBrowser: true };
2149
+ }
2150
+ return new OpenAiExecutionTools(options);
2151
+ }, {
2152
+ packageName: '@promptbook/openai',
2153
+ className: 'OpenAiExecutionTools',
2154
+ });
2155
+ /**
2156
+ * TODO: [🦺] Is there some way how to put `packageName` and `className` on top and function definition on bottom?
2157
+ * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
2158
+ */
2159
+
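/*
 * Factory usage sketch (illustrative): behaves like `new
 * OpenAiExecutionTools(options)`, except that in a browser or a web worker
 * the factory adds `dangerouslyAllowBrowser: true` automatically:
 *
 * const tools = createOpenAiExecutionTools({ apiKey: '...' });
 */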
2160
+ /**
2161
+ * Default base URL for Ollama API
2162
+ *
2163
+ * @public exported from `@promptbook/ollama`
2164
+ */
2165
+ const DEFAULT_OLLAMA_BASE_URL = 'http://localhost:11434'; // <- TODO: !!!! What is the correct base URL? /v1?
2166
+
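/*
 * A sketch (illustrative) of pointing the OpenAI-compatible tools at a local
 * Ollama server. Regarding the TODO above: Ollama conventionally serves its
 * OpenAI-compatible endpoints under `/v1` — stated here as an unverified
 * assumption. The placeholder `apiKey` is needed only because the underlying
 * OpenAI client requires one; Ollama ignores it:
 *
 * const tools = createOpenAiExecutionTools({
 *     baseURL: `${DEFAULT_OLLAMA_BASE_URL}/v1`,
 *     apiKey: 'ollama',
 * });
 */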
559
2167
  /**
560
2168
  * Execution Tools for calling Ollama API
561
2169
  *
562
2170
  * @public exported from `@promptbook/ollama`
563
2171
  */
564
- const createOllamaExecutionTools = Object.assign((options) => new OllamaExecutionTools(options), {
2172
+ const createOllamaExecutionTools = Object.assign((ollamaOptions) => {
2173
+ const openAiCompatibleOptions = {
2174
+ baseURL: DEFAULT_OLLAMA_BASE_URL,
2175
+ ...ollamaOptions,
2176
+ userId: 'ollama',
2177
+ };
2178
+ // TODO: !!!! Listing the models - do it dynamically in OpenAiExecutionTools
2179
+ // TODO: !!!! Do not allow to create Assistant from OpenAi compatible tools
2180
+ return createOpenAiExecutionTools(openAiCompatibleOptions);
2181
+ }, {
565
2182
  packageName: '@promptbook/ollama',
566
2183
  className: 'OllamaExecutionTools',
567
2184
  });
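/*
 * A usage sketch (illustrative, assuming a running local Ollama server with a
 * pulled model; the model name `llama3` and the placeholder `apiKey` are
 * assumptions — the underlying OpenAI client requires some key):
 *
 * const ollama = createOllamaExecutionTools({ apiKey: 'ollama' });
 * const result = await ollama.callChatModel({
 *     content: 'Hello!',
 *     parameters: {},
 *     modelRequirements: { modelVariant: 'CHAT', modelName: 'llama3' },
 * });
 * console.log(result.content);
 */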
@@ -745,7 +2362,7 @@
745
2362
  */
746
2363
 
747
2364
  exports.BOOK_LANGUAGE_VERSION = BOOK_LANGUAGE_VERSION;
748
- exports.OllamaExecutionTools = OllamaExecutionTools;
2365
+ exports.DEFAULT_OLLAMA_BASE_URL = DEFAULT_OLLAMA_BASE_URL;
749
2366
  exports.PROMPTBOOK_ENGINE_VERSION = PROMPTBOOK_ENGINE_VERSION;
750
2367
  exports._OllamaRegistration = _OllamaRegistration;
751
2368
  exports.createOllamaExecutionTools = createOllamaExecutionTools;