@promptbook/ollama 0.94.0-3 → 0.94.0-4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/esm/index.es.js CHANGED
@@ -1,8 +1,8 @@
+ import spaceTrim, { spaceTrim as spaceTrim$1 } from 'spacetrim';
+ import { randomBytes } from 'crypto';
  import Bottleneck from 'bottleneck';
  import colors from 'colors';
  import OpenAI from 'openai';
- import spaceTrim, { spaceTrim as spaceTrim$1 } from 'spacetrim';
- import { randomBytes } from 'crypto';

  // ⚠️ WARNING: This code has been generated so that any manual changes will be overwritten
  /**
@@ -18,51 +18,121 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
- const PROMPTBOOK_ENGINE_VERSION = '0.94.0-3';
+ const PROMPTBOOK_ENGINE_VERSION = '0.94.0-4';
  /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
  */

  /**
- * Detects if the code is running in a browser environment in main thread (Not in a web worker)
+ * Freezes the given object and all its nested objects recursively
  *
- * Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
+ * Note: `$` is used to indicate that this function is not a pure function - it mutates given object
+ * Note: This function mutates the object and returns the original (but mutated-deep-freezed) object
  *
+ * @returns The same object as the input, but deeply frozen
  * @public exported from `@promptbook/utils`
  */
- const $isRunningInBrowser = new Function(`
- try {
- return this === window;
- } catch (e) {
- return false;
+ function $deepFreeze(objectValue) {
+ if (Array.isArray(objectValue)) {
+ return Object.freeze(objectValue.map((item) => $deepFreeze(item)));
+ }
+ const propertyNames = Object.getOwnPropertyNames(objectValue);
+ for (const propertyName of propertyNames) {
+ const value = objectValue[propertyName];
+ if (value && typeof value === 'object') {
+ $deepFreeze(value);
+ }
  }
- `);
+ Object.freeze(objectValue);
+ return objectValue;
+ }
  /**
- * TODO: [🎺]
+ * TODO: [🧠] Is there a way how to meaningfully test this utility
  */
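
For illustration, a minimal sketch of the relocated `$deepFreeze` in use (not part of the published diff; assumes the implementation added above):

// $deepFreeze freezes the object and every nested object, returning the same reference:
const config = $deepFreeze({ retry: { count: 3 } });
console.log(Object.isFrozen(config.retry)); // -> true
// In strict mode (ES modules), `config.retry.count = 5` would now throw a TypeError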

  /**
- * Detects if the code is running in a web worker
+ * Represents the uncertain value
  *
- * Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
+ * @public exported from `@promptbook/core`
+ */
+ const ZERO_VALUE = $deepFreeze({ value: 0 });
+ /**
+ * Represents the uncertain value
  *
- * @public exported from `@promptbook/utils`
+ * @public exported from `@promptbook/core`
  */
- const $isRunningInWebWorker = new Function(`
- try {
- if (typeof WorkerGlobalScope !== 'undefined' && self instanceof WorkerGlobalScope) {
- return true;
- } else {
- return false;
- }
- } catch (e) {
- return false;
- }
- `);
+ const UNCERTAIN_ZERO_VALUE = $deepFreeze({ value: 0, isUncertain: true });
+ /**
+ * Represents the usage with no resources consumed
+ *
+ * @public exported from `@promptbook/core`
+ */
+ $deepFreeze({
+ price: ZERO_VALUE,
+ input: {
+ tokensCount: ZERO_VALUE,
+ charactersCount: ZERO_VALUE,
+ wordsCount: ZERO_VALUE,
+ sentencesCount: ZERO_VALUE,
+ linesCount: ZERO_VALUE,
+ paragraphsCount: ZERO_VALUE,
+ pagesCount: ZERO_VALUE,
+ },
+ output: {
+ tokensCount: ZERO_VALUE,
+ charactersCount: ZERO_VALUE,
+ wordsCount: ZERO_VALUE,
+ sentencesCount: ZERO_VALUE,
+ linesCount: ZERO_VALUE,
+ paragraphsCount: ZERO_VALUE,
+ pagesCount: ZERO_VALUE,
+ },
+ });
+ /**
+ * Represents the usage with unknown resources consumed
+ *
+ * @public exported from `@promptbook/core`
+ */
+ $deepFreeze({
+ price: UNCERTAIN_ZERO_VALUE,
+ input: {
+ tokensCount: UNCERTAIN_ZERO_VALUE,
+ charactersCount: UNCERTAIN_ZERO_VALUE,
+ wordsCount: UNCERTAIN_ZERO_VALUE,
+ sentencesCount: UNCERTAIN_ZERO_VALUE,
+ linesCount: UNCERTAIN_ZERO_VALUE,
+ paragraphsCount: UNCERTAIN_ZERO_VALUE,
+ pagesCount: UNCERTAIN_ZERO_VALUE,
+ },
+ output: {
+ tokensCount: UNCERTAIN_ZERO_VALUE,
+ charactersCount: UNCERTAIN_ZERO_VALUE,
+ wordsCount: UNCERTAIN_ZERO_VALUE,
+ sentencesCount: UNCERTAIN_ZERO_VALUE,
+ linesCount: UNCERTAIN_ZERO_VALUE,
+ paragraphsCount: UNCERTAIN_ZERO_VALUE,
+ pagesCount: UNCERTAIN_ZERO_VALUE,
+ },
+ });
+ /**
+ * Note: [💞] Ignore a discrepancy between file name and entity name
+ */
+
  /**
- * TODO: [🎺]
+ * Orders JSON object by keys
+ *
+ * @returns The same type of object as the input re-ordered
+ * @public exported from `@promptbook/utils`
  */
+ function orderJson(options) {
+ const { value, order } = options;
+ const orderedValue = {
+ ...(order === undefined ? {} : Object.fromEntries(order.map((key) => [key, undefined]))),
+ ...value,
+ };
+ return orderedValue;
+ }
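
Illustrative use of the relocated `orderJson` (a sketch, not part of the diff): keys named in `order` are seeded first as `undefined` placeholders, then the spread of `value` fills them in while preserving insertion order.

const ordered = orderJson({
    value: { content: 'Hello', name: 'greeting' },
    order: ['name', 'content'],
});
console.log(Object.keys(ordered)); // -> ['name', 'content']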

  /**
  * Name for the Promptbook
@@ -253,94 +323,6 @@ function assertsError(whatWasThrown) {
  throw new WrappedError(whatWasThrown);
  }

- /**
- * Generates random token
- *
- * Note: This function is cryptographically secure (it uses crypto.randomBytes internally)
- *
- * @private internal helper function
- * @returns secure random token
- */
- function $randomToken(randomness) {
- return randomBytes(randomness).toString('hex');
- }
- /**
- * TODO: Maybe use nanoid instead https://github.com/ai/nanoid
- */
-
- /**
- * This error indicates errors during the execution of the pipeline
- *
- * @public exported from `@promptbook/core`
- */
- class PipelineExecutionError extends Error {
- constructor(message) {
- // Added id parameter
- super(message);
- this.name = 'PipelineExecutionError';
- // TODO: [🐙] DRY - Maybe $randomId
- this.id = `error-${$randomToken(8 /* <- TODO: To global config + Use Base58 to avoid similar char conflicts */)}`;
- Object.setPrototypeOf(this, PipelineExecutionError.prototype);
- }
- }
- /**
- * TODO: [🧠][🌂] Add id to all errors
- */
-
- /**
- * Simple wrapper `new Date().toISOString()`
- *
- * Note: `$` is used to indicate that this function is not a pure function - it is not deterministic because it depends on the current time
- *
- * @returns string_date branded type
- * @public exported from `@promptbook/utils`
- */
- function $getCurrentDate() {
- return new Date().toISOString();
- }
-
- /**
- * Orders JSON object by keys
- *
- * @returns The same type of object as the input re-ordered
- * @public exported from `@promptbook/utils`
- */
- function orderJson(options) {
- const { value, order } = options;
- const orderedValue = {
- ...(order === undefined ? {} : Object.fromEntries(order.map((key) => [key, undefined]))),
- ...value,
- };
- return orderedValue;
- }
-
- /**
- * Freezes the given object and all its nested objects recursively
- *
- * Note: `$` is used to indicate that this function is not a pure function - it mutates given object
- * Note: This function mutates the object and returns the original (but mutated-deep-freezed) object
- *
- * @returns The same object as the input, but deeply frozen
- * @public exported from `@promptbook/utils`
- */
- function $deepFreeze(objectValue) {
- if (Array.isArray(objectValue)) {
- return Object.freeze(objectValue.map((item) => $deepFreeze(item)));
- }
- const propertyNames = Object.getOwnPropertyNames(objectValue);
- for (const propertyName of propertyNames) {
- const value = objectValue[propertyName];
- if (value && typeof value === 'object') {
- $deepFreeze(value);
- }
- }
- Object.freeze(objectValue);
- return objectValue;
- }
- /**
- * TODO: [🧠] Is there a way how to meaningfully test this utility
- */
-
  /**
  * Checks if the value is [🚉] serializable as JSON
  * If not, throws an UnexpectedError with a rich error message and tracking
@@ -589,187 +571,56 @@ exportJson({
  */

  /**
- * This error type indicates that some limit was reached
+ * Generates random token
+ *
+ * Note: This function is cryptographically secure (it uses crypto.randomBytes internally)
+ *
+ * @private internal helper function
+ * @returns secure random token
+ */
+ function $randomToken(randomness) {
+ return randomBytes(randomness).toString('hex');
+ }
+ /**
+ * TODO: Maybe use nanoid instead https://github.com/ai/nanoid
+ */
+
+ /**
+ * This error indicates errors during the execution of the pipeline
  *
  * @public exported from `@promptbook/core`
  */
- class LimitReachedError extends Error {
+ class PipelineExecutionError extends Error {
  constructor(message) {
+ // Added id parameter
  super(message);
- this.name = 'LimitReachedError';
- Object.setPrototypeOf(this, LimitReachedError.prototype);
+ this.name = 'PipelineExecutionError';
+ // TODO: [🐙] DRY - Maybe $randomId
+ this.id = `error-${$randomToken(8 /* <- TODO: To global config + Use Base58 to avoid similar char conflicts */)}`;
+ Object.setPrototypeOf(this, PipelineExecutionError.prototype);
  }
  }
+ /**
+ * TODO: [🧠][🌂] Add id to all errors
+ */

  /**
- * Format either small or big number
+ * Counts number of characters in the text
  *
  * @public exported from `@promptbook/utils`
  */
- function numberToString(value) {
- if (value === 0) {
- return '0';
- }
- else if (Number.isNaN(value)) {
- return VALUE_STRINGS.nan;
- }
- else if (value === Infinity) {
- return VALUE_STRINGS.infinity;
- }
- else if (value === -Infinity) {
- return VALUE_STRINGS.negativeInfinity;
- }
- for (let exponent = 0; exponent < 15; exponent++) {
- const factor = 10 ** exponent;
- const valueRounded = Math.round(value * factor) / factor;
- if (Math.abs(value - valueRounded) / value < SMALL_NUMBER) {
- return valueRounded.toFixed(exponent);
- }
- }
- return value.toString();
+ function countCharacters(text) {
+ // Remove null characters
+ text = text.replace(/\0/g, '');
+ // Replace emojis (and also ZWJ sequence) with hyphens
+ text = text.replace(/(\p{Extended_Pictographic})\p{Modifier_Symbol}/gu, '$1');
+ text = text.replace(/(\p{Extended_Pictographic})[\u{FE00}-\u{FE0F}]/gu, '$1');
+ text = text.replace(/\p{Extended_Pictographic}(\u{200D}\p{Extended_Pictographic})*/gu, '-');
+ return text.length;
  }
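
A quick illustration of the relocated `countCharacters` (not part of the diff; follows from the regexes above, which collapse each emoji sequence to a single hyphen before counting):

console.log('👍 ok'.length);           // -> 5 (the emoji is a surrogate pair, counted twice)
console.log(countCharacters('👍 ok')); // -> 4 (emoji collapsed to one hyphen)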
-
- /**
- * Function `valueToString` will convert the given value to string
- * This is useful and used in the `templateParameters` function
- *
- * Note: This function is not just calling `toString` method
- * It's more complex and can handle this conversion specifically for LLM models
- * See `VALUE_STRINGS`
- *
- * Note: There are 2 similar functions
- * - `valueToString` converts value to string for LLM models as human-readable string
- * - `asSerializable` converts value to string to preserve full information to be able to convert it back
- *
- * @public exported from `@promptbook/utils`
- */
- function valueToString(value) {
- try {
- if (value === '') {
- return VALUE_STRINGS.empty;
- }
- else if (value === null) {
- return VALUE_STRINGS.null;
- }
- else if (value === undefined) {
- return VALUE_STRINGS.undefined;
- }
- else if (typeof value === 'string') {
- return value;
- }
- else if (typeof value === 'number') {
- return numberToString(value);
- }
- else if (value instanceof Date) {
- return value.toISOString();
- }
- else {
- try {
- return JSON.stringify(value);
- }
- catch (error) {
- if (error instanceof TypeError && error.message.includes('circular structure')) {
- return VALUE_STRINGS.circular;
- }
- throw error;
- }
- }
- }
- catch (error) {
- assertsError(error);
- console.error(error);
- return VALUE_STRINGS.unserializable;
- }
- }
-
- /**
- * Replaces parameters in template with values from parameters object
- *
- * Note: This function is not places strings into string,
- * It's more complex and can handle this operation specifically for LLM models
- *
- * @param template the template with parameters in {curly} braces
- * @param parameters the object with parameters
- * @returns the template with replaced parameters
- * @throws {PipelineExecutionError} if parameter is not defined, not closed, or not opened
- * @public exported from `@promptbook/utils`
- */
- function templateParameters(template, parameters) {
- for (const [parameterName, parameterValue] of Object.entries(parameters)) {
- if (parameterValue === RESERVED_PARAMETER_MISSING_VALUE) {
- throw new UnexpectedError(`Parameter \`{${parameterName}}\` has missing value`);
- }
- else if (parameterValue === RESERVED_PARAMETER_RESTRICTED) {
- // TODO: [🍵]
- throw new UnexpectedError(`Parameter \`{${parameterName}}\` is restricted to use`);
- }
- }
- let replacedTemplates = template;
- let match;
- let loopLimit = LOOP_LIMIT;
- while ((match = /^(?<precol>.*){(?<parameterName>\w+)}(.*)/m /* <- Not global */
- .exec(replacedTemplates))) {
- if (loopLimit-- < 0) {
- throw new LimitReachedError('Loop limit reached during parameters replacement in `templateParameters`');
- }
- const precol = match.groups.precol;
- const parameterName = match.groups.parameterName;
- if (parameterName === '') {
- // Note: Skip empty placeholders. It's used to avoid confusion with JSON-like strings
- continue;
- }
- if (parameterName.indexOf('{') !== -1 || parameterName.indexOf('}') !== -1) {
- throw new PipelineExecutionError('Parameter is already opened or not closed');
- }
- if (parameters[parameterName] === undefined) {
- throw new PipelineExecutionError(`Parameter \`{${parameterName}}\` is not defined`);
- }
- let parameterValue = parameters[parameterName];
- if (parameterValue === undefined) {
- throw new PipelineExecutionError(`Parameter \`{${parameterName}}\` is not defined`);
- }
- parameterValue = valueToString(parameterValue);
- // Escape curly braces in parameter values to prevent prompt-injection
- parameterValue = parameterValue.replace(/[{}]/g, '\\$&');
- if (parameterValue.includes('\n') && /^\s*\W{0,3}\s*$/.test(precol)) {
- parameterValue = parameterValue
- .split('\n')
- .map((line, index) => (index === 0 ? line : `${precol}${line}`))
- .join('\n');
- }
- replacedTemplates =
- replacedTemplates.substring(0, match.index + precol.length) +
- parameterValue +
- replacedTemplates.substring(match.index + precol.length + parameterName.length + 2);
- }
- // [💫] Check if there are parameters that are not closed properly
- if (/{\w+$/.test(replacedTemplates)) {
- throw new PipelineExecutionError('Parameter is not closed');
- }
- // [💫] Check if there are parameters that are not opened properly
- if (/^\w+}/.test(replacedTemplates)) {
- throw new PipelineExecutionError('Parameter is not opened');
- }
- return replacedTemplates;
- }
-
- /**
- * Counts number of characters in the text
- *
- * @public exported from `@promptbook/utils`
- */
- function countCharacters(text) {
- // Remove null characters
- text = text.replace(/\0/g, '');
- // Replace emojis (and also ZWJ sequence) with hyphens
- text = text.replace(/(\p{Extended_Pictographic})\p{Modifier_Symbol}/gu, '$1');
- text = text.replace(/(\p{Extended_Pictographic})[\u{FE00}-\u{FE0F}]/gu, '$1');
- text = text.replace(/\p{Extended_Pictographic}(\u{200D}\p{Extended_Pictographic})*/gu, '-');
- return text.length;
- }
- /**
- * TODO: [🥴] Implement counting in formats - like JSON, CSV, XML,...
- */
+ /**
+ * TODO: [🥴] Implement counting in formats - like JSON, CSV, XML,...
+ */

  /**
  * Number of characters per standard line with 11pt Arial font size.
@@ -1147,74 +998,6 @@ function computeUsageCounts(content) {
  };
  }

- /**
- * Represents the uncertain value
- *
- * @public exported from `@promptbook/core`
- */
- const ZERO_VALUE = $deepFreeze({ value: 0 });
- /**
- * Represents the uncertain value
- *
- * @public exported from `@promptbook/core`
- */
- const UNCERTAIN_ZERO_VALUE = $deepFreeze({ value: 0, isUncertain: true });
- /**
- * Represents the usage with no resources consumed
- *
- * @public exported from `@promptbook/core`
- */
- $deepFreeze({
- price: ZERO_VALUE,
- input: {
- tokensCount: ZERO_VALUE,
- charactersCount: ZERO_VALUE,
- wordsCount: ZERO_VALUE,
- sentencesCount: ZERO_VALUE,
- linesCount: ZERO_VALUE,
- paragraphsCount: ZERO_VALUE,
- pagesCount: ZERO_VALUE,
- },
- output: {
- tokensCount: ZERO_VALUE,
- charactersCount: ZERO_VALUE,
- wordsCount: ZERO_VALUE,
- sentencesCount: ZERO_VALUE,
- linesCount: ZERO_VALUE,
- paragraphsCount: ZERO_VALUE,
- pagesCount: ZERO_VALUE,
- },
- });
- /**
- * Represents the usage with unknown resources consumed
- *
- * @public exported from `@promptbook/core`
- */
- $deepFreeze({
- price: UNCERTAIN_ZERO_VALUE,
- input: {
- tokensCount: UNCERTAIN_ZERO_VALUE,
- charactersCount: UNCERTAIN_ZERO_VALUE,
- wordsCount: UNCERTAIN_ZERO_VALUE,
- sentencesCount: UNCERTAIN_ZERO_VALUE,
- linesCount: UNCERTAIN_ZERO_VALUE,
- paragraphsCount: UNCERTAIN_ZERO_VALUE,
- pagesCount: UNCERTAIN_ZERO_VALUE,
- },
- output: {
- tokensCount: UNCERTAIN_ZERO_VALUE,
- charactersCount: UNCERTAIN_ZERO_VALUE,
- wordsCount: UNCERTAIN_ZERO_VALUE,
- sentencesCount: UNCERTAIN_ZERO_VALUE,
- linesCount: UNCERTAIN_ZERO_VALUE,
- paragraphsCount: UNCERTAIN_ZERO_VALUE,
- pagesCount: UNCERTAIN_ZERO_VALUE,
- },
- });
- /**
- * Note: [💞] Ignore a discrepancy between file name and entity name
- */
-
  /**
  * Make UncertainNumber
  *
@@ -1234,11 +1017,11 @@ function uncertainNumber(value, isUncertain) {
  }

  /**
- * Function computeUsage will create price per one token based on the string value found on openai page
+ * Create price per one token based on the string value found on openai page
  *
  * @private within the repository, used only as internal helper for `OPENAI_MODELS`
  */
- function computeUsage(value) {
+ function pricing(value) {
  const [price, tokens] = value.split(' / ');
  return parseFloat(price.replace('$', '')) / parseFloat(tokens.replace('M tokens', '')) / 1000000;
  }
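
A worked example of the renamed helper (illustrative only; follows directly from the implementation above):

// '$2.00 / 1M tokens' splits into '$2.00' and '1M tokens',
// giving 2.00 / 1 / 1000000 = 0.000002 USD per token
console.log(pricing(`$2.00 / 1M tokens`)); // -> 0.000002
console.log(pricing(`$0.50 / 1M tokens`)); // -> 5e-7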
@@ -1274,8 +1057,8 @@ const OPENAI_MODELS = exportJson({
  modelName: 'davinci-002',
  modelDescription: 'Legacy completion model with strong performance on text generation tasks. Optimized for complex instructions and longer outputs.',
  pricing: {
- prompt: computeUsage(`$2.00 / 1M tokens`),
- output: computeUsage(`$2.00 / 1M tokens`),
+ prompt: pricing(`$2.00 / 1M tokens`),
+ output: pricing(`$2.00 / 1M tokens`),
  },
  },
  /**/
@@ -1292,8 +1075,8 @@ const OPENAI_MODELS = exportJson({
  modelName: 'gpt-3.5-turbo-16k',
  modelDescription: 'GPT-3.5 Turbo with extended 16k token context length for handling longer conversations and documents.',
  pricing: {
- prompt: computeUsage(`$3.00 / 1M tokens`),
- output: computeUsage(`$4.00 / 1M tokens`),
+ prompt: pricing(`$3.00 / 1M tokens`),
+ output: pricing(`$4.00 / 1M tokens`),
  },
  },
  /**/
@@ -1316,8 +1099,8 @@ const OPENAI_MODELS = exportJson({
  modelName: 'gpt-4',
  modelDescription: 'GPT-4 is a powerful language model with enhanced reasoning, instruction-following capabilities, and 8K context window. Optimized for complex tasks requiring deep understanding.',
  pricing: {
- prompt: computeUsage(`$30.00 / 1M tokens`),
- output: computeUsage(`$60.00 / 1M tokens`),
+ prompt: pricing(`$30.00 / 1M tokens`),
+ output: pricing(`$60.00 / 1M tokens`),
  },
  },
  /**/
@@ -1328,8 +1111,8 @@ const OPENAI_MODELS = exportJson({
  modelName: 'gpt-4-32k',
  modelDescription: 'Extended context version of GPT-4 with a 32K token window for processing very long inputs and generating comprehensive responses for complex tasks.',
  pricing: {
- prompt: computeUsage(`$60.00 / 1M tokens`),
- output: computeUsage(`$120.00 / 1M tokens`),
+ prompt: pricing(`$60.00 / 1M tokens`),
+ output: pricing(`$120.00 / 1M tokens`),
  },
  },
  /**/
@@ -1351,8 +1134,8 @@ const OPENAI_MODELS = exportJson({
  modelName: 'gpt-4-turbo-2024-04-09',
  modelDescription: 'Latest stable GPT-4 Turbo model from April 2024 with enhanced reasoning and context handling capabilities. Offers 128K context window and improved performance.',
  pricing: {
- prompt: computeUsage(`$10.00 / 1M tokens`),
- output: computeUsage(`$30.00 / 1M tokens`),
+ prompt: pricing(`$10.00 / 1M tokens`),
+ output: pricing(`$30.00 / 1M tokens`),
  },
  },
  /**/
@@ -1363,8 +1146,8 @@ const OPENAI_MODELS = exportJson({
  modelName: 'gpt-3.5-turbo-1106',
  modelDescription: 'November 2023 version of GPT-3.5 Turbo with improved instruction following and a 16K token context window.',
  pricing: {
- prompt: computeUsage(`$1.00 / 1M tokens`),
- output: computeUsage(`$2.00 / 1M tokens`),
+ prompt: pricing(`$1.00 / 1M tokens`),
+ output: pricing(`$2.00 / 1M tokens`),
  },
  },
  /**/
@@ -1375,8 +1158,8 @@ const OPENAI_MODELS = exportJson({
  modelName: 'gpt-4-turbo',
  modelDescription: 'More capable model than GPT-4 with improved instruction following, function calling and a 128K token context window for handling very large documents.',
  pricing: {
- prompt: computeUsage(`$10.00 / 1M tokens`),
- output: computeUsage(`$30.00 / 1M tokens`),
+ prompt: pricing(`$10.00 / 1M tokens`),
+ output: pricing(`$30.00 / 1M tokens`),
  },
  },
  /**/
@@ -1387,8 +1170,8 @@ const OPENAI_MODELS = exportJson({
  modelName: 'gpt-3.5-turbo-instruct-0914',
  modelDescription: 'September 2023 version of GPT-3.5 Turbo optimized for completion-style instruction following with a 4K context window.',
  pricing: {
- prompt: computeUsage(`$1.50 / 1M tokens`),
- output: computeUsage(`$2.00 / 1M tokens`), // <- For gpt-3.5-turbo-instruct
+ prompt: pricing(`$1.50 / 1M tokens`),
+ output: pricing(`$2.00 / 1M tokens`), // <- For gpt-3.5-turbo-instruct
  },
  },
  /**/
@@ -1399,8 +1182,8 @@ const OPENAI_MODELS = exportJson({
  modelName: 'gpt-3.5-turbo-instruct',
  modelDescription: 'Optimized version of GPT-3.5 for completion-style API with good instruction following and a 4K token context window.',
  pricing: {
- prompt: computeUsage(`$1.50 / 1M tokens`),
- output: computeUsage(`$2.00 / 1M tokens`),
+ prompt: pricing(`$1.50 / 1M tokens`),
+ output: pricing(`$2.00 / 1M tokens`),
  },
  },
  /**/
@@ -1417,8 +1200,8 @@ const OPENAI_MODELS = exportJson({
  modelName: 'gpt-3.5-turbo',
  modelDescription: 'Latest version of GPT-3.5 Turbo with improved performance and instruction following capabilities. Default 4K context window with options for 16K.',
  pricing: {
- prompt: computeUsage(`$0.50 / 1M tokens`),
- output: computeUsage(`$1.50 / 1M tokens`),
+ prompt: pricing(`$0.50 / 1M tokens`),
+ output: pricing(`$1.50 / 1M tokens`),
  },
  },
  /**/
@@ -1429,8 +1212,8 @@ const OPENAI_MODELS = exportJson({
  modelName: 'gpt-3.5-turbo-0301',
  modelDescription: 'March 2023 version of GPT-3.5 Turbo with a 4K token context window. Legacy model maintained for backward compatibility.',
  pricing: {
- prompt: computeUsage(`$1.50 / 1M tokens`),
- output: computeUsage(`$2.00 / 1M tokens`),
+ prompt: pricing(`$1.50 / 1M tokens`),
+ output: pricing(`$2.00 / 1M tokens`),
  },
  },
  /**/
@@ -1441,8 +1224,8 @@ const OPENAI_MODELS = exportJson({
  modelName: 'babbage-002',
  modelDescription: 'Efficient legacy completion model with a good balance of performance and speed. Suitable for straightforward text generation tasks.',
  pricing: {
- prompt: computeUsage(`$0.40 / 1M tokens`),
- output: computeUsage(`$0.40 / 1M tokens`),
+ prompt: pricing(`$0.40 / 1M tokens`),
+ output: pricing(`$0.40 / 1M tokens`),
  },
  },
  /**/
@@ -1453,8 +1236,8 @@ const OPENAI_MODELS = exportJson({
  modelName: 'gpt-4-1106-preview',
  modelDescription: 'November 2023 preview version of GPT-4 Turbo with improved instruction following and a 128K token context window.',
  pricing: {
- prompt: computeUsage(`$10.00 / 1M tokens`),
- output: computeUsage(`$30.00 / 1M tokens`),
+ prompt: pricing(`$10.00 / 1M tokens`),
+ output: pricing(`$30.00 / 1M tokens`),
  },
  },
  /**/
@@ -1465,8 +1248,8 @@ const OPENAI_MODELS = exportJson({
  modelName: 'gpt-4-0125-preview',
  modelDescription: 'January 2024 preview version of GPT-4 Turbo with improved reasoning capabilities and a 128K token context window.',
  pricing: {
- prompt: computeUsage(`$10.00 / 1M tokens`),
- output: computeUsage(`$30.00 / 1M tokens`),
+ prompt: pricing(`$10.00 / 1M tokens`),
+ output: pricing(`$30.00 / 1M tokens`),
  },
  },
  /**/
@@ -1483,8 +1266,8 @@ const OPENAI_MODELS = exportJson({
  modelName: 'gpt-3.5-turbo-0125',
  modelDescription: 'January 2024 version of GPT-3.5 Turbo with improved reasoning capabilities and a 16K token context window.',
  pricing: {
- prompt: computeUsage(`$0.50 / 1M tokens`),
- output: computeUsage(`$1.50 / 1M tokens`),
+ prompt: pricing(`$0.50 / 1M tokens`),
+ output: pricing(`$1.50 / 1M tokens`),
  },
  },
  /**/
@@ -1495,8 +1278,8 @@ const OPENAI_MODELS = exportJson({
  modelName: 'gpt-4-turbo-preview',
  modelDescription: 'Preview version of GPT-4 Turbo that points to the latest model version. Features improved instruction following, 128K token context window and lower latency.',
  pricing: {
- prompt: computeUsage(`$10.00 / 1M tokens`),
- output: computeUsage(`$30.00 / 1M tokens`),
+ prompt: pricing(`$10.00 / 1M tokens`),
+ output: pricing(`$30.00 / 1M tokens`),
  },
  },
  /**/
@@ -1507,7 +1290,7 @@ const OPENAI_MODELS = exportJson({
  modelName: 'text-embedding-3-large',
  modelDescription: "OpenAI's most capable text embedding model designed for high-quality embeddings for complex similarity tasks and information retrieval.",
  pricing: {
- prompt: computeUsage(`$0.13 / 1M tokens`),
+ prompt: pricing(`$0.13 / 1M tokens`),
  // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
  output: 0, // <- Note: [🆖] In Embedding models you dont pay for output
  },
@@ -1520,7 +1303,7 @@ const OPENAI_MODELS = exportJson({
  modelName: 'text-embedding-3-small',
  modelDescription: 'Cost-effective embedding model with good performance for simpler tasks like text similarity and retrieval. Good balance of quality and efficiency.',
  pricing: {
- prompt: computeUsage(`$0.02 / 1M tokens`),
+ prompt: pricing(`$0.02 / 1M tokens`),
  // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
  output: 0, // <- Note: [🆖] In Embedding models you dont pay for output
  },
@@ -1533,8 +1316,8 @@ const OPENAI_MODELS = exportJson({
  modelName: 'gpt-3.5-turbo-0613',
  modelDescription: 'June 2023 version of GPT-3.5 Turbo with function calling capabilities and a 4K token context window.',
  pricing: {
- prompt: computeUsage(`$1.50 / 1M tokens`),
- output: computeUsage(`$2.00 / 1M tokens`),
+ prompt: pricing(`$1.50 / 1M tokens`),
+ output: pricing(`$2.00 / 1M tokens`),
  },
  },
  /**/
@@ -1545,7 +1328,7 @@ const OPENAI_MODELS = exportJson({
  modelName: 'text-embedding-ada-002',
  modelDescription: 'Legacy text embedding model suitable for text similarity and retrieval augmented generation use cases. Replaced by newer embedding-3 models.',
  pricing: {
- prompt: computeUsage(`$0.1 / 1M tokens`),
+ prompt: pricing(`$0.1 / 1M tokens`),
  // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
  output: 0, // <- Note: [🆖] In Embedding models you dont pay for output
  },
@@ -1576,8 +1359,8 @@ const OPENAI_MODELS = exportJson({
  modelName: 'gpt-4o-2024-05-13',
  modelDescription: 'May 2024 version of GPT-4o with enhanced multimodal capabilities, improved reasoning, and optimized for vision, audio and chat at lower latencies.',
  pricing: {
- prompt: computeUsage(`$5.00 / 1M tokens`),
- output: computeUsage(`$15.00 / 1M tokens`),
+ prompt: pricing(`$5.00 / 1M tokens`),
+ output: pricing(`$15.00 / 1M tokens`),
  },
  },
  /**/
@@ -1588,8 +1371,8 @@ const OPENAI_MODELS = exportJson({
  modelName: 'gpt-4o',
  modelDescription: "OpenAI's most advanced multimodal model optimized for performance, speed, and cost. Capable of vision, reasoning, and high quality text generation.",
  pricing: {
- prompt: computeUsage(`$5.00 / 1M tokens`),
- output: computeUsage(`$15.00 / 1M tokens`),
+ prompt: pricing(`$5.00 / 1M tokens`),
+ output: pricing(`$15.00 / 1M tokens`),
  },
  },
  /**/
@@ -1600,8 +1383,8 @@ const OPENAI_MODELS = exportJson({
  modelName: 'gpt-4o-mini',
  modelDescription: 'Smaller, more cost-effective version of GPT-4o with good performance across text, vision, and audio tasks at reduced complexity.',
  pricing: {
- prompt: computeUsage(`$0.15 / 1M tokens`),
- output: computeUsage(`$0.60 / 1M tokens`),
+ prompt: pricing(`$0.15 / 1M tokens`),
+ output: pricing(`$0.60 / 1M tokens`),
  },
  },
  /**/
@@ -1612,8 +1395,8 @@ const OPENAI_MODELS = exportJson({
  modelName: 'o1-preview',
  modelDescription: 'Advanced reasoning model with exceptional performance on complex logical, mathematical, and analytical tasks. Built for deep reasoning and specialized professional tasks.',
  pricing: {
- prompt: computeUsage(`$15.00 / 1M tokens`),
- output: computeUsage(`$60.00 / 1M tokens`),
+ prompt: pricing(`$15.00 / 1M tokens`),
+ output: pricing(`$60.00 / 1M tokens`),
  },
  },
  /**/
@@ -1625,8 +1408,8 @@ const OPENAI_MODELS = exportJson({
  modelDescription: 'September 2024 version of O1 preview with specialized reasoning capabilities for complex tasks requiring precise analytical thinking.',
  // <- TODO: [💩] Some better system to organize these date suffixes and versions
  pricing: {
- prompt: computeUsage(`$15.00 / 1M tokens`),
- output: computeUsage(`$60.00 / 1M tokens`),
+ prompt: pricing(`$15.00 / 1M tokens`),
+ output: pricing(`$60.00 / 1M tokens`),
  },
  },
  /**/
@@ -1637,8 +1420,8 @@ const OPENAI_MODELS = exportJson({
  modelName: 'o1-mini',
  modelDescription: 'Smaller, cost-effective version of the O1 model with good performance on reasoning tasks while maintaining efficiency for everyday analytical use.',
  pricing: {
- prompt: computeUsage(`$3.00 / 1M tokens`),
- output: computeUsage(`$12.00 / 1M tokens`),
+ prompt: pricing(`$3.00 / 1M tokens`),
+ output: pricing(`$12.00 / 1M tokens`),
  },
  },
  /**/
@@ -1649,8 +1432,8 @@ const OPENAI_MODELS = exportJson({
  modelName: 'o1',
  modelDescription: "OpenAI's advanced reasoning model focused on logic and problem-solving. Designed for complex analytical tasks with rigorous step-by-step reasoning. 128K context window.",
  pricing: {
- prompt: computeUsage(`$15.00 / 1M tokens`),
- output: computeUsage(`$60.00 / 1M tokens`),
+ prompt: pricing(`$15.00 / 1M tokens`),
+ output: pricing(`$60.00 / 1M tokens`),
  },
  },
  /**/
@@ -1661,8 +1444,8 @@ const OPENAI_MODELS = exportJson({
  modelName: 'o3-mini',
  modelDescription: 'Cost-effective reasoning model optimized for academic and scientific problem-solving. Efficient performance on STEM tasks with deep mathematical and scientific knowledge. 128K context window.',
  pricing: {
- prompt: computeUsage(`$3.00 / 1M tokens`),
- output: computeUsage(`$12.00 / 1M tokens`),
+ prompt: pricing(`$3.00 / 1M tokens`),
+ output: pricing(`$12.00 / 1M tokens`),
  // <- TODO: !! Unsure, check the pricing
  },
  },
@@ -1674,8 +1457,8 @@ const OPENAI_MODELS = exportJson({
  modelName: 'o1-mini-2024-09-12',
  modelDescription: "September 2024 version of O1-mini with balanced reasoning capabilities and cost-efficiency. Good for analytical tasks that don't require the full O1 model.",
  pricing: {
- prompt: computeUsage(`$3.00 / 1M tokens`),
- output: computeUsage(`$12.00 / 1M tokens`),
+ prompt: pricing(`$3.00 / 1M tokens`),
+ output: pricing(`$12.00 / 1M tokens`),
  },
  },
  /**/
@@ -1686,8 +1469,8 @@ const OPENAI_MODELS = exportJson({
  modelName: 'gpt-3.5-turbo-16k-0613',
  modelDescription: 'June 2023 version of GPT-3.5 Turbo with extended 16k token context window for processing longer conversations and documents.',
  pricing: {
- prompt: computeUsage(`$3.00 / 1M tokens`),
- output: computeUsage(`$4.00 / 1M tokens`),
+ prompt: pricing(`$3.00 / 1M tokens`),
+ output: pricing(`$4.00 / 1M tokens`),
  },
  },
  /**/
@@ -1762,34 +1545,205 @@ resultContent, rawResponse) {
  */

  /**
- * Execution Tools for calling OpenAI API
+ * Simple wrapper `new Date().toISOString()`
  *
- * @public exported from `@promptbook/openai`
+ * Note: `$` is used to indicate that this function is not a pure function - it is not deterministic because it depends on the current time
+ *
+ * @returns string_date branded type
+ * @public exported from `@promptbook/utils`
  */
- class OpenAiExecutionTools {
- /**
- * Creates OpenAI Execution Tools.
- *
- * @param options which are relevant are directly passed to the OpenAI client
- */
- constructor(options) {
- this.options = options;
- /**
- * OpenAI API client.
- */
- this.client = null;
- // TODO: Allow configuring rate limits via options
- this.limiter = new Bottleneck({
- minTime: 60000 / (this.options.maxRequestsPerMinute || DEFAULT_MAX_REQUESTS_PER_MINUTE),
- });
+ function $getCurrentDate() {
+ return new Date().toISOString();
+ }
+
+ /**
+ * This error type indicates that some limit was reached
+ *
+ * @public exported from `@promptbook/core`
+ */
+ class LimitReachedError extends Error {
+ constructor(message) {
+ super(message);
+ this.name = 'LimitReachedError';
+ Object.setPrototypeOf(this, LimitReachedError.prototype);
  }
- get title() {
- return 'OpenAI';
+ }
+
+ /**
+ * Format either small or big number
+ *
+ * @public exported from `@promptbook/utils`
+ */
+ function numberToString(value) {
+ if (value === 0) {
+ return '0';
  }
- get description() {
- return 'Use all models provided by OpenAI';
+ else if (Number.isNaN(value)) {
+ return VALUE_STRINGS.nan;
  }
- async getClient() {
+ else if (value === Infinity) {
+ return VALUE_STRINGS.infinity;
+ }
+ else if (value === -Infinity) {
+ return VALUE_STRINGS.negativeInfinity;
+ }
+ for (let exponent = 0; exponent < 15; exponent++) {
+ const factor = 10 ** exponent;
+ const valueRounded = Math.round(value * factor) / factor;
+ if (Math.abs(value - valueRounded) / value < SMALL_NUMBER) {
+ return valueRounded.toFixed(exponent);
+ }
+ }
+ return value.toString();
+ }
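
Illustrative calls of `numberToString` (not part of the diff; assumes `SMALL_NUMBER` is a small relative tolerance and `VALUE_STRINGS` holds the fallback labels used above):

console.log(numberToString(1234));      // -> '1234'
console.log(numberToString(0.1 + 0.2)); // -> '0.3' (floating-point noise rounded away)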
+
+ /**
+ * Function `valueToString` will convert the given value to string
+ * This is useful and used in the `templateParameters` function
+ *
+ * Note: This function is not just calling `toString` method
+ * It's more complex and can handle this conversion specifically for LLM models
+ * See `VALUE_STRINGS`
+ *
+ * Note: There are 2 similar functions
+ * - `valueToString` converts value to string for LLM models as human-readable string
+ * - `asSerializable` converts value to string to preserve full information to be able to convert it back
+ *
+ * @public exported from `@promptbook/utils`
+ */
+ function valueToString(value) {
+ try {
+ if (value === '') {
+ return VALUE_STRINGS.empty;
+ }
+ else if (value === null) {
+ return VALUE_STRINGS.null;
+ }
+ else if (value === undefined) {
+ return VALUE_STRINGS.undefined;
+ }
+ else if (typeof value === 'string') {
+ return value;
+ }
+ else if (typeof value === 'number') {
+ return numberToString(value);
+ }
+ else if (value instanceof Date) {
+ return value.toISOString();
+ }
+ else {
+ try {
+ return JSON.stringify(value);
+ }
+ catch (error) {
+ if (error instanceof TypeError && error.message.includes('circular structure')) {
+ return VALUE_STRINGS.circular;
+ }
+ throw error;
+ }
+ }
+ }
+ catch (error) {
+ assertsError(error);
+ console.error(error);
+ return VALUE_STRINGS.unserializable;
+ }
+ }
+
+ /**
+ * Replaces parameters in template with values from parameters object
+ *
+ * Note: This function is not places strings into string,
+ * It's more complex and can handle this operation specifically for LLM models
+ *
+ * @param template the template with parameters in {curly} braces
+ * @param parameters the object with parameters
+ * @returns the template with replaced parameters
+ * @throws {PipelineExecutionError} if parameter is not defined, not closed, or not opened
+ * @public exported from `@promptbook/utils`
+ */
+ function templateParameters(template, parameters) {
+ for (const [parameterName, parameterValue] of Object.entries(parameters)) {
+ if (parameterValue === RESERVED_PARAMETER_MISSING_VALUE) {
+ throw new UnexpectedError(`Parameter \`{${parameterName}}\` has missing value`);
+ }
+ else if (parameterValue === RESERVED_PARAMETER_RESTRICTED) {
+ // TODO: [🍵]
+ throw new UnexpectedError(`Parameter \`{${parameterName}}\` is restricted to use`);
+ }
+ }
+ let replacedTemplates = template;
+ let match;
+ let loopLimit = LOOP_LIMIT;
+ while ((match = /^(?<precol>.*){(?<parameterName>\w+)}(.*)/m /* <- Not global */
+ .exec(replacedTemplates))) {
+ if (loopLimit-- < 0) {
+ throw new LimitReachedError('Loop limit reached during parameters replacement in `templateParameters`');
+ }
+ const precol = match.groups.precol;
+ const parameterName = match.groups.parameterName;
+ if (parameterName === '') {
+ // Note: Skip empty placeholders. It's used to avoid confusion with JSON-like strings
+ continue;
+ }
+ if (parameterName.indexOf('{') !== -1 || parameterName.indexOf('}') !== -1) {
+ throw new PipelineExecutionError('Parameter is already opened or not closed');
+ }
+ if (parameters[parameterName] === undefined) {
+ throw new PipelineExecutionError(`Parameter \`{${parameterName}}\` is not defined`);
+ }
+ let parameterValue = parameters[parameterName];
+ if (parameterValue === undefined) {
+ throw new PipelineExecutionError(`Parameter \`{${parameterName}}\` is not defined`);
+ }
+ parameterValue = valueToString(parameterValue);
+ // Escape curly braces in parameter values to prevent prompt-injection
+ parameterValue = parameterValue.replace(/[{}]/g, '\\$&');
+ if (parameterValue.includes('\n') && /^\s*\W{0,3}\s*$/.test(precol)) {
+ parameterValue = parameterValue
+ .split('\n')
+ .map((line, index) => (index === 0 ? line : `${precol}${line}`))
+ .join('\n');
+ }
+ replacedTemplates =
+ replacedTemplates.substring(0, match.index + precol.length) +
+ parameterValue +
+ replacedTemplates.substring(match.index + precol.length + parameterName.length + 2);
+ }
+ // [💫] Check if there are parameters that are not closed properly
+ if (/{\w+$/.test(replacedTemplates)) {
+ throw new PipelineExecutionError('Parameter is not closed');
+ }
+ // [💫] Check if there are parameters that are not opened properly
+ if (/^\w+}/.test(replacedTemplates)) {
+ throw new PipelineExecutionError('Parameter is not opened');
+ }
+ return replacedTemplates;
+ }
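
Illustrative call of the relocated `templateParameters` (a sketch; the error cases follow the `@throws` annotation above):

console.log(templateParameters('Hello {name}!', { name: 'Promptbook' }));
// -> 'Hello Promptbook!'
// templateParameters('Hello {name}!', {}) would throw PipelineExecutionError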
+
+ /**
+ * Execution Tools for calling OpenAI API or other OpeenAI compatible provider
+ *
+ * @public exported from `@promptbook/openai`
+ */
+ class OpenAiCompatibleExecutionTools {
+ /**
+ * Creates OpenAI compatible Execution Tools.
+ *
+ * @param options which are relevant are directly passed to the OpenAI compatible client
+ */
+ constructor(options) {
+ this.options = options;
+ /**
+ * OpenAI API client.
+ */
+ this.client = null;
+ // TODO: Allow configuring rate limits via options
+ this.limiter = new Bottleneck({
+ minTime: 60000 / (this.options.maxRequestsPerMinute || DEFAULT_MAX_REQUESTS_PER_MINUTE),
+ });
+ }
+ async getClient() {
  if (this.client === null) {
  // Note: Passing only OpenAI relevant options to OpenAI constructor
  const openAiOptions = { ...this.options };
@@ -1799,18 +1753,6 @@ class OpenAiExecutionTools {
  }
  return this.client;
  }
- /*
- Note: Commenting this out to avoid circular dependency
- /**
- * Create (sub)tools for calling OpenAI API Assistants
- *
- * @param assistantId Which assistant to use
- * @returns Tools for calling OpenAI API Assistants with same token
- * /
- public createAssistantSubtools(assistantId: string_token): OpenAiAssistantExecutionTools {
- return new OpenAiAssistantExecutionTools({ ...this.options, assistantId });
- }
- */
  /**
  * Check the `options` passed to `constructor`
  */
@@ -1819,23 +1761,15 @@ class OpenAiExecutionTools {
  // TODO: [🎍] Do here a real check that API is online, working and API key is correct
  }
  /**
- * List all available OpenAI models that can be used
+ * List all available OpenAI compatible models that can be used
  */
  async listModels() {
- /*
- Note: Dynamic lising of the models
- const models = await this.openai.models.list({});
-
- console.log({ models });
- console.log(models.data);
- */
  const client = await this.getClient();
  const rawModelsList = await client.models.list();
  const availableModels = rawModelsList.data
  .sort((a, b) => (a.created > b.created ? 1 : -1))
  .map((modelFromApi) => {
- // TODO: !!!! What about other model compatibilities?
- const modelFromList = OPENAI_MODELS.find(({ modelName }) => modelName === modelFromApi.id ||
+ const modelFromList = this.HARDCODED_MODELS.find(({ modelName }) => modelName === modelFromApi.id ||
  modelName.startsWith(modelFromApi.id) ||
  modelFromApi.id.startsWith(modelName));
  if (modelFromList !== undefined) {
@@ -1851,12 +1785,12 @@ class OpenAiExecutionTools {
  return availableModels;
  }
  /**
- * Calls OpenAI API to use a chat model.
+ * Calls OpenAI compatible API to use a chat model.
  */
  async callChatModel(prompt) {
  var _a;
  if (this.options.isVerbose) {
- console.info('💬 OpenAI callChatModel call', { prompt });
+ console.info(`💬 ${this.title} callChatModel call`, { prompt });
  }
  const { content, parameters, modelRequirements, format } = prompt;
  const client = await this.getClient();
@@ -1917,20 +1851,20 @@ class OpenAiExecutionTools {
  }
  const complete = $getCurrentDate();
  if (!rawResponse.choices[0]) {
- throw new PipelineExecutionError('No choises from OpenAI');
+ throw new PipelineExecutionError(`No choises from ${this.title}`);
  }
  if (rawResponse.choices.length > 1) {
  // TODO: This should be maybe only warning
- throw new PipelineExecutionError('More than one choise from OpenAI');
+ throw new PipelineExecutionError(`More than one choise from ${this.title}`);
  }
  const resultContent = rawResponse.choices[0].message.content;
- const usage = computeOpenAiUsage(content || '', resultContent || '', rawResponse);
+ const usage = this.computeUsage(content || '', resultContent || '', rawResponse);
  if (resultContent === null) {
- throw new PipelineExecutionError('No response message from OpenAI');
+ throw new PipelineExecutionError(`No response message from ${this.title}`);
  }
  return exportJson({
  name: 'promptResult',
- message: `Result of \`OpenAiExecutionTools.callChatModel\``,
+ message: `Result of \`OpenAiCompatibleExecutionTools.callChatModel\``,
  order: [],
  value: {
  content: resultContent,
@@ -1953,7 +1887,7 @@ class OpenAiExecutionTools {
  async callCompletionModel(prompt) {
  var _a;
  if (this.options.isVerbose) {
- console.info('🖋 OpenAI callCompletionModel call', { prompt });
+ console.info(`🖋 ${this.title} callCompletionModel call`, { prompt });
  }
  const { content, parameters, modelRequirements } = prompt;
  const client = await this.getClient();
@@ -1994,17 +1928,17 @@ class OpenAiExecutionTools {
  }
  const complete = $getCurrentDate();
  if (!rawResponse.choices[0]) {
- throw new PipelineExecutionError('No choises from OpenAI');
+ throw new PipelineExecutionError(`No choises from ${this.title}`);
  }
  if (rawResponse.choices.length > 1) {
  // TODO: This should be maybe only warning
- throw new PipelineExecutionError('More than one choise from OpenAI');
+ throw new PipelineExecutionError(`More than one choise from ${this.title}`);
  }
  const resultContent = rawResponse.choices[0].text;
- const usage = computeOpenAiUsage(content || '', resultContent || '', rawResponse);
+ const usage = this.computeUsage(content || '', resultContent || '', rawResponse);
  return exportJson({
  name: 'promptResult',
- message: `Result of \`OpenAiExecutionTools.callCompletionModel\``,
+ message: `Result of \`OpenAiCompatibleExecutionTools.callCompletionModel\``,
  order: [],
  value: {
  content: resultContent,
@@ -2022,11 +1956,11 @@ class OpenAiExecutionTools {
  });
  }
  /**
- * Calls OpenAI API to use a embedding model
+ * Calls OpenAI compatible API to use a embedding model
  */
  async callEmbeddingModel(prompt) {
  if (this.options.isVerbose) {
- console.info('🖋 OpenAI embedding call', { prompt });
+ console.info(`🖋 ${this.title} embedding call`, { prompt });
  }
  const { content, parameters, modelRequirements } = prompt;
  const client = await this.getClient();
@@ -2061,12 +1995,12 @@ class OpenAiExecutionTools {
  throw new PipelineExecutionError(`Expected exactly 1 data item in response, got ${rawResponse.data.length}`);
  }
  const resultContent = rawResponse.data[0].embedding;
- const usage = computeOpenAiUsage(content || '', '',
+ const usage = this.computeUsage(content || '', '',
  // <- Note: Embedding does not have result content
  rawResponse);
  return exportJson({
  name: 'promptResult',
- message: `Result of \`OpenAiExecutionTools.callEmbeddingModel\``,
+ message: `Result of \`OpenAiCompatibleExecutionTools.callEmbeddingModel\``,
  order: [],
  value: {
  content: resultContent,
@@ -2089,65 +2023,171 @@ class OpenAiExecutionTools {
  */
  getDefaultModel(defaultModelName) {
  // Note: Match exact or prefix for model families
- const model = OPENAI_MODELS.find(({ modelName }) => modelName === defaultModelName || modelName.startsWith(defaultModelName));
+ const model = this.HARDCODED_MODELS.find(({ modelName }) => modelName === defaultModelName || modelName.startsWith(defaultModelName));
  if (model === undefined) {
- throw new UnexpectedError(spaceTrim((block) => `
- Cannot find model in OpenAI models with name "${defaultModelName}" which should be used as default.
+ throw new PipelineExecutionError(spaceTrim((block) => `
+ Cannot find model in ${this.title} models with name "${defaultModelName}" which should be used as default.

  Available models:
- ${block(OPENAI_MODELS.map(({ modelName }) => `- "${modelName}"`).join('\n'))}
+ ${block(this.HARDCODED_MODELS.map(({ modelName }) => `- "${modelName}"`).join('\n'))}
+
+ Model "${defaultModelName}" is probably not available anymore, not installed, inaccessible or misconfigured.

  `));
  }
  return model;
  }
- /**
- * Default model for chat variant.
- */
- getDefaultChatModel() {
- return this.getDefaultModel('gpt-4o');
- }
- /**
- * Default model for completion variant.
- */
- getDefaultCompletionModel() {
- return this.getDefaultModel('gpt-3.5-turbo-instruct');
- }
- /**
- * Default model for completion variant.
- */
- getDefaultEmbeddingModel() {
- return this.getDefaultModel('text-embedding-3-large');
- }
  }
  /**
- * TODO: [🧠][🧙‍♂️] Maybe there can be some wizzard for thoose who want to use just OpenAI
- * TODO: Maybe Create some common util for callChatModel and callCompletionModel
- * TODO: Maybe make custom OpenAiError
+ * TODO: [🛄] Some way how to re-wrap the errors from `OpenAiCompatibleExecutionTools`
+ * TODO: [🛄] Maybe make custom `OpenAiCompatibleError`
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
  * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
  */

  /**
- * Execution Tools for calling OpenAI API
+ * List of available models in Ollama library
  *
- * Note: This can be also used for other OpenAI compatible APIs, like Ollama
+ * Note: Done at 2025-05-19
  *
- * @public exported from `@promptbook/openai`
+ * @see https://ollama.com/library
+ * @public exported from `@promptbook/ollama`
  */
- const createOpenAiExecutionTools = Object.assign((options) => {
- // TODO: [🧠][main] !!4 If browser, auto add `dangerouslyAllowBrowser`
- if (($isRunningInBrowser() || $isRunningInWebWorker()) && !options.dangerouslyAllowBrowser) {
- options = { ...options, dangerouslyAllowBrowser: true };
- }
- return new OpenAiExecutionTools(options);
- }, {
- packageName: '@promptbook/openai',
- className: 'OpenAiExecutionTools',
+ const OLLAMA_MODELS = exportJson({
+ name: 'OLLAMA_MODELS',
+ value: [
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'llama2',
+ modelName: 'llama2',
+ modelDescription: 'Meta Llama 2, a general-purpose large language model.',
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'llama2-chat',
+ modelName: 'llama2-chat',
+ modelDescription: 'Meta Llama 2 Chat, optimized for conversational tasks.',
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'alpaca-7b',
+ modelName: 'alpaca-7b',
+ modelDescription: 'Stanford Alpaca 7B, instruction-tuned LLaMA model.',
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'alpaca-30b',
+ modelName: 'alpaca-30b',
+ modelDescription: 'Stanford Alpaca 30B, larger instruction-tuned LLaMA model.',
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'vicuna-13b',
+ modelName: 'vicuna-13b',
+ modelDescription: 'Vicuna 13B, fine-tuned LLaMA for chat and instruction.',
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'falcon-7b',
+ modelName: 'falcon-7b',
+ modelDescription: 'Falcon 7B, a performant open large language model.',
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'falcon-40b',
+ modelName: 'falcon-40b',
+ modelDescription: 'Falcon 40B, a larger open large language model.',
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'bloom-7b',
+ modelName: 'bloom-7b',
+ modelDescription: 'BLOOM 7B, multilingual large language model.',
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'mistral-7b',
+ modelName: 'mistral-7b',
+ modelDescription: 'Mistral 7B, efficient and fast open LLM.',
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gorilla',
+ modelName: 'gorilla',
+ modelDescription: 'Gorilla, open-source LLM for tool use and APIs.',
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'cerebras-13b',
+ modelName: 'cerebras-13b',
+ modelDescription: 'Cerebras-GPT 13B, open large language model.',
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'openchat-7b',
+ modelName: 'openchat-7b',
+ modelDescription: 'OpenChat 7B, fine-tuned for conversational tasks.',
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'openchat-13b',
+ modelName: 'openchat-13b',
+ modelDescription: 'OpenChat 13B, larger conversational LLM.',
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'mpt-7b-chat',
+ modelName: 'mpt-7b-chat',
+ modelDescription: 'MPT-7B Chat, optimized for dialogue and chat.',
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'mpt-7b-instruct',
+ modelName: 'mpt-7b-instruct',
+ modelDescription: 'MPT-7B Instruct, instruction-tuned variant.',
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'command-7b',
+ modelName: 'command-7b',
+ modelDescription: 'Command 7B, instruction-following LLM.',
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'starcoder',
+ modelName: 'starcoder',
+ modelDescription: 'StarCoder, code generation large language model.',
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'starcoder2',
+ modelName: 'starcoder2',
+ modelDescription: 'StarCoder2, improved code generation model.',
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'mixtral-7b-chat',
+ modelName: 'mixtral-7b-chat',
+ modelDescription: 'Mixtral 7B Chat, Mixture-of-Experts conversational model.',
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'mixtral-8x7b',
+ modelName: 'mixtral-8x7b',
+ modelDescription: 'Mixtral 8x7B, Mixture-of-Experts large language model.',
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'mixtral-8x7b-instruct',
+ modelName: 'mixtral-8x7b-instruct',
+ modelDescription: 'Mixtral 8x7B Instruct, instruction-tuned Mixture-of-Experts model.',
+ },
+ // <- [🕕]
+ ],
  });
  /**
- * TODO: [🦺] Is there some way how to put `packageName` and `className` on top and function definition on bottom?
- * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
+ * TODO: [🚸] Not all models are compatible with JSON mode, add this information here and use it
+ * Note: [💞] Ignore a discrepancy between file name and entity name
  */

  /**
@@ -2155,22 +2195,79 @@ const createOpenAiExecutionTools = Object.assign((options) => {
  *
  * @public exported from `@promptbook/ollama`
  */
- const DEFAULT_OLLAMA_BASE_URL = 'http://localhost:11434'; // <- TODO: !!!! What is the correct base URL? /v1?
+ const DEFAULT_OLLAMA_BASE_URL = 'http://localhost:11434/v1';

  /**
  * Execution Tools for calling Ollama API
  *
  * @public exported from `@promptbook/ollama`
  */
- const createOllamaExecutionTools = Object.assign((ollamaOptions) => {
- const openAiCompatibleOptions = {
- baseURL: DEFAULT_OLLAMA_BASE_URL,
- ...ollamaOptions,
- userId: 'ollama',
- };
- // TODO: !!!! Listing the models - do it dynamically in OpenAiExecutionTools
- // TODO: !!!! Do not allow to create Assistant from OpenAi compatible tools
- return createOpenAiExecutionTools(openAiCompatibleOptions);
+ class OllamaExecutionTools extends OpenAiCompatibleExecutionTools {
+ /* <- TODO: [🍚] `, Destroyable` */
+ constructor(ollamaOptions) {
+ const openAiCompatibleOptions = {
+ baseURL: DEFAULT_OLLAMA_BASE_URL,
+ ...ollamaOptions,
+ userId: 'ollama',
+ };
+ super(openAiCompatibleOptions);
+ }
+ get title() {
+ return 'Ollama';
+ }
+ get description() {
+ return 'Use all models provided by Ollama';
+ }
+ /**
+ * List all available models (non dynamically)
+ *
+ * Note: Purpose of this is to provide more information about models than standard listing from API
+ */
+ get HARDCODED_MODELS() {
+ return OLLAMA_MODELS;
+ }
+ /**
+ * Computes the usage of the Ollama API based on the response from Ollama
+ */
+ computeUsage(...args) {
+ return {
+ ...computeOpenAiUsage(...args),
+ price: ZERO_VALUE, // <- Note: Running on local model, so no price, maybe in the future we can add a way to calculate price based on electricity usage
+ };
+ }
+ /**
+ * Default model for chat variant.
+ */
+ getDefaultChatModel() {
+ return this.getDefaultModel('llama2'); // <- TODO: [🧠] Pick the best default model
+ // <- TODO: [🛄] When 'llama2' not installed, maybe better error message
+ }
+ /**
+ * Default model for completion variant.
+ */
+ getDefaultCompletionModel() {
+ return this.getDefaultModel('llama2'); // <- TODO: [🧠] Pick the best default model
+ // <- TODO: [🛄] When 'llama2' not installed, maybe better error message
+ }
+ /**
+ * Default model for completion variant.
+ */
+ getDefaultEmbeddingModel() {
+ return this.getDefaultModel('text-embedding-3-large'); // <- TODO: [🧠] Pick the best default model
+ // <- TODO: [🛄]
+ }
+ }
+ /**
+ * TODO: [🛄] Some way how to re-wrap the errors from `OpenAiCompatibleExecutionTools`
+ */
+
+ /**
+ * Execution Tools for calling Ollama API
+ *
+ * @public exported from `@promptbook/ollama`
+ */
+ const createOllamaExecutionTools = Object.assign((options) => {
+ return new OllamaExecutionTools(options);
  }, {
  packageName: '@promptbook/ollama',
  className: 'OllamaExecutionTools',
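
A hypothetical usage sketch of the new class via its registered factory (option names taken from the constructor and the `isVerbose` flag used above; the base URL default is shown for clarity):

const tools = createOllamaExecutionTools({
    baseURL: 'http://localhost:11434/v1', // the DEFAULT_OLLAMA_BASE_URL; override for a remote Ollama
    isVerbose: true,
});
const models = await tools.listModels(); // matches installed models against OLLAMA_MODELS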
@@ -2354,5 +2451,5 @@ const _OllamaRegistration = $llmToolsRegister.register(createOllamaExecutionTool
  * Note: [💞] Ignore a discrepancy between file name and entity name
  */

- export { BOOK_LANGUAGE_VERSION, DEFAULT_OLLAMA_BASE_URL, PROMPTBOOK_ENGINE_VERSION, _OllamaRegistration, createOllamaExecutionTools };
+ export { BOOK_LANGUAGE_VERSION, DEFAULT_OLLAMA_BASE_URL, OLLAMA_MODELS, OllamaExecutionTools, PROMPTBOOK_ENGINE_VERSION, _OllamaRegistration, createOllamaExecutionTools };
  //# sourceMappingURL=index.es.js.map