@promptbook/ollama 0.94.0-1 → 0.94.0-4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +9 -9
- package/esm/index.es.js +1880 -166
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/ollama.index.d.ts +6 -0
- package/esm/typings/src/_packages/openai.index.d.ts +2 -0
- package/esm/typings/src/execution/AvailableModel.d.ts +9 -1
- package/esm/typings/src/llm-providers/_common/filterModels.d.ts +2 -2
- package/esm/typings/src/llm-providers/{openai/computeUsage.d.ts → _common/utils/pricing.d.ts} +2 -2
- package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions.d.ts +1 -1
- package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionToolsOptions.d.ts +1 -1
- package/esm/typings/src/llm-providers/deepseek/DeepseekExecutionToolsOptions.d.ts +1 -1
- package/esm/typings/src/llm-providers/google/GoogleExecutionToolsOptions.d.ts +1 -1
- package/esm/typings/src/llm-providers/ollama/OllamaExecutionTools.d.ts +36 -11
- package/esm/typings/src/llm-providers/ollama/OllamaExecutionToolsOptions.d.ts +23 -12
- package/esm/typings/src/llm-providers/ollama/createOllamaExecutionTools.d.ts +3 -3
- package/esm/typings/src/llm-providers/ollama/ollama-models.d.ts +14 -0
- package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionToolsOptions.d.ts +1 -1
- package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionTools.d.ts +91 -0
- package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +12 -53
- package/esm/typings/src/llm-providers/openai/OpenAiExecutionToolsOptions.d.ts +1 -1
- package/esm/typings/src/llm-providers/openai/createOpenAiExecutionTools.d.ts +2 -0
- package/esm/typings/src/llm-providers/openai/openai-models.d.ts +1 -7
- package/esm/typings/src/version.d.ts +1 -1
- package/package.json +4 -2
- package/umd/index.umd.js +1884 -168
- package/umd/index.umd.js.map +1 -1
- /package/esm/typings/src/llm-providers/{openai/computeUsage.test.d.ts → _common/utils/pricing.test.d.ts} +0 -0
package/umd/index.umd.js
CHANGED
@@ -1,14 +1,15 @@
 (function (global, factory) {
-    typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('
-    typeof define === 'function' && define.amd ? define(['exports', '
-    (global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global["promptbook-ollama"] = {}, global.
-})(this, (function (exports,
+    typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('spacetrim'), require('crypto'), require('bottleneck'), require('colors'), require('openai')) :
+    typeof define === 'function' && define.amd ? define(['exports', 'spacetrim', 'crypto', 'bottleneck', 'colors', 'openai'], factory) :
+    (global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global["promptbook-ollama"] = {}, global.spaceTrim, global.crypto, global.Bottleneck, global.colors, global.OpenAI));
+})(this, (function (exports, spaceTrim, crypto, Bottleneck, colors, OpenAI) { 'use strict';
 
     function _interopDefaultLegacy (e) { return e && typeof e === 'object' && 'default' in e ? e : { 'default': e }; }
 
-    var Bottleneck__default = /*#__PURE__*/_interopDefaultLegacy(Bottleneck);
-    var fetch__default = /*#__PURE__*/_interopDefaultLegacy(fetch);
     var spaceTrim__default = /*#__PURE__*/_interopDefaultLegacy(spaceTrim);
+    var Bottleneck__default = /*#__PURE__*/_interopDefaultLegacy(Bottleneck);
+    var colors__default = /*#__PURE__*/_interopDefaultLegacy(colors);
+    var OpenAI__default = /*#__PURE__*/_interopDefaultLegacy(OpenAI);
 
     // ⚠️ WARNING: This code has been generated so that any manual changes will be overwritten
     /**
@@ -24,12 +25,122 @@
      * @generated
      * @see https://github.com/webgptorg/promptbook
      */
-    const PROMPTBOOK_ENGINE_VERSION = '0.94.0-
+    const PROMPTBOOK_ENGINE_VERSION = '0.94.0-4';
     /**
      * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
      * Note: [💞] Ignore a discrepancy between file name and entity name
      */
 
+    /**
+     * Freezes the given object and all its nested objects recursively
+     *
+     * Note: `$` is used to indicate that this function is not a pure function - it mutates given object
+     * Note: This function mutates the object and returns the original (but mutated-deep-freezed) object
+     *
+     * @returns The same object as the input, but deeply frozen
+     * @public exported from `@promptbook/utils`
+     */
+    function $deepFreeze(objectValue) {
+        if (Array.isArray(objectValue)) {
+            return Object.freeze(objectValue.map((item) => $deepFreeze(item)));
+        }
+        const propertyNames = Object.getOwnPropertyNames(objectValue);
+        for (const propertyName of propertyNames) {
+            const value = objectValue[propertyName];
+            if (value && typeof value === 'object') {
+                $deepFreeze(value);
+            }
+        }
+        Object.freeze(objectValue);
+        return objectValue;
+    }
+    /**
+     * TODO: [🧠] Is there a way how to meaningfully test this utility
+     */
+
+    /**
+     * Represents the uncertain value
+     *
+     * @public exported from `@promptbook/core`
+     */
+    const ZERO_VALUE = $deepFreeze({ value: 0 });
+    /**
+     * Represents the uncertain value
+     *
+     * @public exported from `@promptbook/core`
+     */
+    const UNCERTAIN_ZERO_VALUE = $deepFreeze({ value: 0, isUncertain: true });
+    /**
+     * Represents the usage with no resources consumed
+     *
+     * @public exported from `@promptbook/core`
+     */
+    $deepFreeze({
+        price: ZERO_VALUE,
+        input: {
+            tokensCount: ZERO_VALUE,
+            charactersCount: ZERO_VALUE,
+            wordsCount: ZERO_VALUE,
+            sentencesCount: ZERO_VALUE,
+            linesCount: ZERO_VALUE,
+            paragraphsCount: ZERO_VALUE,
+            pagesCount: ZERO_VALUE,
+        },
+        output: {
+            tokensCount: ZERO_VALUE,
+            charactersCount: ZERO_VALUE,
+            wordsCount: ZERO_VALUE,
+            sentencesCount: ZERO_VALUE,
+            linesCount: ZERO_VALUE,
+            paragraphsCount: ZERO_VALUE,
+            pagesCount: ZERO_VALUE,
+        },
+    });
+    /**
+     * Represents the usage with unknown resources consumed
+     *
+     * @public exported from `@promptbook/core`
+     */
+    $deepFreeze({
+        price: UNCERTAIN_ZERO_VALUE,
+        input: {
+            tokensCount: UNCERTAIN_ZERO_VALUE,
+            charactersCount: UNCERTAIN_ZERO_VALUE,
+            wordsCount: UNCERTAIN_ZERO_VALUE,
+            sentencesCount: UNCERTAIN_ZERO_VALUE,
+            linesCount: UNCERTAIN_ZERO_VALUE,
+            paragraphsCount: UNCERTAIN_ZERO_VALUE,
+            pagesCount: UNCERTAIN_ZERO_VALUE,
+        },
+        output: {
+            tokensCount: UNCERTAIN_ZERO_VALUE,
+            charactersCount: UNCERTAIN_ZERO_VALUE,
+            wordsCount: UNCERTAIN_ZERO_VALUE,
+            sentencesCount: UNCERTAIN_ZERO_VALUE,
+            linesCount: UNCERTAIN_ZERO_VALUE,
+            paragraphsCount: UNCERTAIN_ZERO_VALUE,
+            pagesCount: UNCERTAIN_ZERO_VALUE,
+        },
+    });
+    /**
+     * Note: [💞] Ignore a discrepancy between file name and entity name
+     */
+
+    /**
+     * Orders JSON object by keys
+     *
+     * @returns The same type of object as the input re-ordered
+     * @public exported from `@promptbook/utils`
+     */
+    function orderJson(options) {
+        const { value, order } = options;
+        const orderedValue = {
+            ...(order === undefined ? {} : Object.fromEntries(order.map((key) => [key, undefined]))),
+            ...value,
+        };
+        return orderedValue;
+    }
+
     /**
      * Name for the Promptbook
      *
@@ -50,6 +161,34 @@
      * @public exported from `@promptbook/core`
      */
     const ADMIN_GITHUB_NAME = 'hejny';
+    // <- TODO: [🧠] Better system for generator warnings - not always "code" and "by `@promptbook/cli`"
+    /**
+     * The maximum number of iterations for a loops
+     *
+     * @private within the repository - too low-level in comparison with other `MAX_...`
+     */
+    const LOOP_LIMIT = 1000;
+    /**
+     * Strings to represent various values in the context of parameter values
+     *
+     * @public exported from `@promptbook/utils`
+     */
+    const VALUE_STRINGS = {
+        empty: '(nothing; empty string)',
+        null: '(no value; null)',
+        undefined: '(unknown value; undefined)',
+        nan: '(not a number; NaN)',
+        infinity: '(infinity; ∞)',
+        negativeInfinity: '(negative infinity; -∞)',
+        unserializable: '(unserializable value)',
+        circular: '(circular JSON)',
+    };
+    /**
+     * Small number limit
+     *
+     * @public exported from `@promptbook/utils`
+     */
+    const SMALL_NUMBER = 0.001;
     // <- TODO: [🧜♂️]
     /**
      * Default settings for parsing and generating CSV files in Promptbook.
@@ -75,40 +214,6 @@
      * TODO: [🧠][🧜♂️] Maybe join remoteServerUrl and path into single value
      */
 
-    /**
-     * Generates random token
-     *
-     * Note: This function is cryptographically secure (it uses crypto.randomBytes internally)
-     *
-     * @private internal helper function
-     * @returns secure random token
-     */
-    function $randomToken(randomness) {
-        return crypto.randomBytes(randomness).toString('hex');
-    }
-    /**
-     * TODO: Maybe use nanoid instead https://github.com/ai/nanoid
-     */
-
-    /**
-     * This error indicates errors during the execution of the pipeline
-     *
-     * @public exported from `@promptbook/core`
-     */
-    class PipelineExecutionError extends Error {
-        constructor(message) {
-            // Added id parameter
-            super(message);
-            this.name = 'PipelineExecutionError';
-            // TODO: [🐙] DRY - Maybe $randomId
-            this.id = `error-${$randomToken(8 /* <- TODO: To global config + Use Base58 to avoid similar char conflicts */)}`;
-            Object.setPrototypeOf(this, PipelineExecutionError.prototype);
-        }
-    }
-    /**
-     * TODO: [🧠][🌂] Add id to all errors
-     */
-
     /**
      * Make error report URL for the given error
      *
@@ -177,60 +282,6 @@
         }
     }
 
-    /**
-     * Simple wrapper `new Date().toISOString()`
-     *
-     * Note: `$` is used to indicate that this function is not a pure function - it is not deterministic because it depends on the current time
-     *
-     * @returns string_date branded type
-     * @public exported from `@promptbook/utils`
-     */
-    function $getCurrentDate() {
-        return new Date().toISOString();
-    }
-
-    /**
-     * Orders JSON object by keys
-     *
-     * @returns The same type of object as the input re-ordered
-     * @public exported from `@promptbook/utils`
-     */
-    function orderJson(options) {
-        const { value, order } = options;
-        const orderedValue = {
-            ...(order === undefined ? {} : Object.fromEntries(order.map((key) => [key, undefined]))),
-            ...value,
-        };
-        return orderedValue;
-    }
-
-    /**
-     * Freezes the given object and all its nested objects recursively
-     *
-     * Note: `$` is used to indicate that this function is not a pure function - it mutates given object
-     * Note: This function mutates the object and returns the original (but mutated-deep-freezed) object
-     *
-     * @returns The same object as the input, but deeply frozen
-     * @public exported from `@promptbook/utils`
-     */
-    function $deepFreeze(objectValue) {
-        if (Array.isArray(objectValue)) {
-            return Object.freeze(objectValue.map((item) => $deepFreeze(item)));
-        }
-        const propertyNames = Object.getOwnPropertyNames(objectValue);
-        for (const propertyName of propertyNames) {
-            const value = objectValue[propertyName];
-            if (value && typeof value === 'object') {
-                $deepFreeze(value);
-            }
-        }
-        Object.freeze(objectValue);
-        return objectValue;
-    }
-    /**
-     * TODO: [🧠] Is there a way how to meaningfully test this utility
-     */
-
     /**
      * This error type indicates that somewhere in the code non-Error object was thrown and it was wrapped into the `WrappedError`
      *
@@ -479,89 +530,1752 @@
|
|
|
479
530
|
*/
|
|
480
531
|
|
|
481
532
|
/**
|
|
482
|
-
*
|
|
533
|
+
* Nonce which is used for replacing things in strings
|
|
483
534
|
*
|
|
484
|
-
* @
|
|
535
|
+
* @private within the repository
|
|
485
536
|
*/
|
|
486
|
-
|
|
487
|
-
|
|
488
|
-
|
|
489
|
-
|
|
490
|
-
|
|
491
|
-
|
|
492
|
-
|
|
493
|
-
|
|
494
|
-
|
|
495
|
-
|
|
496
|
-
|
|
497
|
-
|
|
498
|
-
|
|
499
|
-
|
|
500
|
-
|
|
501
|
-
|
|
502
|
-
|
|
503
|
-
|
|
504
|
-
|
|
505
|
-
|
|
506
|
-
|
|
507
|
-
|
|
508
|
-
|
|
509
|
-
|
|
510
|
-
|
|
511
|
-
|
|
512
|
-
|
|
513
|
-
|
|
514
|
-
|
|
515
|
-
|
|
516
|
-
|
|
517
|
-
|
|
518
|
-
|
|
519
|
-
|
|
520
|
-
|
|
521
|
-
|
|
522
|
-
|
|
523
|
-
|
|
524
|
-
|
|
525
|
-
|
|
526
|
-
|
|
527
|
-
|
|
528
|
-
|
|
529
|
-
|
|
530
|
-
|
|
531
|
-
|
|
532
|
-
|
|
533
|
-
|
|
534
|
-
|
|
535
|
-
|
|
536
|
-
|
|
537
|
-
|
|
538
|
-
|
|
539
|
-
|
|
540
|
-
|
|
541
|
-
|
|
542
|
-
|
|
543
|
-
|
|
544
|
-
|
|
545
|
-
|
|
546
|
-
|
|
547
|
-
|
|
548
|
-
|
|
549
|
-
|
|
550
|
-
|
|
551
|
-
|
|
552
|
-
|
|
553
|
-
|
|
554
|
-
|
|
555
|
-
}
|
|
537
|
+
const REPLACING_NONCE = 'ptbkauk42kV2dzao34faw7FudQUHYPtW';
|
|
538
|
+
/**
|
|
539
|
+
* Nonce which is used as string which is not occurring in normal text
|
|
540
|
+
*
|
|
541
|
+
* @private within the repository
|
|
542
|
+
*/
|
|
543
|
+
const SALT_NONCE = 'ptbkghhewbvruets21t54et5';
|
|
544
|
+
/**
|
|
545
|
+
* Placeholder value indicating a parameter is missing its value.
|
|
546
|
+
*
|
|
547
|
+
* @private within the repository
|
|
548
|
+
*/
|
|
549
|
+
const RESERVED_PARAMETER_MISSING_VALUE = 'MISSING-' + REPLACING_NONCE;
|
|
550
|
+
/**
|
|
551
|
+
* Placeholder value indicating a parameter is restricted and cannot be used directly.
|
|
552
|
+
*
|
|
553
|
+
* @private within the repository
|
|
554
|
+
*/
|
|
555
|
+
const RESERVED_PARAMETER_RESTRICTED = 'RESTRICTED-' + REPLACING_NONCE;
|
|
556
|
+
/**
|
|
557
|
+
* The names of the parameters that are reserved for special purposes
|
|
558
|
+
*
|
|
559
|
+
* @public exported from `@promptbook/core`
|
|
560
|
+
*/
|
|
561
|
+
exportJson({
|
|
562
|
+
name: 'RESERVED_PARAMETER_NAMES',
|
|
563
|
+
message: `The names of the parameters that are reserved for special purposes`,
|
|
564
|
+
value: [
|
|
565
|
+
'content',
|
|
566
|
+
'context',
|
|
567
|
+
'knowledge',
|
|
568
|
+
'examples',
|
|
569
|
+
'modelName',
|
|
570
|
+
'currentDate',
|
|
571
|
+
// <- TODO: list here all command names
|
|
572
|
+
// <- TODO: Add more like 'date', 'modelName',...
|
|
573
|
+
// <- TODO: Add [emoji] + instructions ACRY when adding new reserved parameter
|
|
574
|
+
],
|
|
575
|
+
});
|
|
576
|
+
/**
|
|
577
|
+
* Note: [💞] Ignore a discrepancy between file name and entity name
|
|
578
|
+
*/
|
|
579
|
+
|
|
580
|
+
/**
|
|
581
|
+
* Generates random token
|
|
582
|
+
*
|
|
583
|
+
* Note: This function is cryptographically secure (it uses crypto.randomBytes internally)
|
|
584
|
+
*
|
|
585
|
+
* @private internal helper function
|
|
586
|
+
* @returns secure random token
|
|
587
|
+
*/
|
|
588
|
+
function $randomToken(randomness) {
|
|
589
|
+
return crypto.randomBytes(randomness).toString('hex');
|
|
590
|
+
}
|
|
591
|
+
/**
|
|
592
|
+
* TODO: Maybe use nanoid instead https://github.com/ai/nanoid
|
|
593
|
+
*/
|
|
594
|
+
|
|
595
|
+
/**
|
|
596
|
+
* This error indicates errors during the execution of the pipeline
|
|
597
|
+
*
|
|
598
|
+
* @public exported from `@promptbook/core`
|
|
599
|
+
*/
|
|
600
|
+
class PipelineExecutionError extends Error {
|
|
601
|
+
constructor(message) {
|
|
602
|
+
// Added id parameter
|
|
603
|
+
super(message);
|
|
604
|
+
this.name = 'PipelineExecutionError';
|
|
605
|
+
// TODO: [🐙] DRY - Maybe $randomId
|
|
606
|
+
this.id = `error-${$randomToken(8 /* <- TODO: To global config + Use Base58 to avoid similar char conflicts */)}`;
|
|
607
|
+
Object.setPrototypeOf(this, PipelineExecutionError.prototype);
|
|
556
608
|
}
|
|
557
609
|
}
|
|
610
|
+
/**
|
|
611
|
+
* TODO: [🧠][🌂] Add id to all errors
|
|
612
|
+
*/
|
|
613
|
+
|
|
614
|
+
/**
|
|
615
|
+
* Counts number of characters in the text
|
|
616
|
+
*
|
|
617
|
+
* @public exported from `@promptbook/utils`
|
|
618
|
+
*/
|
|
619
|
+
function countCharacters(text) {
|
|
620
|
+
// Remove null characters
|
|
621
|
+
text = text.replace(/\0/g, '');
|
|
622
|
+
// Replace emojis (and also ZWJ sequence) with hyphens
|
|
623
|
+
text = text.replace(/(\p{Extended_Pictographic})\p{Modifier_Symbol}/gu, '$1');
|
|
624
|
+
text = text.replace(/(\p{Extended_Pictographic})[\u{FE00}-\u{FE0F}]/gu, '$1');
|
|
625
|
+
text = text.replace(/\p{Extended_Pictographic}(\u{200D}\p{Extended_Pictographic})*/gu, '-');
|
|
626
|
+
return text.length;
|
|
627
|
+
}
|
|
628
|
+
/**
|
|
629
|
+
* TODO: [🥴] Implement counting in formats - like JSON, CSV, XML,...
|
|
630
|
+
*/
|
|
631
|
+
|
|
632
|
+
/**
|
|
633
|
+
* Number of characters per standard line with 11pt Arial font size.
|
|
634
|
+
*
|
|
635
|
+
* @public exported from `@promptbook/utils`
|
|
636
|
+
*/
|
|
637
|
+
const CHARACTERS_PER_STANDARD_LINE = 63;
|
|
638
|
+
/**
|
|
639
|
+
* Number of lines per standard A4 page with 11pt Arial font size and standard margins and spacing.
|
|
640
|
+
*
|
|
641
|
+
* @public exported from `@promptbook/utils`
|
|
642
|
+
*/
|
|
643
|
+
const LINES_PER_STANDARD_PAGE = 44;
|
|
644
|
+
/**
|
|
645
|
+
* TODO: [🧠] Should be this `constants.ts` or `config.ts`?
|
|
646
|
+
* Note: [💞] Ignore a discrepancy between file name and entity name
|
|
647
|
+
*/
|
|
648
|
+
|
|
649
|
+
/**
|
|
650
|
+
* Counts number of lines in the text
|
|
651
|
+
*
|
|
652
|
+
* Note: This does not check only for the presence of newlines, but also for the length of the standard line.
|
|
653
|
+
*
|
|
654
|
+
* @public exported from `@promptbook/utils`
|
|
655
|
+
*/
|
|
656
|
+
function countLines(text) {
|
|
657
|
+
text = text.replace('\r\n', '\n');
|
|
658
|
+
text = text.replace('\r', '\n');
|
|
659
|
+
const lines = text.split('\n');
|
|
660
|
+
return lines.reduce((count, line) => count + Math.ceil(line.length / CHARACTERS_PER_STANDARD_LINE), 0);
|
|
661
|
+
}
|
|
662
|
+
/**
|
|
663
|
+
* TODO: [🥴] Implement counting in formats - like JSON, CSV, XML,...
|
|
664
|
+
*/
|
|
665
|
+
|
|
666
|
+
/**
|
|
667
|
+
* Counts number of pages in the text
|
|
668
|
+
*
|
|
669
|
+
* Note: This does not check only for the count of newlines, but also for the length of the standard line and length of the standard page.
|
|
670
|
+
*
|
|
671
|
+
* @public exported from `@promptbook/utils`
|
|
672
|
+
*/
|
|
673
|
+
function countPages(text) {
|
|
674
|
+
return Math.ceil(countLines(text) / LINES_PER_STANDARD_PAGE);
|
|
675
|
+
}
|
|
676
|
+
/**
|
|
677
|
+
* TODO: [🥴] Implement counting in formats - like JSON, CSV, XML,...
|
|
678
|
+
*/
|
|
679
|
+
|
|
680
|
+
/**
|
|
681
|
+
* Counts number of paragraphs in the text
|
|
682
|
+
*
|
|
683
|
+
* @public exported from `@promptbook/utils`
|
|
684
|
+
*/
|
|
685
|
+
function countParagraphs(text) {
|
|
686
|
+
return text.split(/\n\s*\n/).filter((paragraph) => paragraph.trim() !== '').length;
|
|
687
|
+
}
|
|
688
|
+
/**
|
|
689
|
+
* TODO: [🥴] Implement counting in formats - like JSON, CSV, XML,...
|
|
690
|
+
*/
|
|
691
|
+
|
|
692
|
+
/**
|
|
693
|
+
* Split text into sentences
|
|
694
|
+
*
|
|
695
|
+
* @public exported from `@promptbook/utils`
|
|
696
|
+
*/
|
|
697
|
+
function splitIntoSentences(text) {
|
|
698
|
+
return text.split(/[.!?]+/).filter((sentence) => sentence.trim() !== '');
|
|
699
|
+
}
|
|
700
|
+
/**
|
|
701
|
+
* Counts number of sentences in the text
|
|
702
|
+
*
|
|
703
|
+
* @public exported from `@promptbook/utils`
|
|
704
|
+
*/
|
|
705
|
+
function countSentences(text) {
|
|
706
|
+
return splitIntoSentences(text).length;
|
|
707
|
+
}
|
|
708
|
+
/**
|
|
709
|
+
* TODO: [🥴] Implement counting in formats - like JSON, CSV, XML,...
|
|
710
|
+
*/
|
|
711
|
+
|
|
712
|
+
const defaultDiacriticsRemovalMap = [
|
|
713
|
+
{
|
|
714
|
+
base: 'A',
|
|
715
|
+
letters: '\u0041\u24B6\uFF21\u00C0\u00C1\u00C2\u1EA6\u1EA4\u1EAA\u1EA8\u00C3\u0100\u0102\u1EB0\u1EAE\u1EB4\u1EB2\u0226\u01E0\u00C4\u01DE\u1EA2\u00C5\u01FA\u01CD\u0200\u0202\u1EA0\u1EAC\u1EB6\u1E00\u0104\u023A\u2C6F',
|
|
716
|
+
},
|
|
717
|
+
{ base: 'AA', letters: '\uA732' },
|
|
718
|
+
{ base: 'AE', letters: '\u00C6\u01FC\u01E2' },
|
|
719
|
+
{ base: 'AO', letters: '\uA734' },
|
|
720
|
+
{ base: 'AU', letters: '\uA736' },
|
|
721
|
+
{ base: 'AV', letters: '\uA738\uA73A' },
|
|
722
|
+
{ base: 'AY', letters: '\uA73C' },
|
|
723
|
+
{
|
|
724
|
+
base: 'B',
|
|
725
|
+
letters: '\u0042\u24B7\uFF22\u1E02\u1E04\u1E06\u0243\u0182\u0181',
|
|
726
|
+
},
|
|
727
|
+
{
|
|
728
|
+
base: 'C',
|
|
729
|
+
letters: '\u0043\u24B8\uFF23\u0106\u0108\u010A\u010C\u00C7\u1E08\u0187\u023B\uA73E',
|
|
730
|
+
},
|
|
731
|
+
{
|
|
732
|
+
base: 'D',
|
|
733
|
+
letters: '\u0044\u24B9\uFF24\u1E0A\u010E\u1E0C\u1E10\u1E12\u1E0E\u0110\u018B\u018A\u0189\uA779\u00D0',
|
|
734
|
+
},
|
|
735
|
+
{ base: 'DZ', letters: '\u01F1\u01C4' },
|
|
736
|
+
{ base: 'Dz', letters: '\u01F2\u01C5' },
|
|
737
|
+
{
|
|
738
|
+
base: 'E',
|
|
739
|
+
letters: '\u0045\u24BA\uFF25\u00C8\u00C9\u00CA\u1EC0\u1EBE\u1EC4\u1EC2\u1EBC\u0112\u1E14\u1E16\u0114\u0116\u00CB\u1EBA\u011A\u0204\u0206\u1EB8\u1EC6\u0228\u1E1C\u0118\u1E18\u1E1A\u0190\u018E',
|
|
740
|
+
},
|
|
741
|
+
{ base: 'F', letters: '\u0046\u24BB\uFF26\u1E1E\u0191\uA77B' },
|
|
742
|
+
{
|
|
743
|
+
base: 'G',
|
|
744
|
+
letters: '\u0047\u24BC\uFF27\u01F4\u011C\u1E20\u011E\u0120\u01E6\u0122\u01E4\u0193\uA7A0\uA77D\uA77E',
|
|
745
|
+
},
|
|
746
|
+
{
|
|
747
|
+
base: 'H',
|
|
748
|
+
letters: '\u0048\u24BD\uFF28\u0124\u1E22\u1E26\u021E\u1E24\u1E28\u1E2A\u0126\u2C67\u2C75\uA78D',
|
|
749
|
+
},
|
|
750
|
+
{
|
|
751
|
+
base: 'I',
|
|
752
|
+
letters: '\u0049\u24BE\uFF29\u00CC\u00CD\u00CE\u0128\u012A\u012C\u0130\u00CF\u1E2E\u1EC8\u01CF\u0208\u020A\u1ECA\u012E\u1E2C\u0197',
|
|
753
|
+
},
|
|
754
|
+
{ base: 'J', letters: '\u004A\u24BF\uFF2A\u0134\u0248' },
|
|
755
|
+
{
|
|
756
|
+
base: 'K',
|
|
757
|
+
letters: '\u004B\u24C0\uFF2B\u1E30\u01E8\u1E32\u0136\u1E34\u0198\u2C69\uA740\uA742\uA744\uA7A2',
|
|
758
|
+
},
|
|
759
|
+
{
|
|
760
|
+
base: 'L',
|
|
761
|
+
letters: '\u004C\u24C1\uFF2C\u013F\u0139\u013D\u1E36\u1E38\u013B\u1E3C\u1E3A\u0141\u023D\u2C62\u2C60\uA748\uA746\uA780',
|
|
762
|
+
},
|
|
763
|
+
{ base: 'LJ', letters: '\u01C7' },
|
|
764
|
+
{ base: 'Lj', letters: '\u01C8' },
|
|
765
|
+
{ base: 'M', letters: '\u004D\u24C2\uFF2D\u1E3E\u1E40\u1E42\u2C6E\u019C' },
|
|
766
|
+
{
|
|
767
|
+
base: 'N',
|
|
768
|
+
letters: '\u004E\u24C3\uFF2E\u01F8\u0143\u00D1\u1E44\u0147\u1E46\u0145\u1E4A\u1E48\u0220\u019D\uA790\uA7A4',
|
|
769
|
+
},
|
|
770
|
+
{ base: 'NJ', letters: '\u01CA' },
|
|
771
|
+
{ base: 'Nj', letters: '\u01CB' },
|
|
772
|
+
{
|
|
773
|
+
base: 'O',
|
|
774
|
+
letters: '\u004F\u24C4\uFF2F\u00D2\u00D3\u00D4\u1ED2\u1ED0\u1ED6\u1ED4\u00D5\u1E4C\u022C\u1E4E\u014C\u1E50\u1E52\u014E\u022E\u0230\u00D6\u022A\u1ECE\u0150\u01D1\u020C\u020E\u01A0\u1EDC\u1EDA\u1EE0\u1EDE\u1EE2\u1ECC\u1ED8\u01EA\u01EC\u00D8\u01FE\u0186\u019F\uA74A\uA74C',
|
|
775
|
+
},
|
|
776
|
+
{ base: 'OI', letters: '\u01A2' },
|
|
777
|
+
{ base: 'OO', letters: '\uA74E' },
|
|
778
|
+
{ base: 'OU', letters: '\u0222' },
|
|
779
|
+
{ base: 'OE', letters: '\u008C\u0152' },
|
|
780
|
+
{ base: 'oe', letters: '\u009C\u0153' },
|
|
781
|
+
{
|
|
782
|
+
base: 'P',
|
|
783
|
+
letters: '\u0050\u24C5\uFF30\u1E54\u1E56\u01A4\u2C63\uA750\uA752\uA754',
|
|
784
|
+
},
|
|
785
|
+
{ base: 'Q', letters: '\u0051\u24C6\uFF31\uA756\uA758\u024A' },
|
|
786
|
+
{
|
|
787
|
+
base: 'R',
|
|
788
|
+
letters: '\u0052\u24C7\uFF32\u0154\u1E58\u0158\u0210\u0212\u1E5A\u1E5C\u0156\u1E5E\u024C\u2C64\uA75A\uA7A6\uA782',
|
|
789
|
+
},
|
|
790
|
+
{
|
|
791
|
+
base: 'S',
|
|
792
|
+
letters: '\u0053\u24C8\uFF33\u1E9E\u015A\u1E64\u015C\u1E60\u0160\u1E66\u1E62\u1E68\u0218\u015E\u2C7E\uA7A8\uA784',
|
|
793
|
+
},
|
|
794
|
+
{
|
|
795
|
+
base: 'T',
|
|
796
|
+
letters: '\u0054\u24C9\uFF34\u1E6A\u0164\u1E6C\u021A\u0162\u1E70\u1E6E\u0166\u01AC\u01AE\u023E\uA786',
|
|
797
|
+
},
|
|
798
|
+
{ base: 'TZ', letters: '\uA728' },
|
|
799
|
+
{
|
|
800
|
+
base: 'U',
|
|
801
|
+
letters: '\u0055\u24CA\uFF35\u00D9\u00DA\u00DB\u0168\u1E78\u016A\u1E7A\u016C\u00DC\u01DB\u01D7\u01D5\u01D9\u1EE6\u016E\u0170\u01D3\u0214\u0216\u01AF\u1EEA\u1EE8\u1EEE\u1EEC\u1EF0\u1EE4\u1E72\u0172\u1E76\u1E74\u0244',
|
|
802
|
+
},
|
|
803
|
+
{ base: 'V', letters: '\u0056\u24CB\uFF36\u1E7C\u1E7E\u01B2\uA75E\u0245' },
|
|
804
|
+
{ base: 'VY', letters: '\uA760' },
|
|
805
|
+
{
|
|
806
|
+
base: 'W',
|
|
807
|
+
letters: '\u0057\u24CC\uFF37\u1E80\u1E82\u0174\u1E86\u1E84\u1E88\u2C72',
|
|
808
|
+
},
|
|
809
|
+
{ base: 'X', letters: '\u0058\u24CD\uFF38\u1E8A\u1E8C' },
|
|
810
|
+
{
|
|
811
|
+
base: 'Y',
|
|
812
|
+
letters: '\u0059\u24CE\uFF39\u1EF2\u00DD\u0176\u1EF8\u0232\u1E8E\u0178\u1EF6\u1EF4\u01B3\u024E\u1EFE',
|
|
813
|
+
},
|
|
814
|
+
{
|
|
815
|
+
base: 'Z',
|
|
816
|
+
letters: '\u005A\u24CF\uFF3A\u0179\u1E90\u017B\u017D\u1E92\u1E94\u01B5\u0224\u2C7F\u2C6B\uA762',
|
|
817
|
+
},
|
|
818
|
+
{
|
|
819
|
+
base: 'a',
|
|
820
|
+
letters: '\u0061\u24D0\uFF41\u1E9A\u00E0\u00E1\u00E2\u1EA7\u1EA5\u1EAB\u1EA9\u00E3\u0101\u0103\u1EB1\u1EAF\u1EB5\u1EB3\u0227\u01E1\u00E4\u01DF\u1EA3\u00E5\u01FB\u01CE\u0201\u0203\u1EA1\u1EAD\u1EB7\u1E01\u0105\u2C65\u0250',
|
|
821
|
+
},
|
|
822
|
+
{ base: 'aa', letters: '\uA733' },
|
|
823
|
+
{ base: 'ae', letters: '\u00E6\u01FD\u01E3' },
|
|
824
|
+
{ base: 'ao', letters: '\uA735' },
|
|
825
|
+
{ base: 'au', letters: '\uA737' },
|
|
826
|
+
{ base: 'av', letters: '\uA739\uA73B' },
|
|
827
|
+
{ base: 'ay', letters: '\uA73D' },
|
|
828
|
+
{
|
|
829
|
+
base: 'b',
|
|
830
|
+
letters: '\u0062\u24D1\uFF42\u1E03\u1E05\u1E07\u0180\u0183\u0253',
|
|
831
|
+
},
|
|
832
|
+
{
|
|
833
|
+
base: 'c',
|
|
834
|
+
letters: '\u0063\u24D2\uFF43\u0107\u0109\u010B\u010D\u00E7\u1E09\u0188\u023C\uA73F\u2184',
|
|
835
|
+
},
|
|
836
|
+
{
|
|
837
|
+
base: 'd',
|
|
838
|
+
letters: '\u0064\u24D3\uFF44\u1E0B\u010F\u1E0D\u1E11\u1E13\u1E0F\u0111\u018C\u0256\u0257\uA77A',
|
|
839
|
+
},
|
|
840
|
+
{ base: 'dz', letters: '\u01F3\u01C6' },
|
|
841
|
+
{
|
|
842
|
+
base: 'e',
|
|
843
|
+
letters: '\u0065\u24D4\uFF45\u00E8\u00E9\u00EA\u1EC1\u1EBF\u1EC5\u1EC3\u1EBD\u0113\u1E15\u1E17\u0115\u0117\u00EB\u1EBB\u011B\u0205\u0207\u1EB9\u1EC7\u0229\u1E1D\u0119\u1E19\u1E1B\u0247\u025B\u01DD',
|
|
844
|
+
},
|
|
845
|
+
{ base: 'f', letters: '\u0066\u24D5\uFF46\u1E1F\u0192\uA77C' },
|
|
846
|
+
{
|
|
847
|
+
base: 'g',
|
|
848
|
+
letters: '\u0067\u24D6\uFF47\u01F5\u011D\u1E21\u011F\u0121\u01E7\u0123\u01E5\u0260\uA7A1\u1D79\uA77F',
|
|
849
|
+
},
|
|
850
|
+
{
|
|
851
|
+
base: 'h',
|
|
852
|
+
letters: '\u0068\u24D7\uFF48\u0125\u1E23\u1E27\u021F\u1E25\u1E29\u1E2B\u1E96\u0127\u2C68\u2C76\u0265',
|
|
853
|
+
},
|
|
854
|
+
{ base: 'hv', letters: '\u0195' },
|
|
855
|
+
{
|
|
856
|
+
base: 'i',
|
|
857
|
+
letters: '\u0069\u24D8\uFF49\u00EC\u00ED\u00EE\u0129\u012B\u012D\u00EF\u1E2F\u1EC9\u01D0\u0209\u020B\u1ECB\u012F\u1E2D\u0268\u0131',
|
|
858
|
+
},
|
|
859
|
+
{ base: 'j', letters: '\u006A\u24D9\uFF4A\u0135\u01F0\u0249' },
|
|
860
|
+
{
|
|
861
|
+
base: 'k',
|
|
862
|
+
letters: '\u006B\u24DA\uFF4B\u1E31\u01E9\u1E33\u0137\u1E35\u0199\u2C6A\uA741\uA743\uA745\uA7A3',
|
|
863
|
+
},
|
|
864
|
+
{
|
|
865
|
+
base: 'l',
|
|
866
|
+
letters: '\u006C\u24DB\uFF4C\u0140\u013A\u013E\u1E37\u1E39\u013C\u1E3D\u1E3B\u017F\u0142\u019A\u026B\u2C61\uA749\uA781\uA747',
|
|
867
|
+
},
|
|
868
|
+
{ base: 'lj', letters: '\u01C9' },
|
|
869
|
+
{ base: 'm', letters: '\u006D\u24DC\uFF4D\u1E3F\u1E41\u1E43\u0271\u026F' },
|
|
870
|
+
{
|
|
871
|
+
base: 'n',
|
|
872
|
+
letters: '\u006E\u24DD\uFF4E\u01F9\u0144\u00F1\u1E45\u0148\u1E47\u0146\u1E4B\u1E49\u019E\u0272\u0149\uA791\uA7A5',
|
|
873
|
+
},
|
|
874
|
+
{ base: 'nj', letters: '\u01CC' },
|
|
875
|
+
{
|
|
876
|
+
base: 'o',
|
|
877
|
+
letters: '\u006F\u24DE\uFF4F\u00F2\u00F3\u00F4\u1ED3\u1ED1\u1ED7\u1ED5\u00F5\u1E4D\u022D\u1E4F\u014D\u1E51\u1E53\u014F\u022F\u0231\u00F6\u022B\u1ECF\u0151\u01D2\u020D\u020F\u01A1\u1EDD\u1EDB\u1EE1\u1EDF\u1EE3\u1ECD\u1ED9\u01EB\u01ED\u00F8\u01FF\u0254\uA74B\uA74D\u0275',
|
|
878
|
+
},
|
|
879
|
+
{ base: 'oi', letters: '\u01A3' },
|
|
880
|
+
{ base: 'ou', letters: '\u0223' },
|
|
881
|
+
{ base: 'oo', letters: '\uA74F' },
|
|
882
|
+
{
|
|
883
|
+
base: 'p',
|
|
884
|
+
letters: '\u0070\u24DF\uFF50\u1E55\u1E57\u01A5\u1D7D\uA751\uA753\uA755',
|
|
885
|
+
},
|
|
886
|
+
{ base: 'q', letters: '\u0071\u24E0\uFF51\u024B\uA757\uA759' },
|
|
887
|
+
{
|
|
888
|
+
base: 'r',
|
|
889
|
+
letters: '\u0072\u24E1\uFF52\u0155\u1E59\u0159\u0211\u0213\u1E5B\u1E5D\u0157\u1E5F\u024D\u027D\uA75B\uA7A7\uA783',
|
|
890
|
+
},
|
|
891
|
+
{
|
|
892
|
+
base: 's',
|
|
893
|
+
letters: '\u0073\u24E2\uFF53\u00DF\u015B\u1E65\u015D\u1E61\u0161\u1E67\u1E63\u1E69\u0219\u015F\u023F\uA7A9\uA785\u1E9B',
|
|
894
|
+
},
|
|
895
|
+
{
|
|
896
|
+
base: 't',
|
|
897
|
+
letters: '\u0074\u24E3\uFF54\u1E6B\u1E97\u0165\u1E6D\u021B\u0163\u1E71\u1E6F\u0167\u01AD\u0288\u2C66\uA787',
|
|
898
|
+
},
|
|
899
|
+
{ base: 'tz', letters: '\uA729' },
|
|
900
|
+
{
|
|
901
|
+
base: 'u',
|
|
902
|
+
letters: '\u0075\u24E4\uFF55\u00F9\u00FA\u00FB\u0169\u1E79\u016B\u1E7B\u016D\u00FC\u01DC\u01D8\u01D6\u01DA\u1EE7\u016F\u0171\u01D4\u0215\u0217\u01B0\u1EEB\u1EE9\u1EEF\u1EED\u1EF1\u1EE5\u1E73\u0173\u1E77\u1E75\u0289',
|
|
903
|
+
},
|
|
904
|
+
{ base: 'v', letters: '\u0076\u24E5\uFF56\u1E7D\u1E7F\u028B\uA75F\u028C' },
|
|
905
|
+
{ base: 'vy', letters: '\uA761' },
|
|
906
|
+
{
|
|
907
|
+
base: 'w',
|
|
908
|
+
letters: '\u0077\u24E6\uFF57\u1E81\u1E83\u0175\u1E87\u1E85\u1E98\u1E89\u2C73',
|
|
909
|
+
},
|
|
910
|
+
{ base: 'x', letters: '\u0078\u24E7\uFF58\u1E8B\u1E8D' },
|
|
911
|
+
{
|
|
912
|
+
base: 'y',
|
|
913
|
+
letters: '\u0079\u24E8\uFF59\u1EF3\u00FD\u0177\u1EF9\u0233\u1E8F\u00FF\u1EF7\u1E99\u1EF5\u01B4\u024F\u1EFF',
|
|
914
|
+
},
|
|
915
|
+
{
|
|
916
|
+
base: 'z',
|
|
917
|
+
letters: '\u007A\u24E9\uFF5A\u017A\u1E91\u017C\u017E\u1E93\u1E95\u01B6\u0225\u0240\u2C6C\uA763',
|
|
918
|
+
},
|
|
919
|
+
];
|
|
920
|
+
/**
|
|
921
|
+
* Map of letters from diacritic variant to diacritless variant
|
|
922
|
+
* Contains lowercase and uppercase separatelly
|
|
923
|
+
*
|
|
924
|
+
* > "á" => "a"
|
|
925
|
+
* > "ě" => "e"
|
|
926
|
+
* > "Ă" => "A"
|
|
927
|
+
* > ...
|
|
928
|
+
*
|
|
929
|
+
* @public exported from `@promptbook/utils`
|
|
930
|
+
*/
|
|
931
|
+
const DIACRITIC_VARIANTS_LETTERS = {};
|
|
932
|
+
// tslint:disable-next-line: prefer-for-of
|
|
933
|
+
for (let i = 0; i < defaultDiacriticsRemovalMap.length; i++) {
|
|
934
|
+
const letters = defaultDiacriticsRemovalMap[i].letters;
|
|
935
|
+
// tslint:disable-next-line: prefer-for-of
|
|
936
|
+
for (let j = 0; j < letters.length; j++) {
|
|
937
|
+
DIACRITIC_VARIANTS_LETTERS[letters[j]] = defaultDiacriticsRemovalMap[i].base;
|
|
938
|
+
}
|
|
939
|
+
}
|
|
940
|
+
// <- TODO: [🍓] Put to maker function to save execution time if not needed
|
|
941
|
+
/*
|
|
942
|
+
@see https://stackoverflow.com/questions/990904/remove-accents-diacritics-in-a-string-in-javascript
|
|
943
|
+
Licensed under the Apache License, Version 2.0 (the "License");
|
|
944
|
+
you may not use this file except in compliance with the License.
|
|
945
|
+
You may obtain a copy of the License at
|
|
946
|
+
|
|
947
|
+
http://www.apache.org/licenses/LICENSE-2.0
|
|
948
|
+
|
|
949
|
+
Unless required by applicable law or agreed to in writing, software
|
|
950
|
+
distributed under the License is distributed on an "AS IS" BASIS,
|
|
951
|
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
952
|
+
See the License for the specific language governing permissions and
|
|
953
|
+
limitations under the License.
|
|
954
|
+
*/
|
|
955
|
+
|
|
956
|
+
/**
|
|
957
|
+
* Removes diacritic marks (accents) from characters in a string.
|
|
958
|
+
*
|
|
959
|
+
* @param input The string containing diacritics to be normalized.
|
|
960
|
+
* @returns The string with diacritics removed or normalized.
|
|
961
|
+
* @public exported from `@promptbook/utils`
|
|
962
|
+
*/
|
|
963
|
+
function removeDiacritics(input) {
|
|
964
|
+
/*eslint no-control-regex: "off"*/
|
|
965
|
+
return input.replace(/[^\u0000-\u007E]/g, (a) => {
|
|
966
|
+
return DIACRITIC_VARIANTS_LETTERS[a] || a;
|
|
967
|
+
});
|
|
968
|
+
}
|
|
969
|
+
/**
|
|
970
|
+
* TODO: [Ж] Variant for cyrillic (and in general non-latin) letters
|
|
971
|
+
*/
|
|
972
|
+
|
|
973
|
+
/**
|
|
974
|
+
* Counts number of words in the text
|
|
975
|
+
*
|
|
976
|
+
* @public exported from `@promptbook/utils`
|
|
977
|
+
*/
|
|
978
|
+
function countWords(text) {
|
|
979
|
+
text = text.replace(/[\p{Extended_Pictographic}]/gu, 'a');
|
|
980
|
+
text = removeDiacritics(text);
|
|
981
|
+
// Add spaces before uppercase letters preceded by lowercase letters (for camelCase)
|
|
982
|
+
text = text.replace(/([a-z])([A-Z])/g, '$1 $2');
|
|
983
|
+
return text.split(/[^a-zа-я0-9]+/i).filter((word) => word.length > 0).length;
|
|
984
|
+
}
|
|
985
|
+
/**
|
|
986
|
+
* TODO: [🥴] Implement counting in formats - like JSON, CSV, XML,...
|
|
987
|
+
*/
|
|
988
|
+
|
|
989
|
+
/**
|
|
990
|
+
* Helper of usage compute
|
|
991
|
+
*
|
|
992
|
+
* @param content the content of prompt or response
|
|
993
|
+
* @returns part of UsageCounts
|
|
994
|
+
*
|
|
995
|
+
* @private internal utility of LlmExecutionTools
|
|
996
|
+
*/
|
|
997
|
+
function computeUsageCounts(content) {
|
|
998
|
+
return {
|
|
999
|
+
charactersCount: { value: countCharacters(content) },
|
|
1000
|
+
wordsCount: { value: countWords(content) },
|
|
1001
|
+
sentencesCount: { value: countSentences(content) },
|
|
1002
|
+
linesCount: { value: countLines(content) },
|
|
1003
|
+
paragraphsCount: { value: countParagraphs(content) },
|
|
1004
|
+
pagesCount: { value: countPages(content) },
|
|
1005
|
+
};
|
|
1006
|
+
}
|
|
1007
|
+
|
|
1008
|
+
/**
|
|
1009
|
+
* Make UncertainNumber
|
|
1010
|
+
*
|
|
1011
|
+
* @param value value of the uncertain number, if `NaN` or `undefined`, it will be set to 0 and `isUncertain=true`
|
|
1012
|
+
* @param isUncertain if `true`, the value is uncertain, otherwise depends on the value
|
|
1013
|
+
*
|
|
1014
|
+
* @private utility for initializating UncertainNumber
|
|
1015
|
+
*/
|
|
1016
|
+
function uncertainNumber(value, isUncertain) {
|
|
1017
|
+
if (value === null || value === undefined || Number.isNaN(value)) {
|
|
1018
|
+
return UNCERTAIN_ZERO_VALUE;
|
|
1019
|
+
}
|
|
1020
|
+
if (isUncertain === true) {
|
|
1021
|
+
return { value, isUncertain };
|
|
1022
|
+
}
|
|
1023
|
+
return { value };
|
|
1024
|
+
}
|
|
1025
|
+
|
|
1026
|
+
/**
|
|
1027
|
+
* Create price per one token based on the string value found on openai page
|
|
1028
|
+
*
|
|
1029
|
+
* @private within the repository, used only as internal helper for `OPENAI_MODELS`
|
|
1030
|
+
*/
|
|
1031
|
+
function pricing(value) {
|
|
1032
|
+
const [price, tokens] = value.split(' / ');
|
|
1033
|
+
return parseFloat(price.replace('$', '')) / parseFloat(tokens.replace('M tokens', '')) / 1000000;
|
|
1034
|
+
}
|
|
1035
|
+
|
|
1036
|
+
/**
|
|
1037
|
+
* List of available OpenAI models with pricing
|
|
1038
|
+
*
|
|
1039
|
+
* Note: Done at 2025-05-06
|
|
1040
|
+
*
|
|
1041
|
+
* @see https://platform.openai.com/docs/models/
|
|
1042
|
+
* @see https://openai.com/api/pricing/
|
|
1043
|
+
* @public exported from `@promptbook/openai`
|
|
1044
|
+
*/
|
|
1045
|
+
const OPENAI_MODELS = exportJson({
|
|
1046
|
+
name: 'OPENAI_MODELS',
|
|
1047
|
+
value: [
|
|
1048
|
+
/*/
|
|
1049
|
+
{
|
|
1050
|
+
modelTitle: 'dall-e-3',
|
|
1051
|
+
modelName: 'dall-e-3',
|
|
1052
|
+
},
|
|
1053
|
+
/**/
|
|
1054
|
+
/*/
|
|
1055
|
+
{
|
|
1056
|
+
modelTitle: 'whisper-1',
|
|
1057
|
+
modelName: 'whisper-1',
|
|
1058
|
+
},
|
|
1059
|
+
/**/
|
|
1060
|
+
/**/
|
|
1061
|
+
{
|
|
1062
|
+
modelVariant: 'COMPLETION',
|
|
1063
|
+
modelTitle: 'davinci-002',
|
|
1064
|
+
modelName: 'davinci-002',
|
|
1065
|
+
modelDescription: 'Legacy completion model with strong performance on text generation tasks. Optimized for complex instructions and longer outputs.',
|
|
1066
|
+
pricing: {
|
|
1067
|
+
prompt: pricing(`$2.00 / 1M tokens`),
|
|
1068
|
+
output: pricing(`$2.00 / 1M tokens`),
|
|
1069
|
+
},
|
|
1070
|
+
},
|
|
1071
|
+
/**/
|
|
1072
|
+
/*/
|
|
1073
|
+
{
|
|
1074
|
+
modelTitle: 'dall-e-2',
|
|
1075
|
+
modelName: 'dall-e-2',
|
|
1076
|
+
},
|
|
1077
|
+
/**/
|
|
1078
|
+
/**/
|
|
1079
|
+
{
|
|
1080
|
+
modelVariant: 'CHAT',
|
|
1081
|
+
modelTitle: 'gpt-3.5-turbo-16k',
|
|
1082
|
+
modelName: 'gpt-3.5-turbo-16k',
|
|
1083
|
+
modelDescription: 'GPT-3.5 Turbo with extended 16k token context length for handling longer conversations and documents.',
|
|
1084
|
+
pricing: {
|
|
1085
|
+
prompt: pricing(`$3.00 / 1M tokens`),
|
|
1086
|
+
output: pricing(`$4.00 / 1M tokens`),
|
|
1087
|
+
},
|
|
1088
|
+
},
|
|
1089
|
+
/**/
|
|
1090
|
+
/*/
|
|
1091
|
+
{
|
|
1092
|
+
modelTitle: 'tts-1-hd-1106',
|
|
1093
|
+
modelName: 'tts-1-hd-1106',
|
|
1094
|
+
},
|
|
1095
|
+
/**/
|
|
1096
|
+
/*/
|
|
1097
|
+
{
|
|
1098
|
+
modelTitle: 'tts-1-hd',
|
|
1099
|
+
modelName: 'tts-1-hd',
|
|
1100
|
+
},
|
|
1101
|
+
/**/
|
|
1102
|
+
/**/
|
|
1103
|
+
{
|
|
1104
|
+
modelVariant: 'CHAT',
|
|
1105
|
+
modelTitle: 'gpt-4',
|
|
1106
|
+
modelName: 'gpt-4',
|
|
1107
|
+
modelDescription: 'GPT-4 is a powerful language model with enhanced reasoning, instruction-following capabilities, and 8K context window. Optimized for complex tasks requiring deep understanding.',
|
|
1108
|
+
pricing: {
|
|
1109
|
+
prompt: pricing(`$30.00 / 1M tokens`),
|
|
1110
|
+
output: pricing(`$60.00 / 1M tokens`),
|
|
1111
|
+
},
|
|
1112
|
+
},
|
|
1113
|
+
/**/
|
|
1114
|
+
/**/
|
|
1115
|
+
{
|
|
1116
|
+
modelVariant: 'CHAT',
|
|
1117
|
+
modelTitle: 'gpt-4-32k',
|
|
1118
|
+
modelName: 'gpt-4-32k',
|
|
1119
|
+
modelDescription: 'Extended context version of GPT-4 with a 32K token window for processing very long inputs and generating comprehensive responses for complex tasks.',
|
|
1120
|
+
pricing: {
|
|
1121
|
+
prompt: pricing(`$60.00 / 1M tokens`),
|
|
1122
|
+
output: pricing(`$120.00 / 1M tokens`),
|
|
1123
|
+
},
|
|
1124
|
+
},
|
|
1125
|
+
/**/
|
|
1126
|
+
/*/
|
|
1127
|
+
{
|
|
1128
|
+
modelVariant: 'CHAT',
|
|
1129
|
+
modelTitle: 'gpt-4-0613',
|
|
1130
|
+
modelName: 'gpt-4-0613',
|
|
1131
|
+
pricing: {
|
|
1132
|
+
prompt: computeUsage(` / 1M tokens`),
|
|
1133
|
+
output: computeUsage(` / 1M tokens`),
|
|
1134
|
+
},
|
|
1135
|
+
},
|
|
1136
|
+
/**/
|
|
1137
|
+
/**/
|
|
1138
|
+
{
|
|
1139
|
+
modelVariant: 'CHAT',
|
|
1140
|
+
modelTitle: 'gpt-4-turbo-2024-04-09',
|
|
1141
|
+
modelName: 'gpt-4-turbo-2024-04-09',
|
|
1142
|
+
modelDescription: 'Latest stable GPT-4 Turbo model from April 2024 with enhanced reasoning and context handling capabilities. Offers 128K context window and improved performance.',
|
|
1143
|
+
pricing: {
|
|
1144
|
+
prompt: pricing(`$10.00 / 1M tokens`),
|
|
1145
|
+
output: pricing(`$30.00 / 1M tokens`),
|
|
1146
|
+
},
|
|
1147
|
+
},
|
|
1148
|
+
/**/
|
|
1149
|
+
/**/
|
|
1150
|
+
{
|
|
1151
|
+
modelVariant: 'CHAT',
|
|
1152
|
+
modelTitle: 'gpt-3.5-turbo-1106',
|
|
1153
|
+
modelName: 'gpt-3.5-turbo-1106',
|
|
1154
|
+
modelDescription: 'November 2023 version of GPT-3.5 Turbo with improved instruction following and a 16K token context window.',
|
|
1155
|
+
pricing: {
|
|
1156
|
+
prompt: pricing(`$1.00 / 1M tokens`),
|
|
1157
|
+
output: pricing(`$2.00 / 1M tokens`),
|
|
1158
|
+
},
|
|
1159
|
+
},
|
|
1160
|
+
/**/
|
|
1161
|
+
/**/
|
|
1162
|
+
{
|
|
1163
|
+
modelVariant: 'CHAT',
|
|
1164
|
+
modelTitle: 'gpt-4-turbo',
|
|
1165
|
+
modelName: 'gpt-4-turbo',
|
|
1166
|
+
modelDescription: 'More capable model than GPT-4 with improved instruction following, function calling and a 128K token context window for handling very large documents.',
|
|
1167
|
+
pricing: {
|
|
1168
|
+
prompt: pricing(`$10.00 / 1M tokens`),
|
|
1169
|
+
output: pricing(`$30.00 / 1M tokens`),
|
|
1170
|
+
},
|
|
1171
|
+
},
|
|
1172
|
+
/**/
|
|
1173
|
+
/**/
|
|
1174
|
+
{
|
|
1175
|
+
modelVariant: 'COMPLETION',
|
|
1176
|
+
modelTitle: 'gpt-3.5-turbo-instruct-0914',
|
|
1177
|
+
modelName: 'gpt-3.5-turbo-instruct-0914',
|
|
1178
|
+
modelDescription: 'September 2023 version of GPT-3.5 Turbo optimized for completion-style instruction following with a 4K context window.',
|
|
1179
|
+
pricing: {
|
|
1180
|
+
prompt: pricing(`$1.50 / 1M tokens`),
|
|
1181
|
+
output: pricing(`$2.00 / 1M tokens`), // <- For gpt-3.5-turbo-instruct
|
|
1182
|
+
},
|
|
1183
|
+
},
|
|
1184
|
+
/**/
|
|
1185
|
+
/**/
|
|
1186
|
+
{
|
|
1187
|
+
modelVariant: 'COMPLETION',
|
|
1188
|
+
modelTitle: 'gpt-3.5-turbo-instruct',
|
|
1189
|
+
modelName: 'gpt-3.5-turbo-instruct',
|
|
1190
|
+
modelDescription: 'Optimized version of GPT-3.5 for completion-style API with good instruction following and a 4K token context window.',
|
|
1191
|
+
pricing: {
|
|
1192
|
+
prompt: pricing(`$1.50 / 1M tokens`),
|
|
1193
|
+
output: pricing(`$2.00 / 1M tokens`),
|
|
1194
|
+
},
|
|
1195
|
+
},
|
|
1196
|
+
/**/
|
|
1197
|
+
/*/
|
|
1198
|
+
{
|
|
1199
|
+
modelTitle: 'tts-1',
|
|
1200
|
+
modelName: 'tts-1',
|
|
1201
|
+
},
|
|
1202
|
+
/**/
|
|
1203
|
+
/**/
|
|
1204
|
+
{
|
|
1205
|
+
modelVariant: 'CHAT',
|
|
1206
|
+
modelTitle: 'gpt-3.5-turbo',
|
|
1207
|
+
modelName: 'gpt-3.5-turbo',
|
|
1208
|
+
modelDescription: 'Latest version of GPT-3.5 Turbo with improved performance and instruction following capabilities. Default 4K context window with options for 16K.',
|
|
1209
|
+
pricing: {
|
|
1210
|
+
prompt: pricing(`$0.50 / 1M tokens`),
|
|
1211
|
+
output: pricing(`$1.50 / 1M tokens`),
|
|
1212
|
+
},
|
|
1213
|
+
},
|
|
1214
|
+
/**/
|
|
1215
|
+
/**/
|
|
1216
|
+
{
|
|
1217
|
+
modelVariant: 'CHAT',
|
|
1218
|
+
modelTitle: 'gpt-3.5-turbo-0301',
|
|
1219
|
+
modelName: 'gpt-3.5-turbo-0301',
|
|
1220
|
+
modelDescription: 'March 2023 version of GPT-3.5 Turbo with a 4K token context window. Legacy model maintained for backward compatibility.',
|
|
1221
|
+
pricing: {
|
|
1222
|
+
prompt: pricing(`$1.50 / 1M tokens`),
|
|
1223
|
+
output: pricing(`$2.00 / 1M tokens`),
|
|
1224
|
+
},
|
|
1225
|
+
},
|
|
1226
|
+
/**/
|
|
1227
|
+
/**/
|
|
1228
|
+
{
|
|
1229
|
+
modelVariant: 'COMPLETION',
|
|
1230
|
+
modelTitle: 'babbage-002',
|
|
1231
|
+
modelName: 'babbage-002',
|
|
1232
|
+
modelDescription: 'Efficient legacy completion model with a good balance of performance and speed. Suitable for straightforward text generation tasks.',
|
|
1233
|
+
pricing: {
|
|
1234
|
+
prompt: pricing(`$0.40 / 1M tokens`),
|
|
1235
|
+
output: pricing(`$0.40 / 1M tokens`),
|
|
1236
|
+
},
|
|
1237
|
+
},
|
|
1238
|
+
/**/
|
|
1239
|
+
/**/
|
|
1240
|
+
{
|
|
1241
|
+
modelVariant: 'CHAT',
|
|
1242
|
+
modelTitle: 'gpt-4-1106-preview',
|
|
1243
|
+
modelName: 'gpt-4-1106-preview',
|
|
1244
|
+
modelDescription: 'November 2023 preview version of GPT-4 Turbo with improved instruction following and a 128K token context window.',
|
|
1245
|
+
pricing: {
|
|
1246
|
+
prompt: pricing(`$10.00 / 1M tokens`),
|
|
1247
|
+
output: pricing(`$30.00 / 1M tokens`),
|
|
1248
|
+
},
|
|
1249
|
+
},
|
|
1250
|
+
/**/
|
|
1251
|
+
/**/
|
|
1252
|
+
{
|
|
1253
|
+
modelVariant: 'CHAT',
|
|
1254
|
+
modelTitle: 'gpt-4-0125-preview',
|
|
1255
|
+
modelName: 'gpt-4-0125-preview',
|
|
1256
|
+
modelDescription: 'January 2024 preview version of GPT-4 Turbo with improved reasoning capabilities and a 128K token context window.',
|
|
1257
|
+
pricing: {
|
|
1258
|
+
prompt: pricing(`$10.00 / 1M tokens`),
|
|
1259
|
+
output: pricing(`$30.00 / 1M tokens`),
|
|
1260
|
+
},
|
|
1261
|
+
},
|
|
1262
|
+
/**/
|
|
1263
|
+
/*/
|
|
1264
|
+
{
|
|
1265
|
+
modelTitle: 'tts-1-1106',
|
|
1266
|
+
modelName: 'tts-1-1106',
|
|
1267
|
+
},
|
|
1268
|
+
/**/
|
|
1269
|
+
/**/
|
|
1270
|
+
{
|
|
1271
|
+
modelVariant: 'CHAT',
|
|
1272
|
+
modelTitle: 'gpt-3.5-turbo-0125',
|
|
1273
|
+
modelName: 'gpt-3.5-turbo-0125',
|
|
1274
|
+
modelDescription: 'January 2024 version of GPT-3.5 Turbo with improved reasoning capabilities and a 16K token context window.',
|
|
1275
|
+
pricing: {
|
|
1276
|
+
prompt: pricing(`$0.50 / 1M tokens`),
|
|
1277
|
+
output: pricing(`$1.50 / 1M tokens`),
|
|
1278
|
+
},
|
|
1279
|
+
},
|
|
1280
|
+
/**/
|
|
1281
|
+
/**/
|
|
1282
|
+
{
|
|
1283
|
+
modelVariant: 'CHAT',
|
|
1284
|
+
modelTitle: 'gpt-4-turbo-preview',
|
|
1285
|
+
modelName: 'gpt-4-turbo-preview',
|
|
1286
|
+
modelDescription: 'Preview version of GPT-4 Turbo that points to the latest model version. Features improved instruction following, 128K token context window and lower latency.',
|
|
1287
|
+
pricing: {
|
|
1288
|
+
prompt: pricing(`$10.00 / 1M tokens`),
|
|
1289
|
+
output: pricing(`$30.00 / 1M tokens`),
|
|
1290
|
+
},
|
|
1291
|
+
},
|
|
1292
|
+
/**/
|
|
1293
|
+
/**/
|
|
1294
|
+
{
|
|
1295
|
+
modelVariant: 'EMBEDDING',
|
|
1296
|
+
modelTitle: 'text-embedding-3-large',
|
|
1297
|
+
modelName: 'text-embedding-3-large',
|
|
1298
|
+
modelDescription: "OpenAI's most capable text embedding model designed for high-quality embeddings for complex similarity tasks and information retrieval.",
|
|
1299
|
+
pricing: {
|
|
1300
|
+
prompt: pricing(`$0.13 / 1M tokens`),
|
|
1301
|
+
// TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
|
|
1302
|
+
output: 0, // <- Note: [🆖] In Embedding models you dont pay for output
|
|
1303
|
+
},
|
|
1304
|
+
},
|
|
1305
|
+
/**/
|
|
1306
|
+
/**/
|
|
1307
|
+
{
|
|
1308
|
+
modelVariant: 'EMBEDDING',
|
|
1309
|
+
modelTitle: 'text-embedding-3-small',
|
|
1310
|
+
modelName: 'text-embedding-3-small',
|
|
1311
|
+
modelDescription: 'Cost-effective embedding model with good performance for simpler tasks like text similarity and retrieval. Good balance of quality and efficiency.',
|
|
1312
|
+
pricing: {
|
|
1313
|
+
prompt: pricing(`$0.02 / 1M tokens`),
|
|
1314
|
+
// TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
|
|
1315
|
+
output: 0, // <- Note: [🆖] In Embedding models you dont pay for output
|
|
1316
|
+
},
|
|
1317
|
+
},
|
|
1318
|
+
/**/
|
|
1319
|
+
/**/
|
|
1320
|
+
{
|
|
1321
|
+
modelVariant: 'CHAT',
|
|
1322
|
+
modelTitle: 'gpt-3.5-turbo-0613',
|
|
1323
|
+
modelName: 'gpt-3.5-turbo-0613',
|
|
1324
|
+
modelDescription: 'June 2023 version of GPT-3.5 Turbo with function calling capabilities and a 4K token context window.',
|
|
1325
|
+
pricing: {
|
|
1326
|
+
prompt: pricing(`$1.50 / 1M tokens`),
|
|
1327
|
+
output: pricing(`$2.00 / 1M tokens`),
|
|
1328
|
+
},
|
|
1329
|
+
},
|
|
1330
|
+
/**/
|
|
1331
|
+
/**/
|
|
1332
|
+
{
|
|
1333
|
+
modelVariant: 'EMBEDDING',
|
|
1334
|
+
modelTitle: 'text-embedding-ada-002',
|
|
1335
|
+
modelName: 'text-embedding-ada-002',
|
|
1336
|
+
modelDescription: 'Legacy text embedding model suitable for text similarity and retrieval augmented generation use cases. Replaced by newer embedding-3 models.',
|
|
1337
|
+
pricing: {
|
|
1338
|
+
prompt: pricing(`$0.1 / 1M tokens`),
|
|
1339
|
+
// TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
|
|
1340
|
+
output: 0, // <- Note: [🆖] In Embedding models you dont pay for output
|
|
1341
|
+
},
|
|
1342
|
+
},
|
|
1343
|
+
/**/
|
|
1344
|
+
/*/
|
|
1345
|
+
{
|
|
1346
|
+
modelVariant: 'CHAT',
|
|
1347
|
+
modelTitle: 'gpt-4-1106-vision-preview',
|
|
1348
|
+
modelName: 'gpt-4-1106-vision-preview',
|
|
1349
|
+
},
|
|
1350
|
+
/**/
|
|
1351
|
+
/*/
|
|
1352
|
+
{
|
|
1353
|
+
modelVariant: 'CHAT',
|
|
1354
|
+
modelTitle: 'gpt-4-vision-preview',
|
|
1355
|
+
modelName: 'gpt-4-vision-preview',
|
|
1356
|
+
pricing: {
|
|
1357
|
+
prompt: computeUsage(`$10.00 / 1M tokens`),
|
|
1358
|
+
output: computeUsage(`$30.00 / 1M tokens`),
|
|
1359
|
+
},
|
|
1360
|
+
},
|
|
1361
|
+
/**/
|
|
1362
|
+
/**/
|
|
1363
|
+
{
|
|
1364
|
+
modelVariant: 'CHAT',
|
|
1365
|
+
modelTitle: 'gpt-4o-2024-05-13',
|
|
1366
|
+
modelName: 'gpt-4o-2024-05-13',
|
|
1367
|
+
modelDescription: 'May 2024 version of GPT-4o with enhanced multimodal capabilities, improved reasoning, and optimized for vision, audio and chat at lower latencies.',
|
|
1368
|
+
pricing: {
|
|
1369
|
+
prompt: pricing(`$5.00 / 1M tokens`),
|
|
1370
|
+
output: pricing(`$15.00 / 1M tokens`),
|
|
1371
|
+
},
|
|
1372
|
+
},
|
|
1373
|
+
/**/
|
|
1374
|
+
/**/
|
|
1375
|
+
{
|
|
1376
|
+
modelVariant: 'CHAT',
|
|
1377
|
+
modelTitle: 'gpt-4o',
|
|
1378
|
+
modelName: 'gpt-4o',
|
|
1379
|
+
modelDescription: "OpenAI's most advanced multimodal model optimized for performance, speed, and cost. Capable of vision, reasoning, and high quality text generation.",
|
|
1380
|
+
pricing: {
|
|
1381
|
+
prompt: pricing(`$5.00 / 1M tokens`),
|
|
1382
|
+
output: pricing(`$15.00 / 1M tokens`),
|
|
1383
|
+
},
|
|
1384
|
+
},
|
|
1385
|
+
/**/
|
|
1386
|
+
/**/
|
|
1387
|
+
{
|
|
1388
|
+
modelVariant: 'CHAT',
|
|
1389
|
+
modelTitle: 'gpt-4o-mini',
|
|
1390
|
+
modelName: 'gpt-4o-mini',
|
|
1391
|
+
modelDescription: 'Smaller, more cost-effective version of GPT-4o with good performance across text, vision, and audio tasks at reduced complexity.',
|
|
1392
|
+
pricing: {
|
|
1393
|
+
prompt: pricing(`$0.15 / 1M tokens`),
|
|
1394
|
+
output: pricing(`$0.60 / 1M tokens`),
|
|
1395
|
+
},
|
|
1396
|
+
},
|
|
1397
|
+
/**/
|
|
1398
|
+
/**/
|
|
1399
|
+
{
|
|
1400
|
+
modelVariant: 'CHAT',
|
|
1401
|
+
modelTitle: 'o1-preview',
|
|
1402
|
+
modelName: 'o1-preview',
|
|
1403
|
+
modelDescription: 'Advanced reasoning model with exceptional performance on complex logical, mathematical, and analytical tasks. Built for deep reasoning and specialized professional tasks.',
|
|
1404
|
+
pricing: {
|
|
1405
|
+
prompt: pricing(`$15.00 / 1M tokens`),
|
|
1406
|
+
output: pricing(`$60.00 / 1M tokens`),
|
|
1407
|
+
},
|
|
1408
|
+
},
|
|
1409
|
+
/**/
|
|
1410
|
+
/**/
|
|
1411
|
+
{
|
|
1412
|
+
modelVariant: 'CHAT',
|
|
1413
|
+
modelTitle: 'o1-preview-2024-09-12',
|
|
1414
|
+
modelName: 'o1-preview-2024-09-12',
|
|
1415
|
+
modelDescription: 'September 2024 version of O1 preview with specialized reasoning capabilities for complex tasks requiring precise analytical thinking.',
|
|
1416
|
+
// <- TODO: [💩] Some better system to organize these date suffixes and versions
|
|
1417
|
+
pricing: {
|
|
1418
|
+
prompt: pricing(`$15.00 / 1M tokens`),
|
|
1419
|
+
output: pricing(`$60.00 / 1M tokens`),
|
|
1420
|
+
},
|
|
1421
|
+
},
|
|
1422
|
+
/**/
|
|
1423
|
+
/**/
|
|
1424
|
+
{
|
|
1425
|
+
modelVariant: 'CHAT',
|
|
1426
|
+
modelTitle: 'o1-mini',
|
|
1427
|
+
modelName: 'o1-mini',
|
|
1428
|
+
modelDescription: 'Smaller, cost-effective version of the O1 model with good performance on reasoning tasks while maintaining efficiency for everyday analytical use.',
|
|
1429
|
+
pricing: {
|
|
1430
|
+
prompt: pricing(`$3.00 / 1M tokens`),
|
|
1431
|
+
output: pricing(`$12.00 / 1M tokens`),
|
|
1432
|
+
},
|
|
1433
|
+
},
|
|
1434
|
+
/**/
|
|
1435
|
+
/**/
|
|
1436
|
+
{
|
|
1437
|
+
modelVariant: 'CHAT',
|
|
1438
|
+
modelTitle: 'o1',
|
|
1439
|
+
modelName: 'o1',
|
|
1440
|
+
modelDescription: "OpenAI's advanced reasoning model focused on logic and problem-solving. Designed for complex analytical tasks with rigorous step-by-step reasoning. 128K context window.",
|
|
1441
|
+
pricing: {
|
|
1442
|
+
prompt: pricing(`$15.00 / 1M tokens`),
|
|
1443
|
+
output: pricing(`$60.00 / 1M tokens`),
|
|
1444
|
+
},
|
|
1445
|
+
},
|
|
1446
|
+
/**/
|
|
1447
|
+
/**/
|
|
1448
|
+
{
|
|
1449
|
+
modelVariant: 'CHAT',
|
|
1450
|
+
modelTitle: 'o3-mini',
|
|
1451
|
+
modelName: 'o3-mini',
|
|
1452
|
+
modelDescription: 'Cost-effective reasoning model optimized for academic and scientific problem-solving. Efficient performance on STEM tasks with deep mathematical and scientific knowledge. 128K context window.',
|
|
1453
|
+
pricing: {
|
|
1454
|
+
prompt: pricing(`$3.00 / 1M tokens`),
|
|
1455
|
+
output: pricing(`$12.00 / 1M tokens`),
|
|
1456
|
+
// <- TODO: !! Unsure, check the pricing
|
|
1457
|
+
},
|
|
1458
|
+
},
|
|
1459
|
+
/**/
|
|
1460
|
+
/**/
|
|
1461
|
+
{
|
|
1462
|
+
modelVariant: 'CHAT',
|
|
1463
|
+
modelTitle: 'o1-mini-2024-09-12',
|
|
1464
|
+
modelName: 'o1-mini-2024-09-12',
|
|
1465
|
+
modelDescription: "September 2024 version of O1-mini with balanced reasoning capabilities and cost-efficiency. Good for analytical tasks that don't require the full O1 model.",
|
|
1466
|
+
pricing: {
|
|
1467
|
+
prompt: pricing(`$3.00 / 1M tokens`),
|
|
1468
|
+
output: pricing(`$12.00 / 1M tokens`),
|
|
1469
|
+
},
|
|
1470
|
+
},
|
|
1471
|
+
/**/
|
|
1472
|
+
/**/
|
|
1473
|
+
{
|
|
1474
|
+
modelVariant: 'CHAT',
|
|
1475
|
+
modelTitle: 'gpt-3.5-turbo-16k-0613',
|
|
1476
|
+
modelName: 'gpt-3.5-turbo-16k-0613',
|
|
1477
|
+
modelDescription: 'June 2023 version of GPT-3.5 Turbo with extended 16k token context window for processing longer conversations and documents.',
|
|
1478
|
+
pricing: {
|
|
1479
|
+
prompt: pricing(`$3.00 / 1M tokens`),
|
|
1480
|
+
output: pricing(`$4.00 / 1M tokens`),
|
|
1481
|
+
},
|
|
1482
|
+
},
|
|
1483
|
+
/**/
|
|
1484
|
+
// <- [🕕]
|
|
1485
|
+
],
|
|
1486
|
+
});
|
|
1487
|
+
/**
|
|
1488
|
+
* Note: [🤖] Add models of new variant
|
|
1489
|
+
* TODO: [🧠] Some mechanism to propagate unsureness
|
|
1490
|
+
* TODO: [🎰] Some mechanism to auto-update available models
|
|
1491
|
+
* TODO: [🎰][👮♀️] Make this list dynamic - dynamically can be listed modelNames but not modelVariant, legacy status, context length and pricing
|
|
1492
|
+
* TODO: [🧠][👮♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,...
|
|
1493
|
+
* @see https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4
|
|
1494
|
+
* @see https://openai.com/api/pricing/
|
|
1495
|
+
* @see /other/playground/playground.ts
|
|
1496
|
+
* TODO: [🍓][💩] Make better
|
|
1497
|
+
* TODO: Change model titles to human eg: "gpt-4-turbo-2024-04-09" -> "GPT-4 Turbo (2024-04-09)"
|
|
1498
|
+
* TODO: [🚸] Not all models are compatible with JSON mode, add this information here and use it
|
|
1499
|
+
* Note: [💞] Ignore a discrepancy between file name and entity name
|
|
1500
|
+
*/
|
|
1501
|
+
|
|
1502
|
+
+/**
+ * Computes the usage of the OpenAI API based on the response from OpenAI
+ *
+ * @param promptContent The content of the prompt
+ * @param resultContent The content of the result (for embedding prompts or failed prompts pass empty string)
+ * @param rawResponse The raw response from OpenAI API
+ * @throws {PipelineExecutionError} If the usage is not defined in the response from OpenAI
+ * @private internal utility of `OpenAiExecutionTools`
+ */
+function computeOpenAiUsage(promptContent, // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
+resultContent, rawResponse) {
+    var _a, _b;
+    if (rawResponse.usage === undefined) {
+        throw new PipelineExecutionError('The usage is not defined in the response from OpenAI');
+    }
+    if (((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.prompt_tokens) === undefined) {
+        throw new PipelineExecutionError('In OpenAI response `usage.prompt_tokens` is not defined');
+    }
+    const inputTokens = rawResponse.usage.prompt_tokens;
+    const outputTokens = ((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completion_tokens) || 0;
+    let isUncertain = false;
+    let modelInfo = OPENAI_MODELS.find((model) => model.modelName === rawResponse.model);
+    if (modelInfo === undefined) {
+        // Note: Model is not in the list of known models, fallback to the family of the models and mark price as uncertain
+        modelInfo = OPENAI_MODELS.find((model) => (rawResponse.model || SALT_NONCE).startsWith(model.modelName));
+        if (modelInfo !== undefined) {
+            isUncertain = true;
+        }
+    }
+    let price;
+    if (modelInfo === undefined || modelInfo.pricing === undefined) {
+        price = uncertainNumber();
+    }
+    else {
+        price = uncertainNumber(inputTokens * modelInfo.pricing.prompt + outputTokens * modelInfo.pricing.output, isUncertain);
+    }
+    return {
+        price,
+        input: {
+            tokensCount: uncertainNumber(rawResponse.usage.prompt_tokens),
+            ...computeUsageCounts(promptContent),
+        },
+        output: {
+            tokensCount: uncertainNumber(outputTokens),
+            ...computeUsageCounts(resultContent),
+        },
+    };
+}
+/**
+ * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
+ */
+
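// Worked example of the price arithmetic in `computeOpenAiUsage` above. With the o1 pricing
// from OPENAI_MODELS ($15.00 / 1M prompt tokens, $60.00 / 1M output tokens), a response
// reporting 1000 prompt tokens and 500 completion tokens costs:
//     1000 * (15 / 1_000_000) + 500 * (60 / 1_000_000)
//   = 0.015 + 0.03
//   = 0.045 USD
// The `isUncertain` flag only records whether the model was matched exactly or by family
// prefix; it does not change the arithmetic.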
+/**
+ * Simple wrapper around `new Date().toISOString()`
+ *
+ * Note: `$` is used to indicate that this function is not a pure function - it is not deterministic because it depends on the current time
+ *
+ * @returns string_date branded type
+ * @public exported from `@promptbook/utils`
+ */
+function $getCurrentDate() {
+    return new Date().toISOString();
+}
+
+/**
+ * This error type indicates that some limit was reached
+ *
+ * @public exported from `@promptbook/core`
+ */
+class LimitReachedError extends Error {
+    constructor(message) {
+        super(message);
+        this.name = 'LimitReachedError';
+        Object.setPrototypeOf(this, LimitReachedError.prototype);
+    }
+}
+
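// Note for readers: the `Object.setPrototypeOf` call above is what keeps `instanceof`
// reliable when the class is compiled down to ES5, where subclassing built-ins like Error
// is otherwise broken. A quick sketch of the intended behaviour:
try {
    throw new LimitReachedError('Loop limit reached');
}
catch (error) {
    console.log(error instanceof LimitReachedError); // -> true (thanks to setPrototypeOf)
    console.log(error.name); // -> 'LimitReachedError'
}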
+/**
+ * Format either small or big number
+ *
+ * @public exported from `@promptbook/utils`
+ */
+function numberToString(value) {
+    if (value === 0) {
+        return '0';
+    }
+    else if (Number.isNaN(value)) {
+        return VALUE_STRINGS.nan;
+    }
+    else if (value === Infinity) {
+        return VALUE_STRINGS.infinity;
+    }
+    else if (value === -Infinity) {
+        return VALUE_STRINGS.negativeInfinity;
+    }
+    for (let exponent = 0; exponent < 15; exponent++) {
+        const factor = 10 ** exponent;
+        const valueRounded = Math.round(value * factor) / factor;
+        if (Math.abs(value - valueRounded) / value < SMALL_NUMBER) {
+            return valueRounded.toFixed(exponent);
+        }
+    }
+    return value.toString();
+}
+
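// The loop above searches for the fewest decimal places that still represent the value within
// a relative error of SMALL_NUMBER. Assuming SMALL_NUMBER is a small epsilon, the behaviour
// is roughly:
// numberToString(0);         // -> '0'
// numberToString(1.5);       // -> '1.5' (exponent 1 already matches exactly)
// numberToString(1 / 3);     // -> '0.333' or '0.3333', depending on the SMALL_NUMBER tolerance
// numberToString(123456789); // -> '123456789' (exponent 0 matches immediately)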
+/**
+ * Function `valueToString` will convert the given value to string
+ * This is useful and used in the `templateParameters` function
+ *
+ * Note: This function is not just calling `toString` method
+ * It's more complex and can handle this conversion specifically for LLM models
+ * See `VALUE_STRINGS`
+ *
+ * Note: There are 2 similar functions
+ * - `valueToString` converts value to string for LLM models as human-readable string
+ * - `asSerializable` converts value to string to preserve full information to be able to convert it back
+ *
+ * @public exported from `@promptbook/utils`
+ */
+function valueToString(value) {
+    try {
+        if (value === '') {
+            return VALUE_STRINGS.empty;
+        }
+        else if (value === null) {
+            return VALUE_STRINGS.null;
+        }
+        else if (value === undefined) {
+            return VALUE_STRINGS.undefined;
+        }
+        else if (typeof value === 'string') {
+            return value;
+        }
+        else if (typeof value === 'number') {
+            return numberToString(value);
+        }
+        else if (value instanceof Date) {
+            return value.toISOString();
+        }
+        else {
+            try {
+                return JSON.stringify(value);
+            }
+            catch (error) {
+                if (error instanceof TypeError && error.message.includes('circular structure')) {
+                    return VALUE_STRINGS.circular;
+                }
+                throw error;
+            }
+        }
+    }
+    catch (error) {
+        assertsError(error);
+        console.error(error);
+        return VALUE_STRINGS.unserializable;
+    }
+}
+
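// A few illustrative calls (not part of the bundle), assuming VALUE_STRINGS maps the special
// cases to placeholder strings; the exact strings live in the VALUE_STRINGS constant, which
// is not shown in this hunk:
// valueToString('hello');     // -> 'hello' (strings pass through)
// valueToString(42);          // -> '42' (via numberToString)
// valueToString(new Date(0)); // -> '1970-01-01T00:00:00.000Z'
// valueToString({ a: 1 });    // -> '{"a":1}' (JSON fallback)
// const circular = {}; circular.self = circular;
// valueToString(circular);    // -> the VALUE_STRINGS.circular placeholder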
+/**
+ * Replaces parameters in template with values from parameters object
+ *
+ * Note: This function does not just place strings into a string,
+ * It's more complex and can handle this operation specifically for LLM models
+ *
+ * @param template the template with parameters in {curly} braces
+ * @param parameters the object with parameters
+ * @returns the template with replaced parameters
+ * @throws {PipelineExecutionError} if parameter is not defined, not closed, or not opened
+ * @public exported from `@promptbook/utils`
+ */
+function templateParameters(template, parameters) {
+    for (const [parameterName, parameterValue] of Object.entries(parameters)) {
+        if (parameterValue === RESERVED_PARAMETER_MISSING_VALUE) {
+            throw new UnexpectedError(`Parameter \`{${parameterName}}\` has missing value`);
+        }
+        else if (parameterValue === RESERVED_PARAMETER_RESTRICTED) {
+            // TODO: [🍵]
+            throw new UnexpectedError(`Parameter \`{${parameterName}}\` is restricted to use`);
+        }
+    }
+    let replacedTemplates = template;
+    let match;
+    let loopLimit = LOOP_LIMIT;
+    while ((match = /^(?<precol>.*){(?<parameterName>\w+)}(.*)/m /* <- Not global */
+        .exec(replacedTemplates))) {
+        if (loopLimit-- < 0) {
+            throw new LimitReachedError('Loop limit reached during parameters replacement in `templateParameters`');
+        }
+        const precol = match.groups.precol;
+        const parameterName = match.groups.parameterName;
+        if (parameterName === '') {
+            // Note: Skip empty placeholders. It's used to avoid confusion with JSON-like strings
+            continue;
+        }
+        if (parameterName.indexOf('{') !== -1 || parameterName.indexOf('}') !== -1) {
+            throw new PipelineExecutionError('Parameter is already opened or not closed');
+        }
+        if (parameters[parameterName] === undefined) {
+            throw new PipelineExecutionError(`Parameter \`{${parameterName}}\` is not defined`);
+        }
+        let parameterValue = parameters[parameterName];
+        if (parameterValue === undefined) {
+            throw new PipelineExecutionError(`Parameter \`{${parameterName}}\` is not defined`);
+        }
+        parameterValue = valueToString(parameterValue);
+        // Escape curly braces in parameter values to prevent prompt-injection
+        parameterValue = parameterValue.replace(/[{}]/g, '\\$&');
+        if (parameterValue.includes('\n') && /^\s*\W{0,3}\s*$/.test(precol)) {
+            parameterValue = parameterValue
+                .split('\n')
+                .map((line, index) => (index === 0 ? line : `${precol}${line}`))
+                .join('\n');
+        }
+        replacedTemplates =
+            replacedTemplates.substring(0, match.index + precol.length) +
+                parameterValue +
+                replacedTemplates.substring(match.index + precol.length + parameterName.length + 2);
+    }
+    // [💫] Check if there are parameters that are not closed properly
+    if (/{\w+$/.test(replacedTemplates)) {
+        throw new PipelineExecutionError('Parameter is not closed');
+    }
+    // [💫] Check if there are parameters that are not opened properly
+    if (/^\w+}/.test(replacedTemplates)) {
+        throw new PipelineExecutionError('Parameter is not opened');
+    }
+    return replacedTemplates;
+}
+
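// Illustrative behaviour of `templateParameters` above (not part of the bundle):
// templateParameters('Hello {name}!', { name: 'Alice' });
//   -> 'Hello Alice!'
// templateParameters('Hello {name}!', { name: 'with {braces}' });
//   -> 'Hello with \\{braces\\}!' (braces in values are escaped to prevent prompt-injection)
// templateParameters('Hello {name}!', {});
//   -> throws PipelineExecutionError: Parameter `{name}` is not defined
// Multiline values inherit the prefix of the line they are inserted into, so a value placed
// after a markdown quote marker '> ' gets '> ' prepended to each continuation line.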
+/**
+ * Execution Tools for calling OpenAI API or other OpenAI compatible provider
+ *
+ * @public exported from `@promptbook/openai`
+ */
+class OpenAiCompatibleExecutionTools {
+    /**
+     * Creates OpenAI compatible Execution Tools.
+     *
+     * @param options The relevant options are passed directly to the OpenAI compatible client
+     */
+    constructor(options) {
+        this.options = options;
+        /**
+         * OpenAI API client.
+         */
+        this.client = null;
+        // TODO: Allow configuring rate limits via options
+        this.limiter = new Bottleneck__default["default"]({
+            minTime: 60000 / (this.options.maxRequestsPerMinute || DEFAULT_MAX_REQUESTS_PER_MINUTE),
+        });
+    }
+    async getClient() {
+        if (this.client === null) {
+            // Note: Passing only OpenAI relevant options to OpenAI constructor
+            const openAiOptions = { ...this.options };
+            delete openAiOptions.isVerbose;
+            delete openAiOptions.userId;
+            this.client = new OpenAI__default["default"](openAiOptions);
+        }
+        return this.client;
+    }
+    /**
+     * Check the `options` passed to `constructor`
+     */
+    async checkConfiguration() {
+        await this.getClient();
+        // TODO: [🎍] Do here a real check that API is online, working and API key is correct
+    }
+    /**
+     * List all available OpenAI compatible models that can be used
+     */
+    async listModels() {
+        const client = await this.getClient();
+        const rawModelsList = await client.models.list();
+        const availableModels = rawModelsList.data
+            .sort((a, b) => (a.created > b.created ? 1 : -1))
+            .map((modelFromApi) => {
+            const modelFromList = this.HARDCODED_MODELS.find(({ modelName }) => modelName === modelFromApi.id ||
+                modelName.startsWith(modelFromApi.id) ||
+                modelFromApi.id.startsWith(modelName));
+            if (modelFromList !== undefined) {
+                return modelFromList;
+            }
+            return {
+                modelVariant: 'CHAT',
+                modelTitle: modelFromApi.id,
+                modelName: modelFromApi.id,
+                modelDescription: '',
+            };
+        });
+        return availableModels;
+    }
+    /**
+     * Calls OpenAI compatible API to use a chat model.
+     */
+    async callChatModel(prompt) {
+        var _a;
+        if (this.options.isVerbose) {
+            console.info(`💬 ${this.title} callChatModel call`, { prompt });
+        }
+        const { content, parameters, modelRequirements, format } = prompt;
+        const client = await this.getClient();
+        // TODO: [☂] Use here more modelRequirements
+        if (modelRequirements.modelVariant !== 'CHAT') {
+            throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
+        }
+        const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
+        const modelSettings = {
+            model: modelName,
+            max_tokens: modelRequirements.maxTokens,
+            // <- TODO: [🌾] Make some global max cap for maxTokens
+            temperature: modelRequirements.temperature,
+            // <- TODO: [🈁] Use `seed` here AND/OR use `isDeterministic` for entire execution tools
+            // <- Note: [🧆]
+        }; // <- TODO: [💩] Guard here types better
+        if (format === 'JSON') {
+            modelSettings.response_format = {
+                type: 'json_object',
+            };
+        }
+        // <- TODO: [🚸] Not all models are compatible with JSON mode
+        //    > 'response_format' of type 'json_object' is not supported with this model.
+        const rawPromptContent = templateParameters(content, { ...parameters, modelName });
+        const rawRequest = {
+            ...modelSettings,
+            messages: [
+                ...(modelRequirements.systemMessage === undefined
+                    ? []
+                    : [
+                        {
+                            role: 'system',
+                            content: modelRequirements.systemMessage,
+                        },
+                    ]),
+                {
+                    role: 'user',
+                    content: rawPromptContent,
+                },
+            ],
+            user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString(),
+        };
+        const start = $getCurrentDate();
+        if (this.options.isVerbose) {
+            console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
+        }
+        const rawResponse = await this.limiter
+            .schedule(() => client.chat.completions.create(rawRequest))
+            .catch((error) => {
+            assertsError(error);
+            if (this.options.isVerbose) {
+                console.info(colors__default["default"].bgRed('error'), error);
+            }
+            throw error;
+        });
+        if (this.options.isVerbose) {
+            console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
+        }
+        const complete = $getCurrentDate();
+        if (!rawResponse.choices[0]) {
+            throw new PipelineExecutionError(`No choices from ${this.title}`);
+        }
+        if (rawResponse.choices.length > 1) {
+            // TODO: This should maybe be only a warning
+            throw new PipelineExecutionError(`More than one choice from ${this.title}`);
+        }
+        const resultContent = rawResponse.choices[0].message.content;
+        const usage = this.computeUsage(content || '', resultContent || '', rawResponse);
+        if (resultContent === null) {
+            throw new PipelineExecutionError(`No response message from ${this.title}`);
+        }
+        return exportJson({
+            name: 'promptResult',
+            message: `Result of \`OpenAiCompatibleExecutionTools.callChatModel\``,
+            order: [],
+            value: {
+                content: resultContent,
+                modelName: rawResponse.model || modelName,
+                timing: {
+                    start,
+                    complete,
+                },
+                usage,
+                rawPromptContent,
+                rawRequest,
+                rawResponse,
+                // <- [🗯]
+            },
+        });
+    }
+    /**
+     * Calls OpenAI API to use a completion model.
+     */
+    async callCompletionModel(prompt) {
+        var _a;
+        if (this.options.isVerbose) {
+            console.info(`🖋 ${this.title} callCompletionModel call`, { prompt });
+        }
+        const { content, parameters, modelRequirements } = prompt;
+        const client = await this.getClient();
+        // TODO: [☂] Use here more modelRequirements
+        if (modelRequirements.modelVariant !== 'COMPLETION') {
+            throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
+        }
+        const modelName = modelRequirements.modelName || this.getDefaultCompletionModel().modelName;
+        const modelSettings = {
+            model: modelName,
+            max_tokens: modelRequirements.maxTokens || 2000,
+            // <- TODO: [🌾] Make some global max cap for maxTokens
+            temperature: modelRequirements.temperature,
+            // <- TODO: [🈁] Use `seed` here AND/OR use `isDeterministic` for entire execution tools
+            // <- Note: [🧆]
+        };
+        const rawPromptContent = templateParameters(content, { ...parameters, modelName });
+        const rawRequest = {
+            ...modelSettings,
+            prompt: rawPromptContent,
+            user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString(),
+        };
+        const start = $getCurrentDate();
+        if (this.options.isVerbose) {
+            console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
+        }
+        const rawResponse = await this.limiter
+            .schedule(() => client.completions.create(rawRequest))
+            .catch((error) => {
+            assertsError(error);
+            if (this.options.isVerbose) {
+                console.info(colors__default["default"].bgRed('error'), error);
+            }
+            throw error;
+        });
+        if (this.options.isVerbose) {
+            console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
+        }
+        const complete = $getCurrentDate();
+        if (!rawResponse.choices[0]) {
+            throw new PipelineExecutionError(`No choices from ${this.title}`);
+        }
+        if (rawResponse.choices.length > 1) {
+            // TODO: This should maybe be only a warning
+            throw new PipelineExecutionError(`More than one choice from ${this.title}`);
+        }
+        const resultContent = rawResponse.choices[0].text;
+        const usage = this.computeUsage(content || '', resultContent || '', rawResponse);
+        return exportJson({
+            name: 'promptResult',
+            message: `Result of \`OpenAiCompatibleExecutionTools.callCompletionModel\``,
+            order: [],
+            value: {
+                content: resultContent,
+                modelName: rawResponse.model || modelName,
+                timing: {
+                    start,
+                    complete,
+                },
+                usage,
+                rawPromptContent,
+                rawRequest,
+                rawResponse,
+                // <- [🗯]
+            },
+        });
+    }
+    /**
+     * Calls OpenAI compatible API to use an embedding model
+     */
+    async callEmbeddingModel(prompt) {
+        if (this.options.isVerbose) {
+            console.info(`🖋 ${this.title} embedding call`, { prompt });
+        }
+        const { content, parameters, modelRequirements } = prompt;
+        const client = await this.getClient();
+        // TODO: [☂] Use here more modelRequirements
+        if (modelRequirements.modelVariant !== 'EMBEDDING') {
+            throw new PipelineExecutionError('Use callEmbeddingModel only for EMBEDDING variant');
+        }
+        const modelName = modelRequirements.modelName || this.getDefaultEmbeddingModel().modelName;
+        const rawPromptContent = templateParameters(content, { ...parameters, modelName });
+        const rawRequest = {
+            input: rawPromptContent,
+            model: modelName,
+        };
+        const start = $getCurrentDate();
+        if (this.options.isVerbose) {
+            console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
+        }
+        const rawResponse = await this.limiter
+            .schedule(() => client.embeddings.create(rawRequest))
+            .catch((error) => {
+            assertsError(error);
+            if (this.options.isVerbose) {
+                console.info(colors__default["default"].bgRed('error'), error);
+            }
+            throw error;
+        });
+        if (this.options.isVerbose) {
+            console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
+        }
+        const complete = $getCurrentDate();
+        if (rawResponse.data.length !== 1) {
+            throw new PipelineExecutionError(`Expected exactly 1 data item in response, got ${rawResponse.data.length}`);
+        }
+        const resultContent = rawResponse.data[0].embedding;
+        const usage = this.computeUsage(content || '', '',
+        // <- Note: Embedding does not have result content
+        rawResponse);
+        return exportJson({
+            name: 'promptResult',
+            message: `Result of \`OpenAiCompatibleExecutionTools.callEmbeddingModel\``,
+            order: [],
+            value: {
+                content: resultContent,
+                modelName: rawResponse.model || modelName,
+                timing: {
+                    start,
+                    complete,
+                },
+                usage,
+                rawPromptContent,
+                rawRequest,
+                rawResponse,
+                // <- [🗯]
+            },
+        });
+    }
+    // <- Note: [🤖] callXxxModel
+    /**
+     * Get the model that should be used as default
+     */
+    getDefaultModel(defaultModelName) {
+        // Note: Match exact or prefix for model families
+        const model = this.HARDCODED_MODELS.find(({ modelName }) => modelName === defaultModelName || modelName.startsWith(defaultModelName));
+        if (model === undefined) {
+            throw new PipelineExecutionError(spaceTrim__default["default"]((block) => `
+                Cannot find model in ${this.title} models with name "${defaultModelName}" which should be used as default.
+
+                Available models:
+                ${block(this.HARDCODED_MODELS.map(({ modelName }) => `- "${modelName}"`).join('\n'))}
+
+                Model "${defaultModelName}" is probably not available anymore, not installed, inaccessible or misconfigured.
+
+            `));
+        }
+        return model;
+    }
+}
+/**
+ * TODO: [🛄] Some way how to re-wrap the errors from `OpenAiCompatibleExecutionTools`
+ * TODO: [🛄] Maybe make custom `OpenAiCompatibleError`
+ * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
+ * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
+ */
+
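// Illustration (not part of the package): a minimal sketch of how a concrete provider plugs
// into `OpenAiCompatibleExecutionTools` above - it subclasses and supplies the pieces the
// base class expects (`title`, `HARDCODED_MODELS`, `computeUsage`, `getDefaultChatModel`, ...).
// The class below is hypothetical; `OllamaExecutionTools` further down is the real example.
class MyProviderExecutionTools extends OpenAiCompatibleExecutionTools {
    get title() {
        return 'MyProvider';
    }
    get HARDCODED_MODELS() {
        return []; // <- Static model metadata would go here
    }
    computeUsage(...args) {
        return computeOpenAiUsage(...args);
    }
    getDefaultChatModel() {
        return this.getDefaultModel('my-chat-model'); // <- 'my-chat-model' is a placeholder name
    }
}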
+/**
+ * List of available models in Ollama library
+ *
+ * Note: Done at 2025-05-19
+ *
+ * @see https://ollama.com/library
+ * @public exported from `@promptbook/ollama`
+ */
+const OLLAMA_MODELS = exportJson({
+    name: 'OLLAMA_MODELS',
+    value: [
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'llama2',
+            modelName: 'llama2',
+            modelDescription: 'Meta Llama 2, a general-purpose large language model.',
+        },
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'llama2-chat',
+            modelName: 'llama2-chat',
+            modelDescription: 'Meta Llama 2 Chat, optimized for conversational tasks.',
+        },
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'alpaca-7b',
+            modelName: 'alpaca-7b',
+            modelDescription: 'Stanford Alpaca 7B, instruction-tuned LLaMA model.',
+        },
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'alpaca-30b',
+            modelName: 'alpaca-30b',
+            modelDescription: 'Stanford Alpaca 30B, larger instruction-tuned LLaMA model.',
+        },
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'vicuna-13b',
+            modelName: 'vicuna-13b',
+            modelDescription: 'Vicuna 13B, fine-tuned LLaMA for chat and instruction.',
+        },
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'falcon-7b',
+            modelName: 'falcon-7b',
+            modelDescription: 'Falcon 7B, a performant open large language model.',
+        },
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'falcon-40b',
+            modelName: 'falcon-40b',
+            modelDescription: 'Falcon 40B, a larger open large language model.',
+        },
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'bloom-7b',
+            modelName: 'bloom-7b',
+            modelDescription: 'BLOOM 7B, multilingual large language model.',
+        },
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'mistral-7b',
+            modelName: 'mistral-7b',
+            modelDescription: 'Mistral 7B, efficient and fast open LLM.',
+        },
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'gorilla',
+            modelName: 'gorilla',
+            modelDescription: 'Gorilla, open-source LLM for tool use and APIs.',
+        },
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'cerebras-13b',
+            modelName: 'cerebras-13b',
+            modelDescription: 'Cerebras-GPT 13B, open large language model.',
+        },
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'openchat-7b',
+            modelName: 'openchat-7b',
+            modelDescription: 'OpenChat 7B, fine-tuned for conversational tasks.',
+        },
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'openchat-13b',
+            modelName: 'openchat-13b',
+            modelDescription: 'OpenChat 13B, larger conversational LLM.',
+        },
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'mpt-7b-chat',
+            modelName: 'mpt-7b-chat',
+            modelDescription: 'MPT-7B Chat, optimized for dialogue and chat.',
+        },
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'mpt-7b-instruct',
+            modelName: 'mpt-7b-instruct',
+            modelDescription: 'MPT-7B Instruct, instruction-tuned variant.',
+        },
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'command-7b',
+            modelName: 'command-7b',
+            modelDescription: 'Command 7B, instruction-following LLM.',
+        },
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'starcoder',
+            modelName: 'starcoder',
+            modelDescription: 'StarCoder, code generation large language model.',
+        },
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'starcoder2',
+            modelName: 'starcoder2',
+            modelDescription: 'StarCoder2, improved code generation model.',
+        },
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'mixtral-7b-chat',
+            modelName: 'mixtral-7b-chat',
+            modelDescription: 'Mixtral 7B Chat, Mixture-of-Experts conversational model.',
+        },
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'mixtral-8x7b',
+            modelName: 'mixtral-8x7b',
+            modelDescription: 'Mixtral 8x7B, Mixture-of-Experts large language model.',
+        },
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'mixtral-8x7b-instruct',
+            modelName: 'mixtral-8x7b-instruct',
+            modelDescription: 'Mixtral 8x7B Instruct, instruction-tuned Mixture-of-Experts model.',
+        },
+        // <- [🕕]
+    ],
+});
+/**
+ * TODO: [🚸] Not all models are compatible with JSON mode, add this information here and use it
+ * Note: [💞] Ignore a discrepancy between file name and entity name
+ */
+
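// OLLAMA_MODELS above is plain static metadata, so it can be queried like any array - e.g.
// to check whether a model name is in the hardcoded list before calling Ollama (a small
// illustration, not part of the bundle):
const knownOllamaModelNames = OLLAMA_MODELS.map(({ modelName }) => modelName);
console.log(knownOllamaModelNames.includes('mistral-7b')); // -> true
console.log(knownOllamaModelNames.includes('nonexistent-model')); // -> false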
+/**
+ * Default base URL for Ollama API
+ *
+ * @public exported from `@promptbook/ollama`
+ */
+const DEFAULT_OLLAMA_BASE_URL = 'http://localhost:11434/v1';
+
+/**
+ * Execution Tools for calling Ollama API
+ *
+ * @public exported from `@promptbook/ollama`
+ */
+class OllamaExecutionTools extends OpenAiCompatibleExecutionTools {
+    /* <- TODO: [🍚] `, Destroyable` */
+    constructor(ollamaOptions) {
+        const openAiCompatibleOptions = {
+            baseURL: DEFAULT_OLLAMA_BASE_URL,
+            ...ollamaOptions,
+            userId: 'ollama',
+        };
+        super(openAiCompatibleOptions);
+    }
+    get title() {
+        return 'Ollama';
+    }
+    get description() {
+        return 'Use all models provided by Ollama';
+    }
+    /**
+     * List all available models (non-dynamically)
+     *
+     * Note: Purpose of this is to provide more information about models than standard listing from API
+     */
+    get HARDCODED_MODELS() {
+        return OLLAMA_MODELS;
+    }
+    /**
+     * Computes the usage of the Ollama API based on the response from Ollama
+     */
+    computeUsage(...args) {
+        return {
+            ...computeOpenAiUsage(...args),
+            price: ZERO_VALUE, // <- Note: Running on local model, so no price, maybe in the future we can add a way to calculate price based on electricity usage
+        };
+    }
+    /**
+     * Default model for chat variant.
+     */
+    getDefaultChatModel() {
+        return this.getDefaultModel('llama2'); // <- TODO: [🧠] Pick the best default model
+        // <- TODO: [🛄] When 'llama2' not installed, maybe better error message
+    }
+    /**
+     * Default model for completion variant.
+     */
+    getDefaultCompletionModel() {
+        return this.getDefaultModel('llama2'); // <- TODO: [🧠] Pick the best default model
+        // <- TODO: [🛄] When 'llama2' not installed, maybe better error message
+    }
+    /**
+     * Default model for embedding variant.
+     */
+    getDefaultEmbeddingModel() {
+        return this.getDefaultModel('text-embedding-3-large'); // <- TODO: [🧠] Pick the best default model
+        // <- TODO: [🛄]
+    }
+}
+/**
+ * TODO: [🛄] Some way how to re-wrap the errors from `OpenAiCompatibleExecutionTools`
+ */
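// A minimal usage sketch for the class above (inside an async function), assuming a local
// Ollama server on the default port with the 'llama2' model pulled (`ollama pull llama2`):
const tools = new OllamaExecutionTools({ isVerbose: true });
const result = await tools.callChatModel({
    content: 'Say hello to {name}!',
    parameters: { name: 'Promptbook' },
    modelRequirements: { modelVariant: 'CHAT', modelName: 'llama2' },
});
console.log(result.content); // <- The model's reply; usage.price is ZERO_VALUE on a local model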

/**
 * Execution Tools for calling Ollama API
 *
 * @public exported from `@promptbook/ollama`
 */
-const createOllamaExecutionTools = Object.assign((options) =>
+const createOllamaExecutionTools = Object.assign((options) => {
+    return new OllamaExecutionTools(options);
+}, {
    packageName: '@promptbook/ollama',
    className: 'OllamaExecutionTools',
});
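// The factory above is the conventional entry point; the change swaps a concise arrow for an
// explicit function body with the same behaviour. Illustrative use (not part of the bundle):
const ollamaTools = createOllamaExecutionTools({ baseURL: 'http://localhost:11434/v1' });
console.log(createOllamaExecutionTools.packageName); // -> '@promptbook/ollama'
console.log(ollamaTools.title); // -> 'Ollama'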
@@ -745,6 +2459,8 @@
 */

exports.BOOK_LANGUAGE_VERSION = BOOK_LANGUAGE_VERSION;
+exports.DEFAULT_OLLAMA_BASE_URL = DEFAULT_OLLAMA_BASE_URL;
+exports.OLLAMA_MODELS = OLLAMA_MODELS;
exports.OllamaExecutionTools = OllamaExecutionTools;
exports.PROMPTBOOK_ENGINE_VERSION = PROMPTBOOK_ENGINE_VERSION;
exports._OllamaRegistration = _OllamaRegistration;