@promptbook/core 0.89.0-9 → 0.92.0-10
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +9 -7
- package/esm/index.es.js +801 -391
- package/esm/index.es.js.map +1 -1
- package/esm/typings/servers.d.ts +40 -0
- package/esm/typings/src/_packages/core.index.d.ts +14 -4
- package/esm/typings/src/_packages/deepseek.index.d.ts +2 -0
- package/esm/typings/src/_packages/google.index.d.ts +2 -0
- package/esm/typings/src/_packages/types.index.d.ts +18 -0
- package/esm/typings/src/_packages/utils.index.d.ts +6 -0
- package/esm/typings/src/cli/cli-commands/login.d.ts +0 -1
- package/esm/typings/src/cli/common/$provideLlmToolsForCli.d.ts +16 -3
- package/esm/typings/src/cli/test/ptbk.d.ts +1 -1
- package/esm/typings/src/commands/EXPECT/expectCommandParser.d.ts +2 -0
- package/esm/typings/src/config.d.ts +10 -19
- package/esm/typings/src/conversion/archive/loadArchive.d.ts +2 -2
- package/esm/typings/src/errors/0-index.d.ts +7 -4
- package/esm/typings/src/errors/PipelineExecutionError.d.ts +1 -1
- package/esm/typings/src/errors/WrappedError.d.ts +10 -0
- package/esm/typings/src/errors/assertsError.d.ts +11 -0
- package/esm/typings/src/execution/CommonToolsOptions.d.ts +4 -0
- package/esm/typings/src/execution/PromptbookFetch.d.ts +1 -1
- package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +12 -0
- package/esm/typings/src/execution/createPipelineExecutor/getReservedParametersForTask.d.ts +5 -0
- package/esm/typings/src/formats/csv/utils/csvParse.d.ts +12 -0
- package/esm/typings/src/formats/csv/utils/isValidCsvString.d.ts +9 -0
- package/esm/typings/src/formats/csv/utils/isValidCsvString.test.d.ts +1 -0
- package/esm/typings/src/formats/json/utils/isValidJsonString.d.ts +3 -0
- package/esm/typings/src/formats/json/utils/jsonParse.d.ts +11 -0
- package/esm/typings/src/formats/xml/utils/isValidXmlString.d.ts +9 -0
- package/esm/typings/src/formats/xml/utils/isValidXmlString.test.d.ts +1 -0
- package/esm/typings/src/llm-providers/_common/filterModels.d.ts +15 -0
- package/esm/typings/src/llm-providers/_common/register/{$provideEnvFilepath.d.ts → $provideEnvFilename.d.ts} +2 -2
- package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsConfigurationFromEnv.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForTestingAndScriptsAndPlayground.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForWizzardOrCli.d.ts +11 -2
- package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsFromEnv.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/register/LlmToolsMetadata.d.ts +43 -0
- package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +4 -0
- package/esm/typings/src/llm-providers/deepseek/deepseek-models.d.ts +23 -0
- package/esm/typings/src/llm-providers/google/google-models.d.ts +23 -0
- package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +4 -0
- package/esm/typings/src/personas/preparePersona.d.ts +1 -1
- package/esm/typings/src/pipeline/PipelineJson/PersonaJson.d.ts +4 -2
- package/esm/typings/src/remote-server/openapi-types.d.ts +626 -0
- package/esm/typings/src/remote-server/openapi.d.ts +581 -0
- package/esm/typings/src/remote-server/socket-types/_subtypes/Identification.d.ts +7 -1
- package/esm/typings/src/remote-server/socket-types/_subtypes/identificationToPromptbookToken.d.ts +11 -0
- package/esm/typings/src/remote-server/socket-types/_subtypes/promptbookTokenToIdentification.d.ts +10 -0
- package/esm/typings/src/remote-server/startRemoteServer.d.ts +1 -2
- package/esm/typings/src/remote-server/types/RemoteServerOptions.d.ts +15 -9
- package/esm/typings/src/storage/env-storage/$EnvStorage.d.ts +40 -0
- package/esm/typings/src/types/typeAliases.d.ts +26 -0
- package/package.json +7 -3
- package/umd/index.umd.js +807 -392
- package/umd/index.umd.js.map +1 -1
- package/esm/typings/src/cli/test/ptbk2.d.ts +0 -5
package/umd/index.umd.js
CHANGED
|
@@ -27,7 +27,7 @@
|
|
|
27
27
|
* @generated
|
|
28
28
|
* @see https://github.com/webgptorg/promptbook
|
|
29
29
|
*/
|
|
30
|
-
const PROMPTBOOK_ENGINE_VERSION = '0.
|
|
30
|
+
const PROMPTBOOK_ENGINE_VERSION = '0.92.0-10';
|
|
31
31
|
/**
|
|
32
32
|
* TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
|
|
33
33
|
* Note: [💞] Ignore a discrepancy between file name and entity name
|
|
@@ -159,236 +159,34 @@
|
|
|
159
159
|
*/
|
|
160
160
|
|
|
161
161
|
/**
|
|
162
|
-
*
|
|
163
|
-
*
|
|
164
|
-
* @public exported from `@promptbook/utils`
|
|
165
|
-
*/
|
|
166
|
-
function isValidJsonString(value /* <- [👨⚖️] */) {
|
|
167
|
-
try {
|
|
168
|
-
JSON.parse(value);
|
|
169
|
-
return true;
|
|
170
|
-
}
|
|
171
|
-
catch (error) {
|
|
172
|
-
if (!(error instanceof Error)) {
|
|
173
|
-
throw error;
|
|
174
|
-
}
|
|
175
|
-
if (error.message.includes('Unexpected token')) {
|
|
176
|
-
return false;
|
|
177
|
-
}
|
|
178
|
-
return false;
|
|
179
|
-
}
|
|
180
|
-
}
|
|
181
|
-
|
|
182
|
-
/**
|
|
183
|
-
* Function `validatePipelineString` will validate the if the string is a valid pipeline string
|
|
184
|
-
* It does not check if the string is fully logically correct, but if it is a string that can be a pipeline string or the string looks completely different.
|
|
185
|
-
*
|
|
186
|
-
* @param {string} pipelineString the candidate for a pipeline string
|
|
187
|
-
* @returns {PipelineString} the same string as input, but validated as valid
|
|
188
|
-
* @throws {ParseError} if the string is not a valid pipeline string
|
|
189
|
-
* @public exported from `@promptbook/core`
|
|
190
|
-
*/
|
|
191
|
-
function validatePipelineString(pipelineString) {
|
|
192
|
-
if (isValidJsonString(pipelineString)) {
|
|
193
|
-
throw new ParseError('Expected a book, but got a JSON string');
|
|
194
|
-
}
|
|
195
|
-
else if (isValidUrl(pipelineString)) {
|
|
196
|
-
throw new ParseError(`Expected a book, but got just the URL "${pipelineString}"`);
|
|
197
|
-
}
|
|
198
|
-
else if (isValidFilePath(pipelineString)) {
|
|
199
|
-
throw new ParseError(`Expected a book, but got just the file path "${pipelineString}"`);
|
|
200
|
-
}
|
|
201
|
-
else if (isValidEmail(pipelineString)) {
|
|
202
|
-
throw new ParseError(`Expected a book, but got just the email "${pipelineString}"`);
|
|
203
|
-
}
|
|
204
|
-
// <- TODO: Implement the validation + add tests when the pipeline logic considered as invalid
|
|
205
|
-
return pipelineString;
|
|
206
|
-
}
|
|
207
|
-
/**
|
|
208
|
-
* TODO: [🧠][🈴] Where is the best location for this file
|
|
209
|
-
*/
|
|
210
|
-
|
|
211
|
-
/**
|
|
212
|
-
* Prettify the html code
|
|
213
|
-
*
|
|
214
|
-
* @param content raw html code
|
|
215
|
-
* @returns formatted html code
|
|
216
|
-
* @private withing the package because of HUGE size of prettier dependency
|
|
217
|
-
*/
|
|
218
|
-
function prettifyMarkdown(content) {
|
|
219
|
-
try {
|
|
220
|
-
return prettier.format(content, {
|
|
221
|
-
parser: 'markdown',
|
|
222
|
-
plugins: [parserHtml__default["default"]],
|
|
223
|
-
// TODO: DRY - make some import or auto-copy of .prettierrc
|
|
224
|
-
endOfLine: 'lf',
|
|
225
|
-
tabWidth: 4,
|
|
226
|
-
singleQuote: true,
|
|
227
|
-
trailingComma: 'all',
|
|
228
|
-
arrowParens: 'always',
|
|
229
|
-
printWidth: 120,
|
|
230
|
-
htmlWhitespaceSensitivity: 'ignore',
|
|
231
|
-
jsxBracketSameLine: false,
|
|
232
|
-
bracketSpacing: true,
|
|
233
|
-
});
|
|
234
|
-
}
|
|
235
|
-
catch (error) {
|
|
236
|
-
// TODO: [🟥] Detect browser / node and make it colorfull
|
|
237
|
-
console.error('There was an error with prettifying the markdown, using the original as the fallback', {
|
|
238
|
-
error,
|
|
239
|
-
html: content,
|
|
240
|
-
});
|
|
241
|
-
return content;
|
|
242
|
-
}
|
|
243
|
-
}
|
|
244
|
-
|
|
245
|
-
/**
|
|
246
|
-
* Makes first letter of a string uppercase
|
|
247
|
-
*
|
|
248
|
-
* @public exported from `@promptbook/utils`
|
|
249
|
-
*/
|
|
250
|
-
function capitalize(word) {
|
|
251
|
-
return word.substring(0, 1).toUpperCase() + word.substring(1);
|
|
252
|
-
}
|
|
253
|
-
|
|
254
|
-
/**
|
|
255
|
-
* Converts promptbook in JSON format to string format
|
|
162
|
+
* Available remote servers for the Promptbook
|
|
256
163
|
*
|
|
257
|
-
* @deprecated TODO: [🥍][🧠] Backup original files in `PipelineJson` same as in Promptbook.studio
|
|
258
|
-
* @param pipelineJson Promptbook in JSON format (.bookc)
|
|
259
|
-
* @returns Promptbook in string format (.book.md)
|
|
260
164
|
* @public exported from `@promptbook/core`
|
|
261
165
|
*/
|
|
262
|
-
|
|
263
|
-
|
|
264
|
-
|
|
265
|
-
|
|
266
|
-
|
|
267
|
-
|
|
268
|
-
|
|
269
|
-
|
|
270
|
-
|
|
271
|
-
|
|
272
|
-
}
|
|
273
|
-
|
|
274
|
-
|
|
275
|
-
|
|
276
|
-
|
|
277
|
-
|
|
278
|
-
|
|
279
|
-
|
|
280
|
-
|
|
281
|
-
|
|
282
|
-
|
|
283
|
-
|
|
284
|
-
pipelineString += '\n\n';
|
|
285
|
-
pipelineString += commands.map((command) => `- ${command}`).join('\n');
|
|
286
|
-
for (const task of tasks) {
|
|
287
|
-
const {
|
|
288
|
-
/* Note: Not using:> name, */
|
|
289
|
-
title, description,
|
|
290
|
-
/* Note: dependentParameterNames, */
|
|
291
|
-
jokerParameterNames: jokers, taskType, content, postprocessingFunctionNames: postprocessing, expectations, format, resultingParameterName, } = task;
|
|
292
|
-
pipelineString += '\n\n';
|
|
293
|
-
pipelineString += `## ${title}`;
|
|
294
|
-
if (description) {
|
|
295
|
-
pipelineString += '\n\n';
|
|
296
|
-
pipelineString += description;
|
|
297
|
-
}
|
|
298
|
-
const commands = [];
|
|
299
|
-
let contentLanguage = 'text';
|
|
300
|
-
if (taskType === 'PROMPT_TASK') {
|
|
301
|
-
const { modelRequirements } = task;
|
|
302
|
-
const { modelName, modelVariant } = modelRequirements || {};
|
|
303
|
-
// Note: Do nothing, it is default
|
|
304
|
-
// commands.push(`PROMPT`);
|
|
305
|
-
if (modelVariant) {
|
|
306
|
-
commands.push(`MODEL VARIANT ${capitalize(modelVariant)}`);
|
|
307
|
-
}
|
|
308
|
-
if (modelName) {
|
|
309
|
-
commands.push(`MODEL NAME \`${modelName}\``);
|
|
310
|
-
}
|
|
311
|
-
}
|
|
312
|
-
else if (taskType === 'SIMPLE_TASK') {
|
|
313
|
-
commands.push(`SIMPLE TEMPLATE`);
|
|
314
|
-
// Note: Nothing special here
|
|
315
|
-
}
|
|
316
|
-
else if (taskType === 'SCRIPT_TASK') {
|
|
317
|
-
commands.push(`SCRIPT`);
|
|
318
|
-
if (task.contentLanguage) {
|
|
319
|
-
contentLanguage = task.contentLanguage;
|
|
320
|
-
}
|
|
321
|
-
else {
|
|
322
|
-
contentLanguage = '';
|
|
323
|
-
}
|
|
324
|
-
}
|
|
325
|
-
else if (taskType === 'DIALOG_TASK') {
|
|
326
|
-
commands.push(`DIALOG`);
|
|
327
|
-
// Note: Nothing special here
|
|
328
|
-
} // <- }else if([🅱]
|
|
329
|
-
if (jokers) {
|
|
330
|
-
for (const joker of jokers) {
|
|
331
|
-
commands.push(`JOKER {${joker}}`);
|
|
332
|
-
}
|
|
333
|
-
} /* not else */
|
|
334
|
-
if (postprocessing) {
|
|
335
|
-
for (const postprocessingFunctionName of postprocessing) {
|
|
336
|
-
commands.push(`POSTPROCESSING \`${postprocessingFunctionName}\``);
|
|
337
|
-
}
|
|
338
|
-
} /* not else */
|
|
339
|
-
if (expectations) {
|
|
340
|
-
for (const [unit, { min, max }] of Object.entries(expectations)) {
|
|
341
|
-
if (min === max) {
|
|
342
|
-
commands.push(`EXPECT EXACTLY ${min} ${capitalize(unit + (min > 1 ? 's' : ''))}`);
|
|
343
|
-
}
|
|
344
|
-
else {
|
|
345
|
-
if (min !== undefined) {
|
|
346
|
-
commands.push(`EXPECT MIN ${min} ${capitalize(unit + (min > 1 ? 's' : ''))}`);
|
|
347
|
-
} /* not else */
|
|
348
|
-
if (max !== undefined) {
|
|
349
|
-
commands.push(`EXPECT MAX ${max} ${capitalize(unit + (max > 1 ? 's' : ''))}`);
|
|
350
|
-
}
|
|
351
|
-
}
|
|
352
|
-
}
|
|
353
|
-
} /* not else */
|
|
354
|
-
if (format) {
|
|
355
|
-
if (format === 'JSON') {
|
|
356
|
-
// TODO: @deprecated remove
|
|
357
|
-
commands.push(`FORMAT JSON`);
|
|
358
|
-
}
|
|
359
|
-
} /* not else */
|
|
360
|
-
pipelineString += '\n\n';
|
|
361
|
-
pipelineString += commands.map((command) => `- ${command}`).join('\n');
|
|
362
|
-
pipelineString += '\n\n';
|
|
363
|
-
pipelineString += '```' + contentLanguage;
|
|
364
|
-
pipelineString += '\n';
|
|
365
|
-
pipelineString += spaceTrim__default["default"](content);
|
|
366
|
-
// <- TODO: [main] !!3 Escape
|
|
367
|
-
// <- TODO: [🧠] Some clear strategy how to spaceTrim the blocks
|
|
368
|
-
pipelineString += '\n';
|
|
369
|
-
pipelineString += '```';
|
|
370
|
-
pipelineString += '\n\n';
|
|
371
|
-
pipelineString += `\`-> {${resultingParameterName}}\``; // <- TODO: [main] !!3 If the parameter here has description, add it and use taskParameterJsonToString
|
|
372
|
-
}
|
|
373
|
-
return validatePipelineString(pipelineString);
|
|
374
|
-
}
|
|
375
|
-
/**
|
|
376
|
-
* @private internal utility of `pipelineJsonToString`
|
|
377
|
-
*/
|
|
378
|
-
function taskParameterJsonToString(taskParameterJson) {
|
|
379
|
-
const { name, description } = taskParameterJson;
|
|
380
|
-
let parameterString = `{${name}}`;
|
|
381
|
-
if (description) {
|
|
382
|
-
parameterString = `${parameterString} ${description}`;
|
|
383
|
-
}
|
|
384
|
-
return parameterString;
|
|
385
|
-
}
|
|
166
|
+
const REMOTE_SERVER_URLS = [
|
|
167
|
+
{
|
|
168
|
+
title: 'Promptbook',
|
|
169
|
+
description: `Servers of Promptbook.studio`,
|
|
170
|
+
owner: 'AI Web, LLC <legal@ptbk.io> (https://www.ptbk.io/)',
|
|
171
|
+
isAnonymousModeAllowed: true,
|
|
172
|
+
urls: [
|
|
173
|
+
'https://promptbook.s5.ptbk.io/',
|
|
174
|
+
// Note: Servers 1-4 are not running
|
|
175
|
+
],
|
|
176
|
+
},
|
|
177
|
+
/*
|
|
178
|
+
Note: Working on older version of Promptbook and not supported anymore
|
|
179
|
+
{
|
|
180
|
+
title: 'Pavol Promptbook Server',
|
|
181
|
+
description: `Personal server of Pavol Hejný with simple testing server, DO NOT USE IT FOR PRODUCTION`,
|
|
182
|
+
owner: 'Pavol Hejný <pavol@ptbk.io> (https://www.pavolhejny.com/)',
|
|
183
|
+
isAnonymousModeAllowed: true,
|
|
184
|
+
urls: ['https://api.pavolhejny.com/promptbook'],
|
|
185
|
+
},
|
|
186
|
+
*/
|
|
187
|
+
];
|
|
386
188
|
/**
|
|
387
|
-
*
|
|
388
|
-
* TODO: [🧠] Is there a way to auto-detect missing features in pipelineJsonToString
|
|
389
|
-
* TODO: [🏛] Maybe make some markdown builder
|
|
390
|
-
* TODO: [🏛] Escape all
|
|
391
|
-
* TODO: [🧠] Should be in generated .book.md file GENERATOR_WARNING
|
|
189
|
+
* Note: [💞] Ignore a discrepancy between file name and entity name
|
|
392
190
|
*/
|
|
393
191
|
|
|
394
192
|
/**
|
|
@@ -446,22 +244,6 @@
|
|
|
446
244
|
*/
|
|
447
245
|
const CLAIM = `It's time for a paradigm shift. The future of software in plain English, French or Latin`;
|
|
448
246
|
// <- TODO: [🐊] Pick the best claim
|
|
449
|
-
/**
|
|
450
|
-
* Logo for the light theme
|
|
451
|
-
*
|
|
452
|
-
* TODO: [🗽] Unite branding and make single place for it
|
|
453
|
-
*
|
|
454
|
-
* @public exported from `@promptbook/core`
|
|
455
|
-
*/
|
|
456
|
-
const LOGO_LIGHT_SRC = `https://promptbook.studio/_next/static/media/promptbook-logo.b21f0c70.png`; // <- TODO: !!! Auto-update or remove
|
|
457
|
-
/**
|
|
458
|
-
* Logo for the dark theme
|
|
459
|
-
*
|
|
460
|
-
* TODO: [🗽] Unite branding and make single place for it
|
|
461
|
-
*
|
|
462
|
-
* @public exported from `@promptbook/core`
|
|
463
|
-
*/
|
|
464
|
-
const LOGO_DARK_SRC = `https://promptbook.studio/_next/static/media/promptbook-logo-white.09887cbc.png`; // <- TODO: !!! Auto-update or remove
|
|
465
247
|
/**
|
|
466
248
|
* When the title is not provided, the default title is used
|
|
467
249
|
*
|
|
@@ -512,6 +294,7 @@
|
|
|
512
294
|
infinity: '(infinity; ∞)',
|
|
513
295
|
negativeInfinity: '(negative infinity; -∞)',
|
|
514
296
|
unserializable: '(unserializable value)',
|
|
297
|
+
circular: '(circular JSON)',
|
|
515
298
|
};
|
|
516
299
|
/**
|
|
517
300
|
* Small number limit
|
|
@@ -572,7 +355,7 @@
|
|
|
572
355
|
*/
|
|
573
356
|
const DEFAULT_BOOKS_DIRNAME = './books';
|
|
574
357
|
// <- TODO: [🕝] Make also `BOOKS_DIRNAME_ALTERNATIVES`
|
|
575
|
-
// TODO:
|
|
358
|
+
// TODO: Just `.promptbook` in config, hardcode subfolders like `download-cache` or `execution-cache`
|
|
576
359
|
/**
|
|
577
360
|
* Where to store the temporary downloads
|
|
578
361
|
*
|
|
@@ -634,11 +417,11 @@
|
|
|
634
417
|
ss: 3, // <- least number of seconds to be counted in seconds, minus 1. Must be set after setting the `s` unit or without setting the `s` unit.
|
|
635
418
|
};
|
|
636
419
|
/**
|
|
637
|
-
*
|
|
420
|
+
* Default remote server URL for the Promptbook
|
|
638
421
|
*
|
|
639
422
|
* @public exported from `@promptbook/core`
|
|
640
423
|
*/
|
|
641
|
-
const DEFAULT_REMOTE_SERVER_URL =
|
|
424
|
+
const DEFAULT_REMOTE_SERVER_URL = REMOTE_SERVER_URLS[0].urls[0];
|
|
642
425
|
// <- TODO: [🧜♂️]
|
|
643
426
|
/**
|
|
644
427
|
* @@@
|
|
@@ -694,51 +477,9 @@
|
|
|
694
477
|
*/
|
|
695
478
|
|
|
696
479
|
/**
|
|
697
|
-
*
|
|
480
|
+
* Make error report URL for the given error
|
|
698
481
|
*
|
|
699
|
-
* @
|
|
700
|
-
* @public exported from `@promptbook/utils`
|
|
701
|
-
*/
|
|
702
|
-
function orderJson(options) {
|
|
703
|
-
const { value, order } = options;
|
|
704
|
-
const orderedValue = {
|
|
705
|
-
...(order === undefined ? {} : Object.fromEntries(order.map((key) => [key, undefined]))),
|
|
706
|
-
...value,
|
|
707
|
-
};
|
|
708
|
-
return orderedValue;
|
|
709
|
-
}
|
|
710
|
-
|
|
711
|
-
/**
|
|
712
|
-
* Freezes the given object and all its nested objects recursively
|
|
713
|
-
*
|
|
714
|
-
* Note: `$` is used to indicate that this function is not a pure function - it mutates given object
|
|
715
|
-
* Note: This function mutates the object and returns the original (but mutated-deep-freezed) object
|
|
716
|
-
*
|
|
717
|
-
* @returns The same object as the input, but deeply frozen
|
|
718
|
-
* @public exported from `@promptbook/utils`
|
|
719
|
-
*/
|
|
720
|
-
function $deepFreeze(objectValue) {
|
|
721
|
-
if (Array.isArray(objectValue)) {
|
|
722
|
-
return Object.freeze(objectValue.map((item) => $deepFreeze(item)));
|
|
723
|
-
}
|
|
724
|
-
const propertyNames = Object.getOwnPropertyNames(objectValue);
|
|
725
|
-
for (const propertyName of propertyNames) {
|
|
726
|
-
const value = objectValue[propertyName];
|
|
727
|
-
if (value && typeof value === 'object') {
|
|
728
|
-
$deepFreeze(value);
|
|
729
|
-
}
|
|
730
|
-
}
|
|
731
|
-
Object.freeze(objectValue);
|
|
732
|
-
return objectValue;
|
|
733
|
-
}
|
|
734
|
-
/**
|
|
735
|
-
* TODO: [🧠] Is there a way how to meaningfully test this utility
|
|
736
|
-
*/
|
|
737
|
-
|
|
738
|
-
/**
|
|
739
|
-
* Make error report URL for the given error
|
|
740
|
-
*
|
|
741
|
-
* @private private within the repository
|
|
482
|
+
* @private private within the repository
|
|
742
483
|
*/
|
|
743
484
|
function getErrorReportUrl(error) {
|
|
744
485
|
const report = {
|
|
@@ -769,39 +510,363 @@
|
|
|
769
510
|
\`\`\`
|
|
770
511
|
</details>
|
|
771
512
|
|
|
772
|
-
`),
|
|
513
|
+
`),
|
|
514
|
+
};
|
|
515
|
+
const reportUrl = new URL(`https://github.com/webgptorg/promptbook/issues/new`);
|
|
516
|
+
reportUrl.searchParams.set('labels', 'bug');
|
|
517
|
+
reportUrl.searchParams.set('assignees', ADMIN_GITHUB_NAME);
|
|
518
|
+
reportUrl.searchParams.set('title', report.title);
|
|
519
|
+
reportUrl.searchParams.set('body', report.body);
|
|
520
|
+
return reportUrl;
|
|
521
|
+
}
|
|
522
|
+
|
|
523
|
+
/**
|
|
524
|
+
* This error type indicates that the error should not happen and its last check before crashing with some other error
|
|
525
|
+
*
|
|
526
|
+
* @public exported from `@promptbook/core`
|
|
527
|
+
*/
|
|
528
|
+
class UnexpectedError extends Error {
|
|
529
|
+
constructor(message) {
|
|
530
|
+
super(spaceTrim.spaceTrim((block) => `
|
|
531
|
+
${block(message)}
|
|
532
|
+
|
|
533
|
+
Note: This error should not happen.
|
|
534
|
+
It's probbably a bug in the pipeline collection
|
|
535
|
+
|
|
536
|
+
Please report issue:
|
|
537
|
+
${block(getErrorReportUrl(new Error(message)).href)}
|
|
538
|
+
|
|
539
|
+
Or contact us on ${ADMIN_EMAIL}
|
|
540
|
+
|
|
541
|
+
`));
|
|
542
|
+
this.name = 'UnexpectedError';
|
|
543
|
+
Object.setPrototypeOf(this, UnexpectedError.prototype);
|
|
544
|
+
}
|
|
545
|
+
}
|
|
546
|
+
|
|
547
|
+
/**
|
|
548
|
+
* This error type indicates that somewhere in the code non-Error object was thrown and it was wrapped into the `WrappedError`
|
|
549
|
+
*
|
|
550
|
+
* @public exported from `@promptbook/core`
|
|
551
|
+
*/
|
|
552
|
+
class WrappedError extends Error {
|
|
553
|
+
constructor(whatWasThrown) {
|
|
554
|
+
const tag = `[🤮]`;
|
|
555
|
+
console.error(tag, whatWasThrown);
|
|
556
|
+
super(spaceTrim.spaceTrim(`
|
|
557
|
+
Non-Error object was thrown
|
|
558
|
+
|
|
559
|
+
Note: Look for ${tag} in the console for more details
|
|
560
|
+
Please report issue on ${ADMIN_EMAIL}
|
|
561
|
+
`));
|
|
562
|
+
this.name = 'WrappedError';
|
|
563
|
+
Object.setPrototypeOf(this, WrappedError.prototype);
|
|
564
|
+
}
|
|
565
|
+
}
|
|
566
|
+
|
|
567
|
+
/**
|
|
568
|
+
* Helper used in catch blocks to assert that the error is an instance of `Error`
|
|
569
|
+
*
|
|
570
|
+
* @param whatWasThrown Any object that was thrown
|
|
571
|
+
* @returns Nothing if the error is an instance of `Error`
|
|
572
|
+
* @throws `WrappedError` or `UnexpectedError` if the error is not standard
|
|
573
|
+
*
|
|
574
|
+
* @private within the repository
|
|
575
|
+
*/
|
|
576
|
+
function assertsError(whatWasThrown) {
|
|
577
|
+
// Case 1: Handle error which was rethrown as `WrappedError`
|
|
578
|
+
if (whatWasThrown instanceof WrappedError) {
|
|
579
|
+
const wrappedError = whatWasThrown;
|
|
580
|
+
throw wrappedError;
|
|
581
|
+
}
|
|
582
|
+
// Case 2: Handle unexpected errors
|
|
583
|
+
if (whatWasThrown instanceof UnexpectedError) {
|
|
584
|
+
const unexpectedError = whatWasThrown;
|
|
585
|
+
throw unexpectedError;
|
|
586
|
+
}
|
|
587
|
+
// Case 3: Handle standard errors - keep them up to consumer
|
|
588
|
+
if (whatWasThrown instanceof Error) {
|
|
589
|
+
return;
|
|
590
|
+
}
|
|
591
|
+
// Case 4: Handle non-standard errors - wrap them into `WrappedError` and throw
|
|
592
|
+
throw new WrappedError(whatWasThrown);
|
|
593
|
+
}
|
|
594
|
+
|
|
595
|
+
/**
|
|
596
|
+
* Function isValidJsonString will tell you if the string is valid JSON or not
|
|
597
|
+
*
|
|
598
|
+
* @param value The string to check
|
|
599
|
+
* @returns True if the string is a valid JSON string, false otherwise
|
|
600
|
+
*
|
|
601
|
+
* @public exported from `@promptbook/utils`
|
|
602
|
+
*/
|
|
603
|
+
function isValidJsonString(value /* <- [👨⚖️] */) {
|
|
604
|
+
try {
|
|
605
|
+
JSON.parse(value);
|
|
606
|
+
return true;
|
|
607
|
+
}
|
|
608
|
+
catch (error) {
|
|
609
|
+
assertsError(error);
|
|
610
|
+
if (error.message.includes('Unexpected token')) {
|
|
611
|
+
return false;
|
|
612
|
+
}
|
|
613
|
+
return false;
|
|
614
|
+
}
|
|
615
|
+
}
|
|
616
|
+
|
|
617
|
+
/**
|
|
618
|
+
* Function `validatePipelineString` will validate the if the string is a valid pipeline string
|
|
619
|
+
* It does not check if the string is fully logically correct, but if it is a string that can be a pipeline string or the string looks completely different.
|
|
620
|
+
*
|
|
621
|
+
* @param {string} pipelineString the candidate for a pipeline string
|
|
622
|
+
* @returns {PipelineString} the same string as input, but validated as valid
|
|
623
|
+
* @throws {ParseError} if the string is not a valid pipeline string
|
|
624
|
+
* @public exported from `@promptbook/core`
|
|
625
|
+
*/
|
|
626
|
+
function validatePipelineString(pipelineString) {
|
|
627
|
+
if (isValidJsonString(pipelineString)) {
|
|
628
|
+
throw new ParseError('Expected a book, but got a JSON string');
|
|
629
|
+
}
|
|
630
|
+
else if (isValidUrl(pipelineString)) {
|
|
631
|
+
throw new ParseError(`Expected a book, but got just the URL "${pipelineString}"`);
|
|
632
|
+
}
|
|
633
|
+
else if (isValidFilePath(pipelineString)) {
|
|
634
|
+
throw new ParseError(`Expected a book, but got just the file path "${pipelineString}"`);
|
|
635
|
+
}
|
|
636
|
+
else if (isValidEmail(pipelineString)) {
|
|
637
|
+
throw new ParseError(`Expected a book, but got just the email "${pipelineString}"`);
|
|
638
|
+
}
|
|
639
|
+
// <- TODO: Implement the validation + add tests when the pipeline logic considered as invalid
|
|
640
|
+
return pipelineString;
|
|
641
|
+
}
|
|
642
|
+
/**
|
|
643
|
+
* TODO: [🧠][🈴] Where is the best location for this file
|
|
644
|
+
*/
|
|
645
|
+
|
|
646
|
+
/**
|
|
647
|
+
* Prettify the html code
|
|
648
|
+
*
|
|
649
|
+
* @param content raw html code
|
|
650
|
+
* @returns formatted html code
|
|
651
|
+
* @private withing the package because of HUGE size of prettier dependency
|
|
652
|
+
*/
|
|
653
|
+
function prettifyMarkdown(content) {
|
|
654
|
+
try {
|
|
655
|
+
return prettier.format(content, {
|
|
656
|
+
parser: 'markdown',
|
|
657
|
+
plugins: [parserHtml__default["default"]],
|
|
658
|
+
// TODO: DRY - make some import or auto-copy of .prettierrc
|
|
659
|
+
endOfLine: 'lf',
|
|
660
|
+
tabWidth: 4,
|
|
661
|
+
singleQuote: true,
|
|
662
|
+
trailingComma: 'all',
|
|
663
|
+
arrowParens: 'always',
|
|
664
|
+
printWidth: 120,
|
|
665
|
+
htmlWhitespaceSensitivity: 'ignore',
|
|
666
|
+
jsxBracketSameLine: false,
|
|
667
|
+
bracketSpacing: true,
|
|
668
|
+
});
|
|
669
|
+
}
|
|
670
|
+
catch (error) {
|
|
671
|
+
// TODO: [🟥] Detect browser / node and make it colorfull
|
|
672
|
+
console.error('There was an error with prettifying the markdown, using the original as the fallback', {
|
|
673
|
+
error,
|
|
674
|
+
html: content,
|
|
675
|
+
});
|
|
676
|
+
return content;
|
|
677
|
+
}
|
|
678
|
+
}
|
|
679
|
+
|
|
680
|
+
/**
|
|
681
|
+
* Makes first letter of a string uppercase
|
|
682
|
+
*
|
|
683
|
+
* @public exported from `@promptbook/utils`
|
|
684
|
+
*/
|
|
685
|
+
function capitalize(word) {
|
|
686
|
+
return word.substring(0, 1).toUpperCase() + word.substring(1);
|
|
687
|
+
}
|
|
688
|
+
|
|
689
|
+
/**
|
|
690
|
+
* Converts promptbook in JSON format to string format
|
|
691
|
+
*
|
|
692
|
+
* @deprecated TODO: [🥍][🧠] Backup original files in `PipelineJson` same as in Promptbook.studio
|
|
693
|
+
* @param pipelineJson Promptbook in JSON format (.bookc)
|
|
694
|
+
* @returns Promptbook in string format (.book.md)
|
|
695
|
+
* @public exported from `@promptbook/core`
|
|
696
|
+
*/
|
|
697
|
+
function pipelineJsonToString(pipelineJson) {
|
|
698
|
+
const { title, pipelineUrl, bookVersion, description, parameters, tasks } = pipelineJson;
|
|
699
|
+
let pipelineString = `# ${title}`;
|
|
700
|
+
if (description) {
|
|
701
|
+
pipelineString += '\n\n';
|
|
702
|
+
pipelineString += description;
|
|
703
|
+
}
|
|
704
|
+
const commands = [];
|
|
705
|
+
if (pipelineUrl) {
|
|
706
|
+
commands.push(`PIPELINE URL ${pipelineUrl}`);
|
|
707
|
+
}
|
|
708
|
+
if (bookVersion !== `undefined`) {
|
|
709
|
+
commands.push(`BOOK VERSION ${bookVersion}`);
|
|
710
|
+
}
|
|
711
|
+
// TODO: [main] !!5 This increases size of the bundle and is probbably not necessary
|
|
712
|
+
pipelineString = prettifyMarkdown(pipelineString);
|
|
713
|
+
for (const parameter of parameters.filter(({ isInput }) => isInput)) {
|
|
714
|
+
commands.push(`INPUT PARAMETER ${taskParameterJsonToString(parameter)}`);
|
|
715
|
+
}
|
|
716
|
+
for (const parameter of parameters.filter(({ isOutput }) => isOutput)) {
|
|
717
|
+
commands.push(`OUTPUT PARAMETER ${taskParameterJsonToString(parameter)}`);
|
|
718
|
+
}
|
|
719
|
+
pipelineString += '\n\n';
|
|
720
|
+
pipelineString += commands.map((command) => `- ${command}`).join('\n');
|
|
721
|
+
for (const task of tasks) {
|
|
722
|
+
const {
|
|
723
|
+
/* Note: Not using:> name, */
|
|
724
|
+
title, description,
|
|
725
|
+
/* Note: dependentParameterNames, */
|
|
726
|
+
jokerParameterNames: jokers, taskType, content, postprocessingFunctionNames: postprocessing, expectations, format, resultingParameterName, } = task;
|
|
727
|
+
pipelineString += '\n\n';
|
|
728
|
+
pipelineString += `## ${title}`;
|
|
729
|
+
if (description) {
|
|
730
|
+
pipelineString += '\n\n';
|
|
731
|
+
pipelineString += description;
|
|
732
|
+
}
|
|
733
|
+
const commands = [];
|
|
734
|
+
let contentLanguage = 'text';
|
|
735
|
+
if (taskType === 'PROMPT_TASK') {
|
|
736
|
+
const { modelRequirements } = task;
|
|
737
|
+
const { modelName, modelVariant } = modelRequirements || {};
|
|
738
|
+
// Note: Do nothing, it is default
|
|
739
|
+
// commands.push(`PROMPT`);
|
|
740
|
+
if (modelVariant) {
|
|
741
|
+
commands.push(`MODEL VARIANT ${capitalize(modelVariant)}`);
|
|
742
|
+
}
|
|
743
|
+
if (modelName) {
|
|
744
|
+
commands.push(`MODEL NAME \`${modelName}\``);
|
|
745
|
+
}
|
|
746
|
+
}
|
|
747
|
+
else if (taskType === 'SIMPLE_TASK') {
|
|
748
|
+
commands.push(`SIMPLE TEMPLATE`);
|
|
749
|
+
// Note: Nothing special here
|
|
750
|
+
}
|
|
751
|
+
else if (taskType === 'SCRIPT_TASK') {
|
|
752
|
+
commands.push(`SCRIPT`);
|
|
753
|
+
if (task.contentLanguage) {
|
|
754
|
+
contentLanguage = task.contentLanguage;
|
|
755
|
+
}
|
|
756
|
+
else {
|
|
757
|
+
contentLanguage = '';
|
|
758
|
+
}
|
|
759
|
+
}
|
|
760
|
+
else if (taskType === 'DIALOG_TASK') {
|
|
761
|
+
commands.push(`DIALOG`);
|
|
762
|
+
// Note: Nothing special here
|
|
763
|
+
} // <- }else if([🅱]
|
|
764
|
+
if (jokers) {
|
|
765
|
+
for (const joker of jokers) {
|
|
766
|
+
commands.push(`JOKER {${joker}}`);
|
|
767
|
+
}
|
|
768
|
+
} /* not else */
|
|
769
|
+
if (postprocessing) {
|
|
770
|
+
for (const postprocessingFunctionName of postprocessing) {
|
|
771
|
+
commands.push(`POSTPROCESSING \`${postprocessingFunctionName}\``);
|
|
772
|
+
}
|
|
773
|
+
} /* not else */
|
|
774
|
+
if (expectations) {
|
|
775
|
+
for (const [unit, { min, max }] of Object.entries(expectations)) {
|
|
776
|
+
if (min === max) {
|
|
777
|
+
commands.push(`EXPECT EXACTLY ${min} ${capitalize(unit + (min > 1 ? 's' : ''))}`);
|
|
778
|
+
}
|
|
779
|
+
else {
|
|
780
|
+
if (min !== undefined) {
|
|
781
|
+
commands.push(`EXPECT MIN ${min} ${capitalize(unit + (min > 1 ? 's' : ''))}`);
|
|
782
|
+
} /* not else */
|
|
783
|
+
if (max !== undefined) {
|
|
784
|
+
commands.push(`EXPECT MAX ${max} ${capitalize(unit + (max > 1 ? 's' : ''))}`);
|
|
785
|
+
}
|
|
786
|
+
}
|
|
787
|
+
}
|
|
788
|
+
} /* not else */
|
|
789
|
+
if (format) {
|
|
790
|
+
if (format === 'JSON') {
|
|
791
|
+
// TODO: @deprecated remove
|
|
792
|
+
commands.push(`FORMAT JSON`);
|
|
793
|
+
}
|
|
794
|
+
} /* not else */
|
|
795
|
+
pipelineString += '\n\n';
|
|
796
|
+
pipelineString += commands.map((command) => `- ${command}`).join('\n');
|
|
797
|
+
pipelineString += '\n\n';
|
|
798
|
+
pipelineString += '```' + contentLanguage;
|
|
799
|
+
pipelineString += '\n';
|
|
800
|
+
pipelineString += spaceTrim__default["default"](content);
|
|
801
|
+
// <- TODO: [main] !!3 Escape
|
|
802
|
+
// <- TODO: [🧠] Some clear strategy how to spaceTrim the blocks
|
|
803
|
+
pipelineString += '\n';
|
|
804
|
+
pipelineString += '```';
|
|
805
|
+
pipelineString += '\n\n';
|
|
806
|
+
pipelineString += `\`-> {${resultingParameterName}}\``; // <- TODO: [main] !!3 If the parameter here has description, add it and use taskParameterJsonToString
|
|
807
|
+
}
|
|
808
|
+
return validatePipelineString(pipelineString);
|
|
809
|
+
}
|
|
810
|
+
/**
 * Serializes a single task parameter into its pipeline-string form,
 * e.g. `{parameterName}` or `{parameterName} Its description`.
 *
 * @private internal utility of `pipelineJsonToString`
 */
function taskParameterJsonToString(taskParameterJson) {
    const { name, description } = taskParameterJson;
    if (!description) {
        return `{${name}}`;
    }
    return `{${name}} ${description}`;
}
|
|
821
|
+
/**
|
|
822
|
+
* TODO: [🛋] Implement new features and commands into `pipelineJsonToString` + `taskParameterJsonToString` , use `stringifyCommand`
|
|
823
|
+
* TODO: [🧠] Is there a way to auto-detect missing features in pipelineJsonToString
|
|
824
|
+
* TODO: [🏛] Maybe make some markdown builder
|
|
825
|
+
* TODO: [🏛] Escape all
|
|
826
|
+
* TODO: [🧠] Should be in generated .book.md file GENERATOR_WARNING
|
|
827
|
+
*/
|
|
828
|
+
|
|
829
|
+
/**
 * Orders JSON object by keys
 *
 * Note: Keys listed in `order` come first (in that order); any remaining keys of
 * `value` keep their original relative order. Keys in `order` that are missing
 * from `value` are materialized with the value `undefined`.
 *
 * @returns The same type of object as the input re-ordered
 * @public exported from `@promptbook/utils`
 */
function orderJson(options) {
    const { value, order } = options;
    // Build a scaffold whose key insertion order drives the result's key order
    const scaffold = {};
    if (order !== undefined) {
        for (const key of order) {
            scaffold[key] = undefined;
        }
    }
    // Spreading `value` last fills in the real values while preserving scaffold order
    return { ...scaffold, ...value };
}
|
|
781
843
|
|
|
782
844
|
/**
 * Freezes the given object and all its nested objects recursively
 *
 * Note: `$` is used to indicate that this function is not a pure function - it mutates given object
 * Note: This function mutates the object and returns the original (but mutated-deep-freezed) object
 *
 * @returns The same object as the input, but deeply frozen
 * @public exported from `@promptbook/utils`
 */
function $deepFreeze(objectValue) {
    if (Array.isArray(objectValue)) {
        // Freeze the array itself in place. (Previously a shallow *copy* of the array
        // was frozen and returned, which left the original array - and every nested
        // array reached through an object property, whose recursive result is
        // discarded - mutable, contradicting the documented contract above.)
        for (const item of objectValue) {
            if (item && typeof item === 'object') {
                $deepFreeze(item);
            }
        }
        return Object.freeze(objectValue);
    }
    const propertyNames = Object.getOwnPropertyNames(objectValue);
    for (const propertyName of propertyNames) {
        const value = objectValue[propertyName];
        if (value && typeof value === 'object') {
            $deepFreeze(value);
        }
    }
    Object.freeze(objectValue);
    return objectValue;
}
/**
 * TODO: [🧠] Is there a way how to meaningfully test this utility
 */
|
|
867
|
+
/**
|
|
868
|
+
* TODO: [🧠] Is there a way how to meaningfully test this utility
|
|
869
|
+
*/
|
|
805
870
|
|
|
806
871
|
/**
|
|
807
872
|
* Checks if the value is [🚉] serializable as JSON
|
|
@@ -894,9 +959,7 @@
|
|
|
894
959
|
JSON.stringify(value); // <- TODO: [0]
|
|
895
960
|
}
|
|
896
961
|
catch (error) {
|
|
897
|
-
|
|
898
|
-
throw error;
|
|
899
|
-
}
|
|
962
|
+
assertsError(error);
|
|
900
963
|
throw new UnexpectedError(spaceTrim__default["default"]((block) => `
|
|
901
964
|
\`${name}\` is not serializable
|
|
902
965
|
|
|
@@ -1491,7 +1554,7 @@
|
|
|
1491
1554
|
*/
|
|
1492
1555
|
function unpreparePipeline(pipeline) {
|
|
1493
1556
|
let { personas, knowledgeSources, tasks } = pipeline;
|
|
1494
|
-
personas = personas.map((persona) => ({ ...persona,
|
|
1557
|
+
personas = personas.map((persona) => ({ ...persona, modelsRequirements: undefined, preparationIds: undefined }));
|
|
1495
1558
|
knowledgeSources = knowledgeSources.map((knowledgeSource) => ({ ...knowledgeSource, preparationIds: undefined }));
|
|
1496
1559
|
tasks = tasks.map((task) => {
|
|
1497
1560
|
let { dependentParameterNames } = task;
|
|
@@ -1758,7 +1821,7 @@
|
|
|
1758
1821
|
};
|
|
1759
1822
|
}
|
|
1760
1823
|
|
|
1761
|
-
var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> 
{knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just 
title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the 
model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModelNames}` List of available model names separated by comma (,)\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n\\`\\`\\`json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. 
It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 
words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
|
|
1824
|
+
var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> 
{knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just 
title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModels",description:"List of available model names together with their descriptions as JSON",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelsRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n```json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n```\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting 
models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n```json\n{availableModels}\n```\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelsRequirements",format:"JSON",dependentParameterNames:["availableModels","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModels}` List of available model names together with their descriptions as JSON\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelsRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n\\`\\`\\`json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are 
the available models:\n\n\\`\\`\\`json\n{availableModels}\n\\`\\`\\`\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelsRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- 
Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
|
|
1762
1825
|
|
|
1763
1826
|
/**
|
|
1764
1827
|
* This error type indicates that some tools are missing for pipeline execution or preparation
|
|
@@ -1791,7 +1854,7 @@
|
|
|
1791
1854
|
if (pipeline.title === undefined || pipeline.title === '' || pipeline.title === DEFAULT_BOOK_TITLE) {
|
|
1792
1855
|
return false;
|
|
1793
1856
|
}
|
|
1794
|
-
if (!pipeline.personas.every((persona) => persona.
|
|
1857
|
+
if (!pipeline.personas.every((persona) => persona.modelsRequirements !== undefined)) {
|
|
1795
1858
|
return false;
|
|
1796
1859
|
}
|
|
1797
1860
|
if (!pipeline.knowledgeSources.every((knowledgeSource) => knowledgeSource.preparationIds !== undefined)) {
|
|
@@ -1830,6 +1893,45 @@
|
|
|
1830
1893
|
* TODO: Maybe use nanoid instead https://github.com/ai/nanoid
|
|
1831
1894
|
*/
|
|
1832
1895
|
|
|
1896
|
+
/**
 * Converts a JavaScript Object Notation (JSON) string into an object.
 *
 * Note: This is wrapper around `JSON.parse()` with better error and type handling
 *
 * @public exported from `@promptbook/utils`
 */
function jsonParse(value) {
    if (value === undefined) {
        throw new Error(`Can not parse JSON from undefined value.`);
    }
    if (typeof value !== 'string') {
        // Log the offending value separately so the full payload is inspectable in console
        console.error('Can not parse JSON from non-string value.', { text: value });
        throw new Error(spaceTrim__default["default"](`
            Can not parse JSON from non-string value.

            The value type: ${typeof value}
            See more in console.
        `));
    }
    try {
        return JSON.parse(value);
    }
    catch (parsingError) {
        // Non-Error throwables are rethrown untouched - only real Errors get wrapped
        if (!(parsingError instanceof Error)) {
            throw parsingError;
        }
        // Wrap with the offending text so the caller sees what failed to parse
        throw new Error(spaceTrim__default["default"]((block) => `
            ${block(parsingError.message)}

            The JSON text:
            ${block(value)}
        `));
    }
}
/**
 * TODO: !!!! Use in Promptbook.studio
 */
|
|
1931
|
+
/**
|
|
1932
|
+
* TODO: !!!! Use in Promptbook.studio
|
|
1933
|
+
*/
|
|
1934
|
+
|
|
1833
1935
|
/**
|
|
1834
1936
|
* Recursively converts JSON strings to JSON objects
|
|
1835
1937
|
|
|
@@ -1848,7 +1950,7 @@
|
|
|
1848
1950
|
const newObject = { ...object };
|
|
1849
1951
|
for (const [key, value] of Object.entries(object)) {
|
|
1850
1952
|
if (typeof value === 'string' && isValidJsonString(value)) {
|
|
1851
|
-
newObject[key] =
|
|
1953
|
+
newObject[key] = jsonParse(value);
|
|
1852
1954
|
}
|
|
1853
1955
|
else {
|
|
1854
1956
|
newObject[key] = jsonStringsToJsons(value);
|
|
@@ -1876,7 +1978,7 @@
|
|
|
1876
1978
|
}
|
|
1877
1979
|
}
|
|
1878
1980
|
/**
|
|
1879
|
-
* TODO:
|
|
1981
|
+
* TODO: [🧠][🌂] Add id to all errors
|
|
1880
1982
|
*/
|
|
1881
1983
|
|
|
1882
1984
|
/**
|
|
@@ -2046,7 +2148,10 @@
|
|
|
2046
2148
|
PipelineExecutionError,
|
|
2047
2149
|
PipelineLogicError,
|
|
2048
2150
|
PipelineUrlError,
|
|
2151
|
+
AuthenticationError,
|
|
2152
|
+
PromptbookFetchError,
|
|
2049
2153
|
UnexpectedError,
|
|
2154
|
+
WrappedError,
|
|
2050
2155
|
// TODO: [🪑]> VersionMismatchError,
|
|
2051
2156
|
};
|
|
2052
2157
|
/**
|
|
@@ -2063,8 +2168,6 @@
|
|
|
2063
2168
|
TypeError,
|
|
2064
2169
|
URIError,
|
|
2065
2170
|
AggregateError,
|
|
2066
|
-
AuthenticationError,
|
|
2067
|
-
PromptbookFetchError,
|
|
2068
2171
|
/*
|
|
2069
2172
|
Note: Not widely supported
|
|
2070
2173
|
> InternalError,
|
|
@@ -2187,8 +2290,8 @@
|
|
|
2187
2290
|
updatedAt = new Date();
|
|
2188
2291
|
errors.push(...executionResult.errors);
|
|
2189
2292
|
warnings.push(...executionResult.warnings);
|
|
2190
|
-
// <- TODO:
|
|
2191
|
-
// TODO: [🧠]
|
|
2293
|
+
// <- TODO: [🌂] Only unique errors and warnings should be added (or filtered)
|
|
2294
|
+
// TODO: [🧠] !! errors, warning, isSuccessful are redundant both in `ExecutionTask` and `ExecutionTask.currentValue`
|
|
2192
2295
|
// Also maybe move `ExecutionTask.currentValue.usage` -> `ExecutionTask.usage`
|
|
2193
2296
|
// And delete `ExecutionTask.currentValue.preparedPipeline`
|
|
2194
2297
|
assertsTaskSuccessful(executionResult);
|
|
@@ -2198,6 +2301,7 @@
|
|
|
2198
2301
|
partialResultSubject.next(executionResult);
|
|
2199
2302
|
}
|
|
2200
2303
|
catch (error) {
|
|
2304
|
+
assertsError(error);
|
|
2201
2305
|
status = 'ERROR';
|
|
2202
2306
|
errors.push(error);
|
|
2203
2307
|
partialResultSubject.error(error);
|
|
@@ -2343,13 +2447,19 @@
|
|
|
2343
2447
|
return value.toISOString();
|
|
2344
2448
|
}
|
|
2345
2449
|
else {
|
|
2346
|
-
|
|
2450
|
+
try {
|
|
2451
|
+
return JSON.stringify(value);
|
|
2452
|
+
}
|
|
2453
|
+
catch (error) {
|
|
2454
|
+
if (error instanceof TypeError && error.message.includes('circular structure')) {
|
|
2455
|
+
return VALUE_STRINGS.circular;
|
|
2456
|
+
}
|
|
2457
|
+
throw error;
|
|
2458
|
+
}
|
|
2347
2459
|
}
|
|
2348
2460
|
}
|
|
2349
2461
|
catch (error) {
|
|
2350
|
-
|
|
2351
|
-
throw error;
|
|
2352
|
-
}
|
|
2462
|
+
assertsError(error);
|
|
2353
2463
|
console.error(error);
|
|
2354
2464
|
return VALUE_STRINGS.unserializable;
|
|
2355
2465
|
}
|
|
@@ -2521,9 +2631,7 @@
|
|
|
2521
2631
|
}
|
|
2522
2632
|
}
|
|
2523
2633
|
catch (error) {
|
|
2524
|
-
|
|
2525
|
-
throw error;
|
|
2526
|
-
}
|
|
2634
|
+
assertsError(error);
|
|
2527
2635
|
throw new ParseError(spaceTrim.spaceTrim((block) => `
|
|
2528
2636
|
Can not extract variables from the script
|
|
2529
2637
|
${block(error.stack || error.message)}
|
|
@@ -2642,6 +2750,46 @@
|
|
|
2642
2750
|
// encoding: 'utf-8',
|
|
2643
2751
|
});
|
|
2644
2752
|
|
|
2753
|
+
/**
|
|
2754
|
+
* Function to check if a string is valid CSV
|
|
2755
|
+
*
|
|
2756
|
+
* @param value The string to check
|
|
2757
|
+
* @returns True if the string is a valid CSV string, false otherwise
|
|
2758
|
+
*
|
|
2759
|
+
* @public exported from `@promptbook/utils`
|
|
2760
|
+
*/
|
|
2761
|
+
function isValidCsvString(value) {
|
|
2762
|
+
try {
|
|
2763
|
+
// A simple check for CSV format: at least one comma and no invalid characters
|
|
2764
|
+
if (value.includes(',') && /^[\w\s,"']+$/.test(value)) {
|
|
2765
|
+
return true;
|
|
2766
|
+
}
|
|
2767
|
+
return false;
|
|
2768
|
+
}
|
|
2769
|
+
catch (error) {
|
|
2770
|
+
assertsError(error);
|
|
2771
|
+
return false;
|
|
2772
|
+
}
|
|
2773
|
+
}
|
|
2774
|
+
|
|
2775
|
+
/**
|
|
2776
|
+
* Converts a CSV string into an object
|
|
2777
|
+
*
|
|
2778
|
+
* Note: This is wrapper around `papaparse.parse()` with better autohealing
|
|
2779
|
+
*
|
|
2780
|
+
* @private - for now until `@promptbook/csv` is released
|
|
2781
|
+
*/
|
|
2782
|
+
function csvParse(value /* <- TODO: string_csv */, settings, schema /* <- TODO: Make CSV Schemas */) {
|
|
2783
|
+
settings = { ...settings, ...MANDATORY_CSV_SETTINGS };
|
|
2784
|
+
// Note: Autoheal invalid '\n' characters
|
|
2785
|
+
if (settings.newline && !settings.newline.includes('\r') && value.includes('\r')) {
|
|
2786
|
+
console.warn('CSV string contains carriage return characters, but in the CSV settings the `newline` setting does not include them. Autohealing the CSV string.');
|
|
2787
|
+
value = value.replace(/\r\n/g, '\n').replace(/\r/g, '\n');
|
|
2788
|
+
}
|
|
2789
|
+
const csv = papaparse.parse(value, settings);
|
|
2790
|
+
return csv;
|
|
2791
|
+
}
|
|
2792
|
+
|
|
2645
2793
|
/**
|
|
2646
2794
|
* Definition for CSV spreadsheet
|
|
2647
2795
|
*
|
|
@@ -2652,7 +2800,7 @@
|
|
|
2652
2800
|
formatName: 'CSV',
|
|
2653
2801
|
aliases: ['SPREADSHEET', 'TABLE'],
|
|
2654
2802
|
isValid(value, settings, schema) {
|
|
2655
|
-
return
|
|
2803
|
+
return isValidCsvString(value);
|
|
2656
2804
|
},
|
|
2657
2805
|
canBeValid(partialValue, settings, schema) {
|
|
2658
2806
|
return true;
|
|
@@ -2664,8 +2812,7 @@
|
|
|
2664
2812
|
{
|
|
2665
2813
|
subvalueName: 'ROW',
|
|
2666
2814
|
async mapValues(value, outputParameterName, settings, mapCallback) {
|
|
2667
|
-
|
|
2668
|
-
const csv = papaparse.parse(value, { ...settings, ...MANDATORY_CSV_SETTINGS });
|
|
2815
|
+
const csv = csvParse(value, settings);
|
|
2669
2816
|
if (csv.errors.length !== 0) {
|
|
2670
2817
|
throw new CsvFormatError(spaceTrim__default["default"]((block) => `
|
|
2671
2818
|
CSV parsing error
|
|
@@ -2695,8 +2842,7 @@
|
|
|
2695
2842
|
{
|
|
2696
2843
|
subvalueName: 'CELL',
|
|
2697
2844
|
async mapValues(value, outputParameterName, settings, mapCallback) {
|
|
2698
|
-
|
|
2699
|
-
const csv = papaparse.parse(value, { ...settings, ...MANDATORY_CSV_SETTINGS });
|
|
2845
|
+
const csv = csvParse(value, settings);
|
|
2700
2846
|
if (csv.errors.length !== 0) {
|
|
2701
2847
|
throw new CsvFormatError(spaceTrim__default["default"]((block) => `
|
|
2702
2848
|
CSV parsing error
|
|
@@ -2806,6 +2952,30 @@
|
|
|
2806
2952
|
* TODO: [🏢] Allow to expect something inside each item of list and other formats
|
|
2807
2953
|
*/
|
|
2808
2954
|
|
|
2955
|
+
/**
|
|
2956
|
+
* Function to check if a string is valid XML
|
|
2957
|
+
*
|
|
2958
|
+
* @param value
|
|
2959
|
+
* @returns True if the string is a valid XML string, false otherwise
|
|
2960
|
+
*
|
|
2961
|
+
* @public exported from `@promptbook/utils`
|
|
2962
|
+
*/
|
|
2963
|
+
function isValidXmlString(value) {
|
|
2964
|
+
try {
|
|
2965
|
+
const parser = new DOMParser();
|
|
2966
|
+
const parsedDocument = parser.parseFromString(value, 'application/xml');
|
|
2967
|
+
const parserError = parsedDocument.getElementsByTagName('parsererror');
|
|
2968
|
+
if (parserError.length > 0) {
|
|
2969
|
+
return false;
|
|
2970
|
+
}
|
|
2971
|
+
return true;
|
|
2972
|
+
}
|
|
2973
|
+
catch (error) {
|
|
2974
|
+
assertsError(error);
|
|
2975
|
+
return false;
|
|
2976
|
+
}
|
|
2977
|
+
}
|
|
2978
|
+
|
|
2809
2979
|
/**
|
|
2810
2980
|
* Definition for XML format
|
|
2811
2981
|
*
|
|
@@ -2815,7 +2985,7 @@
|
|
|
2815
2985
|
formatName: 'XML',
|
|
2816
2986
|
mimeType: 'application/xml',
|
|
2817
2987
|
isValid(value, settings, schema) {
|
|
2818
|
-
return
|
|
2988
|
+
return isValidXmlString(value);
|
|
2819
2989
|
},
|
|
2820
2990
|
canBeValid(partialValue, settings, schema) {
|
|
2821
2991
|
return true;
|
|
@@ -3012,14 +3182,15 @@
|
|
|
3012
3182
|
}
|
|
3013
3183
|
}
|
|
3014
3184
|
catch (error) {
|
|
3015
|
-
|
|
3185
|
+
assertsError(error);
|
|
3186
|
+
if (error instanceof UnexpectedError) {
|
|
3016
3187
|
throw error;
|
|
3017
3188
|
}
|
|
3018
3189
|
errors.push({ llmExecutionTools, error });
|
|
3019
3190
|
}
|
|
3020
3191
|
}
|
|
3021
3192
|
if (errors.length === 1) {
|
|
3022
|
-
throw errors[0];
|
|
3193
|
+
throw errors[0].error;
|
|
3023
3194
|
}
|
|
3024
3195
|
else if (errors.length > 1) {
|
|
3025
3196
|
throw new PipelineExecutionError(
|
|
@@ -3898,9 +4069,7 @@
|
|
|
3898
4069
|
break scripts;
|
|
3899
4070
|
}
|
|
3900
4071
|
catch (error) {
|
|
3901
|
-
|
|
3902
|
-
throw error;
|
|
3903
|
-
}
|
|
4072
|
+
assertsError(error);
|
|
3904
4073
|
if (error instanceof UnexpectedError) {
|
|
3905
4074
|
throw error;
|
|
3906
4075
|
}
|
|
@@ -3970,9 +4139,7 @@
|
|
|
3970
4139
|
break scripts;
|
|
3971
4140
|
}
|
|
3972
4141
|
catch (error) {
|
|
3973
|
-
|
|
3974
|
-
throw error;
|
|
3975
|
-
}
|
|
4142
|
+
assertsError(error);
|
|
3976
4143
|
if (error instanceof UnexpectedError) {
|
|
3977
4144
|
throw error;
|
|
3978
4145
|
}
|
|
@@ -4215,13 +4382,79 @@
|
|
|
4215
4382
|
/**
|
|
4216
4383
|
* @@@
|
|
4217
4384
|
*
|
|
4385
|
+
* Here is the place where RAG (retrieval-augmented generation) happens
|
|
4386
|
+
*
|
|
4218
4387
|
* @private internal utility of `createPipelineExecutor`
|
|
4219
4388
|
*/
|
|
4220
4389
|
async function getKnowledgeForTask(options) {
|
|
4221
|
-
const { preparedPipeline, task } = options;
|
|
4222
|
-
|
|
4390
|
+
const { tools, preparedPipeline, task } = options;
|
|
4391
|
+
const firstKnowlegePiece = preparedPipeline.knowledgePieces[0];
|
|
4392
|
+
const firstKnowlegeIndex = firstKnowlegePiece === null || firstKnowlegePiece === void 0 ? void 0 : firstKnowlegePiece.index[0];
|
|
4393
|
+
// <- TODO: Do not use just first knowledge piece and first index to determine embedding model, use also keyword search
|
|
4394
|
+
if (firstKnowlegePiece === undefined || firstKnowlegeIndex === undefined) {
|
|
4395
|
+
return 'No knowledge pieces found';
|
|
4396
|
+
}
|
|
4397
|
+
// TODO: [🚐] Make arrayable LLMs -> single LLM DRY
|
|
4398
|
+
const _llms = arrayableToArray(tools.llm);
|
|
4399
|
+
const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
|
|
4400
|
+
const taskEmbeddingPrompt = {
|
|
4401
|
+
title: 'Knowledge Search',
|
|
4402
|
+
modelRequirements: {
|
|
4403
|
+
modelVariant: 'EMBEDDING',
|
|
4404
|
+
modelName: firstKnowlegeIndex.modelName,
|
|
4405
|
+
},
|
|
4406
|
+
content: task.content,
|
|
4407
|
+
parameters: {
|
|
4408
|
+
/* !!!!!!!! */
|
|
4409
|
+
},
|
|
4410
|
+
};
|
|
4411
|
+
const taskEmbeddingResult = await llmTools.callEmbeddingModel(taskEmbeddingPrompt);
|
|
4412
|
+
const knowledgePiecesWithRelevance = preparedPipeline.knowledgePieces.map((knowledgePiece) => {
|
|
4413
|
+
const { index } = knowledgePiece;
|
|
4414
|
+
const knowledgePieceIndex = index.find((i) => i.modelName === firstKnowlegeIndex.modelName);
|
|
4415
|
+
// <- TODO: Do not use just first knowledge piece and first index to determine embedding model
|
|
4416
|
+
if (knowledgePieceIndex === undefined) {
|
|
4417
|
+
return {
|
|
4418
|
+
content: knowledgePiece.content,
|
|
4419
|
+
relevance: 0,
|
|
4420
|
+
};
|
|
4421
|
+
}
|
|
4422
|
+
const relevance = computeCosineSimilarity(knowledgePieceIndex.position, taskEmbeddingResult.content);
|
|
4423
|
+
return {
|
|
4424
|
+
content: knowledgePiece.content,
|
|
4425
|
+
relevance,
|
|
4426
|
+
};
|
|
4427
|
+
});
|
|
4428
|
+
const knowledgePiecesSorted = knowledgePiecesWithRelevance.sort((a, b) => a.relevance - b.relevance);
|
|
4429
|
+
const knowledgePiecesLimited = knowledgePiecesSorted.slice(0, 5);
|
|
4430
|
+
console.log('!!! Embedding', {
|
|
4431
|
+
task,
|
|
4432
|
+
taskEmbeddingPrompt,
|
|
4433
|
+
taskEmbeddingResult,
|
|
4434
|
+
firstKnowlegePiece,
|
|
4435
|
+
firstKnowlegeIndex,
|
|
4436
|
+
knowledgePiecesWithRelevance,
|
|
4437
|
+
knowledgePiecesSorted,
|
|
4438
|
+
knowledgePiecesLimited,
|
|
4439
|
+
});
|
|
4440
|
+
return knowledgePiecesLimited.map(({ content }) => `- ${content}`).join('\n');
|
|
4223
4441
|
// <- TODO: [🧠] Some smart aggregation of knowledge pieces, single-line vs multi-line vs mixed
|
|
4224
4442
|
}
|
|
4443
|
+
// TODO: !!!!!! Annotate + to new file
|
|
4444
|
+
function computeCosineSimilarity(embeddingVector1, embeddingVector2) {
|
|
4445
|
+
if (embeddingVector1.length !== embeddingVector2.length) {
|
|
4446
|
+
throw new TypeError('Embedding vectors must have the same length');
|
|
4447
|
+
}
|
|
4448
|
+
const dotProduct = embeddingVector1.reduce((sum, value, index) => sum + value * embeddingVector2[index], 0);
|
|
4449
|
+
const magnitude1 = Math.sqrt(embeddingVector1.reduce((sum, value) => sum + value * value, 0));
|
|
4450
|
+
const magnitude2 = Math.sqrt(embeddingVector2.reduce((sum, value) => sum + value * value, 0));
|
|
4451
|
+
return 1 - dotProduct / (magnitude1 * magnitude2);
|
|
4452
|
+
}
|
|
4453
|
+
/**
|
|
4454
|
+
* TODO: !!!! Verify if this is working
|
|
4455
|
+
* TODO: [♨] Implement Better - use keyword search
|
|
4456
|
+
* TODO: [♨] Examples of values
|
|
4457
|
+
*/
|
|
4225
4458
|
|
|
4226
4459
|
/**
|
|
4227
4460
|
* @@@
|
|
@@ -4229,9 +4462,9 @@
|
|
|
4229
4462
|
* @private internal utility of `createPipelineExecutor`
|
|
4230
4463
|
*/
|
|
4231
4464
|
async function getReservedParametersForTask(options) {
|
|
4232
|
-
const { preparedPipeline, task, pipelineIdentification } = options;
|
|
4465
|
+
const { tools, preparedPipeline, task, pipelineIdentification } = options;
|
|
4233
4466
|
const context = await getContextForTask(); // <- [🏍]
|
|
4234
|
-
const knowledge = await getKnowledgeForTask({ preparedPipeline, task });
|
|
4467
|
+
const knowledge = await getKnowledgeForTask({ tools, preparedPipeline, task });
|
|
4235
4468
|
const examples = await getExamplesForTask();
|
|
4236
4469
|
const currentDate = new Date().toISOString(); // <- TODO: [🧠][💩] Better
|
|
4237
4470
|
const modelName = RESERVED_PARAMETER_MISSING_VALUE;
|
|
@@ -4293,6 +4526,7 @@
|
|
|
4293
4526
|
}
|
|
4294
4527
|
const definedParameters = Object.freeze({
|
|
4295
4528
|
...(await getReservedParametersForTask({
|
|
4529
|
+
tools,
|
|
4296
4530
|
preparedPipeline,
|
|
4297
4531
|
task: currentTask,
|
|
4298
4532
|
pipelineIdentification,
|
|
@@ -4593,9 +4827,7 @@
|
|
|
4593
4827
|
await Promise.all(resolving);
|
|
4594
4828
|
}
|
|
4595
4829
|
catch (error /* <- Note: [3] */) {
|
|
4596
|
-
|
|
4597
|
-
throw error;
|
|
4598
|
-
}
|
|
4830
|
+
assertsError(error);
|
|
4599
4831
|
// Note: No need to rethrow UnexpectedError
|
|
4600
4832
|
// if (error instanceof UnexpectedError) {
|
|
4601
4833
|
// Note: Count usage, [🧠] Maybe put to separate function executionReportJsonToUsage + DRY [🤹♂️]
|
|
@@ -4851,27 +5083,48 @@
|
|
|
4851
5083
|
pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book'),
|
|
4852
5084
|
tools,
|
|
4853
5085
|
});
|
|
4854
|
-
// TODO: [🚐] Make arrayable LLMs -> single LLM DRY
|
|
4855
5086
|
const _llms = arrayableToArray(tools.llm);
|
|
4856
5087
|
const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
|
|
4857
|
-
const availableModels = await llmTools.listModels()
|
|
4858
|
-
const availableModelNames = availableModels
|
|
5088
|
+
const availableModels = (await llmTools.listModels())
|
|
4859
5089
|
.filter(({ modelVariant }) => modelVariant === 'CHAT')
|
|
4860
|
-
.map(({ modelName }) =>
|
|
4861
|
-
|
|
4862
|
-
|
|
5090
|
+
.map(({ modelName, modelDescription }) => ({
|
|
5091
|
+
modelName,
|
|
5092
|
+
modelDescription,
|
|
5093
|
+
// <- Note: `modelTitle` and `modelVariant` is not relevant for this task
|
|
5094
|
+
}));
|
|
5095
|
+
const result = await preparePersonaExecutor({
|
|
5096
|
+
availableModels /* <- Note: Passing as JSON */,
|
|
5097
|
+
personaDescription,
|
|
5098
|
+
}).asPromise();
|
|
4863
5099
|
const { outputParameters } = result;
|
|
4864
|
-
const {
|
|
4865
|
-
|
|
5100
|
+
const { modelsRequirements: modelsRequirementsJson } = outputParameters;
|
|
5101
|
+
let modelsRequirementsUnchecked = jsonParse(modelsRequirementsJson);
|
|
4866
5102
|
if (isVerbose) {
|
|
4867
|
-
console.info(`PERSONA ${personaDescription}`,
|
|
5103
|
+
console.info(`PERSONA ${personaDescription}`, modelsRequirementsUnchecked);
|
|
4868
5104
|
}
|
|
4869
|
-
|
|
4870
|
-
|
|
5105
|
+
if (!Array.isArray(modelsRequirementsUnchecked)) {
|
|
5106
|
+
// <- TODO: Book should have syntax and system to enforce shape of JSON
|
|
5107
|
+
modelsRequirementsUnchecked = [modelsRequirementsUnchecked];
|
|
5108
|
+
/*
|
|
5109
|
+
throw new UnexpectedError(
|
|
5110
|
+
spaceTrim(
|
|
5111
|
+
(block) => `
|
|
5112
|
+
Invalid \`modelsRequirements\`:
|
|
5113
|
+
|
|
5114
|
+
\`\`\`json
|
|
5115
|
+
${block(JSON.stringify(modelsRequirementsUnchecked, null, 4))}
|
|
5116
|
+
\`\`\`
|
|
5117
|
+
`,
|
|
5118
|
+
),
|
|
5119
|
+
);
|
|
5120
|
+
*/
|
|
5121
|
+
}
|
|
5122
|
+
const modelsRequirements = modelsRequirementsUnchecked.map((modelRequirements) => ({
|
|
4871
5123
|
modelVariant: 'CHAT',
|
|
4872
|
-
|
|
4873
|
-
|
|
4874
|
-
|
|
5124
|
+
...modelRequirements,
|
|
5125
|
+
}));
|
|
5126
|
+
return {
|
|
5127
|
+
modelsRequirements,
|
|
4875
5128
|
};
|
|
4876
5129
|
}
|
|
4877
5130
|
/**
|
|
@@ -5311,9 +5564,7 @@
|
|
|
5311
5564
|
return await fetch(urlOrRequest, init);
|
|
5312
5565
|
}
|
|
5313
5566
|
catch (error) {
|
|
5314
|
-
|
|
5315
|
-
throw error;
|
|
5316
|
-
}
|
|
5567
|
+
assertsError(error);
|
|
5317
5568
|
let url;
|
|
5318
5569
|
if (typeof urlOrRequest === 'string') {
|
|
5319
5570
|
url = urlOrRequest;
|
|
@@ -5442,7 +5693,7 @@
|
|
|
5442
5693
|
> },
|
|
5443
5694
|
*/
|
|
5444
5695
|
async asJson() {
|
|
5445
|
-
return
|
|
5696
|
+
return jsonParse(await tools.fs.readFile(filename, 'utf-8'));
|
|
5446
5697
|
},
|
|
5447
5698
|
async asText() {
|
|
5448
5699
|
return await tools.fs.readFile(filename, 'utf-8');
|
|
@@ -5544,9 +5795,7 @@
|
|
|
5544
5795
|
knowledgePreparedUnflatten[index] = pieces;
|
|
5545
5796
|
}
|
|
5546
5797
|
catch (error) {
|
|
5547
|
-
|
|
5548
|
-
throw error;
|
|
5549
|
-
}
|
|
5798
|
+
assertsError(error);
|
|
5550
5799
|
console.warn(error);
|
|
5551
5800
|
// <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
|
|
5552
5801
|
}
|
|
@@ -5702,14 +5951,14 @@
|
|
|
5702
5951
|
// TODO: [🖌][🧠] Implement some `mapAsync` function
|
|
5703
5952
|
const preparedPersonas = new Array(personas.length);
|
|
5704
5953
|
await forEachAsync(personas, { maxParallelCount /* <- TODO: [🪂] When there are subtasks, this maximul limit can be broken */ }, async (persona, index) => {
|
|
5705
|
-
const
|
|
5954
|
+
const { modelsRequirements } = await preparePersona(persona.description, { ...tools, llm: llmToolsWithUsage }, {
|
|
5706
5955
|
rootDirname,
|
|
5707
5956
|
maxParallelCount /* <- TODO: [🪂] */,
|
|
5708
5957
|
isVerbose,
|
|
5709
5958
|
});
|
|
5710
5959
|
const preparedPersona = {
|
|
5711
5960
|
...persona,
|
|
5712
|
-
|
|
5961
|
+
modelsRequirements,
|
|
5713
5962
|
preparationIds: [/* TODO: [🧊] -> */ currentPreparation.id],
|
|
5714
5963
|
// <- TODO: [🍙] Make some standard order of json properties
|
|
5715
5964
|
};
|
|
@@ -6344,6 +6593,8 @@
|
|
|
6344
6593
|
*/
|
|
6345
6594
|
|
|
6346
6595
|
/**
|
|
6596
|
+
import { WrappedError } from '../../errors/WrappedError';
|
|
6597
|
+
import { assertsError } from '../../errors/assertsError';
|
|
6347
6598
|
* Parses the expect command
|
|
6348
6599
|
*
|
|
6349
6600
|
* @see `documentationUrl` for more details
|
|
@@ -6435,9 +6686,7 @@
|
|
|
6435
6686
|
};
|
|
6436
6687
|
}
|
|
6437
6688
|
catch (error) {
|
|
6438
|
-
|
|
6439
|
-
throw error;
|
|
6440
|
-
}
|
|
6689
|
+
assertsError(error);
|
|
6441
6690
|
throw new ParseError(spaceTrim__default["default"]((block) => `
|
|
6442
6691
|
Invalid FORMAT command
|
|
6443
6692
|
${block(error.message)}:
|
|
@@ -9798,6 +10047,81 @@
|
|
|
9798
10047
|
},
|
|
9799
10048
|
};
|
|
9800
10049
|
|
|
10050
|
+
/**
|
|
10051
|
+
* Creates a wrapper around LlmExecutionTools that only exposes models matching the filter function
|
|
10052
|
+
*
|
|
10053
|
+
* @param llmTools The original LLM execution tools to wrap
|
|
10054
|
+
* @param modelFilter Function that determines whether a model should be included
|
|
10055
|
+
* @returns A new LlmExecutionTools instance with filtered models
|
|
10056
|
+
*
|
|
10057
|
+
* @public exported from `@promptbook/core`
|
|
10058
|
+
*/
|
|
10059
|
+
function filterModels(llmTools, modelFilter) {
|
|
10060
|
+
const filteredTools = {
|
|
10061
|
+
// Keep all properties from the original llmTools
|
|
10062
|
+
...llmTools,
|
|
10063
|
+
get description() {
|
|
10064
|
+
return `${llmTools.description} (filtered)`;
|
|
10065
|
+
},
|
|
10066
|
+
// Override listModels to filter the models
|
|
10067
|
+
async listModels() {
|
|
10068
|
+
const originalModels = await llmTools.listModels();
|
|
10069
|
+
// Handle both synchronous and Promise return types
|
|
10070
|
+
if (originalModels instanceof Promise) {
|
|
10071
|
+
return originalModels.then((models) => models.filter(modelFilter));
|
|
10072
|
+
}
|
|
10073
|
+
else {
|
|
10074
|
+
return originalModels.filter(modelFilter);
|
|
10075
|
+
}
|
|
10076
|
+
},
|
|
10077
|
+
};
|
|
10078
|
+
// Helper function to validate if a model is allowed
|
|
10079
|
+
async function isModelAllowed(modelName) {
|
|
10080
|
+
const models = await filteredTools.listModels();
|
|
10081
|
+
return models.some((model) => model.modelName === modelName);
|
|
10082
|
+
}
|
|
10083
|
+
// Override callChatModel if it exists in the original tools
|
|
10084
|
+
if (llmTools.callChatModel) {
|
|
10085
|
+
filteredTools.callChatModel = async (prompt) => {
|
|
10086
|
+
var _a;
|
|
10087
|
+
const modelName = (_a = prompt.modelRequirements) === null || _a === void 0 ? void 0 : _a.modelName;
|
|
10088
|
+
// If a specific model is requested, check if it's allowed
|
|
10089
|
+
if (modelName && !(await isModelAllowed(modelName))) {
|
|
10090
|
+
throw new PipelineExecutionError(`Model ${modelName} is not allowed by the filter for chat calls`);
|
|
10091
|
+
}
|
|
10092
|
+
return llmTools.callChatModel(prompt);
|
|
10093
|
+
};
|
|
10094
|
+
}
|
|
10095
|
+
// Override callCompletionModel if it exists in the original tools
|
|
10096
|
+
if (llmTools.callCompletionModel) {
|
|
10097
|
+
filteredTools.callCompletionModel = async (prompt) => {
|
|
10098
|
+
var _a;
|
|
10099
|
+
const modelName = (_a = prompt.modelRequirements) === null || _a === void 0 ? void 0 : _a.modelName;
|
|
10100
|
+
// If a specific model is requested, check if it's allowed
|
|
10101
|
+
if (modelName && !(await isModelAllowed(modelName))) {
|
|
10102
|
+
throw new PipelineExecutionError(`Model ${modelName} is not allowed by the filter for completion calls`);
|
|
10103
|
+
}
|
|
10104
|
+
return llmTools.callCompletionModel(prompt);
|
|
10105
|
+
};
|
|
10106
|
+
}
|
|
10107
|
+
// Override callEmbeddingModel if it exists in the original tools
|
|
10108
|
+
if (llmTools.callEmbeddingModel) {
|
|
10109
|
+
filteredTools.callEmbeddingModel = async (prompt) => {
|
|
10110
|
+
var _a;
|
|
10111
|
+
const modelName = (_a = prompt.modelRequirements) === null || _a === void 0 ? void 0 : _a.modelName;
|
|
10112
|
+
// If a specific model is requested, check if it's allowed
|
|
10113
|
+
if (modelName && !(await isModelAllowed(modelName))) {
|
|
10114
|
+
throw new PipelineExecutionError(`Model ${modelName} is not allowed by the filter for embedding calls`);
|
|
10115
|
+
}
|
|
10116
|
+
return llmTools.callEmbeddingModel(prompt);
|
|
10117
|
+
};
|
|
10118
|
+
}
|
|
10119
|
+
return filteredTools;
|
|
10120
|
+
}
|
|
10121
|
+
/**
|
|
10122
|
+
* TODO: !!! [models] Test that this is working
|
|
10123
|
+
*/
|
|
10124
|
+
|
|
9801
10125
|
/**
|
|
9802
10126
|
* @@@
|
|
9803
10127
|
*
|
|
@@ -10015,6 +10339,48 @@
|
|
|
10015
10339
|
* TODO: [®] DRY Register logic
|
|
10016
10340
|
*/
|
|
10017
10341
|
|
|
10342
|
+
/**
|
|
10343
|
+
* How is the model provider trusted?
|
|
10344
|
+
*
|
|
10345
|
+
* @public exported from `@promptbook/core`
|
|
10346
|
+
*/
|
|
10347
|
+
const MODEL_TRUST_LEVEL = {
|
|
10348
|
+
FULL: `Model is running on the local machine, training data and model weights are known, data are ethically sourced`,
|
|
10349
|
+
OPEN: `Model is open source, training data and model weights are known`,
|
|
10350
|
+
PARTIALLY_OPEN: `Model is open source, but training data and model weights are not (fully) known`,
|
|
10351
|
+
CLOSED_LOCAL: `Model can be run locally, but it is not open source`,
|
|
10352
|
+
CLOSED_FREE: `Model is behind API gateway but free to use`,
|
|
10353
|
+
CLOSED_BUSINESS: `Model is behind API gateway and paid but has good SLA, TOS, privacy policy and in general is a good to use in business applications`,
|
|
10354
|
+
CLOSED: `Model is behind API gateway and paid`,
|
|
10355
|
+
UNTRUSTED: `Model has questions about the training data and ethics, but it is not known if it is a problem or not`,
|
|
10356
|
+
VURNABLE: `Model has some known serious vulnerabilities, leaks, ethical problems, etc.`,
|
|
10357
|
+
};
|
|
10358
|
+
// <- TODO: Maybe do better levels of trust
|
|
10359
|
+
/**
|
|
10360
|
+
* How is the model provider important?
|
|
10361
|
+
*
|
|
10362
|
+
* @public exported from `@promptbook/core`
|
|
10363
|
+
*/
|
|
10364
|
+
const MODEL_ORDER = {
|
|
10365
|
+
/**
|
|
10366
|
+
* Top-tier models, e.g. OpenAI, Anthropic,...
|
|
10367
|
+
*/
|
|
10368
|
+
TOP_TIER: 333,
|
|
10369
|
+
/**
|
|
10370
|
+
* Mid-tier models, e.g. Llama, Mistral, etc.
|
|
10371
|
+
*/
|
|
10372
|
+
NORMAL: 100,
|
|
10373
|
+
/**
|
|
10374
|
+
* Low-tier models, e.g. Phi, Tiny, etc.
|
|
10375
|
+
*/
|
|
10376
|
+
LOW_TIER: 0,
|
|
10377
|
+
};
|
|
10378
|
+
/**
|
|
10379
|
+
* TODO: Add configuration schema and maybe some documentation link
|
|
10380
|
+
* TODO: Maybe constrain LlmToolsConfiguration[number] by generic to ensure that `createConfigurationFromEnv` and `getBoilerplateConfiguration` always create same `packageName` and `className`
|
|
10381
|
+
* TODO: [®] DRY Register logic
|
|
10382
|
+
*/
|
|
10383
|
+
|
|
10018
10384
|
/**
|
|
10019
10385
|
* Stores data in memory (HEAP)
|
|
10020
10386
|
*
|
|
@@ -10214,9 +10580,11 @@
|
|
|
10214
10580
|
packageName: '@promptbook/anthropic-claude',
|
|
10215
10581
|
className: 'AnthropicClaudeExecutionTools',
|
|
10216
10582
|
envVariables: ['ANTHROPIC_CLAUDE_API_KEY'],
|
|
10583
|
+
trustLevel: 'CLOSED',
|
|
10584
|
+
order: MODEL_ORDER.TOP_TIER,
|
|
10217
10585
|
getBoilerplateConfiguration() {
|
|
10218
10586
|
return {
|
|
10219
|
-
title: 'Anthropic Claude
|
|
10587
|
+
title: 'Anthropic Claude',
|
|
10220
10588
|
packageName: '@promptbook/anthropic-claude',
|
|
10221
10589
|
className: 'AnthropicClaudeExecutionTools',
|
|
10222
10590
|
options: {
|
|
@@ -10259,9 +10627,11 @@
|
|
|
10259
10627
|
packageName: '@promptbook/azure-openai',
|
|
10260
10628
|
className: 'AzureOpenAiExecutionTools',
|
|
10261
10629
|
envVariables: ['AZUREOPENAI_RESOURCE_NAME', 'AZUREOPENAI_DEPLOYMENT_NAME', 'AZUREOPENAI_API_KEY'],
|
|
10630
|
+
trustLevel: 'CLOSED_BUSINESS',
|
|
10631
|
+
order: MODEL_ORDER.NORMAL,
|
|
10262
10632
|
getBoilerplateConfiguration() {
|
|
10263
10633
|
return {
|
|
10264
|
-
title: 'Azure Open AI
|
|
10634
|
+
title: 'Azure Open AI',
|
|
10265
10635
|
packageName: '@promptbook/azure-openai',
|
|
10266
10636
|
className: 'AzureOpenAiExecutionTools',
|
|
10267
10637
|
options: {
|
|
@@ -10345,9 +10715,11 @@
|
|
|
10345
10715
|
packageName: '@promptbook/deepseek',
|
|
10346
10716
|
className: 'DeepseekExecutionTools',
|
|
10347
10717
|
envVariables: ['DEEPSEEK_GENERATIVE_AI_API_KEY'],
|
|
10718
|
+
trustLevel: 'UNTRUSTED',
|
|
10719
|
+
order: MODEL_ORDER.NORMAL,
|
|
10348
10720
|
getBoilerplateConfiguration() {
|
|
10349
10721
|
return {
|
|
10350
|
-
title: 'Deepseek
|
|
10722
|
+
title: 'Deepseek',
|
|
10351
10723
|
packageName: '@promptbook/deepseek',
|
|
10352
10724
|
className: 'DeepseekExecutionTools',
|
|
10353
10725
|
options: {
|
|
@@ -10394,9 +10766,11 @@
|
|
|
10394
10766
|
packageName: '@promptbook/google',
|
|
10395
10767
|
className: 'GoogleExecutionTools',
|
|
10396
10768
|
envVariables: ['GOOGLE_GENERATIVE_AI_API_KEY'],
|
|
10769
|
+
trustLevel: 'CLOSED',
|
|
10770
|
+
order: MODEL_ORDER.NORMAL,
|
|
10397
10771
|
getBoilerplateConfiguration() {
|
|
10398
10772
|
return {
|
|
10399
|
-
title: 'Google Gemini
|
|
10773
|
+
title: 'Google Gemini',
|
|
10400
10774
|
packageName: '@promptbook/google',
|
|
10401
10775
|
className: 'GoogleExecutionTools',
|
|
10402
10776
|
options: {
|
|
@@ -10443,9 +10817,11 @@
|
|
|
10443
10817
|
packageName: '@promptbook/openai',
|
|
10444
10818
|
className: 'OpenAiExecutionTools',
|
|
10445
10819
|
envVariables: ['OPENAI_API_KEY'],
|
|
10820
|
+
trustLevel: 'CLOSED',
|
|
10821
|
+
order: MODEL_ORDER.TOP_TIER,
|
|
10446
10822
|
getBoilerplateConfiguration() {
|
|
10447
10823
|
return {
|
|
10448
|
-
title: 'Open AI
|
|
10824
|
+
title: 'Open AI',
|
|
10449
10825
|
packageName: '@promptbook/openai',
|
|
10450
10826
|
className: 'OpenAiExecutionTools',
|
|
10451
10827
|
options: {
|
|
@@ -10483,9 +10859,11 @@
|
|
|
10483
10859
|
className: 'OpenAiAssistantExecutionTools',
|
|
10484
10860
|
envVariables: null,
|
|
10485
10861
|
// <- TODO: ['OPENAI_API_KEY', 'OPENAI_ASSISTANT_ID']
|
|
10862
|
+
trustLevel: 'CLOSED',
|
|
10863
|
+
order: MODEL_ORDER.NORMAL,
|
|
10486
10864
|
getBoilerplateConfiguration() {
|
|
10487
10865
|
return {
|
|
10488
|
-
title: 'Open AI Assistant
|
|
10866
|
+
title: 'Open AI Assistant',
|
|
10489
10867
|
packageName: '@promptbook/openai',
|
|
10490
10868
|
className: 'OpenAiAssistantExecutionTools',
|
|
10491
10869
|
options: {
|
|
@@ -10531,9 +10909,7 @@
|
|
|
10531
10909
|
return true;
|
|
10532
10910
|
}
|
|
10533
10911
|
catch (error) {
|
|
10534
|
-
|
|
10535
|
-
throw error;
|
|
10536
|
-
}
|
|
10912
|
+
assertsError(error);
|
|
10537
10913
|
return false;
|
|
10538
10914
|
}
|
|
10539
10915
|
}
|
|
@@ -10628,6 +11004,40 @@
|
|
|
10628
11004
|
* Note: [💞] Ignore a discrepancy between file name and entity name
|
|
10629
11005
|
*/
|
|
10630
11006
|
|
|
11007
|
+
/**
|
|
11008
|
+
* Convert identification to Promptbook token
|
|
11009
|
+
*
|
|
11010
|
+
* @param identification
|
|
11011
|
+
*
|
|
11012
|
+
* @public exported from `@promptbook/core`
|
|
11013
|
+
*/
|
|
11014
|
+
function identificationToPromptbookToken(identification) {
|
|
11015
|
+
const { appId, userId, userToken } = identification;
|
|
11016
|
+
const promptbookToken = `${appId}-${userId}-${userToken}`;
|
|
11017
|
+
return promptbookToken;
|
|
11018
|
+
}
|
|
11019
|
+
|
|
11020
|
+
/**
|
|
11021
|
+
* Convert Promptbook token to identification
|
|
11022
|
+
*
|
|
11023
|
+
* @param promptbookToken
|
|
11024
|
+
*
|
|
11025
|
+
* @public exported from `@promptbook/core`
|
|
11026
|
+
*/
|
|
11027
|
+
function promptbookTokenToIdentification(promptbookToken) {
|
|
11028
|
+
const [appId, userId, userToken] = promptbookToken.split('-');
|
|
11029
|
+
if (!appId || !userId || !userToken) {
|
|
11030
|
+
throw new Error(`Invalid promptbook token: ${promptbookToken}`);
|
|
11031
|
+
}
|
|
11032
|
+
const identification = {
|
|
11033
|
+
appId,
|
|
11034
|
+
userId,
|
|
11035
|
+
userToken,
|
|
11036
|
+
isAnonymous: false,
|
|
11037
|
+
};
|
|
11038
|
+
return identification;
|
|
11039
|
+
}
|
|
11040
|
+
|
|
10631
11041
|
/**
|
|
10632
11042
|
* Metadata of the scraper
|
|
10633
11043
|
*
|
|
@@ -10965,11 +11375,11 @@
|
|
|
10965
11375
|
exports.GenericFormfactorDefinition = GenericFormfactorDefinition;
|
|
10966
11376
|
exports.ImageGeneratorFormfactorDefinition = ImageGeneratorFormfactorDefinition;
|
|
10967
11377
|
exports.KnowledgeScrapeError = KnowledgeScrapeError;
|
|
10968
|
-
exports.LOGO_DARK_SRC = LOGO_DARK_SRC;
|
|
10969
|
-
exports.LOGO_LIGHT_SRC = LOGO_LIGHT_SRC;
|
|
10970
11378
|
exports.LimitReachedError = LimitReachedError;
|
|
10971
11379
|
exports.MANDATORY_CSV_SETTINGS = MANDATORY_CSV_SETTINGS;
|
|
10972
11380
|
exports.MAX_FILENAME_LENGTH = MAX_FILENAME_LENGTH;
|
|
11381
|
+
exports.MODEL_ORDER = MODEL_ORDER;
|
|
11382
|
+
exports.MODEL_TRUST_LEVEL = MODEL_TRUST_LEVEL;
|
|
10973
11383
|
exports.MODEL_VARIANTS = MODEL_VARIANTS;
|
|
10974
11384
|
exports.MatcherFormfactorDefinition = MatcherFormfactorDefinition;
|
|
10975
11385
|
exports.MemoryStorage = MemoryStorage;
|
|
@@ -10989,6 +11399,7 @@
|
|
|
10989
11399
|
exports.PipelineUrlError = PipelineUrlError;
|
|
10990
11400
|
exports.PrefixStorage = PrefixStorage;
|
|
10991
11401
|
exports.PromptbookFetchError = PromptbookFetchError;
|
|
11402
|
+
exports.REMOTE_SERVER_URLS = REMOTE_SERVER_URLS;
|
|
10992
11403
|
exports.RESERVED_PARAMETER_NAMES = RESERVED_PARAMETER_NAMES;
|
|
10993
11404
|
exports.SET_IS_VERBOSE = SET_IS_VERBOSE;
|
|
10994
11405
|
exports.SectionTypes = SectionTypes;
|
|
@@ -10999,6 +11410,7 @@
|
|
|
10999
11410
|
exports.UNCERTAIN_USAGE = UNCERTAIN_USAGE;
|
|
11000
11411
|
exports.UNCERTAIN_ZERO_VALUE = UNCERTAIN_ZERO_VALUE;
|
|
11001
11412
|
exports.UnexpectedError = UnexpectedError;
|
|
11413
|
+
exports.WrappedError = WrappedError;
|
|
11002
11414
|
exports.ZERO_USAGE = ZERO_USAGE;
|
|
11003
11415
|
exports.ZERO_VALUE = ZERO_VALUE;
|
|
11004
11416
|
exports._AnthropicClaudeMetadataRegistration = _AnthropicClaudeMetadataRegistration;
|
|
@@ -11029,7 +11441,9 @@
|
|
|
11029
11441
|
exports.embeddingVectorToString = embeddingVectorToString;
|
|
11030
11442
|
exports.executionReportJsonToString = executionReportJsonToString;
|
|
11031
11443
|
exports.extractParameterNamesFromTask = extractParameterNamesFromTask;
|
|
11444
|
+
exports.filterModels = filterModels;
|
|
11032
11445
|
exports.getPipelineInterface = getPipelineInterface;
|
|
11446
|
+
exports.identificationToPromptbookToken = identificationToPromptbookToken;
|
|
11033
11447
|
exports.isPassingExpectations = isPassingExpectations;
|
|
11034
11448
|
exports.isPipelineImplementingInterface = isPipelineImplementingInterface;
|
|
11035
11449
|
exports.isPipelineInterfacesEqual = isPipelineInterfacesEqual;
|
|
@@ -11046,6 +11460,7 @@
|
|
|
11046
11460
|
exports.prepareTasks = prepareTasks;
|
|
11047
11461
|
exports.prettifyPipelineString = prettifyPipelineString;
|
|
11048
11462
|
exports.promptbookFetch = promptbookFetch;
|
|
11463
|
+
exports.promptbookTokenToIdentification = promptbookTokenToIdentification;
|
|
11049
11464
|
exports.unpreparePipeline = unpreparePipeline;
|
|
11050
11465
|
exports.usageToHuman = usageToHuman;
|
|
11051
11466
|
exports.usageToWorktime = usageToWorktime;
|