@promptbook/legacy-documents 0.101.0-18 → 0.101.0-19

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (25)
  1. package/esm/index.es.js +44 -39
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/core.index.d.ts +2 -0
  4. package/esm/typings/src/book-2.0/utils/generatePlaceholderAgentProfileImageUrl.d.ts +3 -0
  5. package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/MockedChat.d.ts +7 -1
  6. package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +1 -2
  7. package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +1 -3
  8. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForWizardOrCli.d.ts +1 -2
  9. package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts +8 -2
  10. package/esm/typings/src/llm-providers/_multiple/MultipleLlmExecutionTools.d.ts +5 -13
  11. package/esm/typings/src/llm-providers/_multiple/getSingleLlmExecutionTools.d.ts +11 -0
  12. package/esm/typings/src/llm-providers/_multiple/joinLlmExecutionTools.d.ts +2 -1
  13. package/esm/typings/src/llm-providers/mocked/$fakeTextToExpectations.d.ts +1 -0
  14. package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +2 -5
  15. package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +2 -6
  16. package/esm/typings/src/scrapers/markdown/MarkdownScraper.d.ts +1 -2
  17. package/esm/typings/src/version.d.ts +1 -1
  18. package/package.json +2 -2
  19. package/umd/index.umd.js +44 -39
  20. package/umd/index.umd.js.map +1 -1
  21. package/esm/typings/src/llm-providers/mocked/test/joker.test.d.ts +0 -4
  22. package/esm/typings/src/llm-providers/mocked/test/mocked-chat.test.d.ts +0 -5
  23. package/esm/typings/src/llm-providers/mocked/test/mocked-completion.test.d.ts +0 -4
  24. package/esm/typings/src/scripting/_test/postprocessing.test.d.ts +0 -1
  25. /package/esm/typings/src/{cli/test/ptbk.test.d.ts → llm-providers/_common/utils/removeUnsupportedModelRequirements.test.d.ts} +0 -0
package/esm/index.es.js CHANGED
@@ -26,7 +26,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
- const PROMPTBOOK_ENGINE_VERSION = '0.101.0-18';
+ const PROMPTBOOK_ENGINE_VERSION = '0.101.0-19';
  /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -3121,6 +3121,25 @@ function countUsage(llmTools) {
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
  */

+ /**
+ * Takes an item or an array of items and returns an array of items
+ *
+ * 1) Any item except array and undefined returns array with that one item (also null)
+ * 2) Undefined returns empty array
+ * 3) Array returns itself
+ *
+ * @private internal utility
+ */
+ function arrayableToArray(input) {
+ if (input === undefined) {
+ return [];
+ }
+ if (input instanceof Array) {
+ return input;
+ }
+ return [input];
+ }
+
  /**
  * Predefined profiles for LLM providers to maintain consistency across the application
  * These profiles represent each provider as a virtual persona in chat interfaces
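The `arrayableToArray` helper added above (and removed from its old location further down in this diff) only normalizes its argument. A minimal sketch of the three documented cases, for illustration only (`myLlmTools`, `a`, `b` are placeholder values, not exports of this package):

```ts
// Illustrative only - mirrors the JSDoc of the private helper above
arrayableToArray(undefined);   // []            (case 2: undefined becomes an empty array)
arrayableToArray(null);        // [null]        (case 1: any non-array, non-undefined value is wrapped)
arrayableToArray(myLlmTools);  // [myLlmTools]  (case 1: a single instance is wrapped)
arrayableToArray([a, b]);      // [a, b]        (case 3: an array is returned unchanged)
```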
@@ -3201,12 +3220,10 @@ class MultipleLlmExecutionTools {
  /**
  * Gets array of execution tools in order of priority
  */
- constructor(...llmExecutionTools) {
+ constructor(title, ...llmExecutionTools) {
+ this.title = title;
  this.llmExecutionTools = llmExecutionTools;
  }
- get title() {
- return 'Multiple LLM Providers';
- }
  get description() {
  const innerModelsTitlesAndDescriptions = this.llmExecutionTools
  .map(({ title, description }, index) => {
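With this change `MultipleLlmExecutionTools` no longer hard-codes its title as 'Multiple LLM Providers'; the caller supplies it as the first constructor argument, and it is reused in the error messages shown in the hunks below. A hedged sketch of construction after the change (`openAiTools` and `anthropicTools` are placeholders, not exports of this package):

```ts
// Before (0.101.0-18): the title was a fixed getter
// const llm = new MultipleLlmExecutionTools(openAiTools, anthropicTools);

// After (0.101.0-19): the title comes first and shows up in aggregated error messages
const llm = new MultipleLlmExecutionTools('Primary + fallback providers', openAiTools, anthropicTools);
```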
@@ -3292,7 +3309,7 @@ class MultipleLlmExecutionTools {
  return await llmExecutionTools.callEmbeddingModel(prompt);
  // <- case [🤖]:
  default:
- throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}"`);
+ throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}" in ${llmExecutionTools.title}`);
  }
  }
  catch (error) {
@@ -3313,7 +3330,7 @@ class MultipleLlmExecutionTools {
  // 2) AnthropicClaude throw PipelineExecutionError: Parameter `{knowledge}` is not defined
  // 3) ...
  spaceTrim$1((block) => `
- All execution tools failed:
+ All execution tools of ${this.title} failed:

  ${block(errors
  .map(({ error, llmExecutionTools }, i) => `${i + 1}) **${llmExecutionTools.title}** thrown **${error.name || 'Error'}:** ${error.message}`)
@@ -3322,11 +3339,11 @@ class MultipleLlmExecutionTools {
  `));
  }
  else if (this.llmExecutionTools.length === 0) {
- throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\``);
+ throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\` into ${this.title}`);
  }
  else {
  throw new PipelineExecutionError(spaceTrim$1((block) => `
- You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}"
+ You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}" into ${this.title}

  Available \`LlmExecutionTools\`:
  ${block(this.description)}
@@ -3356,7 +3373,7 @@ class MultipleLlmExecutionTools {
  *
  * @public exported from `@promptbook/core`
  */
- function joinLlmExecutionTools(...llmExecutionTools) {
+ function joinLlmExecutionTools(title, ...llmExecutionTools) {
  if (llmExecutionTools.length === 0) {
  const warningMessage = spaceTrim$1(`
  You have not provided any \`LlmExecutionTools\`
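This is a breaking change to the public `joinLlmExecutionTools` API: the first parameter is now the title string rather than the first tools instance. A minimal usage sketch under that assumption (`toolsA` and `toolsB` are placeholders):

```ts
// 0.101.0-18: joinLlmExecutionTools(toolsA, toolsB)
// 0.101.0-19: the joined tools get an explicit title, surfaced in their error messages
const joined = joinLlmExecutionTools('Wizard LLM tools', toolsA, toolsB);
```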
@@ -3388,30 +3405,27 @@ function joinLlmExecutionTools(...llmExecutionTools) {
  };
  */
  }
- return new MultipleLlmExecutionTools(...llmExecutionTools);
+ return new MultipleLlmExecutionTools(title || 'Multiple LLM Providers joined by `joinLlmExecutionTools`', ...llmExecutionTools);
  }
  /**
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
  */

  /**
- * Takes an item or an array of items and returns an array of items
- *
- * 1) Any item except array and undefined returns array with that one item (also null)
- * 2) Undefined returns empty array
- * 3) Array returns itself
+ * Just returns the given `LlmExecutionTools` or joins multiple into one
  *
- * @private internal utility
+ * @public exported from `@promptbook/core`
  */
- function arrayableToArray(input) {
- if (input === undefined) {
- return [];
- }
- if (input instanceof Array) {
- return input;
- }
- return [input];
+ function getSingleLlmExecutionTools(oneOrMoreLlmExecutionTools) {
+ const _llms = arrayableToArray(oneOrMoreLlmExecutionTools);
+ const llmTools = _llms.length === 1
+ ? _llms[0]
+ : joinLlmExecutionTools('Multiple LLM Providers joined by `getSingleLlmExecutionTools`', ..._llms);
+ return llmTools;
  }
+ /**
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
+ */

  /**
  * Prepares the persona for the pipeline
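The newly exported `getSingleLlmExecutionTools` wraps the `arrayableToArray` + `joinLlmExecutionTools` pattern that the call sites below previously inlined. A minimal sketch of how a consumer might use it, assuming `tools.llm` holds either one `LlmExecutionTools` instance or an array of them:

```ts
import { getSingleLlmExecutionTools } from '@promptbook/core';

// One instance is returned as-is; an array is joined into a single
// MultipleLlmExecutionTools titled "Multiple LLM Providers joined by `getSingleLlmExecutionTools`".
const llmTools = getSingleLlmExecutionTools(tools.llm);
const models = await llmTools.listModels(); // <- assumes an async context, as in `preparePersona` below
```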
@@ -3430,8 +3444,7 @@ async function preparePersona(personaDescription, tools, options) {
  pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book'),
  tools,
  });
- const _llms = arrayableToArray(tools.llm);
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
  const availableModels = (await llmTools.listModels())
  .filter(({ modelVariant }) => modelVariant === 'CHAT')
  .map(({ modelName, modelDescription }) => ({
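The same mechanical replacement repeats in `preparePipeline`, `executeAttempts`, `getKnowledgeForTask`, and `MarkdownScraper` below, resolving the repeated `TODO: [🚐] Make arrayable LLMs -> single LLM DRY`. At each call site the removed lines and the new one-liner are equivalent (sketch):

```ts
// Old inline pattern at every call site:
const _llms = arrayableToArray(tools.llm);
const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);

// New DRY helper: same result, plus a title that is threaded into error messages
const llmTools = getSingleLlmExecutionTools(tools.llm);
```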
@@ -4193,9 +4206,7 @@ async function preparePipeline(pipeline, tools, options) {
  if (tools === undefined || tools.llm === undefined) {
  throw new MissingToolsError('LLM tools are required for preparing the pipeline');
  }
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
- const _llms = arrayableToArray(tools.llm);
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
  const llmToolsWithUsage = countUsage(llmTools);
  // <- TODO: [🌯]
  /*
@@ -5338,9 +5349,7 @@ async function executeAttempts(options) {
  $scriptPipelineExecutionErrors: [],
  $failedResults: [], // Track all failed attempts
  };
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
- const _llms = arrayableToArray(tools.llm);
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
  attempts: for (let attemptIndex = -jokerParameterNames.length; attemptIndex < maxAttempts; attemptIndex++) {
  const isJokerAttempt = attemptIndex < 0;
  const jokerParameterName = jokerParameterNames[jokerParameterNames.length + attemptIndex];
@@ -5860,9 +5869,7 @@ async function getKnowledgeForTask(options) {
  return ''; // <- Note: Np knowledge present, return empty string
  }
  try {
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
- const _llms = arrayableToArray(tools.llm);
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
  const taskEmbeddingPrompt = {
  title: 'Knowledge Search',
  modelRequirements: {
@@ -6579,9 +6586,7 @@ class MarkdownScraper {
  throw new MissingToolsError('LLM tools are required for scraping external files');
  // <- Note: This scraper is used in all other scrapers, so saying "external files" not "markdown files"
  }
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
- const _llms = arrayableToArray(llm);
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+ const llmTools = getSingleLlmExecutionTools(llm);
  // TODO: [🌼] In future use `ptbk make` and made getPipelineCollection
  const collection = createCollectionFromJson(...PipelineCollection);
  const prepareKnowledgeFromMarkdownExecutor = createPipelineExecutor({