@promptbook/cli 0.101.0-18 → 0.101.0-19

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (25)
  1. package/esm/index.es.js +41 -31
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/core.index.d.ts +2 -0
  4. package/esm/typings/src/book-2.0/utils/generatePlaceholderAgentProfileImageUrl.d.ts +3 -0
  5. package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/MockedChat.d.ts +7 -1
  6. package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +1 -2
  7. package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +1 -3
  8. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForWizardOrCli.d.ts +1 -2
  9. package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts +8 -2
  10. package/esm/typings/src/llm-providers/_multiple/MultipleLlmExecutionTools.d.ts +5 -13
  11. package/esm/typings/src/llm-providers/_multiple/getSingleLlmExecutionTools.d.ts +11 -0
  12. package/esm/typings/src/llm-providers/_multiple/joinLlmExecutionTools.d.ts +2 -1
  13. package/esm/typings/src/llm-providers/mocked/$fakeTextToExpectations.d.ts +1 -0
  14. package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +2 -5
  15. package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +2 -6
  16. package/esm/typings/src/scrapers/markdown/MarkdownScraper.d.ts +1 -2
  17. package/esm/typings/src/version.d.ts +1 -1
  18. package/package.json +1 -1
  19. package/umd/index.umd.js +41 -31
  20. package/umd/index.umd.js.map +1 -1
  21. package/esm/typings/src/llm-providers/mocked/test/joker.test.d.ts +0 -4
  22. package/esm/typings/src/llm-providers/mocked/test/mocked-chat.test.d.ts +0 -5
  23. package/esm/typings/src/llm-providers/mocked/test/mocked-completion.test.d.ts +0 -4
  24. package/esm/typings/src/scripting/_test/postprocessing.test.d.ts +0 -1
  25. /package/esm/typings/src/{cli/test/ptbk.test.d.ts → llm-providers/_common/utils/removeUnsupportedModelRequirements.test.d.ts} +0 -0
package/esm/index.es.js CHANGED
@@ -45,7 +45,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
45
45
  * @generated
46
46
  * @see https://github.com/webgptorg/promptbook
47
47
  */
48
- const PROMPTBOOK_ENGINE_VERSION = '0.101.0-18';
48
+ const PROMPTBOOK_ENGINE_VERSION = '0.101.0-19';
49
49
  /**
50
50
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
51
51
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -3646,12 +3646,10 @@ class MultipleLlmExecutionTools {
3646
3646
  /**
3647
3647
  * Gets array of execution tools in order of priority
3648
3648
  */
3649
- constructor(...llmExecutionTools) {
3649
+ constructor(title, ...llmExecutionTools) {
3650
+ this.title = title;
3650
3651
  this.llmExecutionTools = llmExecutionTools;
3651
3652
  }
3652
- get title() {
3653
- return 'Multiple LLM Providers';
3654
- }
3655
3653
  get description() {
3656
3654
  const innerModelsTitlesAndDescriptions = this.llmExecutionTools
3657
3655
  .map(({ title, description }, index) => {
@@ -3737,7 +3735,7 @@ class MultipleLlmExecutionTools {
3737
3735
  return await llmExecutionTools.callEmbeddingModel(prompt);
3738
3736
  // <- case [🤖]:
3739
3737
  default:
3740
- throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}"`);
3738
+ throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}" in ${llmExecutionTools.title}`);
3741
3739
  }
3742
3740
  }
3743
3741
  catch (error) {
@@ -3758,7 +3756,7 @@ class MultipleLlmExecutionTools {
3758
3756
  // 2) AnthropicClaude throw PipelineExecutionError: Parameter `{knowledge}` is not defined
3759
3757
  // 3) ...
3760
3758
  spaceTrim((block) => `
3761
- All execution tools failed:
3759
+ All execution tools of ${this.title} failed:
3762
3760
 
3763
3761
  ${block(errors
3764
3762
  .map(({ error, llmExecutionTools }, i) => `${i + 1}) **${llmExecutionTools.title}** thrown **${error.name || 'Error'}:** ${error.message}`)
@@ -3767,11 +3765,11 @@ class MultipleLlmExecutionTools {
3767
3765
  `));
3768
3766
  }
3769
3767
  else if (this.llmExecutionTools.length === 0) {
3770
- throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\``);
3768
+ throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\` into ${this.title}`);
3771
3769
  }
3772
3770
  else {
3773
3771
  throw new PipelineExecutionError(spaceTrim((block) => `
3774
- You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}"
3772
+ You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}" into ${this.title}
3775
3773
 
3776
3774
  Available \`LlmExecutionTools\`:
3777
3775
  ${block(this.description)}
@@ -3801,7 +3799,7 @@ class MultipleLlmExecutionTools {
3801
3799
  *
3802
3800
  * @public exported from `@promptbook/core`
3803
3801
  */
3804
- function joinLlmExecutionTools(...llmExecutionTools) {
3802
+ function joinLlmExecutionTools(title, ...llmExecutionTools) {
3805
3803
  if (llmExecutionTools.length === 0) {
3806
3804
  const warningMessage = spaceTrim(`
3807
3805
  You have not provided any \`LlmExecutionTools\`
@@ -3833,7 +3831,7 @@ function joinLlmExecutionTools(...llmExecutionTools) {
3833
3831
  };
3834
3832
  */
3835
3833
  }
3836
- return new MultipleLlmExecutionTools(...llmExecutionTools);
3834
+ return new MultipleLlmExecutionTools(title || 'Multiple LLM Providers joined by `joinLlmExecutionTools`', ...llmExecutionTools);
3837
3835
  }
3838
3836
  /**
3839
3837
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
@@ -3853,7 +3851,7 @@ function joinLlmExecutionTools(...llmExecutionTools) {
3853
3851
  * @public exported from `@promptbook/core`
3854
3852
  */
3855
3853
  function createLlmToolsFromConfiguration(configuration, options = {}) {
3856
- const { isVerbose = DEFAULT_IS_VERBOSE, userId } = options;
3854
+ const { title = 'LLM Tools from Configuration', isVerbose = DEFAULT_IS_VERBOSE, userId } = options;
3857
3855
  const llmTools = configuration.map((llmConfiguration) => {
3858
3856
  const registeredItem = $llmToolsRegister
3859
3857
  .list()
@@ -3885,7 +3883,7 @@ function createLlmToolsFromConfiguration(configuration, options = {}) {
3885
3883
  ...llmConfiguration.options,
3886
3884
  });
3887
3885
  });
3888
- return joinLlmExecutionTools(...llmTools);
3886
+ return joinLlmExecutionTools(title, ...llmTools);
3889
3887
  }
3890
3888
  /**
3891
3889
  * TODO: [🎌] Together with `createLlmToolsFromConfiguration` + 'EXECUTION_TOOLS_CLASSES' gets to `@promptbook/core` ALL model providers, make this more efficient
@@ -4002,7 +4000,9 @@ async function $provideLlmToolsForWizardOrCli(options) {
4002
4000
  });
4003
4001
  }
4004
4002
  else if (strategy === 'BRING_YOUR_OWN_KEYS') {
4005
- llmExecutionTools = await $provideLlmToolsFromEnv();
4003
+ llmExecutionTools = await $provideLlmToolsFromEnv({
4004
+ title: 'LLM Tools for wizard or CLI with BYOK strategy',
4005
+ });
4006
4006
  }
4007
4007
  else {
4008
4008
  throw new UnexpectedError(`\`$provideLlmToolsForWizardOrCli\` wrong strategy "${strategy}"`);
@@ -6508,6 +6508,22 @@ function arrayableToArray(input) {
6508
6508
  return [input];
6509
6509
  }
6510
6510
 
6511
+ /**
6512
+ * Just returns the given `LlmExecutionTools` or joins multiple into one
6513
+ *
6514
+ * @public exported from `@promptbook/core`
6515
+ */
6516
+ function getSingleLlmExecutionTools(oneOrMoreLlmExecutionTools) {
6517
+ const _llms = arrayableToArray(oneOrMoreLlmExecutionTools);
6518
+ const llmTools = _llms.length === 1
6519
+ ? _llms[0]
6520
+ : joinLlmExecutionTools('Multiple LLM Providers joined by `getSingleLlmExecutionTools`', ..._llms);
6521
+ return llmTools;
6522
+ }
6523
+ /**
6524
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
6525
+ */
6526
+
6511
6527
  /**
6512
6528
  * Replaces parameters in template with values from parameters object
6513
6529
  *
@@ -6598,9 +6614,7 @@ async function executeAttempts(options) {
6598
6614
  $scriptPipelineExecutionErrors: [],
6599
6615
  $failedResults: [], // Track all failed attempts
6600
6616
  };
6601
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
6602
- const _llms = arrayableToArray(tools.llm);
6603
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
6617
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
6604
6618
  attempts: for (let attemptIndex = -jokerParameterNames.length; attemptIndex < maxAttempts; attemptIndex++) {
6605
6619
  const isJokerAttempt = attemptIndex < 0;
6606
6620
  const jokerParameterName = jokerParameterNames[jokerParameterNames.length + attemptIndex];
@@ -7120,9 +7134,7 @@ async function getKnowledgeForTask(options) {
7120
7134
  return ''; // <- Note: Np knowledge present, return empty string
7121
7135
  }
7122
7136
  try {
7123
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
7124
- const _llms = arrayableToArray(tools.llm);
7125
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
7137
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
7126
7138
  const taskEmbeddingPrompt = {
7127
7139
  title: 'Knowledge Search',
7128
7140
  modelRequirements: {
@@ -7829,8 +7841,7 @@ async function preparePersona(personaDescription, tools, options) {
7829
7841
  pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book'),
7830
7842
  tools,
7831
7843
  });
7832
- const _llms = arrayableToArray(tools.llm);
7833
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
7844
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
7834
7845
  const availableModels = (await llmTools.listModels())
7835
7846
  .filter(({ modelVariant }) => modelVariant === 'CHAT')
7836
7847
  .map(({ modelName, modelDescription }) => ({
@@ -8327,9 +8338,7 @@ async function preparePipeline(pipeline, tools, options) {
8327
8338
  if (tools === undefined || tools.llm === undefined) {
8328
8339
  throw new MissingToolsError('LLM tools are required for preparing the pipeline');
8329
8340
  }
8330
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
8331
- const _llms = arrayableToArray(tools.llm);
8332
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
8341
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
8333
8342
  const llmToolsWithUsage = countUsage(llmTools);
8334
8343
  // <- TODO: [🌯]
8335
8344
  /*
@@ -12523,7 +12532,7 @@ async function $provideExecutionToolsForNode(options) {
12523
12532
  throw new EnvironmentMismatchError('Function `$getExecutionToolsForNode` works only in Node.js environment');
12524
12533
  }
12525
12534
  const fs = $provideFilesystemForNode();
12526
- const llm = await $provideLlmToolsFromEnv(options);
12535
+ const llm = await $provideLlmToolsFromEnv({ title: 'LLM Tools for Node.js', ...options });
12527
12536
  const executables = await $provideExecutablesForNode();
12528
12537
  const tools = {
12529
12538
  llm,
@@ -14791,8 +14800,11 @@ function startRemoteServer(options) {
14791
14800
  if (isAnonymous === true) {
14792
14801
  // Note: Anonymous mode
14793
14802
  // TODO: Maybe check that configuration is not empty
14794
- const { llmToolsConfiguration } = identification;
14795
- llm = createLlmToolsFromConfiguration(llmToolsConfiguration, { isVerbose });
14803
+ const { userId, llmToolsConfiguration } = identification;
14804
+ llm = createLlmToolsFromConfiguration(llmToolsConfiguration, {
14805
+ title: `LLM Tools for anonymous user "${userId}" on server`,
14806
+ isVerbose,
14807
+ });
14796
14808
  }
14797
14809
  else if (isAnonymous === false && createLlmExecutionTools !== null) {
14798
14810
  // Note: Application mode
@@ -19517,9 +19529,7 @@ class MarkdownScraper {
19517
19529
  throw new MissingToolsError('LLM tools are required for scraping external files');
19518
19530
  // <- Note: This scraper is used in all other scrapers, so saying "external files" not "markdown files"
19519
19531
  }
19520
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
19521
- const _llms = arrayableToArray(llm);
19522
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
19532
+ const llmTools = getSingleLlmExecutionTools(llm);
19523
19533
  // TODO: [🌼] In future use `ptbk make` and made getPipelineCollection
19524
19534
  const collection = createCollectionFromJson(...PipelineCollection);
19525
19535
  const prepareKnowledgeFromMarkdownExecutor = createPipelineExecutor({