@promptbook/markdown-utils 0.101.0-17 → 0.101.0-19

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31)
  1. package/esm/index.es.js +44 -39
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/components.index.d.ts +6 -0
  4. package/esm/typings/src/_packages/core.index.d.ts +2 -0
  5. package/esm/typings/src/_packages/types.index.d.ts +4 -0
  6. package/esm/typings/src/book-2.0/utils/generatePlaceholderAgentProfileImageUrl.d.ts +3 -0
  7. package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/MockedChat.d.ts +7 -1
  8. package/esm/typings/src/book-components/Chat/LlmChat/LlmChatProps.d.ts +13 -0
  9. package/esm/typings/src/book-components/Chat/hooks/index.d.ts +2 -0
  10. package/esm/typings/src/book-components/Chat/hooks/useChatAutoScroll.d.ts +41 -0
  11. package/esm/typings/src/book-components/Chat/hooks/useSendMessageToLlmChat.d.ts +44 -0
  12. package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +1 -2
  13. package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +1 -3
  14. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForWizardOrCli.d.ts +1 -2
  15. package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts +8 -2
  16. package/esm/typings/src/llm-providers/_multiple/MultipleLlmExecutionTools.d.ts +5 -13
  17. package/esm/typings/src/llm-providers/_multiple/getSingleLlmExecutionTools.d.ts +11 -0
  18. package/esm/typings/src/llm-providers/_multiple/joinLlmExecutionTools.d.ts +2 -1
  19. package/esm/typings/src/llm-providers/mocked/$fakeTextToExpectations.d.ts +1 -0
  20. package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +2 -5
  21. package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +2 -6
  22. package/esm/typings/src/scrapers/markdown/MarkdownScraper.d.ts +1 -2
  23. package/esm/typings/src/version.d.ts +1 -1
  24. package/package.json +1 -1
  25. package/umd/index.umd.js +44 -39
  26. package/umd/index.umd.js.map +1 -1
  27. package/esm/typings/src/llm-providers/mocked/test/joker.test.d.ts +0 -4
  28. package/esm/typings/src/llm-providers/mocked/test/mocked-chat.test.d.ts +0 -5
  29. package/esm/typings/src/llm-providers/mocked/test/mocked-completion.test.d.ts +0 -4
  30. package/esm/typings/src/scripting/_test/postprocessing.test.d.ts +0 -1
  31. /package/esm/typings/src/{cli/test/ptbk.test.d.ts → llm-providers/_common/utils/removeUnsupportedModelRequirements.test.d.ts} +0 -0
package/esm/index.es.js CHANGED
@@ -23,7 +23,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.101.0-17';
+const PROMPTBOOK_ENGINE_VERSION = '0.101.0-19';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -2625,6 +2625,25 @@ function countUsage(llmTools) {
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
  */
 
+/**
+ * Takes an item or an array of items and returns an array of items
+ *
+ * 1) Any item except array and undefined returns array with that one item (also null)
+ * 2) Undefined returns empty array
+ * 3) Array returns itself
+ *
+ * @private internal utility
+ */
+function arrayableToArray(input) {
+    if (input === undefined) {
+        return [];
+    }
+    if (input instanceof Array) {
+        return input;
+    }
+    return [input];
+}
+
 /**
  * Predefined profiles for LLM providers to maintain consistency across the application
  * These profiles represent each provider as a virtual persona in chat interfaces
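
The helper above is marked @private, so it is not part of the public API; this release only moves it earlier in the bundle so that the new `getSingleLlmExecutionTools` (introduced in a later hunk) can call it. A minimal sketch of the three documented cases, using hypothetical placeholder values that are not from the package:

    arrayableToArray(undefined);        // -> []                (case 2: undefined becomes an empty array)
    arrayableToArray(null);             // -> [null]            (case 1: any non-array item, including null, is wrapped)
    arrayableToArray(openAiTools);      // -> [openAiTools]     (case 1: hypothetical single LlmExecutionTools instance)
    arrayableToArray([toolsA, toolsB]); // -> [toolsA, toolsB]  (case 3: an array is returned as-is)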
@@ -2705,12 +2724,10 @@ class MultipleLlmExecutionTools {
     /**
      * Gets array of execution tools in order of priority
      */
-    constructor(...llmExecutionTools) {
+    constructor(title, ...llmExecutionTools) {
+        this.title = title;
         this.llmExecutionTools = llmExecutionTools;
     }
-    get title() {
-        return 'Multiple LLM Providers';
-    }
     get description() {
         const innerModelsTitlesAndDescriptions = this.llmExecutionTools
             .map(({ title, description }, index) => {
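
The hard-coded `title` getter (previously always 'Multiple LLM Providers') is removed; the title is now passed through the constructor and stored on the instance. A minimal sketch of the new signature, with hypothetical provider instances (in practice the class is normally created via `joinLlmExecutionTools` rather than constructed directly):

    const multipleTools = new MultipleLlmExecutionTools(
        'My LLM Providers', // <- title is now the first constructor argument
        openAiTools,        // hypothetical LlmExecutionTools instance
        anthropicTools,     // hypothetical LlmExecutionTools instance
    );
    console.log(multipleTools.title); // -> 'My LLM Providers'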
@@ -2796,7 +2813,7 @@ class MultipleLlmExecutionTools {
                     return await llmExecutionTools.callEmbeddingModel(prompt);
                 // <- case [🤖]:
                 default:
-                    throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}"`);
+                    throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}" in ${llmExecutionTools.title}`);
             }
         }
         catch (error) {
@@ -2817,7 +2834,7 @@ class MultipleLlmExecutionTools {
             // 2) AnthropicClaude throw PipelineExecutionError: Parameter `{knowledge}` is not defined
             // 3) ...
             spaceTrim((block) => `
-                All execution tools failed:
+                All execution tools of ${this.title} failed:
 
                 ${block(errors
                     .map(({ error, llmExecutionTools }, i) => `${i + 1}) **${llmExecutionTools.title}** thrown **${error.name || 'Error'}:** ${error.message}`)
@@ -2826,11 +2843,11 @@ class MultipleLlmExecutionTools {
             `));
         }
         else if (this.llmExecutionTools.length === 0) {
-            throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\``);
+            throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\` into ${this.title}`);
         }
         else {
             throw new PipelineExecutionError(spaceTrim((block) => `
-                You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}"
+                You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}" into ${this.title}
 
                 Available \`LlmExecutionTools\`:
                 ${block(this.description)}
@@ -2860,7 +2877,7 @@ class MultipleLlmExecutionTools {
  *
  * @public exported from `@promptbook/core`
  */
-function joinLlmExecutionTools(...llmExecutionTools) {
+function joinLlmExecutionTools(title, ...llmExecutionTools) {
     if (llmExecutionTools.length === 0) {
         const warningMessage = spaceTrim(`
             You have not provided any \`LlmExecutionTools\`
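
Because `joinLlmExecutionTools` is exported from `@promptbook/core`, this is a signature change for callers: a descriptive title now comes first, followed by the tools to join. A minimal sketch with hypothetical provider instances:

    const joined = joinLlmExecutionTools(
        'OpenAI with Anthropic fallback', // <- new leading title argument
        openAiTools,                      // hypothetical LlmExecutionTools instance
        anthropicTools,                   // hypothetical LlmExecutionTools instance
    );

As the next hunk shows, a falsy title falls back to 'Multiple LLM Providers joined by `joinLlmExecutionTools`'.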
@@ -2892,30 +2909,27 @@ function joinLlmExecutionTools(...llmExecutionTools) {
         };
         */
     }
-    return new MultipleLlmExecutionTools(...llmExecutionTools);
+    return new MultipleLlmExecutionTools(title || 'Multiple LLM Providers joined by `joinLlmExecutionTools`', ...llmExecutionTools);
 }
 /**
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
  */
 
 /**
- * Takes an item or an array of items and returns an array of items
- *
- * 1) Any item except array and undefined returns array with that one item (also null)
- * 2) Undefined returns empty array
- * 3) Array returns itself
+ * Just returns the given `LlmExecutionTools` or joins multiple into one
  *
- * @private internal utility
+ * @public exported from `@promptbook/core`
  */
-function arrayableToArray(input) {
-    if (input === undefined) {
-        return [];
-    }
-    if (input instanceof Array) {
-        return input;
-    }
-    return [input];
+function getSingleLlmExecutionTools(oneOrMoreLlmExecutionTools) {
+    const _llms = arrayableToArray(oneOrMoreLlmExecutionTools);
+    const llmTools = _llms.length === 1
+        ? _llms[0]
+        : joinLlmExecutionTools('Multiple LLM Providers joined by `getSingleLlmExecutionTools`', ..._llms);
+    return llmTools;
 }
+/**
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
+ */
 
 /**
  * Prepares the persona for the pipeline
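
The new public `getSingleLlmExecutionTools` wraps the `arrayableToArray` + `joinLlmExecutionTools` pattern that was previously copy-pasted at each call site (the remaining hunks all switch to it). A minimal usage sketch with hypothetical provider instances:

    // A single LlmExecutionTools instance is passed through unchanged
    const llmTools = getSingleLlmExecutionTools(openAiTools);

    // An array of tools is joined into one MultipleLlmExecutionTools with a default title
    const joinedTools = getSingleLlmExecutionTools([openAiTools, anthropicTools]);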
@@ -2934,8 +2948,7 @@ async function preparePersona(personaDescription, tools, options) {
         pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book'),
         tools,
     });
-    const _llms = arrayableToArray(tools.llm);
-    const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+    const llmTools = getSingleLlmExecutionTools(tools.llm);
     const availableModels = (await llmTools.listModels())
         .filter(({ modelVariant }) => modelVariant === 'CHAT')
         .map(({ modelName, modelDescription }) => ({
@@ -4098,9 +4111,7 @@ async function preparePipeline(pipeline, tools, options) {
     if (tools === undefined || tools.llm === undefined) {
        throw new MissingToolsError('LLM tools are required for preparing the pipeline');
    }
-    // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
-    const _llms = arrayableToArray(tools.llm);
-    const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+    const llmTools = getSingleLlmExecutionTools(tools.llm);
     const llmToolsWithUsage = countUsage(llmTools);
     // <- TODO: [🌯]
     /*
@@ -5141,9 +5152,7 @@ async function executeAttempts(options) {
         $scriptPipelineExecutionErrors: [],
         $failedResults: [], // Track all failed attempts
     };
-    // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
-    const _llms = arrayableToArray(tools.llm);
-    const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+    const llmTools = getSingleLlmExecutionTools(tools.llm);
     attempts: for (let attemptIndex = -jokerParameterNames.length; attemptIndex < maxAttempts; attemptIndex++) {
         const isJokerAttempt = attemptIndex < 0;
         const jokerParameterName = jokerParameterNames[jokerParameterNames.length + attemptIndex];
@@ -5663,9 +5672,7 @@ async function getKnowledgeForTask(options) {
         return ''; // <- Note: Np knowledge present, return empty string
     }
     try {
-        // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
-        const _llms = arrayableToArray(tools.llm);
-        const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+        const llmTools = getSingleLlmExecutionTools(tools.llm);
         const taskEmbeddingPrompt = {
             title: 'Knowledge Search',
             modelRequirements: {
@@ -6382,9 +6389,7 @@ class MarkdownScraper {
             throw new MissingToolsError('LLM tools are required for scraping external files');
             // <- Note: This scraper is used in all other scrapers, so saying "external files" not "markdown files"
         }
-        // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
-        const _llms = arrayableToArray(llm);
-        const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+        const llmTools = getSingleLlmExecutionTools(llm);
         // TODO: [🌼] In future use `ptbk make` and made getPipelineCollection
         const collection = createCollectionFromJson(...PipelineCollection);
         const prepareKnowledgeFromMarkdownExecutor = createPipelineExecutor({