@promptbook/markdown-utils 0.101.0-2 β†’ 0.101.0-20

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (77)
  1. package/README.md +87 -1
  2. package/esm/index.es.js +48 -41
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/src/_packages/components.index.d.ts +20 -0
  5. package/esm/typings/src/_packages/core.index.d.ts +14 -0
  6. package/esm/typings/src/_packages/types.index.d.ts +14 -0
  7. package/esm/typings/src/book-2.0/agent-source/AgentBasicInformation.d.ts +41 -3
  8. package/esm/typings/src/book-2.0/agent-source/AgentModelRequirements.d.ts +3 -0
  9. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirements.d.ts +4 -22
  10. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.d.ts +1 -26
  11. package/esm/typings/src/book-2.0/agent-source/parseParameters.d.ts +13 -0
  12. package/esm/typings/src/book-2.0/commitments/ACTION/ACTION.d.ts +8 -2
  13. package/esm/typings/src/book-2.0/commitments/DELETE/DELETE.d.ts +59 -0
  14. package/esm/typings/src/book-2.0/commitments/FORMAT/FORMAT.d.ts +8 -2
  15. package/esm/typings/src/book-2.0/commitments/GOAL/GOAL.d.ts +45 -0
  16. package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/KNOWLEDGE.d.ts +1 -1
  17. package/esm/typings/src/book-2.0/commitments/MEMORY/MEMORY.d.ts +46 -0
  18. package/esm/typings/src/book-2.0/commitments/MESSAGE/MESSAGE.d.ts +47 -0
  19. package/esm/typings/src/book-2.0/commitments/META/META.d.ts +62 -0
  20. package/esm/typings/src/book-2.0/commitments/MODEL/MODEL.d.ts +31 -4
  21. package/esm/typings/src/book-2.0/commitments/NOTE/NOTE.d.ts +20 -2
  22. package/esm/typings/src/book-2.0/commitments/PERSONA/PERSONA.d.ts +8 -2
  23. package/esm/typings/src/book-2.0/commitments/SCENARIO/SCENARIO.d.ts +46 -0
  24. package/esm/typings/src/book-2.0/commitments/STYLE/STYLE.d.ts +8 -2
  25. package/esm/typings/src/book-2.0/commitments/index.d.ts +7 -3
  26. package/esm/typings/src/book-2.0/utils/generatePlaceholderAgentProfileImageUrl.d.ts +3 -0
  27. package/esm/typings/src/book-components/AvatarProfile/AvatarChip/AvatarChip.d.ts +2 -2
  28. package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/MockedChat.d.ts +63 -0
  29. package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/index.d.ts +3 -0
  30. package/esm/typings/src/book-components/BookEditor/BookEditor.d.ts +15 -0
  31. package/esm/typings/src/book-components/BookEditor/BookEditorInner.d.ts +4 -0
  32. package/esm/typings/src/book-components/Chat/Chat/ChatProps.d.ts +26 -0
  33. package/esm/typings/src/book-components/Chat/LlmChat/LlmChatProps.d.ts +13 -0
  34. package/esm/typings/src/book-components/Chat/hooks/index.d.ts +2 -0
  35. package/esm/typings/src/book-components/Chat/hooks/useChatAutoScroll.d.ts +41 -0
  36. package/esm/typings/src/book-components/Chat/hooks/useSendMessageToLlmChat.d.ts +44 -0
  37. package/esm/typings/src/book-components/Chat/utils/parseMessageButtons.d.ts +22 -0
  38. package/esm/typings/src/book-components/icons/PauseIcon.d.ts +8 -0
  39. package/esm/typings/src/book-components/icons/PlayIcon.d.ts +8 -0
  40. package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +1 -2
  41. package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +1 -3
  42. package/esm/typings/src/formats/csv/CsvFormatError.d.ts +1 -1
  43. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsConfigurationFromEnv.d.ts +1 -1
  44. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForTestingAndScriptsAndPlayground.d.ts +1 -1
  45. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForWizardOrCli.d.ts +1 -2
  46. package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts +8 -2
  47. package/esm/typings/src/llm-providers/_common/utils/removeUnsupportedModelRequirements.d.ts +25 -0
  48. package/esm/typings/src/llm-providers/_multiple/MultipleLlmExecutionTools.d.ts +5 -13
  49. package/esm/typings/src/llm-providers/_multiple/getSingleLlmExecutionTools.d.ts +11 -0
  50. package/esm/typings/src/llm-providers/_multiple/joinLlmExecutionTools.d.ts +2 -1
  51. package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.d.ts +54 -0
  52. package/esm/typings/src/llm-providers/agent/createAgentLlmExecutionTools.d.ts +29 -0
  53. package/esm/typings/src/llm-providers/agent/playground/playground.d.ts +8 -0
  54. package/esm/typings/src/llm-providers/agent/register-configuration.d.ts +11 -0
  55. package/esm/typings/src/llm-providers/agent/register-constructor.d.ts +13 -0
  56. package/esm/typings/src/llm-providers/mocked/$fakeTextToExpectations.d.ts +1 -0
  57. package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +2 -5
  58. package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +2 -6
  59. package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionTools.d.ts +15 -8
  60. package/esm/typings/src/personas/preparePersona.d.ts +1 -0
  61. package/esm/typings/src/remote-server/openapi-types.d.ts +31 -31
  62. package/esm/typings/src/scrapers/markdown/MarkdownScraper.d.ts +1 -2
  63. package/esm/typings/src/types/ModelRequirements.d.ts +2 -4
  64. package/esm/typings/src/utils/color/utils/colorSaturation.d.ts +1 -1
  65. package/esm/typings/src/utils/editable/edit-pipeline-string/addPipelineCommand.d.ts +1 -1
  66. package/esm/typings/src/utils/markdown/humanizeAiText.d.ts +1 -0
  67. package/esm/typings/src/version.d.ts +1 -1
  68. package/package.json +1 -1
  69. package/umd/index.umd.js +48 -41
  70. package/umd/index.umd.js.map +1 -1
  71. package/esm/typings/src/book-2.0/utils/extractAgentMetadata.d.ts +0 -17
  72. package/esm/typings/src/book-2.0/utils/extractProfileImageFromSystemMessage.d.ts +0 -12
  73. package/esm/typings/src/llm-providers/mocked/test/joker.test.d.ts +0 -4
  74. package/esm/typings/src/llm-providers/mocked/test/mocked-chat.test.d.ts +0 -5
  75. package/esm/typings/src/llm-providers/mocked/test/mocked-completion.test.d.ts +0 -4
  76. package/esm/typings/src/scripting/_test/postprocessing.test.d.ts +0 -1
  77. /package/esm/typings/src/{cli/test/ptbk.test.d.ts β†’ llm-providers/_common/utils/removeUnsupportedModelRequirements.test.d.ts} +0 -0
@@ -14,4 +14,4 @@ export {};
14
14
  /**
15
15
  * TODO: [🧠] What is the better solution - `- xxx`, - `- xxx` or preserve (see also next TODO)
16
16
  * TODO: When existing commands 1) as 2) number 3) list, add 4) new command as next number
17
- */
17
+ */
@@ -9,5 +9,6 @@ import { string_markdown } from '../../types/typeAliases';
9
9
  */
10
10
  export declare function humanizeAiText(aiText: string_markdown): string_markdown;
11
11
  /**
12
+ * TODO: [🧠] Maybe this should be exported from `@promptbook/utils` not `@promptbook/markdown-utils`
12
13
  * TODO: [πŸ…ΎοΈ] !!! Use this across the project where AI text is involved
13
14
  */
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
15
15
  export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
16
16
  /**
17
17
  * Represents the version string of the Promptbook engine.
18
- * It follows semantic versioning (e.g., `0.101.0-1`).
18
+ * It follows semantic versioning (e.g., `0.101.0-19`).
19
19
  *
20
20
  * @generated
21
21
  */
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@promptbook/markdown-utils",
3
- "version": "0.101.0-2",
3
+ "version": "0.101.0-20",
4
4
  "description": "Promptbook: Run AI apps in plain human language across multiple models and platforms",
5
5
  "private": false,
6
6
  "sideEffects": false,
package/umd/index.umd.js CHANGED
@@ -24,7 +24,7 @@
24
24
  * @generated
25
25
  * @see https://github.com/webgptorg/promptbook
26
26
  */
27
- const PROMPTBOOK_ENGINE_VERSION = '0.101.0-2';
27
+ const PROMPTBOOK_ENGINE_VERSION = '0.101.0-20';
28
28
  /**
29
29
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
30
30
  * Note: [πŸ’ž] Ignore a discrepancy between file name and entity name
@@ -2626,6 +2626,25 @@
2626
2626
  * TODO: [πŸ‘·β€β™‚οΈ] @@@ Manual about construction of llmTools
2627
2627
  */
2628
2628
 
2629
+ /**
2630
+ * Takes an item or an array of items and returns an array of items
2631
+ *
2632
+ * 1) Any item except array and undefined returns array with that one item (also null)
2633
+ * 2) Undefined returns empty array
2634
+ * 3) Array returns itself
2635
+ *
2636
+ * @private internal utility
2637
+ */
2638
+ function arrayableToArray(input) {
2639
+ if (input === undefined) {
2640
+ return [];
2641
+ }
2642
+ if (input instanceof Array) {
2643
+ return input;
2644
+ }
2645
+ return [input];
2646
+ }
2647
+
2629
2648
  /**
2630
2649
  * Predefined profiles for LLM providers to maintain consistency across the application
2631
2650
  * These profiles represent each provider as a virtual persona in chat interfaces
@@ -2706,12 +2725,10 @@
2706
2725
  /**
2707
2726
  * Gets array of execution tools in order of priority
2708
2727
  */
2709
- constructor(...llmExecutionTools) {
2728
+ constructor(title, ...llmExecutionTools) {
2729
+ this.title = title;
2710
2730
  this.llmExecutionTools = llmExecutionTools;
2711
2731
  }
2712
- get title() {
2713
- return 'Multiple LLM Providers';
2714
- }
2715
2732
  get description() {
2716
2733
  const innerModelsTitlesAndDescriptions = this.llmExecutionTools
2717
2734
  .map(({ title, description }, index) => {
@@ -2797,7 +2814,7 @@
2797
2814
  return await llmExecutionTools.callEmbeddingModel(prompt);
2798
2815
  // <- case [πŸ€–]:
2799
2816
  default:
2800
- throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}"`);
2817
+ throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}" in ${llmExecutionTools.title}`);
2801
2818
  }
2802
2819
  }
2803
2820
  catch (error) {
@@ -2818,7 +2835,7 @@
2818
2835
  // 2) AnthropicClaude throw PipelineExecutionError: Parameter `{knowledge}` is not defined
2819
2836
  // 3) ...
2820
2837
  spaceTrim__default["default"]((block) => `
2821
- All execution tools failed:
2838
+ All execution tools of ${this.title} failed:
2822
2839
 
2823
2840
  ${block(errors
2824
2841
  .map(({ error, llmExecutionTools }, i) => `${i + 1}) **${llmExecutionTools.title}** thrown **${error.name || 'Error'}:** ${error.message}`)
@@ -2827,11 +2844,11 @@
2827
2844
  `));
2828
2845
  }
2829
2846
  else if (this.llmExecutionTools.length === 0) {
2830
- throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\``);
2847
+ throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\` into ${this.title}`);
2831
2848
  }
2832
2849
  else {
2833
2850
  throw new PipelineExecutionError(spaceTrim__default["default"]((block) => `
2834
- You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}"
2851
+ You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}" into ${this.title}
2835
2852
 
2836
2853
  Available \`LlmExecutionTools\`:
2837
2854
  ${block(this.description)}
@@ -2861,7 +2878,7 @@
2861
2878
  *
2862
2879
  * @public exported from `@promptbook/core`
2863
2880
  */
2864
- function joinLlmExecutionTools(...llmExecutionTools) {
2881
+ function joinLlmExecutionTools(title, ...llmExecutionTools) {
2865
2882
  if (llmExecutionTools.length === 0) {
2866
2883
  const warningMessage = spaceTrim__default["default"](`
2867
2884
  You have not provided any \`LlmExecutionTools\`
@@ -2893,30 +2910,27 @@
2893
2910
  };
2894
2911
  */
2895
2912
  }
2896
- return new MultipleLlmExecutionTools(...llmExecutionTools);
2913
+ return new MultipleLlmExecutionTools(title || 'Multiple LLM Providers joined by `joinLlmExecutionTools`', ...llmExecutionTools);
2897
2914
  }
2898
2915
  /**
2899
2916
  * TODO: [πŸ‘·β€β™‚οΈ] @@@ Manual about construction of llmTools
2900
2917
  */
2901
2918
 
2902
2919
  /**
2903
- * Takes an item or an array of items and returns an array of items
2904
- *
2905
- * 1) Any item except array and undefined returns array with that one item (also null)
2906
- * 2) Undefined returns empty array
2907
- * 3) Array returns itself
2920
+ * Just returns the given `LlmExecutionTools` or joins multiple into one
2908
2921
  *
2909
- * @private internal utility
2922
+ * @public exported from `@promptbook/core`
2910
2923
  */
2911
- function arrayableToArray(input) {
2912
- if (input === undefined) {
2913
- return [];
2914
- }
2915
- if (input instanceof Array) {
2916
- return input;
2917
- }
2918
- return [input];
2924
+ function getSingleLlmExecutionTools(oneOrMoreLlmExecutionTools) {
2925
+ const _llms = arrayableToArray(oneOrMoreLlmExecutionTools);
2926
+ const llmTools = _llms.length === 1
2927
+ ? _llms[0]
2928
+ : joinLlmExecutionTools('Multiple LLM Providers joined by `getSingleLlmExecutionTools`', ..._llms);
2929
+ return llmTools;
2919
2930
  }
2931
+ /**
2932
+ * TODO: [πŸ‘·β€β™‚οΈ] @@@ Manual about construction of llmTools
2933
+ */
2920
2934
 
2921
2935
  /**
2922
2936
  * Prepares the persona for the pipeline
@@ -2935,8 +2949,7 @@
2935
2949
  pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book'),
2936
2950
  tools,
2937
2951
  });
2938
- const _llms = arrayableToArray(tools.llm);
2939
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
2952
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
2940
2953
  const availableModels = (await llmTools.listModels())
2941
2954
  .filter(({ modelVariant }) => modelVariant === 'CHAT')
2942
2955
  .map(({ modelName, modelDescription }) => ({
@@ -2980,6 +2993,7 @@
2980
2993
  };
2981
2994
  }
2982
2995
  /**
2996
+ * TODO: [😩] DRY `preparePersona` and `selectBestModelFromAvailable`
2983
2997
  * TODO: [πŸ”ƒ][main] If the persona was prepared with different version or different set of models, prepare it once again
2984
2998
  * TODO: [🏒] Check validity of `modelName` in pipeline
2985
2999
  * TODO: [🏒] Check validity of `systemMessage` in pipeline
@@ -4098,9 +4112,7 @@
4098
4112
  if (tools === undefined || tools.llm === undefined) {
4099
4113
  throw new MissingToolsError('LLM tools are required for preparing the pipeline');
4100
4114
  }
4101
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
4102
- const _llms = arrayableToArray(tools.llm);
4103
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
4115
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
4104
4116
  const llmToolsWithUsage = countUsage(llmTools);
4105
4117
  // <- TODO: [🌯]
4106
4118
  /*
@@ -5141,9 +5153,7 @@
5141
5153
  $scriptPipelineExecutionErrors: [],
5142
5154
  $failedResults: [], // Track all failed attempts
5143
5155
  };
5144
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
5145
- const _llms = arrayableToArray(tools.llm);
5146
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
5156
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
5147
5157
  attempts: for (let attemptIndex = -jokerParameterNames.length; attemptIndex < maxAttempts; attemptIndex++) {
5148
5158
  const isJokerAttempt = attemptIndex < 0;
5149
5159
  const jokerParameterName = jokerParameterNames[jokerParameterNames.length + attemptIndex];
@@ -5663,9 +5673,7 @@
5663
5673
  return ''; // <- Note: Np knowledge present, return empty string
5664
5674
  }
5665
5675
  try {
5666
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
5667
- const _llms = arrayableToArray(tools.llm);
5668
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
5676
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
5669
5677
  const taskEmbeddingPrompt = {
5670
5678
  title: 'Knowledge Search',
5671
5679
  modelRequirements: {
@@ -6266,13 +6274,13 @@
6266
6274
  // Calculate and update tldr based on pipeline progress
6267
6275
  const cv = newOngoingResult;
6268
6276
  // Calculate progress based on parameters resolved vs total parameters
6269
- const totalParameters = pipeline.parameters.filter(p => !p.isInput).length;
6277
+ const totalParameters = pipeline.parameters.filter((p) => !p.isInput).length;
6270
6278
  let resolvedParameters = 0;
6271
6279
  let currentTaskTitle = '';
6272
6280
  // Get the resolved parameters from output parameters
6273
6281
  if (cv === null || cv === void 0 ? void 0 : cv.outputParameters) {
6274
6282
  // Count how many output parameters have non-empty values
6275
- resolvedParameters = Object.values(cv.outputParameters).filter(value => value !== undefined && value !== null && String(value).trim() !== '').length;
6283
+ resolvedParameters = Object.values(cv.outputParameters).filter((value) => value !== undefined && value !== null && String(value).trim() !== '').length;
6276
6284
  }
6277
6285
  // Try to determine current task from execution report
6278
6286
  if (((_a = cv === null || cv === void 0 ? void 0 : cv.executionReport) === null || _a === void 0 ? void 0 : _a.promptExecutions) && cv.executionReport.promptExecutions.length > 0) {
@@ -6382,9 +6390,7 @@
6382
6390
  throw new MissingToolsError('LLM tools are required for scraping external files');
6383
6391
  // <- Note: This scraper is used in all other scrapers, so saying "external files" not "markdown files"
6384
6392
  }
6385
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
6386
- const _llms = arrayableToArray(llm);
6387
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
6393
+ const llmTools = getSingleLlmExecutionTools(llm);
6388
6394
  // TODO: [🌼] In future use `ptbk make` and made getPipelineCollection
6389
6395
  const collection = createCollectionFromJson(...PipelineCollection);
6390
6396
  const prepareKnowledgeFromMarkdownExecutor = createPipelineExecutor({
@@ -6886,6 +6892,7 @@
6886
6892
  return cleanedText;
6887
6893
  }
6888
6894
  /**
6895
+ * TODO: [🧠] Maybe this should be exported from `@promptbook/utils` not `@promptbook/markdown-utils`
6889
6896
  * TODO: [πŸ…ΎοΈ] !!! Use this across the project where AI text is involved
6890
6897
  */
6891
6898