@promptbook/markdown-utils 0.101.0-9 β†’ 0.102.0-0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (89)
  1. package/esm/index.es.js +91 -117
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/components.index.d.ts +30 -0
  4. package/esm/typings/src/_packages/core.index.d.ts +12 -0
  5. package/esm/typings/src/_packages/types.index.d.ts +12 -0
  6. package/esm/typings/src/book-2.0/agent-source/AgentBasicInformation.d.ts +11 -4
  7. package/esm/typings/src/book-2.0/agent-source/AgentModelRequirements.d.ts +3 -0
  8. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirements.d.ts +4 -22
  9. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.d.ts +1 -26
  10. package/esm/typings/src/book-2.0/commitments/ACTION/ACTION.d.ts +0 -12
  11. package/esm/typings/src/book-2.0/commitments/DELETE/DELETE.d.ts +0 -24
  12. package/esm/typings/src/book-2.0/commitments/FORMAT/FORMAT.d.ts +0 -12
  13. package/esm/typings/src/book-2.0/commitments/GOAL/GOAL.d.ts +0 -12
  14. package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/KNOWLEDGE.d.ts +0 -6
  15. package/esm/typings/src/book-2.0/commitments/MEMORY/MEMORY.d.ts +0 -12
  16. package/esm/typings/src/book-2.0/commitments/MESSAGE/MESSAGE.d.ts +0 -12
  17. package/esm/typings/src/book-2.0/commitments/META/META.d.ts +0 -6
  18. package/esm/typings/src/book-2.0/commitments/META_IMAGE/META_IMAGE.d.ts +0 -6
  19. package/esm/typings/src/book-2.0/commitments/META_LINK/META_LINK.d.ts +0 -6
  20. package/esm/typings/src/book-2.0/commitments/MODEL/MODEL.d.ts +23 -14
  21. package/esm/typings/src/book-2.0/commitments/NOTE/NOTE.d.ts +2 -14
  22. package/esm/typings/src/book-2.0/commitments/PERSONA/PERSONA.d.ts +0 -12
  23. package/esm/typings/src/book-2.0/commitments/RULE/RULE.d.ts +0 -12
  24. package/esm/typings/src/book-2.0/commitments/SAMPLE/SAMPLE.d.ts +0 -12
  25. package/esm/typings/src/book-2.0/commitments/SCENARIO/SCENARIO.d.ts +0 -12
  26. package/esm/typings/src/book-2.0/commitments/STYLE/STYLE.d.ts +0 -12
  27. package/esm/typings/src/book-2.0/commitments/_base/createEmptyAgentModelRequirements.d.ts +1 -1
  28. package/esm/typings/src/book-2.0/commitments/index.d.ts +1 -1
  29. package/esm/typings/src/book-2.0/utils/generatePlaceholderAgentProfileImageUrl.d.ts +3 -0
  30. package/esm/typings/src/book-components/AvatarProfile/AvatarChip/AvatarChip.d.ts +5 -2
  31. package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/AvatarProfile.d.ts +3 -0
  32. package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/MockedChat.d.ts +18 -1
  33. package/esm/typings/src/book-components/BookEditor/BookEditor.d.ts +8 -0
  34. package/esm/typings/src/book-components/BookEditor/BookEditorInner.d.ts +2 -15
  35. package/esm/typings/src/book-components/Chat/Chat/Chat.d.ts +5 -1
  36. package/esm/typings/src/book-components/Chat/Chat/ChatProps.d.ts +9 -0
  37. package/esm/typings/src/book-components/Chat/LlmChat/LlmChatProps.d.ts +13 -0
  38. package/esm/typings/src/book-components/Chat/hooks/index.d.ts +2 -0
  39. package/esm/typings/src/book-components/Chat/hooks/useChatAutoScroll.d.ts +41 -0
  40. package/esm/typings/src/book-components/Chat/hooks/useSendMessageToLlmChat.d.ts +44 -0
  41. package/esm/typings/src/book-components/Chat/utils/parseMessageButtons.d.ts +22 -0
  42. package/esm/typings/src/book-components/Chat/utils/savePlugins.d.ts +55 -0
  43. package/esm/typings/src/book-components/icons/PauseIcon.d.ts +8 -0
  44. package/esm/typings/src/book-components/icons/PlayIcon.d.ts +8 -0
  45. package/esm/typings/src/execution/PromptResult.d.ts +2 -4
  46. package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +1 -2
  47. package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +1 -3
  48. package/esm/typings/src/formats/csv/CsvFormatError.d.ts +1 -1
  49. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsConfigurationFromEnv.d.ts +1 -1
  50. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForTestingAndScriptsAndPlayground.d.ts +1 -1
  51. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForWizardOrCli.d.ts +1 -2
  52. package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts +8 -2
  53. package/esm/typings/src/llm-providers/_common/utils/removeUnsupportedModelRequirements.d.ts +25 -0
  54. package/esm/typings/src/llm-providers/_multiple/MultipleLlmExecutionTools.d.ts +7 -18
  55. package/esm/typings/src/llm-providers/_multiple/getSingleLlmExecutionTools.d.ts +11 -0
  56. package/esm/typings/src/llm-providers/_multiple/joinLlmExecutionTools.d.ts +2 -1
  57. package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.d.ts +58 -0
  58. package/esm/typings/src/llm-providers/agent/createAgentLlmExecutionTools.d.ts +29 -0
  59. package/esm/typings/src/llm-providers/agent/playground/playground.d.ts +8 -0
  60. package/esm/typings/src/llm-providers/agent/register-configuration.d.ts +11 -0
  61. package/esm/typings/src/llm-providers/agent/register-constructor.d.ts +13 -0
  62. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +3 -8
  63. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +4 -5
  64. package/esm/typings/src/llm-providers/mocked/$fakeTextToExpectations.d.ts +1 -0
  65. package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +4 -10
  66. package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +4 -6
  67. package/esm/typings/src/llm-providers/ollama/OllamaExecutionTools.d.ts +3 -3
  68. package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionTools.d.ts +16 -8
  69. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +3 -8
  70. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +5 -14
  71. package/esm/typings/src/personas/preparePersona.d.ts +1 -0
  72. package/esm/typings/src/remote-server/openapi-types.d.ts +31 -31
  73. package/esm/typings/src/scrapers/markdown/MarkdownScraper.d.ts +1 -2
  74. package/esm/typings/src/types/ModelRequirements.d.ts +2 -4
  75. package/esm/typings/src/utils/color/utils/colorSaturation.d.ts +1 -1
  76. package/esm/typings/src/utils/editable/edit-pipeline-string/addPipelineCommand.d.ts +1 -1
  77. package/esm/typings/src/utils/markdown/humanizeAiText.d.ts +0 -1
  78. package/esm/typings/src/utils/markdown/promptbookifyAiText.d.ts +2 -2
  79. package/esm/typings/src/version.d.ts +1 -1
  80. package/package.json +1 -1
  81. package/umd/index.umd.js +91 -117
  82. package/umd/index.umd.js.map +1 -1
  83. package/esm/typings/src/book-2.0/utils/extractAgentMetadata.d.ts +0 -17
  84. package/esm/typings/src/book-2.0/utils/extractProfileImageFromSystemMessage.d.ts +0 -12
  85. package/esm/typings/src/book-components/Chat/examples/ChatMarkdownDemo.d.ts +0 -16
  86. package/esm/typings/src/expectations/drafts/isDomainNameFree.d.ts +0 -10
  87. package/esm/typings/src/expectations/drafts/isGithubNameFree.d.ts +0 -10
  88. package/esm/typings/src/llm-providers/_common/profiles/llmProviderProfiles.d.ts +0 -81
  89. /package/esm/typings/src/llm-providers/_common/{profiles/test/llmProviderProfiles.test.d.ts β†’ utils/removeUnsupportedModelRequirements.test.d.ts} +0 -0
@@ -2,11 +2,11 @@ import { string_markdown } from '../../types/typeAliases';
2
2
  /**
3
3
  * Function `promptbookifyAiText` will slightly modify the text so we know it was processed by Promptbook
4
4
  *
5
+ * Note: [πŸ”‚] This function is idempotent.
6
+ *
5
7
  * @public exported from `@promptbook/markdown-utils`
6
8
  */
7
9
  export declare function promptbookifyAiText(text: string_markdown): string_markdown;
8
10
  /**
9
- * TODO: !!!!! Make the function idempotent and add "Note: [πŸ”‚] This function is idempotent."
10
- * TODO: [πŸ…ΎοΈ]!!! Use this across the project where AI text is involved
11
11
  * TODO: [🧠][✌️] Make some Promptbook-native token system
12
12
  */
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
15
15
  export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
16
16
  /**
17
17
  * Represents the version string of the Promptbook engine.
18
- * It follows semantic versioning (e.g., `0.101.0-8`).
18
+ * It follows semantic versioning (e.g., `0.101.0`).
19
19
  *
20
20
  * @generated
21
21
  */
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@promptbook/markdown-utils",
3
- "version": "0.101.0-9",
3
+ "version": "0.102.0-0",
4
4
  "description": "Promptbook: Run AI apps in plain human language across multiple models and platforms",
5
5
  "private": false,
6
6
  "sideEffects": false,
package/umd/index.umd.js CHANGED
@@ -24,7 +24,7 @@
24
24
  * @generated
25
25
  * @see https://github.com/webgptorg/promptbook
26
26
  */
27
- const PROMPTBOOK_ENGINE_VERSION = '0.101.0-9';
27
+ const PROMPTBOOK_ENGINE_VERSION = '0.102.0-0';
28
28
  /**
29
29
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
30
30
  * Note: [πŸ’ž] Ignore a discrepancy between file name and entity name
@@ -2627,75 +2627,32 @@
2627
2627
  */
2628
2628
 
2629
2629
  /**
2630
- * Predefined profiles for LLM providers to maintain consistency across the application
2631
- * These profiles represent each provider as a virtual persona in chat interfaces
2630
+ * Takes an item or an array of items and returns an array of items
2631
+ *
2632
+ * 1) Any item except array and undefined returns array with that one item (also null)
2633
+ * 2) Undefined returns empty array
2634
+ * 3) Array returns itself
2632
2635
  *
2633
- * @private !!!!
2636
+ * @private internal utility
2634
2637
  */
2635
- const LLM_PROVIDER_PROFILES = {
2636
- OPENAI: {
2637
- name: 'OPENAI',
2638
- fullname: 'OpenAI GPT',
2639
- color: '#10a37f', // OpenAI's signature green
2640
- // Note: avatarSrc could be added when we have provider logos available
2641
- },
2642
- ANTHROPIC: {
2643
- name: 'ANTHROPIC',
2644
- fullname: 'Anthropic Claude',
2645
- color: '#d97706', // Anthropic's orange/amber color
2646
- },
2647
- AZURE_OPENAI: {
2648
- name: 'AZURE_OPENAI',
2649
- fullname: 'Azure OpenAI',
2650
- color: '#0078d4', // Microsoft Azure blue
2651
- },
2652
- GOOGLE: {
2653
- name: 'GOOGLE',
2654
- fullname: 'Google Gemini',
2655
- color: '#4285f4', // Google blue
2656
- },
2657
- DEEPSEEK: {
2658
- name: 'DEEPSEEK',
2659
- fullname: 'DeepSeek',
2660
- color: '#7c3aed', // Purple color for DeepSeek
2661
- },
2662
- OLLAMA: {
2663
- name: 'OLLAMA',
2664
- fullname: 'Ollama',
2665
- color: '#059669', // Emerald green for local models
2666
- },
2667
- REMOTE: {
2668
- name: 'REMOTE',
2669
- fullname: 'Remote Server',
2670
- color: '#6b7280', // Gray for remote/proxy connections
2671
- },
2672
- MOCKED_ECHO: {
2673
- name: 'MOCKED_ECHO',
2674
- fullname: 'Echo (Test)',
2675
- color: '#8b5cf6', // Purple for test/mock tools
2676
- },
2677
- MOCKED_FAKE: {
2678
- name: 'MOCKED_FAKE',
2679
- fullname: 'Fake LLM (Test)',
2680
- color: '#ec4899', // Pink for fake/test tools
2681
- },
2682
- VERCEL: {
2683
- name: 'VERCEL',
2684
- fullname: 'Vercel AI',
2685
- color: '#000000', // Vercel's black
2686
- },
2687
- MULTIPLE: {
2688
- name: 'MULTIPLE',
2689
- fullname: 'Multiple Providers',
2690
- color: '#6366f1', // Indigo for combined/multiple providers
2691
- },
2692
- };
2638
+ function arrayableToArray(input) {
2639
+ if (input === undefined) {
2640
+ return [];
2641
+ }
2642
+ if (input instanceof Array) {
2643
+ return input;
2644
+ }
2645
+ return [input];
2646
+ }
2647
+
2693
2648
  /**
2694
- * TODO: Refactor this - each profile must be alongside the provider definition
2695
- * TODO: [πŸ•›] Unite `AgentBasicInformation`, `ChatParticipant`, `LlmExecutionTools` + `LlmToolsMetadata`
2696
- * Note: [πŸ’ž] Ignore a discrepancy between file name and entity name
2649
+ * Profile for Multiple providers aggregation
2697
2650
  */
2698
-
2651
+ const MULTIPLE_PROVIDER_PROFILE = {
2652
+ name: 'MULTIPLE',
2653
+ fullname: 'Multiple Providers',
2654
+ color: '#6366f1',
2655
+ };
2699
2656
  /**
2700
2657
  * Multiple LLM Execution Tools is a proxy server that uses multiple execution tools internally and exposes the executor interface externally.
2701
2658
  *
@@ -2706,12 +2663,10 @@
2706
2663
  /**
2707
2664
  * Gets array of execution tools in order of priority
2708
2665
  */
2709
- constructor(...llmExecutionTools) {
2666
+ constructor(title, ...llmExecutionTools) {
2667
+ this.title = title;
2710
2668
  this.llmExecutionTools = llmExecutionTools;
2711
2669
  }
2712
- get title() {
2713
- return 'Multiple LLM Providers';
2714
- }
2715
2670
  get description() {
2716
2671
  const innerModelsTitlesAndDescriptions = this.llmExecutionTools
2717
2672
  .map(({ title, description }, index) => {
@@ -2733,7 +2688,7 @@
2733
2688
  `);
2734
2689
  }
2735
2690
  get profile() {
2736
- return LLM_PROVIDER_PROFILES.MULTIPLE;
2691
+ return MULTIPLE_PROVIDER_PROFILE;
2737
2692
  }
2738
2693
  /**
2739
2694
  * Check the configuration of all execution tools
@@ -2797,7 +2752,7 @@
2797
2752
  return await llmExecutionTools.callEmbeddingModel(prompt);
2798
2753
  // <- case [πŸ€–]:
2799
2754
  default:
2800
- throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}"`);
2755
+ throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}" in ${llmExecutionTools.title}`);
2801
2756
  }
2802
2757
  }
2803
2758
  catch (error) {
@@ -2818,7 +2773,7 @@
2818
2773
  // 2) AnthropicClaude throw PipelineExecutionError: Parameter `{knowledge}` is not defined
2819
2774
  // 3) ...
2820
2775
  spaceTrim__default["default"]((block) => `
2821
- All execution tools failed:
2776
+ All execution tools of ${this.title} failed:
2822
2777
 
2823
2778
  ${block(errors
2824
2779
  .map(({ error, llmExecutionTools }, i) => `${i + 1}) **${llmExecutionTools.title}** thrown **${error.name || 'Error'}:** ${error.message}`)
@@ -2827,11 +2782,11 @@
2827
2782
  `));
2828
2783
  }
2829
2784
  else if (this.llmExecutionTools.length === 0) {
2830
- throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\``);
2785
+ throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\` into ${this.title}`);
2831
2786
  }
2832
2787
  else {
2833
2788
  throw new PipelineExecutionError(spaceTrim__default["default"]((block) => `
2834
- You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}"
2789
+ You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}" into ${this.title}
2835
2790
 
2836
2791
  Available \`LlmExecutionTools\`:
2837
2792
  ${block(this.description)}
@@ -2861,7 +2816,7 @@
2861
2816
  *
2862
2817
  * @public exported from `@promptbook/core`
2863
2818
  */
2864
- function joinLlmExecutionTools(...llmExecutionTools) {
2819
+ function joinLlmExecutionTools(title, ...llmExecutionTools) {
2865
2820
  if (llmExecutionTools.length === 0) {
2866
2821
  const warningMessage = spaceTrim__default["default"](`
2867
2822
  You have not provided any \`LlmExecutionTools\`
@@ -2893,30 +2848,27 @@
2893
2848
  };
2894
2849
  */
2895
2850
  }
2896
- return new MultipleLlmExecutionTools(...llmExecutionTools);
2851
+ return new MultipleLlmExecutionTools(title || 'Multiple LLM Providers joined by `joinLlmExecutionTools`', ...llmExecutionTools);
2897
2852
  }
2898
2853
  /**
2899
2854
  * TODO: [πŸ‘·β€β™‚οΈ] @@@ Manual about construction of llmTools
2900
2855
  */
2901
2856
 
2902
2857
  /**
2903
- * Takes an item or an array of items and returns an array of items
2904
- *
2905
- * 1) Any item except array and undefined returns array with that one item (also null)
2906
- * 2) Undefined returns empty array
2907
- * 3) Array returns itself
2858
+ * Just returns the given `LlmExecutionTools` or joins multiple into one
2908
2859
  *
2909
- * @private internal utility
2860
+ * @public exported from `@promptbook/core`
2910
2861
  */
2911
- function arrayableToArray(input) {
2912
- if (input === undefined) {
2913
- return [];
2914
- }
2915
- if (input instanceof Array) {
2916
- return input;
2917
- }
2918
- return [input];
2862
+ function getSingleLlmExecutionTools(oneOrMoreLlmExecutionTools) {
2863
+ const _llms = arrayableToArray(oneOrMoreLlmExecutionTools);
2864
+ const llmTools = _llms.length === 1
2865
+ ? _llms[0]
2866
+ : joinLlmExecutionTools('Multiple LLM Providers joined by `getSingleLlmExecutionTools`', ..._llms);
2867
+ return llmTools;
2919
2868
  }
2869
+ /**
2870
+ * TODO: [πŸ‘·β€β™‚οΈ] @@@ Manual about construction of llmTools
2871
+ */
2920
2872
 
2921
2873
  /**
2922
2874
  * Prepares the persona for the pipeline
@@ -2935,8 +2887,7 @@
2935
2887
  pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book'),
2936
2888
  tools,
2937
2889
  });
2938
- const _llms = arrayableToArray(tools.llm);
2939
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
2890
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
2940
2891
  const availableModels = (await llmTools.listModels())
2941
2892
  .filter(({ modelVariant }) => modelVariant === 'CHAT')
2942
2893
  .map(({ modelName, modelDescription }) => ({
@@ -2980,6 +2931,7 @@
2980
2931
  };
2981
2932
  }
2982
2933
  /**
2934
+ * TODO: [😩] DRY `preparePersona` and `selectBestModelFromAvailable`
2983
2935
  * TODO: [πŸ”ƒ][main] If the persona was prepared with different version or different set of models, prepare it once again
2984
2936
  * TODO: [🏒] Check validity of `modelName` in pipeline
2985
2937
  * TODO: [🏒] Check validity of `systemMessage` in pipeline
@@ -4098,9 +4050,7 @@
4098
4050
  if (tools === undefined || tools.llm === undefined) {
4099
4051
  throw new MissingToolsError('LLM tools are required for preparing the pipeline');
4100
4052
  }
4101
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
4102
- const _llms = arrayableToArray(tools.llm);
4103
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
4053
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
4104
4054
  const llmToolsWithUsage = countUsage(llmTools);
4105
4055
  // <- TODO: [🌯]
4106
4056
  /*
@@ -5141,9 +5091,7 @@
5141
5091
  $scriptPipelineExecutionErrors: [],
5142
5092
  $failedResults: [], // Track all failed attempts
5143
5093
  };
5144
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
5145
- const _llms = arrayableToArray(tools.llm);
5146
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
5094
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
5147
5095
  attempts: for (let attemptIndex = -jokerParameterNames.length; attemptIndex < maxAttempts; attemptIndex++) {
5148
5096
  const isJokerAttempt = attemptIndex < 0;
5149
5097
  const jokerParameterName = jokerParameterNames[jokerParameterNames.length + attemptIndex];
@@ -5663,9 +5611,7 @@
5663
5611
  return ''; // <- Note: Np knowledge present, return empty string
5664
5612
  }
5665
5613
  try {
5666
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
5667
- const _llms = arrayableToArray(tools.llm);
5668
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
5614
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
5669
5615
  const taskEmbeddingPrompt = {
5670
5616
  title: 'Knowledge Search',
5671
5617
  modelRequirements: {
@@ -6266,13 +6212,13 @@
6266
6212
  // Calculate and update tldr based on pipeline progress
6267
6213
  const cv = newOngoingResult;
6268
6214
  // Calculate progress based on parameters resolved vs total parameters
6269
- const totalParameters = pipeline.parameters.filter(p => !p.isInput).length;
6215
+ const totalParameters = pipeline.parameters.filter((p) => !p.isInput).length;
6270
6216
  let resolvedParameters = 0;
6271
6217
  let currentTaskTitle = '';
6272
6218
  // Get the resolved parameters from output parameters
6273
6219
  if (cv === null || cv === void 0 ? void 0 : cv.outputParameters) {
6274
6220
  // Count how many output parameters have non-empty values
6275
- resolvedParameters = Object.values(cv.outputParameters).filter(value => value !== undefined && value !== null && String(value).trim() !== '').length;
6221
+ resolvedParameters = Object.values(cv.outputParameters).filter((value) => value !== undefined && value !== null && String(value).trim() !== '').length;
6276
6222
  }
6277
6223
  // Try to determine current task from execution report
6278
6224
  if (((_a = cv === null || cv === void 0 ? void 0 : cv.executionReport) === null || _a === void 0 ? void 0 : _a.promptExecutions) && cv.executionReport.promptExecutions.length > 0) {
@@ -6382,9 +6328,7 @@
6382
6328
  throw new MissingToolsError('LLM tools are required for scraping external files');
6383
6329
  // <- Note: This scraper is used in all other scrapers, so saying "external files" not "markdown files"
6384
6330
  }
6385
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
6386
- const _llms = arrayableToArray(llm);
6387
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
6331
+ const llmTools = getSingleLlmExecutionTools(llm);
6388
6332
  // TODO: [🌼] In future use `ptbk make` and made getPipelineCollection
6389
6333
  const collection = createCollectionFromJson(...PipelineCollection);
6390
6334
  const prepareKnowledgeFromMarkdownExecutor = createPipelineExecutor({
@@ -6887,27 +6831,57 @@
6887
6831
  }
6888
6832
  /**
6889
6833
  * TODO: [🧠] Maybe this should be exported from `@promptbook/utils` not `@promptbook/markdown-utils`
6890
- * TODO: [πŸ…ΎοΈ] !!! Use this across the project where AI text is involved
6891
6834
  */
6892
6835
 
6836
+ /**
6837
+ * @private
6838
+ */
6839
+ const PROMPTBOOK_PSEUDOTOKEN_SUBSTITUTION = {
6840
+ 'a ': 'a ',
6841
+ 'the ': 'the ',
6842
+ 'is ': 'is ',
6843
+ 'or ': 'or ',
6844
+ 'be ': 'be ',
6845
+ };
6893
6846
  /**
6894
6847
  * Function `promptbookifyAiText` will slightly modify the text so we know it was processed by Promptbook
6895
6848
  *
6849
+ * Note: [πŸ”‚] This function is idempotent.
6850
+ *
6896
6851
  * @public exported from `@promptbook/markdown-utils`
6897
6852
  */
6898
6853
  function promptbookifyAiText(text) {
6899
- // Note: Duplicating some spaces
6900
- const words = text.split(' '); // <- Note: [✌️] Use `splitWords` when available
6901
- const wordLength = words.length; // <- Note: [✌️] `countWords` should be just `splitWords(...).length`
6902
- for (const wordIndex of [3, 7, 11, 19].filter((i) => i < wordLength)) {
6903
- words[wordIndex] = ' ' + words[wordIndex];
6854
+ const textLength = text.length;
6855
+ let currentToken = '';
6856
+ const textTokens = [
6857
+ /* <- TODO: [✌️] Create `splitToPromptbookTokens` */
6858
+ ];
6859
+ for (let textPosition = 0; textPosition < textLength; textPosition++) {
6860
+ const currentCharacter = text[textPosition];
6861
+ if (currentToken.endsWith(' ') && currentCharacter !== ' ') {
6862
+ textTokens.push(currentToken);
6863
+ currentToken = '';
6864
+ }
6865
+ currentToken += currentCharacter;
6866
+ }
6867
+ if (currentToken.length > 0) {
6868
+ textTokens.push(currentToken);
6869
+ }
6870
+ // [✌️] <- End of `splitToPromptbookTokens`
6871
+ const promptbookifiedTextTokens = [];
6872
+ for (let i = 0; i < textTokens.length; i++) {
6873
+ const token = textTokens[i];
6874
+ const tokenSubstitute = PROMPTBOOK_PSEUDOTOKEN_SUBSTITUTION[token];
6875
+ if (tokenSubstitute !== undefined) {
6876
+ promptbookifiedTextTokens.push(tokenSubstitute);
6877
+ }
6878
+ else {
6879
+ promptbookifiedTextTokens.push(token);
6880
+ }
6904
6881
  }
6905
- const promptbookifiedText = words.join(' ');
6906
- return promptbookifiedText;
6882
+ return promptbookifiedTextTokens.join('');
6907
6883
  }
6908
6884
  /**
6909
- * TODO: !!!!! Make the function idempotent and add "Note: [πŸ”‚] This function is idempotent."
6910
- * TODO: [πŸ…ΎοΈ]!!! Use this across the project where AI text is involved
6911
6885
  * TODO: [🧠][✌️] Make some Promptbook-native token system
6912
6886
  */
6913
6887