@promptbook/markdown-utils 0.101.0-9 → 0.101.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +0 -4
- package/esm/index.es.js +91 -117
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/components.index.d.ts +14 -0
- package/esm/typings/src/_packages/core.index.d.ts +12 -0
- package/esm/typings/src/_packages/types.index.d.ts +8 -0
- package/esm/typings/src/book-2.0/agent-source/AgentBasicInformation.d.ts +11 -4
- package/esm/typings/src/book-2.0/agent-source/AgentModelRequirements.d.ts +3 -0
- package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirements.d.ts +4 -22
- package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.d.ts +1 -26
- package/esm/typings/src/book-2.0/commitments/ACTION/ACTION.d.ts +0 -12
- package/esm/typings/src/book-2.0/commitments/DELETE/DELETE.d.ts +0 -24
- package/esm/typings/src/book-2.0/commitments/FORMAT/FORMAT.d.ts +0 -12
- package/esm/typings/src/book-2.0/commitments/GOAL/GOAL.d.ts +0 -12
- package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/KNOWLEDGE.d.ts +0 -6
- package/esm/typings/src/book-2.0/commitments/MEMORY/MEMORY.d.ts +0 -12
- package/esm/typings/src/book-2.0/commitments/MESSAGE/MESSAGE.d.ts +0 -12
- package/esm/typings/src/book-2.0/commitments/META/META.d.ts +0 -6
- package/esm/typings/src/book-2.0/commitments/META_IMAGE/META_IMAGE.d.ts +0 -6
- package/esm/typings/src/book-2.0/commitments/META_LINK/META_LINK.d.ts +0 -6
- package/esm/typings/src/book-2.0/commitments/MODEL/MODEL.d.ts +23 -14
- package/esm/typings/src/book-2.0/commitments/NOTE/NOTE.d.ts +2 -14
- package/esm/typings/src/book-2.0/commitments/PERSONA/PERSONA.d.ts +0 -12
- package/esm/typings/src/book-2.0/commitments/RULE/RULE.d.ts +0 -12
- package/esm/typings/src/book-2.0/commitments/SAMPLE/SAMPLE.d.ts +0 -12
- package/esm/typings/src/book-2.0/commitments/SCENARIO/SCENARIO.d.ts +0 -12
- package/esm/typings/src/book-2.0/commitments/STYLE/STYLE.d.ts +0 -12
- package/esm/typings/src/book-2.0/commitments/_base/createEmptyAgentModelRequirements.d.ts +1 -1
- package/esm/typings/src/book-2.0/commitments/index.d.ts +1 -1
- package/esm/typings/src/book-2.0/utils/generatePlaceholderAgentProfileImageUrl.d.ts +3 -0
- package/esm/typings/src/book-components/AvatarProfile/AvatarChip/AvatarChip.d.ts +5 -2
- package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/AvatarProfile.d.ts +3 -0
- package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/MockedChat.d.ts +18 -1
- package/esm/typings/src/book-components/BookEditor/BookEditor.d.ts +8 -0
- package/esm/typings/src/book-components/BookEditor/BookEditorInner.d.ts +2 -15
- package/esm/typings/src/book-components/Chat/Chat/ChatProps.d.ts +9 -0
- package/esm/typings/src/book-components/Chat/LlmChat/LlmChatProps.d.ts +13 -0
- package/esm/typings/src/book-components/Chat/hooks/index.d.ts +2 -0
- package/esm/typings/src/book-components/Chat/hooks/useChatAutoScroll.d.ts +41 -0
- package/esm/typings/src/book-components/Chat/hooks/useSendMessageToLlmChat.d.ts +44 -0
- package/esm/typings/src/book-components/Chat/utils/parseMessageButtons.d.ts +22 -0
- package/esm/typings/src/book-components/icons/PauseIcon.d.ts +8 -0
- package/esm/typings/src/book-components/icons/PlayIcon.d.ts +8 -0
- package/esm/typings/src/execution/PromptResult.d.ts +2 -4
- package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +1 -2
- package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +1 -3
- package/esm/typings/src/formats/csv/CsvFormatError.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsConfigurationFromEnv.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForTestingAndScriptsAndPlayground.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForWizardOrCli.d.ts +1 -2
- package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts +8 -2
- package/esm/typings/src/llm-providers/_common/utils/removeUnsupportedModelRequirements.d.ts +25 -0
- package/esm/typings/src/llm-providers/_multiple/MultipleLlmExecutionTools.d.ts +7 -18
- package/esm/typings/src/llm-providers/_multiple/getSingleLlmExecutionTools.d.ts +11 -0
- package/esm/typings/src/llm-providers/_multiple/joinLlmExecutionTools.d.ts +2 -1
- package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.d.ts +58 -0
- package/esm/typings/src/llm-providers/agent/createAgentLlmExecutionTools.d.ts +29 -0
- package/esm/typings/src/llm-providers/agent/playground/playground.d.ts +8 -0
- package/esm/typings/src/llm-providers/agent/register-configuration.d.ts +11 -0
- package/esm/typings/src/llm-providers/agent/register-constructor.d.ts +13 -0
- package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +3 -8
- package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +4 -5
- package/esm/typings/src/llm-providers/mocked/$fakeTextToExpectations.d.ts +1 -0
- package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +4 -10
- package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +4 -6
- package/esm/typings/src/llm-providers/ollama/OllamaExecutionTools.d.ts +3 -3
- package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionTools.d.ts +16 -8
- package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +3 -8
- package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +5 -14
- package/esm/typings/src/personas/preparePersona.d.ts +1 -0
- package/esm/typings/src/remote-server/openapi-types.d.ts +31 -31
- package/esm/typings/src/scrapers/markdown/MarkdownScraper.d.ts +1 -2
- package/esm/typings/src/types/ModelRequirements.d.ts +2 -4
- package/esm/typings/src/utils/color/utils/colorSaturation.d.ts +1 -1
- package/esm/typings/src/utils/editable/edit-pipeline-string/addPipelineCommand.d.ts +1 -1
- package/esm/typings/src/utils/markdown/humanizeAiText.d.ts +0 -1
- package/esm/typings/src/utils/markdown/promptbookifyAiText.d.ts +2 -2
- package/esm/typings/src/version.d.ts +1 -1
- package/package.json +1 -1
- package/umd/index.umd.js +91 -117
- package/umd/index.umd.js.map +1 -1
- package/esm/typings/src/book-2.0/utils/extractAgentMetadata.d.ts +0 -17
- package/esm/typings/src/book-2.0/utils/extractProfileImageFromSystemMessage.d.ts +0 -12
- package/esm/typings/src/book-components/Chat/examples/ChatMarkdownDemo.d.ts +0 -16
- package/esm/typings/src/expectations/drafts/isDomainNameFree.d.ts +0 -10
- package/esm/typings/src/expectations/drafts/isGithubNameFree.d.ts +0 -10
- package/esm/typings/src/llm-providers/_common/profiles/llmProviderProfiles.d.ts +0 -81
- /package/esm/typings/src/llm-providers/_common/{profiles/test/llmProviderProfiles.test.d.ts → utils/removeUnsupportedModelRequirements.test.d.ts} +0 -0
package/README.md
CHANGED

@@ -29,10 +29,6 @@ Write AI applications using plain human language across multiple models and plat
 
 
 
-<blockquote style="color: #ff8811">
-<b>⚠ Warning:</b> This is a pre-release version of the library. It is not yet ready for production use. Please look at <a href="https://www.npmjs.com/package/@promptbook/core?activeTab=versions">latest stable release</a>.
-</blockquote>
-
 ## 📦 Package `@promptbook/markdown-utils`
 
 - Promptbooks are [divided into several](#-packages) packages, all are published from [single monorepo](https://github.com/webgptorg/promptbook).
package/esm/index.es.js
CHANGED

@@ -23,7 +23,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.101.0-9';
+const PROMPTBOOK_ENGINE_VERSION = '0.101.0';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [π] Ignore a discrepancy between file name and entity name
@@ -2626,75 +2626,32 @@ function countUsage(llmTools) {
  */
 
 /**
- *
- *
+ * Takes an item or an array of items and returns an array of items
+ *
+ * 1) Any item except array and undefined returns array with that one item (also null)
+ * 2) Undefined returns empty array
+ * 3) Array returns itself
  *
- * @private
+ * @private internal utility
  */
-
-
-
-
-
-
-}
-
-
-
-        color: '#d97706', // Anthropic's orange/amber color
-    },
-    AZURE_OPENAI: {
-        name: 'AZURE_OPENAI',
-        fullname: 'Azure OpenAI',
-        color: '#0078d4', // Microsoft Azure blue
-    },
-    GOOGLE: {
-        name: 'GOOGLE',
-        fullname: 'Google Gemini',
-        color: '#4285f4', // Google blue
-    },
-    DEEPSEEK: {
-        name: 'DEEPSEEK',
-        fullname: 'DeepSeek',
-        color: '#7c3aed', // Purple color for DeepSeek
-    },
-    OLLAMA: {
-        name: 'OLLAMA',
-        fullname: 'Ollama',
-        color: '#059669', // Emerald green for local models
-    },
-    REMOTE: {
-        name: 'REMOTE',
-        fullname: 'Remote Server',
-        color: '#6b7280', // Gray for remote/proxy connections
-    },
-    MOCKED_ECHO: {
-        name: 'MOCKED_ECHO',
-        fullname: 'Echo (Test)',
-        color: '#8b5cf6', // Purple for test/mock tools
-    },
-    MOCKED_FAKE: {
-        name: 'MOCKED_FAKE',
-        fullname: 'Fake LLM (Test)',
-        color: '#ec4899', // Pink for fake/test tools
-    },
-    VERCEL: {
-        name: 'VERCEL',
-        fullname: 'Vercel AI',
-        color: '#000000', // Vercel's black
-    },
-    MULTIPLE: {
-        name: 'MULTIPLE',
-        fullname: 'Multiple Providers',
-        color: '#6366f1', // Indigo for combined/multiple providers
-    },
-};
+function arrayableToArray(input) {
+    if (input === undefined) {
+        return [];
+    }
+    if (input instanceof Array) {
+        return input;
+    }
+    return [input];
+}
+
 /**
- *
- * TODO: [π] Unite `AgentBasicInformation`, `ChatParticipant`, `LlmExecutionTools` + `LlmToolsMetadata`
- * Note: [π] Ignore a discrepancy between file name and entity name
+ * Profile for Multiple providers aggregation
  */
-
+const MULTIPLE_PROVIDER_PROFILE = {
+    name: 'MULTIPLE',
+    fullname: 'Multiple Providers',
+    color: '#6366f1',
+};
 /**
  * Multiple LLM Execution Tools is a proxy server that uses multiple execution tools internally and exposes the executor interface externally.
  *
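For illustration, the behavior of the new private `arrayableToArray` helper (its three documented cases) can be sketched in isolation. The `string` element type below is an arbitrary stand-in, since the bundled helper is untyped JavaScript:

```ts
// Standalone sketch of the private `arrayableToArray` helper added above (illustrative only).
function arrayableToArray(input: undefined | string | string[]): string[] {
    if (input === undefined) {
        return []; // 2) Undefined returns an empty array
    }
    if (Array.isArray(input)) {
        return input; // 3) An array is returned as-is
    }
    return [input]; // 1) Any other single item is wrapped in an array
}

console.log(arrayableToArray(undefined)); // []
console.log(arrayableToArray('gpt-4o')); // [ 'gpt-4o' ]
console.log(arrayableToArray(['claude', 'gemini'])); // [ 'claude', 'gemini' ]
```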
@@ -2705,12 +2662,10 @@ class MultipleLlmExecutionTools {
     /**
      * Gets array of execution tools in order of priority
      */
-    constructor(...llmExecutionTools) {
+    constructor(title, ...llmExecutionTools) {
+        this.title = title;
         this.llmExecutionTools = llmExecutionTools;
     }
-    get title() {
-        return 'Multiple LLM Providers';
-    }
     get description() {
         const innerModelsTitlesAndDescriptions = this.llmExecutionTools
             .map(({ title, description }, index) => {
@@ -2732,7 +2687,7 @@ class MultipleLlmExecutionTools {
         `);
     }
     get profile() {
-        return
+        return MULTIPLE_PROVIDER_PROFILE;
     }
     /**
      * Check the configuration of all execution tools
@@ -2796,7 +2751,7 @@ class MultipleLlmExecutionTools {
                     return await llmExecutionTools.callEmbeddingModel(prompt);
                 // <- case [π€]:
                 default:
-                    throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}"`);
+                    throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}" in ${llmExecutionTools.title}`);
                 }
             }
             catch (error) {
@@ -2817,7 +2772,7 @@ class MultipleLlmExecutionTools {
             // 2) AnthropicClaude throw PipelineExecutionError: Parameter `{knowledge}` is not defined
             // 3) ...
             spaceTrim((block) => `
-                All execution tools failed:
+                All execution tools of ${this.title} failed:
 
                 ${block(errors
                     .map(({ error, llmExecutionTools }, i) => `${i + 1}) **${llmExecutionTools.title}** thrown **${error.name || 'Error'}:** ${error.message}`)
@@ -2826,11 +2781,11 @@ class MultipleLlmExecutionTools {
             `));
         }
         else if (this.llmExecutionTools.length === 0) {
-            throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools
+            throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\` into ${this.title}`);
         }
         else {
             throw new PipelineExecutionError(spaceTrim((block) => `
-                You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}"
+                You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}" into ${this.title}
 
                 Available \`LlmExecutionTools\`:
                 ${block(this.description)}
@@ -2860,7 +2815,7 @@ class MultipleLlmExecutionTools {
  *
  * @public exported from `@promptbook/core`
  */
-function joinLlmExecutionTools(...llmExecutionTools) {
+function joinLlmExecutionTools(title, ...llmExecutionTools) {
     if (llmExecutionTools.length === 0) {
         const warningMessage = spaceTrim(`
             You have not provided any \`LlmExecutionTools\`
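The `joinLlmExecutionTools` JSDoc above marks it as `@public exported from @promptbook/core`, and this release prepends a `title` parameter to its signature. A hedged sketch of a call site under those assumptions; the tool instances are placeholders, and `@promptbook/types` as the home of the `LlmExecutionTools` type is an assumption, not something shown in this diff:

```ts
import type { LlmExecutionTools } from '@promptbook/types'; // <- assumed location of the type
import { joinLlmExecutionTools } from '@promptbook/core'; // <- per the `@public exported from @promptbook/core` note above

// Placeholders for real provider tools (OpenAI, Anthropic, ...); not part of this diff.
declare const openAiTools: LlmExecutionTools;
declare const anthropicTools: LlmExecutionTools;

// New in this version: the first argument names the joined tools,
// and that title now appears in the aggregated error messages.
const joined = joinLlmExecutionTools('My app LLM tools', openAiTools, anthropicTools);
console.info(joined.title); // 'My app LLM tools'
```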
@@ -2892,30 +2847,27 @@ function joinLlmExecutionTools(...llmExecutionTools) {
         };
         */
     }
-    return new MultipleLlmExecutionTools(...llmExecutionTools);
+    return new MultipleLlmExecutionTools(title || 'Multiple LLM Providers joined by `joinLlmExecutionTools`', ...llmExecutionTools);
 }
 /**
  * TODO: [π·ββοΈ] @@@ Manual about construction of llmTools
  */
 
 /**
- *
- *
- * 1) Any item except array and undefined returns array with that one item (also null)
- * 2) Undefined returns empty array
- * 3) Array returns itself
+ * Just returns the given `LlmExecutionTools` or joins multiple into one
  *
- * @
+ * @public exported from `@promptbook/core`
  */
-function
-
-
-
-
-
-}
-return [input];
+function getSingleLlmExecutionTools(oneOrMoreLlmExecutionTools) {
+    const _llms = arrayableToArray(oneOrMoreLlmExecutionTools);
+    const llmTools = _llms.length === 1
+        ? _llms[0]
+        : joinLlmExecutionTools('Multiple LLM Providers joined by `getSingleLlmExecutionTools`', ..._llms);
+    return llmTools;
 }
+/**
+ * TODO: [π·ββοΈ] @@@ Manual about construction of llmTools
+ */
 
 /**
  * Prepares the persona for the pipeline
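The new `getSingleLlmExecutionTools` helper (documented above as `@public exported from @promptbook/core`) is what the later hunks in `preparePersona`, `preparePipeline`, `executeAttempts`, `getKnowledgeForTask`, and `MarkdownScraper` switch to. A brief sketch of the behavior implied by its body, with the same placeholder caveats as above:

```ts
import type { LlmExecutionTools } from '@promptbook/types'; // <- assumed location of the type
import { getSingleLlmExecutionTools } from '@promptbook/core'; // <- per the `@public exported from @promptbook/core` note above

declare const onlyTools: LlmExecutionTools;
declare const toolsA: LlmExecutionTools;
declare const toolsB: LlmExecutionTools;

// A single instance is passed through unchanged:
const single = getSingleLlmExecutionTools(onlyTools);

// One or more instances given as an array are collapsed into one proxy with a default title:
const multiple = getSingleLlmExecutionTools([toolsA, toolsB]);
console.info(multiple.title); // 'Multiple LLM Providers joined by `getSingleLlmExecutionTools`'
```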
@@ -2934,8 +2886,7 @@ async function preparePersona(personaDescription, tools, options) {
         pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book'),
         tools,
     });
-    const _llms = arrayableToArray(tools.llm);
-    const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+    const llmTools = getSingleLlmExecutionTools(tools.llm);
     const availableModels = (await llmTools.listModels())
         .filter(({ modelVariant }) => modelVariant === 'CHAT')
         .map(({ modelName, modelDescription }) => ({
@@ -2979,6 +2930,7 @@ async function preparePersona(personaDescription, tools, options) {
     };
 }
 /**
+ * TODO: [π©] DRY `preparePersona` and `selectBestModelFromAvailable`
  * TODO: [π][main] If the persona was prepared with different version or different set of models, prepare it once again
  * TODO: [π’] Check validity of `modelName` in pipeline
  * TODO: [π’] Check validity of `systemMessage` in pipeline
@@ -4097,9 +4049,7 @@ async function preparePipeline(pipeline, tools, options) {
     if (tools === undefined || tools.llm === undefined) {
         throw new MissingToolsError('LLM tools are required for preparing the pipeline');
     }
-
-    const _llms = arrayableToArray(tools.llm);
-    const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+    const llmTools = getSingleLlmExecutionTools(tools.llm);
     const llmToolsWithUsage = countUsage(llmTools);
     // <- TODO: [π―]
     /*
@@ -5140,9 +5090,7 @@ async function executeAttempts(options) {
         $scriptPipelineExecutionErrors: [],
         $failedResults: [], // Track all failed attempts
     };
-
-    const _llms = arrayableToArray(tools.llm);
-    const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+    const llmTools = getSingleLlmExecutionTools(tools.llm);
     attempts: for (let attemptIndex = -jokerParameterNames.length; attemptIndex < maxAttempts; attemptIndex++) {
         const isJokerAttempt = attemptIndex < 0;
         const jokerParameterName = jokerParameterNames[jokerParameterNames.length + attemptIndex];
@@ -5662,9 +5610,7 @@ async function getKnowledgeForTask(options) {
         return ''; // <- Note: Np knowledge present, return empty string
     }
     try {
-
-        const _llms = arrayableToArray(tools.llm);
-        const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+        const llmTools = getSingleLlmExecutionTools(tools.llm);
         const taskEmbeddingPrompt = {
             title: 'Knowledge Search',
             modelRequirements: {
@@ -6265,13 +6211,13 @@ function createPipelineExecutor(options) {
         // Calculate and update tldr based on pipeline progress
         const cv = newOngoingResult;
         // Calculate progress based on parameters resolved vs total parameters
-        const totalParameters = pipeline.parameters.filter(p => !p.isInput).length;
+        const totalParameters = pipeline.parameters.filter((p) => !p.isInput).length;
         let resolvedParameters = 0;
         let currentTaskTitle = '';
         // Get the resolved parameters from output parameters
         if (cv === null || cv === void 0 ? void 0 : cv.outputParameters) {
             // Count how many output parameters have non-empty values
-            resolvedParameters = Object.values(cv.outputParameters).filter(value => value !== undefined && value !== null && String(value).trim() !== '').length;
+            resolvedParameters = Object.values(cv.outputParameters).filter((value) => value !== undefined && value !== null && String(value).trim() !== '').length;
         }
         // Try to determine current task from execution report
         if (((_a = cv === null || cv === void 0 ? void 0 : cv.executionReport) === null || _a === void 0 ? void 0 : _a.promptExecutions) && cv.executionReport.promptExecutions.length > 0) {
@@ -6381,9 +6327,7 @@ class MarkdownScraper {
             throw new MissingToolsError('LLM tools are required for scraping external files');
             // <- Note: This scraper is used in all other scrapers, so saying "external files" not "markdown files"
         }
-
-        const _llms = arrayableToArray(llm);
-        const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+        const llmTools = getSingleLlmExecutionTools(llm);
         // TODO: [πΌ] In future use `ptbk make` and made getPipelineCollection
         const collection = createCollectionFromJson(...PipelineCollection);
         const prepareKnowledgeFromMarkdownExecutor = createPipelineExecutor({
@@ -6886,27 +6830,57 @@ function humanizeAiText(aiText) {
 }
 /**
  * TODO: [π§ ] Maybe this should be exported from `@promptbook/utils` not `@promptbook/markdown-utils`
- * TODO: [πΎοΈ] !!! Use this across the project where AI text is involved
  */
 
+/**
+ * @private
+ */
+const PROMPTBOOK_PSEUDOTOKEN_SUBSTITUTION = {
+    'a ': 'a ',
+    'the ': 'the ',
+    'is ': 'is ',
+    'or ': 'or ',
+    'be ': 'be ',
+};
 /**
  * Function `promptbookifyAiText` will slightly modify the text so we know it was processed by Promptbook
  *
+ * Note: [π] This function is idempotent.
+ *
  * @public exported from `@promptbook/markdown-utils`
  */
 function promptbookifyAiText(text) {
-
-
-    const
-
-
+    const textLength = text.length;
+    let currentToken = '';
+    const textTokens = [
+    /* <- TODO: [βοΈ] Create `splitToPromptbookTokens` */
+    ];
+    for (let textPosition = 0; textPosition < textLength; textPosition++) {
+        const currentCharacter = text[textPosition];
+        if (currentToken.endsWith(' ') && currentCharacter !== ' ') {
+            textTokens.push(currentToken);
+            currentToken = '';
+        }
+        currentToken += currentCharacter;
+    }
+    if (currentToken.length > 0) {
+        textTokens.push(currentToken);
+    }
+    // [βοΈ] <- End of `splitToPromptbookTokens`
+    const promptbookifiedTextTokens = [];
+    for (let i = 0; i < textTokens.length; i++) {
+        const token = textTokens[i];
+        const tokenSubstitute = PROMPTBOOK_PSEUDOTOKEN_SUBSTITUTION[token];
+        if (tokenSubstitute !== undefined) {
+            promptbookifiedTextTokens.push(tokenSubstitute);
+        }
+        else {
+            promptbookifiedTextTokens.push(token);
+        }
     }
-
-    return promptbookifiedText;
+    return promptbookifiedTextTokens.join('');
 }
 /**
- * TODO: !!!!! Make the function idempotent and add "Note: [π] This function is idempotent."
- * TODO: [πΎοΈ]!!! Use this across the project where AI text is involved
  * TODO: [π§ ][βοΈ] Make some Promptbook-native token system
  */
 
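The rewritten `promptbookifyAiText` splits the input into space-terminated tokens and maps selected tokens through `PROMPTBOOK_PSEUDOTOKEN_SUBSTITUTION`; the substitution values above look identical to their keys only because whatever distinguishes them (presumably a different whitespace or otherwise invisible character) does not survive this diff rendering. A small check of the newly documented idempotency, assuming the function is imported from this package as its JSDoc states:

```ts
import { promptbookifyAiText } from '@promptbook/markdown-utils'; // <- per the `@public exported from @promptbook/markdown-utils` note above

const raw = 'This is a sample answer produced by the model or a tool.';
const once = promptbookifyAiText(raw);
const twice = promptbookifyAiText(once);

// The new JSDoc note says the function is idempotent, so a second pass should change nothing:
console.assert(once === twice, 'promptbookifyAiText should be idempotent');
```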