@promptbook/wizard 0.101.0-18 → 0.101.0-19
This diff compares the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their respective public registries.
- package/esm/index.es.js +50 -43
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/core.index.d.ts +2 -0
- package/esm/typings/src/book-2.0/utils/generatePlaceholderAgentProfileImageUrl.d.ts +3 -0
- package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/MockedChat.d.ts +7 -1
- package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +1 -2
- package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +1 -3
- package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForWizardOrCli.d.ts +1 -2
- package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts +8 -2
- package/esm/typings/src/llm-providers/_multiple/MultipleLlmExecutionTools.d.ts +5 -13
- package/esm/typings/src/llm-providers/_multiple/getSingleLlmExecutionTools.d.ts +11 -0
- package/esm/typings/src/llm-providers/_multiple/joinLlmExecutionTools.d.ts +2 -1
- package/esm/typings/src/llm-providers/mocked/$fakeTextToExpectations.d.ts +1 -0
- package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +2 -5
- package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +2 -6
- package/esm/typings/src/scrapers/markdown/MarkdownScraper.d.ts +1 -2
- package/esm/typings/src/version.d.ts +1 -1
- package/package.json +2 -2
- package/umd/index.umd.js +50 -43
- package/umd/index.umd.js.map +1 -1
- package/esm/typings/src/llm-providers/mocked/test/joker.test.d.ts +0 -4
- package/esm/typings/src/llm-providers/mocked/test/mocked-chat.test.d.ts +0 -5
- package/esm/typings/src/llm-providers/mocked/test/mocked-completion.test.d.ts +0 -4
- package/esm/typings/src/scripting/_test/postprocessing.test.d.ts +0 -1
- /package/esm/typings/src/{cli/test/ptbk.test.d.ts → llm-providers/_common/utils/removeUnsupportedModelRequirements.test.d.ts} +0 -0
package/esm/index.es.js
CHANGED
@@ -36,7 +36,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.101.0-18';
+const PROMPTBOOK_ENGINE_VERSION = '0.101.0-19';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -7844,6 +7844,25 @@ function countUsage(llmTools) {
  * TODO: [👷♂️] @@@ Manual about construction of llmTools
  */
 
+/**
+ * Takes an item or an array of items and returns an array of items
+ *
+ * 1) Any item except array and undefined returns array with that one item (also null)
+ * 2) Undefined returns empty array
+ * 3) Array returns itself
+ *
+ * @private internal utility
+ */
+function arrayableToArray(input) {
+    if (input === undefined) {
+        return [];
+    }
+    if (input instanceof Array) {
+        return input;
+    }
+    return [input];
+}
+
 /**
  * Multiple LLM Execution Tools is a proxy server that uses multiple execution tools internally and exposes the executor interface externally.
  *
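For orientation while reading the hunks: the newly added `arrayableToArray` helper normalizes a "one or many" argument into an array. A minimal, self-contained sketch of its documented behavior — the function body is copied from the hunk above, and the calls are only illustrative (the helper is `@private`, not a public API):

```js
// Copied from the hunk above; marked `@private`, so not part of the public API.
function arrayableToArray(input) {
    if (input === undefined) {
        return [];
    }
    if (input instanceof Array) {
        return input;
    }
    return [input];
}

console.log(arrayableToArray(undefined));  // []         <- undefined becomes an empty array
console.log(arrayableToArray(null));       // [null]     <- any non-array item, including null, is wrapped
console.log(arrayableToArray(['a', 'b'])); // ['a', 'b'] <- an array is returned as-is
```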
@@ -7854,12 +7873,10 @@ class MultipleLlmExecutionTools {
     /**
      * Gets array of execution tools in order of priority
      */
-    constructor(...llmExecutionTools) {
+    constructor(title, ...llmExecutionTools) {
+        this.title = title;
         this.llmExecutionTools = llmExecutionTools;
     }
-    get title() {
-        return 'Multiple LLM Providers';
-    }
     get description() {
         const innerModelsTitlesAndDescriptions = this.llmExecutionTools
             .map(({ title, description }, index) => {
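The hunk above replaces the hard-coded `title` getter with a constructor parameter, so every `MultipleLlmExecutionTools` instance now carries a caller-supplied title. A hedged sketch of the new shape, assuming the class is in scope (it is defined in this bundle) and using minimal stand-in tools objects rather than real providers:

```js
// Hypothetical stand-ins for real LlmExecutionTools (e.g. OpenAI or Anthropic providers).
const openAiLike = { title: 'OpenAI-like tools' };
const anthropicLike = { title: 'Anthropic-like tools' };

// The title is now the first constructor argument instead of a fixed getter:
const multipleTools = new MultipleLlmExecutionTools('My LLM Providers', openAiLike, anthropicLike);
console.log(multipleTools.title); // 'My LLM Providers' (previously always 'Multiple LLM Providers')
```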
@@ -7945,7 +7962,7 @@ class MultipleLlmExecutionTools {
                         return await llmExecutionTools.callEmbeddingModel(prompt);
                     // <- case [🤖]:
                     default:
-                        throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}"`);
+                        throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}" in ${llmExecutionTools.title}`);
                 }
             }
             catch (error) {
@@ -7966,7 +7983,7 @@ class MultipleLlmExecutionTools {
             // 2) AnthropicClaude throw PipelineExecutionError: Parameter `{knowledge}` is not defined
             // 3) ...
             spaceTrim((block) => `
-                All execution tools failed:
+                All execution tools of ${this.title} failed:
 
                 ${block(errors
                     .map(({ error, llmExecutionTools }, i) => `${i + 1}) **${llmExecutionTools.title}** thrown **${error.name || 'Error'}:** ${error.message}`)
@@ -7975,11 +7992,11 @@ class MultipleLlmExecutionTools {
             `));
         }
         else if (this.llmExecutionTools.length === 0) {
-            throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\``);
+            throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\` into ${this.title}`);
         }
         else {
             throw new PipelineExecutionError(spaceTrim((block) => `
-                You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}"
+                You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}" into ${this.title}
 
                 Available \`LlmExecutionTools\`:
                 ${block(this.description)}
@@ -8009,7 +8026,7 @@ class MultipleLlmExecutionTools {
  *
  * @public exported from `@promptbook/core`
  */
-function joinLlmExecutionTools(...llmExecutionTools) {
+function joinLlmExecutionTools(title, ...llmExecutionTools) {
     if (llmExecutionTools.length === 0) {
         const warningMessage = spaceTrim(`
             You have not provided any \`LlmExecutionTools\`
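With the signature change above, callers of `joinLlmExecutionTools` now pass a title first and the tools after it. A minimal sketch — the import path follows the `@public exported from @promptbook/core` note in the hunk, and the stand-in tools objects are hypothetical, not real providers:

```js
import { joinLlmExecutionTools } from '@promptbook/core';

const toolsA = { title: 'Provider A' }; // <- hypothetical stand-ins
const toolsB = { title: 'Provider B' };

// Title first, then the tools to join:
const joined = joinLlmExecutionTools('LLM tools of my app', toolsA, toolsB);
console.log(joined.title); // 'LLM tools of my app'
```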
@@ -8041,30 +8058,27 @@ function joinLlmExecutionTools(...llmExecutionTools) {
         };
         */
     }
-    return new MultipleLlmExecutionTools(...llmExecutionTools);
+    return new MultipleLlmExecutionTools(title || 'Multiple LLM Providers joined by `joinLlmExecutionTools`', ...llmExecutionTools);
 }
 /**
  * TODO: [👷♂️] @@@ Manual about construction of llmTools
  */
 
 /**
- * Takes an item or an array of items and returns an array of items
- *
- * 1) Any item except array and undefined returns array with that one item (also null)
- * 2) Undefined returns empty array
- * 3) Array returns itself
+ * Just returns the given `LlmExecutionTools` or joins multiple into one
  *
- * @private internal utility
+ * @public exported from `@promptbook/core`
  */
-function arrayableToArray(input) {
-    if (input === undefined) {
-        return [];
-    }
-    if (input instanceof Array) {
-        return input;
-    }
-    return [input];
+function getSingleLlmExecutionTools(oneOrMoreLlmExecutionTools) {
+    const _llms = arrayableToArray(oneOrMoreLlmExecutionTools);
+    const llmTools = _llms.length === 1
+        ? _llms[0]
+        : joinLlmExecutionTools('Multiple LLM Providers joined by `getSingleLlmExecutionTools`', ..._llms);
+    return llmTools;
 }
+/**
+ * TODO: [👷♂️] @@@ Manual about construction of llmTools
+ */
 
 /**
  * Prepares the persona for the pipeline
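The new `getSingleLlmExecutionTools` collapses the `_llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms)` pattern that the remaining hunks remove from several call sites. A sketch of its two behaviors, with hypothetical stand-in tools objects (the hunk marks the function `@public exported from @promptbook/core`):

```js
import { getSingleLlmExecutionTools } from '@promptbook/core';

const toolsA = { title: 'Provider A' }; // <- hypothetical stand-ins
const toolsB = { title: 'Provider B' };

// A single tools object is returned unchanged:
const single = getSingleLlmExecutionTools(toolsA);
console.log(single === toolsA); // true

// An array is joined into one MultipleLlmExecutionTools with the default title shown in the hunk:
const joined = getSingleLlmExecutionTools([toolsA, toolsB]);
console.log(joined.title); // 'Multiple LLM Providers joined by `getSingleLlmExecutionTools`'
```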
@@ -8083,8 +8097,7 @@ async function preparePersona(personaDescription, tools, options) {
         pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book'),
         tools,
     });
-    const _llms = arrayableToArray(tools.llm);
-    const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+    const llmTools = getSingleLlmExecutionTools(tools.llm);
     const availableModels = (await llmTools.listModels())
         .filter(({ modelVariant }) => modelVariant === 'CHAT')
         .map(({ modelName, modelDescription }) => ({
@@ -8701,9 +8714,7 @@ async function preparePipeline(pipeline, tools, options) {
     if (tools === undefined || tools.llm === undefined) {
         throw new MissingToolsError('LLM tools are required for preparing the pipeline');
     }
-
-    const _llms = arrayableToArray(tools.llm);
-    const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+    const llmTools = getSingleLlmExecutionTools(tools.llm);
     const llmToolsWithUsage = countUsage(llmTools);
     // <- TODO: [🌯]
     /*
@@ -9573,9 +9584,7 @@ async function executeAttempts(options) {
         $scriptPipelineExecutionErrors: [],
         $failedResults: [], // Track all failed attempts
     };
-
-    const _llms = arrayableToArray(tools.llm);
-    const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+    const llmTools = getSingleLlmExecutionTools(tools.llm);
     attempts: for (let attemptIndex = -jokerParameterNames.length; attemptIndex < maxAttempts; attemptIndex++) {
         const isJokerAttempt = attemptIndex < 0;
         const jokerParameterName = jokerParameterNames[jokerParameterNames.length + attemptIndex];
@@ -10095,9 +10104,7 @@ async function getKnowledgeForTask(options) {
         return ''; // <- Note: Np knowledge present, return empty string
     }
     try {
-
-        const _llms = arrayableToArray(tools.llm);
-        const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+        const llmTools = getSingleLlmExecutionTools(tools.llm);
         const taskEmbeddingPrompt = {
             title: 'Knowledge Search',
             modelRequirements: {
@@ -10814,9 +10821,7 @@ class MarkdownScraper {
             throw new MissingToolsError('LLM tools are required for scraping external files');
             // <- Note: This scraper is used in all other scrapers, so saying "external files" not "markdown files"
         }
-
-        const _llms = arrayableToArray(llm);
-        const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+        const llmTools = getSingleLlmExecutionTools(llm);
         // TODO: [🌼] In future use `ptbk make` and made getPipelineCollection
         const collection = createCollectionFromJson(...PipelineCollection);
         const prepareKnowledgeFromMarkdownExecutor = createPipelineExecutor({
@@ -12955,7 +12960,7 @@ async function $provideLlmToolsConfigurationFromEnv() {
  * @public exported from `@promptbook/core`
  */
 function createLlmToolsFromConfiguration(configuration, options = {}) {
-    const { isVerbose = DEFAULT_IS_VERBOSE, userId } = options;
+    const { title = 'LLM Tools from Configuration', isVerbose = DEFAULT_IS_VERBOSE, userId } = options;
     const llmTools = configuration.map((llmConfiguration) => {
         const registeredItem = $llmToolsRegister
             .list()
@@ -12987,7 +12992,7 @@ function createLlmToolsFromConfiguration(configuration, options = {}) {
             ...llmConfiguration.options,
         });
     });
-    return joinLlmExecutionTools(...llmTools);
+    return joinLlmExecutionTools(title, ...llmTools);
 }
 /**
  * TODO: [🎌] Together with `createLlmToolsFromConfiguration` + 'EXECUTION_TOOLS_CLASSES' gets to `@promptbook/core` ALL model providers, make this more efficient
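The two hunks above thread an optional `title` through `createLlmToolsFromConfiguration` and into `joinLlmExecutionTools`. A hedged sketch of passing it; the empty configuration array is only a placeholder (in practice it would come from something like `$provideLlmToolsConfigurationFromEnv()`):

```js
import { createLlmToolsFromConfiguration } from '@promptbook/core';

// Placeholder configuration; real provider configurations would be loaded e.g. from ENV.
const configuration = [];

const llmTools = createLlmToolsFromConfiguration(configuration, {
    title: 'LLM Tools of my app', // <- new option; defaults to 'LLM Tools from Configuration'
    isVerbose: true,
});

console.log(llmTools.title); // 'LLM Tools of my app'
```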
@@ -13104,7 +13109,9 @@ async function $provideLlmToolsForWizardOrCli(options) {
         });
     }
     else if (strategy === 'BRING_YOUR_OWN_KEYS') {
-        llmExecutionTools = await $provideLlmToolsFromEnv();
+        llmExecutionTools = await $provideLlmToolsFromEnv({
+            title: 'LLM Tools for wizard or CLI with BYOK strategy',
+        });
     }
     else {
         throw new UnexpectedError(`\`$provideLlmToolsForWizardOrCli\` wrong strategy "${strategy}"`);
@@ -17137,7 +17144,7 @@ async function $provideExecutionToolsForNode(options) {
         throw new EnvironmentMismatchError('Function `$getExecutionToolsForNode` works only in Node.js environment');
     }
     const fs = $provideFilesystemForNode();
-    const llm = await $provideLlmToolsFromEnv(options);
+    const llm = await $provideLlmToolsFromEnv({ title: 'LLM Tools for Node.js', ...options });
     const executables = await $provideExecutablesForNode();
     const tools = {
         llm,