@promptbook/wizard 0.101.0-2 → 0.101.0-20
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/esm/index.es.js +329 -174
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/components.index.d.ts +20 -0
- package/esm/typings/src/_packages/core.index.d.ts +14 -0
- package/esm/typings/src/_packages/types.index.d.ts +14 -0
- package/esm/typings/src/book-2.0/agent-source/AgentBasicInformation.d.ts +41 -3
- package/esm/typings/src/book-2.0/agent-source/AgentModelRequirements.d.ts +3 -0
- package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirements.d.ts +4 -22
- package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.d.ts +1 -26
- package/esm/typings/src/book-2.0/agent-source/parseParameters.d.ts +13 -0
- package/esm/typings/src/book-2.0/commitments/ACTION/ACTION.d.ts +8 -2
- package/esm/typings/src/book-2.0/commitments/DELETE/DELETE.d.ts +59 -0
- package/esm/typings/src/book-2.0/commitments/FORMAT/FORMAT.d.ts +8 -2
- package/esm/typings/src/book-2.0/commitments/GOAL/GOAL.d.ts +45 -0
- package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/KNOWLEDGE.d.ts +1 -1
- package/esm/typings/src/book-2.0/commitments/MEMORY/MEMORY.d.ts +46 -0
- package/esm/typings/src/book-2.0/commitments/MESSAGE/MESSAGE.d.ts +47 -0
- package/esm/typings/src/book-2.0/commitments/META/META.d.ts +62 -0
- package/esm/typings/src/book-2.0/commitments/MODEL/MODEL.d.ts +31 -4
- package/esm/typings/src/book-2.0/commitments/NOTE/NOTE.d.ts +20 -2
- package/esm/typings/src/book-2.0/commitments/PERSONA/PERSONA.d.ts +8 -2
- package/esm/typings/src/book-2.0/commitments/SCENARIO/SCENARIO.d.ts +46 -0
- package/esm/typings/src/book-2.0/commitments/STYLE/STYLE.d.ts +8 -2
- package/esm/typings/src/book-2.0/commitments/index.d.ts +7 -3
- package/esm/typings/src/book-2.0/utils/generatePlaceholderAgentProfileImageUrl.d.ts +3 -0
- package/esm/typings/src/book-components/AvatarProfile/AvatarChip/AvatarChip.d.ts +2 -2
- package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/MockedChat.d.ts +63 -0
- package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/index.d.ts +3 -0
- package/esm/typings/src/book-components/BookEditor/BookEditor.d.ts +15 -0
- package/esm/typings/src/book-components/BookEditor/BookEditorInner.d.ts +4 -0
- package/esm/typings/src/book-components/Chat/Chat/ChatProps.d.ts +26 -0
- package/esm/typings/src/book-components/Chat/LlmChat/LlmChatProps.d.ts +13 -0
- package/esm/typings/src/book-components/Chat/hooks/index.d.ts +2 -0
- package/esm/typings/src/book-components/Chat/hooks/useChatAutoScroll.d.ts +41 -0
- package/esm/typings/src/book-components/Chat/hooks/useSendMessageToLlmChat.d.ts +44 -0
- package/esm/typings/src/book-components/Chat/utils/parseMessageButtons.d.ts +22 -0
- package/esm/typings/src/book-components/icons/PauseIcon.d.ts +8 -0
- package/esm/typings/src/book-components/icons/PlayIcon.d.ts +8 -0
- package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +1 -2
- package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +1 -3
- package/esm/typings/src/formats/csv/CsvFormatError.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsConfigurationFromEnv.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForTestingAndScriptsAndPlayground.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForWizardOrCli.d.ts +1 -2
- package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts +8 -2
- package/esm/typings/src/llm-providers/_common/utils/removeUnsupportedModelRequirements.d.ts +25 -0
- package/esm/typings/src/llm-providers/_multiple/MultipleLlmExecutionTools.d.ts +5 -13
- package/esm/typings/src/llm-providers/_multiple/getSingleLlmExecutionTools.d.ts +11 -0
- package/esm/typings/src/llm-providers/_multiple/joinLlmExecutionTools.d.ts +2 -1
- package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.d.ts +54 -0
- package/esm/typings/src/llm-providers/agent/createAgentLlmExecutionTools.d.ts +29 -0
- package/esm/typings/src/llm-providers/agent/playground/playground.d.ts +8 -0
- package/esm/typings/src/llm-providers/agent/register-configuration.d.ts +11 -0
- package/esm/typings/src/llm-providers/agent/register-constructor.d.ts +13 -0
- package/esm/typings/src/llm-providers/mocked/$fakeTextToExpectations.d.ts +1 -0
- package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +2 -5
- package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +2 -6
- package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionTools.d.ts +15 -8
- package/esm/typings/src/personas/preparePersona.d.ts +1 -0
- package/esm/typings/src/remote-server/openapi-types.d.ts +31 -31
- package/esm/typings/src/scrapers/markdown/MarkdownScraper.d.ts +1 -2
- package/esm/typings/src/types/ModelRequirements.d.ts +2 -4
- package/esm/typings/src/utils/color/utils/colorSaturation.d.ts +1 -1
- package/esm/typings/src/utils/editable/edit-pipeline-string/addPipelineCommand.d.ts +1 -1
- package/esm/typings/src/utils/markdown/humanizeAiText.d.ts +1 -0
- package/esm/typings/src/version.d.ts +1 -1
- package/package.json +2 -2
- package/umd/index.umd.js +329 -174
- package/umd/index.umd.js.map +1 -1
- package/esm/typings/src/book-2.0/utils/extractAgentMetadata.d.ts +0 -17
- package/esm/typings/src/book-2.0/utils/extractProfileImageFromSystemMessage.d.ts +0 -12
- package/esm/typings/src/llm-providers/mocked/test/joker.test.d.ts +0 -4
- package/esm/typings/src/llm-providers/mocked/test/mocked-chat.test.d.ts +0 -5
- package/esm/typings/src/llm-providers/mocked/test/mocked-completion.test.d.ts +0 -4
- package/esm/typings/src/scripting/_test/postprocessing.test.d.ts +0 -1
- /package/esm/typings/src/{cli/test/ptbk.test.d.ts → llm-providers/_common/utils/removeUnsupportedModelRequirements.test.d.ts} +0 -0
package/umd/index.umd.js
CHANGED
@@ -48,7 +48,7 @@
      * @generated
      * @see https://github.com/webgptorg/promptbook
      */
-    const PROMPTBOOK_ENGINE_VERSION = '0.101.0-2';
+    const PROMPTBOOK_ENGINE_VERSION = '0.101.0-20';
     /**
      * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
      * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -2826,7 +2826,7 @@
             modelVariant: 'CHAT',
             modelTitle: 'gpt-5-mini',
             modelName: 'gpt-5-mini',
-            modelDescription:
+            modelDescription: 'A faster, cost-efficient version of GPT-5 for well-defined tasks with 200K context window. Maintains core GPT-5 capabilities while offering 5x faster inference and significantly lower costs. Features enhanced instruction following and reduced latency for production applications requiring quick responses with high quality.',
             pricing: {
                 prompt: pricing(`$0.25 / 1M tokens`),
                 output: pricing(`$2.00 / 1M tokens`),
@@ -2838,7 +2838,7 @@
             modelVariant: 'CHAT',
             modelTitle: 'gpt-5-nano',
             modelName: 'gpt-5-nano',
-            modelDescription:
+            modelDescription: 'The fastest, most cost-efficient version of GPT-5 with 200K context window. Optimized for summarization, classification, and simple reasoning tasks. Features 10x faster inference than base GPT-5 while maintaining good quality for straightforward applications. Ideal for high-volume, cost-sensitive deployments.',
             pricing: {
                 prompt: pricing(`$0.05 / 1M tokens`),
                 output: pricing(`$0.40 / 1M tokens`),
@@ -2850,7 +2850,7 @@
             modelVariant: 'CHAT',
             modelTitle: 'gpt-4.1',
             modelName: 'gpt-4.1',
-            modelDescription:
+            modelDescription: 'Smartest non-reasoning model with 128K context window. Enhanced version of GPT-4 with improved instruction following, better factual accuracy, and reduced hallucinations. Features advanced function calling capabilities and superior performance on coding tasks. Ideal for applications requiring high intelligence without reasoning overhead.',
             pricing: {
                 prompt: pricing(`$3.00 / 1M tokens`),
                 output: pricing(`$12.00 / 1M tokens`),
@@ -2862,7 +2862,7 @@
             modelVariant: 'CHAT',
             modelTitle: 'gpt-4.1-mini',
             modelName: 'gpt-4.1-mini',
-            modelDescription:
+            modelDescription: 'Smaller, faster version of GPT-4.1 with 128K context window. Balances intelligence and efficiency with 3x faster inference than base GPT-4.1. Maintains strong capabilities across text generation, reasoning, and coding while offering better cost-performance ratio for most applications.',
             pricing: {
                 prompt: pricing(`$0.80 / 1M tokens`),
                 output: pricing(`$3.20 / 1M tokens`),
@@ -2874,7 +2874,7 @@
             modelVariant: 'CHAT',
             modelTitle: 'gpt-4.1-nano',
             modelName: 'gpt-4.1-nano',
-            modelDescription:
+            modelDescription: 'Fastest, most cost-efficient version of GPT-4.1 with 128K context window. Optimized for high-throughput applications requiring good quality at minimal cost. Features 5x faster inference than GPT-4.1 while maintaining adequate performance for most general-purpose tasks.',
             pricing: {
                 prompt: pricing(`$0.20 / 1M tokens`),
                 output: pricing(`$0.80 / 1M tokens`),
@@ -2886,7 +2886,7 @@
             modelVariant: 'CHAT',
             modelTitle: 'o3',
             modelName: 'o3',
-            modelDescription:
+            modelDescription: 'Advanced reasoning model with 128K context window specializing in complex logical, mathematical, and analytical tasks. Successor to o1 with enhanced step-by-step problem-solving capabilities and superior performance on STEM-focused problems. Ideal for professional applications requiring deep analytical thinking and precise reasoning.',
             pricing: {
                 prompt: pricing(`$15.00 / 1M tokens`),
                 output: pricing(`$60.00 / 1M tokens`),
@@ -2898,7 +2898,7 @@
             modelVariant: 'CHAT',
             modelTitle: 'o3-pro',
             modelName: 'o3-pro',
-            modelDescription:
+            modelDescription: 'Enhanced version of o3 with more compute allocated for better responses on the most challenging problems. Features extended reasoning time and improved accuracy on complex analytical tasks. Designed for applications where maximum reasoning quality is more important than response speed.',
             pricing: {
                 prompt: pricing(`$30.00 / 1M tokens`),
                 output: pricing(`$120.00 / 1M tokens`),
@@ -2910,7 +2910,7 @@
             modelVariant: 'CHAT',
             modelTitle: 'o4-mini',
             modelName: 'o4-mini',
-            modelDescription:
+            modelDescription: 'Fast, cost-efficient reasoning model with 128K context window. Successor to o1-mini with improved analytical capabilities while maintaining speed advantages. Features enhanced mathematical reasoning and logical problem-solving at significantly lower cost than full reasoning models.',
             pricing: {
                 prompt: pricing(`$4.00 / 1M tokens`),
                 output: pricing(`$16.00 / 1M tokens`),
@@ -2922,7 +2922,7 @@
             modelVariant: 'CHAT',
             modelTitle: 'o3-deep-research',
             modelName: 'o3-deep-research',
-            modelDescription:
+            modelDescription: 'Most powerful deep research model with 128K context window. Specialized for comprehensive research tasks, literature analysis, and complex information synthesis. Features advanced citation capabilities and enhanced factual accuracy for academic and professional research applications.',
             pricing: {
                 prompt: pricing(`$25.00 / 1M tokens`),
                 output: pricing(`$100.00 / 1M tokens`),
@@ -2934,7 +2934,7 @@
             modelVariant: 'CHAT',
             modelTitle: 'o4-mini-deep-research',
             modelName: 'o4-mini-deep-research',
-            modelDescription:
+            modelDescription: 'Faster, more affordable deep research model with 128K context window. Balances research capabilities with cost efficiency, offering good performance on literature review, fact-checking, and information synthesis tasks at a more accessible price point.',
             pricing: {
                 prompt: pricing(`$12.00 / 1M tokens`),
                 output: pricing(`$48.00 / 1M tokens`),
@@ -4634,6 +4634,62 @@
      * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
      */

+    /**
+     * Parses an OpenAI error message to identify which parameter is unsupported
+     *
+     * @param errorMessage The error message from OpenAI API
+     * @returns The parameter name that is unsupported, or null if not an unsupported parameter error
+     * @private utility of LLM Tools
+     */
+    function parseUnsupportedParameterError(errorMessage) {
+        // Pattern to match "Unsupported value: 'parameter' does not support ..."
+        const unsupportedValueMatch = errorMessage.match(/Unsupported value:\s*'([^']+)'\s*does not support/i);
+        if (unsupportedValueMatch === null || unsupportedValueMatch === void 0 ? void 0 : unsupportedValueMatch[1]) {
+            return unsupportedValueMatch[1];
+        }
+        // Pattern to match "'parameter' of type ... is not supported with this model"
+        const parameterTypeMatch = errorMessage.match(/'([^']+)'\s*of type.*is not supported with this model/i);
+        if (parameterTypeMatch === null || parameterTypeMatch === void 0 ? void 0 : parameterTypeMatch[1]) {
+            return parameterTypeMatch[1];
+        }
+        return null;
+    }
+    /**
+     * Creates a copy of model requirements with the specified parameter removed
+     *
+     * @param modelRequirements Original model requirements
+     * @param unsupportedParameter The parameter to remove
+     * @returns New model requirements without the unsupported parameter
+     * @private utility of LLM Tools
+     */
+    function removeUnsupportedModelRequirement(modelRequirements, unsupportedParameter) {
+        const newRequirements = { ...modelRequirements };
+        // Map of parameter names that might appear in error messages to ModelRequirements properties
+        const parameterMap = {
+            temperature: 'temperature',
+            max_tokens: 'maxTokens',
+            maxTokens: 'maxTokens',
+            seed: 'seed',
+        };
+        const propertyToRemove = parameterMap[unsupportedParameter];
+        if (propertyToRemove && propertyToRemove in newRequirements) {
+            delete newRequirements[propertyToRemove];
+        }
+        return newRequirements;
+    }
+    /**
+     * Checks if an error is an "Unsupported value" error from OpenAI
+     * @param error The error to check
+     * @returns true if this is an unsupported parameter error
+     * @private utility of LLM Tools
+     */
+    function isUnsupportedParameterError(error) {
+        const errorMessage = error.message.toLowerCase();
+        return (errorMessage.includes('unsupported value:') ||
+            errorMessage.includes('is not supported with this model') ||
+            errorMessage.includes('does not support'));
+    }
+
     /**
      * Execution Tools for calling OpenAI API or other OpenAI compatible provider
     *
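Note: a minimal usage sketch of the three new helpers above (not part of the bundle; the error message is a hypothetical example of the shape the regexes target):

    // Hypothetical OpenAI-style error of the shape parseUnsupportedParameterError expects
    const error = new Error("400 Unsupported value: 'temperature' does not support 0.7 with this model.");
    if (isUnsupportedParameterError(error)) {
        const parameter = parseUnsupportedParameterError(error.message); // ⟶ 'temperature'
        removeUnsupportedModelRequirement({ modelVariant: 'CHAT', modelName: 'o3', temperature: 0.7, maxTokens: 1024 }, parameter);
        // ⟶ { modelVariant: 'CHAT', modelName: 'o3', maxTokens: 1024 }
    }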
@@ -4651,6 +4707,10 @@
          * OpenAI API client.
          */
         this.client = null;
+        /**
+         * Tracks models and parameters that have already been retried to prevent infinite loops
+         */
+        this.retriedUnsupportedParameters = new Set();
         // TODO: Allow configuring rate limits via options
         this.limiter = new Bottleneck__default["default"]({
             minTime: 60000 / (this.options.maxRequestsPerMinute || DEFAULT_MAX_REQUESTS_PER_MINUTE),
@@ -4712,21 +4772,27 @@
      * Calls OpenAI compatible API to use a chat model.
      */
     async callChatModel(prompt) {
+        return this.callChatModelWithRetry(prompt, prompt.modelRequirements);
+    }
+    /**
+     * Internal method that handles parameter retry for chat model calls
+     */
+    async callChatModelWithRetry(prompt, currentModelRequirements) {
         var _a;
         if (this.options.isVerbose) {
-            console.info(`💬 ${this.title} callChatModel call`, { prompt });
+            console.info(`💬 ${this.title} callChatModel call`, { prompt, currentModelRequirements });
         }
-        const { content, parameters,
+        const { content, parameters, format } = prompt;
         const client = await this.getClient();
         // TODO: [☂] Use here more modelRequirements
-        if (
+        if (currentModelRequirements.modelVariant !== 'CHAT') {
             throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
         }
-        const modelName =
+        const modelName = currentModelRequirements.modelName || this.getDefaultChatModel().modelName;
         const modelSettings = {
             model: modelName,
-            max_tokens:
-            temperature:
+            max_tokens: currentModelRequirements.maxTokens,
+            temperature: currentModelRequirements.temperature,
             // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
             // <- Note: [🧆]
         }; // <- TODO: [💩] Guard here types better
@@ -4741,12 +4807,12 @@
         const rawRequest = {
             ...modelSettings,
             messages: [
-                ...(
+                ...(currentModelRequirements.systemMessage === undefined
                     ? []
                     : [
                         {
                             role: 'system',
-                            content:
+                            content: currentModelRequirements.systemMessage,
                         },
                     ]),
                 {
@@ -4760,69 +4826,110 @@
         if (this.options.isVerbose) {
             console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
         }
-
-
-
-
+        try {
+            const rawResponse = await this.limiter
+                .schedule(() => this.makeRequestWithNetworkRetry(() => client.chat.completions.create(rawRequest)))
+                .catch((error) => {
+                assertsError(error);
+                if (this.options.isVerbose) {
+                    console.info(colors__default["default"].bgRed('error'), error);
+                }
+                throw error;
+            });
             if (this.options.isVerbose) {
-                console.info(colors__default["default"].
+                console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
             }
-
-
-
-
-
-
-
-
-
-
-
+            const complete = $getCurrentDate();
+            if (!rawResponse.choices[0]) {
+                throw new PipelineExecutionError(`No choises from ${this.title}`);
+            }
+            if (rawResponse.choices.length > 1) {
+                // TODO: This should be maybe only warning
+                throw new PipelineExecutionError(`More than one choise from ${this.title}`);
+            }
+            const resultContent = rawResponse.choices[0].message.content;
+            const usage = this.computeUsage(content || '', resultContent || '', rawResponse);
+            if (resultContent === null) {
+                throw new PipelineExecutionError(`No response message from ${this.title}`);
+            }
+            return exportJson({
+                name: 'promptResult',
+                message: `Result of \`OpenAiCompatibleExecutionTools.callChatModel\``,
+                order: [],
+                value: {
+                    content: resultContent,
+                    modelName: rawResponse.model || modelName,
+                    timing: {
+                        start,
+                        complete,
+                    },
+                    usage,
+                    rawPromptContent,
+                    rawRequest,
+                    rawResponse,
+                    // <- [🗯]
+                },
+            });
         }
-
-
-
-
+        catch (error) {
+            assertsError(error);
+            // Check if this is an unsupported parameter error
+            if (!isUnsupportedParameterError(error)) {
+                throw error;
+            }
+            // Parse which parameter is unsupported
+            const unsupportedParameter = parseUnsupportedParameterError(error.message);
+            if (!unsupportedParameter) {
+                if (this.options.isVerbose) {
+                    console.warn(colors__default["default"].bgYellow('Warning'), 'Could not parse unsupported parameter from error:', error.message);
+                }
+                throw error;
+            }
+            // Create a unique key for this model + parameter combination to prevent infinite loops
+            const retryKey = `${modelName}-${unsupportedParameter}`;
+            if (this.retriedUnsupportedParameters.has(retryKey)) {
+                // Already retried this parameter, throw the error
+                if (this.options.isVerbose) {
+                    console.warn(colors__default["default"].bgRed('Error'), `Parameter '${unsupportedParameter}' for model '${modelName}' already retried once, throwing error:`, error.message);
+                }
+                throw error;
+            }
+            // Mark this parameter as retried
+            this.retriedUnsupportedParameters.add(retryKey);
+            // Log warning in verbose mode
+            if (this.options.isVerbose) {
+                console.warn(colors__default["default"].bgYellow('Warning'), `Removing unsupported parameter '${unsupportedParameter}' for model '${modelName}' and retrying request`);
+            }
+            // Remove the unsupported parameter and retry
+            const modifiedModelRequirements = removeUnsupportedModelRequirement(currentModelRequirements, unsupportedParameter);
+            return this.callChatModelWithRetry(prompt, modifiedModelRequirements);
         }
-        return exportJson({
-            name: 'promptResult',
-            message: `Result of \`OpenAiCompatibleExecutionTools.callChatModel\``,
-            order: [],
-            value: {
-                content: resultContent,
-                modelName: rawResponse.model || modelName,
-                timing: {
-                    start,
-                    complete,
-                },
-                usage,
-                rawPromptContent,
-                rawRequest,
-                rawResponse,
-                // <- [🗯]
-            },
-        });
     }
     /**
      * Calls OpenAI API to use a complete model.
      */
     async callCompletionModel(prompt) {
+        return this.callCompletionModelWithRetry(prompt, prompt.modelRequirements);
+    }
+    /**
+     * Internal method that handles parameter retry for completion model calls
+     */
+    async callCompletionModelWithRetry(prompt, currentModelRequirements) {
         var _a;
         if (this.options.isVerbose) {
-            console.info(`🖋 ${this.title} callCompletionModel call`, { prompt });
+            console.info(`🖋 ${this.title} callCompletionModel call`, { prompt, currentModelRequirements });
         }
-        const { content, parameters
+        const { content, parameters } = prompt;
         const client = await this.getClient();
         // TODO: [☂] Use here more modelRequirements
-        if (
+        if (currentModelRequirements.modelVariant !== 'COMPLETION') {
             throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
         }
-        const modelName =
+        const modelName = currentModelRequirements.modelName || this.getDefaultCompletionModel().modelName;
         const modelSettings = {
             model: modelName,
-            max_tokens:
-            temperature:
+            max_tokens: currentModelRequirements.maxTokens,
+            temperature: currentModelRequirements.temperature,
             // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
             // <- Note: [🧆]
         };
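Note: callChatModelWithRetry and callCompletionModelWithRetry implement the same self-healing loop: call the API, and on an "unsupported parameter" error strip that parameter and call again, at most once per model + parameter pair. A condensed, illustrative sketch of the control flow (callApi and requirements are stand-ins for the promptbook internals, not names from the bundle):

    // Condensed sketch of the retry-on-unsupported-parameter loop used by both methods
    async function callWithRetry(callApi, requirements, retried = new Set()) {
        try {
            return await callApi(requirements);
        } catch (error) {
            const parameter = isUnsupportedParameterError(error) && parseUnsupportedParameterError(error.message);
            if (!parameter || retried.has(`${requirements.modelName}-${parameter}`)) {
                throw error; // Not recoverable, or already retried once for this parameter
            }
            retried.add(`${requirements.modelName}-${parameter}`);
            return callWithRetry(callApi, removeUnsupportedModelRequirement(requirements, parameter), retried);
        }
    }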
@@ -4836,46 +4943,81 @@
         if (this.options.isVerbose) {
             console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
         }
-
-
-
-
+        try {
+            const rawResponse = await this.limiter
+                .schedule(() => this.makeRequestWithNetworkRetry(() => client.completions.create(rawRequest)))
+                .catch((error) => {
+                assertsError(error);
+                if (this.options.isVerbose) {
+                    console.info(colors__default["default"].bgRed('error'), error);
+                }
+                throw error;
+            });
             if (this.options.isVerbose) {
-                console.info(colors__default["default"].
+                console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
             }
-
-
-
-
-
-
-
-
+            const complete = $getCurrentDate();
+            if (!rawResponse.choices[0]) {
+                throw new PipelineExecutionError(`No choises from ${this.title}`);
+            }
+            if (rawResponse.choices.length > 1) {
+                // TODO: This should be maybe only warning
+                throw new PipelineExecutionError(`More than one choise from ${this.title}`);
+            }
+            const resultContent = rawResponse.choices[0].text;
+            const usage = this.computeUsage(content || '', resultContent || '', rawResponse);
+            return exportJson({
+                name: 'promptResult',
+                message: `Result of \`OpenAiCompatibleExecutionTools.callCompletionModel\``,
+                order: [],
+                value: {
+                    content: resultContent,
+                    modelName: rawResponse.model || modelName,
+                    timing: {
+                        start,
+                        complete,
+                    },
+                    usage,
+                    rawPromptContent,
+                    rawRequest,
+                    rawResponse,
+                    // <- [🗯]
+                },
+            });
         }
-
-
-
+        catch (error) {
+            assertsError(error);
+            // Check if this is an unsupported parameter error
+            if (!isUnsupportedParameterError(error)) {
+                throw error;
+            }
+            // Parse which parameter is unsupported
+            const unsupportedParameter = parseUnsupportedParameterError(error.message);
+            if (!unsupportedParameter) {
+                if (this.options.isVerbose) {
+                    console.warn(colors__default["default"].bgYellow('Warning'), 'Could not parse unsupported parameter from error:', error.message);
+                }
+                throw error;
+            }
+            // Create a unique key for this model + parameter combination to prevent infinite loops
+            const retryKey = `${modelName}-${unsupportedParameter}`;
+            if (this.retriedUnsupportedParameters.has(retryKey)) {
+                // Already retried this parameter, throw the error
+                if (this.options.isVerbose) {
+                    console.warn(colors__default["default"].bgRed('Error'), `Parameter '${unsupportedParameter}' for model '${modelName}' already retried once, throwing error:`, error.message);
+                }
+                throw error;
+            }
+            // Mark this parameter as retried
+            this.retriedUnsupportedParameters.add(retryKey);
+            // Log warning in verbose mode
+            if (this.options.isVerbose) {
+                console.warn(colors__default["default"].bgYellow('Warning'), `Removing unsupported parameter '${unsupportedParameter}' for model '${modelName}' and retrying request`);
+            }
+            // Remove the unsupported parameter and retry
+            const modifiedModelRequirements = removeUnsupportedModelRequirement(currentModelRequirements, unsupportedParameter);
+            return this.callCompletionModelWithRetry(prompt, modifiedModelRequirements);
         }
-        const resultContent = rawResponse.choices[0].text;
-        const usage = this.computeUsage(content || '', resultContent || '', rawResponse);
-        return exportJson({
-            name: 'promptResult',
-            message: `Result of \`OpenAiCompatibleExecutionTools.callCompletionModel\``,
-            order: [],
-            value: {
-                content: resultContent,
-                modelName: rawResponse.model || modelName,
-                timing: {
-                    start,
-                    complete,
-                },
-                usage,
-                rawPromptContent,
-                rawRequest,
-                rawResponse,
-                // <- [🗯]
-            },
-        });
     }
     /**
      * Calls OpenAI compatible API to use a embedding model
@@ -4901,7 +5043,7 @@
             console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
         }
         const rawResponse = await this.limiter
-            .schedule(() => this.
+            .schedule(() => this.makeRequestWithNetworkRetry(() => client.embeddings.create(rawRequest)))
             .catch((error) => {
             assertsError(error);
             if (this.options.isVerbose) {
@@ -4963,7 +5105,7 @@
     /**
      * Makes a request with retry logic for network errors like ECONNRESET
     */
-    async
+    async makeRequestWithNetworkRetry(requestFn) {
         let lastError;
         for (let attempt = 1; attempt <= CONNECTION_RETRIES_LIMIT; attempt++) {
            try {
@@ -4975,8 +5117,8 @@
                 // Check if this is a retryable network error
                 const isRetryableError = this.isRetryableNetworkError(error);
                 if (!isRetryableError || attempt === CONNECTION_RETRIES_LIMIT) {
-                    if (this.options.isVerbose) {
-                        console.info(colors__default["default"].bgRed('Final error after retries'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}:`, error);
+                    if (this.options.isVerbose && this.isRetryableNetworkError(error)) {
+                        console.info(colors__default["default"].bgRed('Final network error after retries'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}:`, error);
                     }
                     throw error;
                 }
@@ -4986,7 +5128,7 @@
                 const jitterDelay = Math.random() * 500; // Add some randomness
                 const totalDelay = backoffDelay + jitterDelay;
                 if (this.options.isVerbose) {
-                    console.info(colors__default["default"].bgYellow('Retrying request'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}, waiting ${Math.round(totalDelay)}ms:`, error.message);
+                    console.info(colors__default["default"].bgYellow('Retrying network request'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}, waiting ${Math.round(totalDelay)}ms:`, error.message);
                 }
                 // Wait before retrying
                 await new Promise((resolve) => setTimeout(resolve, totalDelay));
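Note: the waiting strategy above is plain exponential backoff with jitter. A self-contained sketch of the same idea (the 1s base delay and the retry-limit value are assumptions for illustration, not the package's exact constants):

    const CONNECTION_RETRIES_LIMIT = 5; // Assumed value for illustration

    async function withNetworkRetry(requestFn) {
        let lastError;
        for (let attempt = 1; attempt <= CONNECTION_RETRIES_LIMIT; attempt++) {
            try {
                return await requestFn();
            } catch (error) {
                lastError = error;
                if (attempt === CONNECTION_RETRIES_LIMIT) {
                    break; // Out of attempts, rethrow below
                }
                const backoffDelay = 1000 * 2 ** (attempt - 1); // 1s, 2s, 4s, ...
                const jitterDelay = Math.random() * 500; // Randomness spreads out simultaneous retries
                await new Promise((resolve) => setTimeout(resolve, backoffDelay + jitterDelay));
            }
        }
        throw lastError;
    }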
@@ -6367,11 +6509,12 @@
         catch (error) {
             // Note: If we can't create cache directory, continue without it
             // This handles read-only filesystems, permission issues, and missing parent directories
-            if (error instanceof Error &&
-                error.message.includes('
-
-
-
+            if (error instanceof Error &&
+                (error.message.includes('EROFS') ||
+                    error.message.includes('read-only') ||
+                    error.message.includes('EACCES') ||
+                    error.message.includes('EPERM') ||
+                    error.message.includes('ENOENT'))) ;
             else {
                 // Re-throw other unexpected errors
                 throw error;
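Note: this best-effort guard recurs at every cache-write site below. Extracted as a predicate it would read as follows (a hypothetical helper; the bundle inlines the check and matches on error.message, even though Node.js also exposes these codes on error.code):

    // Hypothetical predicate equivalent to the inlined checks in this bundle
    function isIgnorableFilesystemError(error) {
        return (error instanceof Error &&
            ['EROFS', 'read-only', 'EACCES', 'EPERM', 'ENOENT'].some((code) => error.message.includes(code)));
    }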
@@ -7713,6 +7856,25 @@
  * TODO: [👷♂️] @@@ Manual about construction of llmTools
  */

+/**
+ * Takes an item or an array of items and returns an array of items
+ *
+ * 1) Any item except array and undefined returns array with that one item (also null)
+ * 2) Undefined returns empty array
+ * 3) Array returns itself
+ *
+ * @private internal utility
+ */
+function arrayableToArray(input) {
+    if (input === undefined) {
+        return [];
+    }
+    if (input instanceof Array) {
+        return input;
+    }
+    return [input];
+}
+
 /**
  * Multiple LLM Execution Tools is a proxy server that uses multiple execution tools internally and exposes the executor interface externally.
  *
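Note: for reference, arrayableToArray behaves as follows (a usage sketch; llmTools, llmToolsA, and llmToolsB are hypothetical stand-in values):

    arrayableToArray(undefined); // ⟶ []
    arrayableToArray(llmTools); // ⟶ [llmTools]
    arrayableToArray([llmToolsA, llmToolsB]); // ⟶ [llmToolsA, llmToolsB]
    arrayableToArray(null); // ⟶ [null]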
@@ -7723,12 +7885,10 @@
     /**
      * Gets array of execution tools in order of priority
      */
-    constructor(...llmExecutionTools) {
+    constructor(title, ...llmExecutionTools) {
+        this.title = title;
         this.llmExecutionTools = llmExecutionTools;
     }
-    get title() {
-        return 'Multiple LLM Providers';
-    }
     get description() {
         const innerModelsTitlesAndDescriptions = this.llmExecutionTools
             .map(({ title, description }, index) => {
@@ -7814,7 +7974,7 @@
                     return await llmExecutionTools.callEmbeddingModel(prompt);
                 // <- case [🤖]:
                 default:
-                    throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}"`);
+                    throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}" in ${llmExecutionTools.title}`);
             }
         }
         catch (error) {
@@ -7835,7 +7995,7 @@
             // 2) AnthropicClaude throw PipelineExecutionError: Parameter `{knowledge}` is not defined
             // 3) ...
             spaceTrim__default["default"]((block) => `
-                All execution tools failed:
+                All execution tools of ${this.title} failed:

                 ${block(errors
                     .map(({ error, llmExecutionTools }, i) => `${i + 1}) **${llmExecutionTools.title}** thrown **${error.name || 'Error'}:** ${error.message}`)
|
|
7844
8004
|
`));
|
7845
8005
|
}
|
7846
8006
|
else if (this.llmExecutionTools.length === 0) {
|
7847
|
-
throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools
|
8007
|
+
throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\` into ${this.title}`);
|
7848
8008
|
}
|
7849
8009
|
else {
|
7850
8010
|
throw new PipelineExecutionError(spaceTrim__default["default"]((block) => `
|
7851
|
-
You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}"
|
8011
|
+
You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}" into ${this.title}
|
7852
8012
|
|
7853
8013
|
Available \`LlmExecutionTools\`:
|
7854
8014
|
${block(this.description)}
|
@@ -7878,7 +8038,7 @@
  *
  * @public exported from `@promptbook/core`
  */
-function joinLlmExecutionTools(...llmExecutionTools) {
+function joinLlmExecutionTools(title, ...llmExecutionTools) {
     if (llmExecutionTools.length === 0) {
         const warningMessage = spaceTrim__default["default"](`
             You have not provided any \`LlmExecutionTools\`
@@ -7910,30 +8070,27 @@
     };
     */
 }
-    return new MultipleLlmExecutionTools(...llmExecutionTools);
+    return new MultipleLlmExecutionTools(title || 'Multiple LLM Providers joined by `joinLlmExecutionTools`', ...llmExecutionTools);
 }
 /**
  * TODO: [👷♂️] @@@ Manual about construction of llmTools
 */

 /**
- *
- *
- * 1) Any item except array and undefined returns array with that one item (also null)
- * 2) Undefined returns empty array
- * 3) Array returns itself
+ * Just returns the given `LlmExecutionTools` or joins multiple into one
  *
- * @
+ * @public exported from `@promptbook/core`
  */
-function
-
-
-
-
-
-}
-    return [input];
+function getSingleLlmExecutionTools(oneOrMoreLlmExecutionTools) {
+    const _llms = arrayableToArray(oneOrMoreLlmExecutionTools);
+    const llmTools = _llms.length === 1
+        ? _llms[0]
+        : joinLlmExecutionTools('Multiple LLM Providers joined by `getSingleLlmExecutionTools`', ..._llms);
+    return llmTools;
 }
+/**
+ * TODO: [👷♂️] @@@ Manual about construction of llmTools
+ */

 /**
  * Prepares the persona for the pipeline
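Note: getSingleLlmExecutionTools replaces a three-line idiom repeated at many call sites below. An illustrative sketch of its two cases (openAiTools and anthropicTools are hypothetical stand-ins):

    // A single tools object passes through unchanged:
    getSingleLlmExecutionTools(openAiTools); // ⟶ openAiTools
    // Several are joined into one MultipleLlmExecutionTools with a default title:
    getSingleLlmExecutionTools([openAiTools, anthropicTools]); // ⟶ MultipleLlmExecutionTools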
@@ -7952,8 +8109,7 @@
         pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book'),
         tools,
     });
-    const
-    const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+    const llmTools = getSingleLlmExecutionTools(tools.llm);
     const availableModels = (await llmTools.listModels())
         .filter(({ modelVariant }) => modelVariant === 'CHAT')
         .map(({ modelName, modelDescription }) => ({
@@ -7997,6 +8153,7 @@
     };
 }
 /**
+ * TODO: [😩] DRY `preparePersona` and `selectBestModelFromAvailable`
  * TODO: [🔃][main] If the persona was prepared with different version or different set of models, prepare it once again
  * TODO: [🏢] Check validity of `modelName` in pipeline
  * TODO: [🏢] Check validity of `systemMessage` in pipeline
@@ -8569,9 +8726,7 @@
     if (tools === undefined || tools.llm === undefined) {
         throw new MissingToolsError('LLM tools are required for preparing the pipeline');
     }
-
-    const _llms = arrayableToArray(tools.llm);
-    const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+    const llmTools = getSingleLlmExecutionTools(tools.llm);
     const llmToolsWithUsage = countUsage(llmTools);
     // <- TODO: [🌯]
     /*
@@ -9441,9 +9596,7 @@
         $scriptPipelineExecutionErrors: [],
         $failedResults: [], // Track all failed attempts
     };
-
-    const _llms = arrayableToArray(tools.llm);
-    const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+    const llmTools = getSingleLlmExecutionTools(tools.llm);
     attempts: for (let attemptIndex = -jokerParameterNames.length; attemptIndex < maxAttempts; attemptIndex++) {
         const isJokerAttempt = attemptIndex < 0;
         const jokerParameterName = jokerParameterNames[jokerParameterNames.length + attemptIndex];
@@ -9963,9 +10116,7 @@
         return ''; // <- Note: Np knowledge present, return empty string
     }
     try {
-
-        const _llms = arrayableToArray(tools.llm);
-        const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+        const llmTools = getSingleLlmExecutionTools(tools.llm);
         const taskEmbeddingPrompt = {
             title: 'Knowledge Search',
             modelRequirements: {
|
|
10566
10717
|
// Calculate and update tldr based on pipeline progress
|
10567
10718
|
const cv = newOngoingResult;
|
10568
10719
|
// Calculate progress based on parameters resolved vs total parameters
|
10569
|
-
const totalParameters = pipeline.parameters.filter(p => !p.isInput).length;
|
10720
|
+
const totalParameters = pipeline.parameters.filter((p) => !p.isInput).length;
|
10570
10721
|
let resolvedParameters = 0;
|
10571
10722
|
let currentTaskTitle = '';
|
10572
10723
|
// Get the resolved parameters from output parameters
|
10573
10724
|
if (cv === null || cv === void 0 ? void 0 : cv.outputParameters) {
|
10574
10725
|
// Count how many output parameters have non-empty values
|
10575
|
-
resolvedParameters = Object.values(cv.outputParameters).filter(value => value !== undefined && value !== null && String(value).trim() !== '').length;
|
10726
|
+
resolvedParameters = Object.values(cv.outputParameters).filter((value) => value !== undefined && value !== null && String(value).trim() !== '').length;
|
10576
10727
|
}
|
10577
10728
|
// Try to determine current task from execution report
|
10578
10729
|
if (((_a = cv === null || cv === void 0 ? void 0 : cv.executionReport) === null || _a === void 0 ? void 0 : _a.promptExecutions) && cv.executionReport.promptExecutions.length > 0) {
|
@@ -10682,9 +10833,7 @@
         throw new MissingToolsError('LLM tools are required for scraping external files');
         // <- Note: This scraper is used in all other scrapers, so saying "external files" not "markdown files"
     }
-
-    const _llms = arrayableToArray(llm);
-    const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+    const llmTools = getSingleLlmExecutionTools(llm);
     // TODO: [🌼] In future use `ptbk make` and made getPipelineCollection
     const collection = createCollectionFromJson(...PipelineCollection);
     const prepareKnowledgeFromMarkdownExecutor = createPipelineExecutor({
@@ -11461,11 +11610,12 @@
         catch (error) {
             // Note: If we can't write to cache, we'll continue without caching
             // This handles read-only filesystems like Vercel
-            if (error instanceof Error &&
-                error.message.includes('
-
-
-
+            if (error instanceof Error &&
+                (error.message.includes('EROFS') ||
+                    error.message.includes('read-only') ||
+                    error.message.includes('EACCES') ||
+                    error.message.includes('EPERM') ||
+                    error.message.includes('ENOENT'))) ;
             else {
                 // Re-throw other unexpected errors
                 throw error;
@@ -11765,11 +11915,12 @@
         catch (error) {
             // Note: If we can't write to cache, we'll continue without caching
             // This handles read-only filesystems like Vercel
-            if (error instanceof Error &&
-                error.message.includes('
-
-
-
+            if (error instanceof Error &&
+                (error.message.includes('EROFS') ||
+                    error.message.includes('read-only') ||
+                    error.message.includes('EACCES') ||
+                    error.message.includes('EPERM') ||
+                    error.message.includes('ENOENT'))) ;
             else {
                 // Re-throw other unexpected errors
                 throw error;
@@ -12523,11 +12674,12 @@
     catch (error) {
         // Note: If we can't write to cache, silently ignore the error
        // This handles read-only filesystems, permission issues, and missing parent directories
-        if (error instanceof Error &&
-            error.message.includes('
-
-
-
+        if (error instanceof Error &&
+            (error.message.includes('EROFS') ||
+                error.message.includes('read-only') ||
+                error.message.includes('EACCES') ||
+                error.message.includes('EPERM') ||
+                error.message.includes('ENOENT'))) {
             // Silently ignore filesystem errors - caching is optional
             return;
         }
@@ -12820,7 +12972,7 @@
  * @public exported from `@promptbook/core`
  */
 function createLlmToolsFromConfiguration(configuration, options = {}) {
-    const { isVerbose = DEFAULT_IS_VERBOSE, userId } = options;
+    const { title = 'LLM Tools from Configuration', isVerbose = DEFAULT_IS_VERBOSE, userId } = options;
     const llmTools = configuration.map((llmConfiguration) => {
         const registeredItem = $llmToolsRegister
             .list()
|
|
12852
13004
|
...llmConfiguration.options,
|
12853
13005
|
});
|
12854
13006
|
});
|
12855
|
-
return joinLlmExecutionTools(...llmTools);
|
13007
|
+
return joinLlmExecutionTools(title, ...llmTools);
|
12856
13008
|
}
|
12857
13009
|
/**
|
12858
13010
|
* TODO: [🎌] Together with `createLlmToolsFromConfiguration` + 'EXECUTION_TOOLS_CLASSES' gets to `@promptbook/core` ALL model providers, make this more efficient
|
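Note: an illustrative usage sketch of the new title option (the configuration entry shape shown here is an assumption for illustration, not taken from the package's typings):

    const llmTools = createLlmToolsFromConfiguration(
        [{ title: 'OpenAI', packageName: '@promptbook/openai', className: 'OpenAiExecutionTools', options: { apiKey: process.env.OPENAI_API_KEY } }],
        { title: 'My application LLM tools' }, // <- New: becomes the title of the joined tools
    );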
@@ -12969,7 +13121,9 @@
         });
     }
     else if (strategy === 'BRING_YOUR_OWN_KEYS') {
-        llmExecutionTools = await $provideLlmToolsFromEnv(
+        llmExecutionTools = await $provideLlmToolsFromEnv({
+            title: 'LLM Tools for wizard or CLI with BYOK strategy',
+        });
     }
     else {
         throw new UnexpectedError(`\`$provideLlmToolsForWizardOrCli\` wrong strategy "${strategy}"`);
@@ -17002,7 +17156,7 @@
         throw new EnvironmentMismatchError('Function `$getExecutionToolsForNode` works only in Node.js environment');
     }
     const fs = $provideFilesystemForNode();
-    const llm = await $provideLlmToolsFromEnv(options);
+    const llm = await $provideLlmToolsFromEnv({ title: 'LLM Tools for Node.js', ...options });
     const executables = await $provideExecutablesForNode();
     const tools = {
         llm,
|
|
17411
17565
|
catch (error) {
|
17412
17566
|
// Note: Ignore filesystem errors (like EROFS on read-only systems like Vercel)
|
17413
17567
|
// The compiled book can still be used even if it can't be cached
|
17414
|
-
if (error instanceof Error &&
|
17415
|
-
error.message.includes('
|
17416
|
-
|
17417
|
-
|
17418
|
-
|
17568
|
+
if (error instanceof Error &&
|
17569
|
+
(error.message.includes('EROFS') ||
|
17570
|
+
error.message.includes('read-only') ||
|
17571
|
+
error.message.includes('EACCES') ||
|
17572
|
+
error.message.includes('EPERM') ||
|
17573
|
+
error.message.includes('ENOENT'))) ;
|
17419
17574
|
else {
|
17420
17575
|
// Re-throw other unexpected errors
|
17421
17576
|
throw error;
|