@promptbook/wizard 0.101.0-14 → 0.101.0-16
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/esm/index.es.js +279 -132
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/components.index.d.ts +4 -0
- package/esm/typings/src/_packages/core.index.d.ts +2 -0
- package/esm/typings/src/_packages/types.index.d.ts +2 -0
- package/esm/typings/src/book-2.0/agent-source/AgentModelRequirements.d.ts +3 -0
- package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirements.d.ts +3 -25
- package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.d.ts +1 -26
- package/esm/typings/src/book-2.0/commitments/MODEL/MODEL.d.ts +23 -2
- package/esm/typings/src/book-2.0/commitments/NOTE/NOTE.d.ts +14 -2
- package/esm/typings/src/book-2.0/commitments/index.d.ts +1 -1
- package/esm/typings/src/book-components/BookEditor/BookEditor.d.ts +5 -0
- package/esm/typings/src/book-components/BookEditor/BookEditorInner.d.ts +1 -0
- package/esm/typings/src/book-components/Chat/utils/parseMessageButtons.d.ts +22 -0
- package/esm/typings/src/formats/csv/CsvFormatError.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsConfigurationFromEnv.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForTestingAndScriptsAndPlayground.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/utils/removeUnsupportedModelRequirements.d.ts +25 -0
- package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionTools.d.ts +15 -8
- package/esm/typings/src/remote-server/openapi-types.d.ts +31 -31
- package/esm/typings/src/types/ModelRequirements.d.ts +2 -4
- package/esm/typings/src/utils/color/utils/colorSaturation.d.ts +1 -1
- package/esm/typings/src/utils/editable/edit-pipeline-string/addPipelineCommand.d.ts +1 -1
- package/esm/typings/src/version.d.ts +1 -1
- package/package.json +2 -2
- package/umd/index.umd.js +279 -132
- package/umd/index.umd.js.map +1 -1
package/esm/index.es.js
CHANGED
@@ -36,7 +36,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.101.0-14';
+const PROMPTBOOK_ENGINE_VERSION = '0.101.0-16';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -2814,7 +2814,7 @@ const OPENAI_MODELS = exportJson({
         modelVariant: 'CHAT',
         modelTitle: 'gpt-5-mini',
         modelName: 'gpt-5-mini',
-        modelDescription:
+        modelDescription: 'A faster, cost-efficient version of GPT-5 for well-defined tasks with 200K context window. Maintains core GPT-5 capabilities while offering 5x faster inference and significantly lower costs. Features enhanced instruction following and reduced latency for production applications requiring quick responses with high quality.',
         pricing: {
             prompt: pricing(`$0.25 / 1M tokens`),
             output: pricing(`$2.00 / 1M tokens`),
@@ -2826,7 +2826,7 @@ const OPENAI_MODELS = exportJson({
         modelVariant: 'CHAT',
         modelTitle: 'gpt-5-nano',
         modelName: 'gpt-5-nano',
-        modelDescription:
+        modelDescription: 'The fastest, most cost-efficient version of GPT-5 with 200K context window. Optimized for summarization, classification, and simple reasoning tasks. Features 10x faster inference than base GPT-5 while maintaining good quality for straightforward applications. Ideal for high-volume, cost-sensitive deployments.',
         pricing: {
             prompt: pricing(`$0.05 / 1M tokens`),
             output: pricing(`$0.40 / 1M tokens`),
@@ -2838,7 +2838,7 @@ const OPENAI_MODELS = exportJson({
         modelVariant: 'CHAT',
         modelTitle: 'gpt-4.1',
         modelName: 'gpt-4.1',
-        modelDescription:
+        modelDescription: 'Smartest non-reasoning model with 128K context window. Enhanced version of GPT-4 with improved instruction following, better factual accuracy, and reduced hallucinations. Features advanced function calling capabilities and superior performance on coding tasks. Ideal for applications requiring high intelligence without reasoning overhead.',
         pricing: {
             prompt: pricing(`$3.00 / 1M tokens`),
             output: pricing(`$12.00 / 1M tokens`),
@@ -2850,7 +2850,7 @@ const OPENAI_MODELS = exportJson({
         modelVariant: 'CHAT',
         modelTitle: 'gpt-4.1-mini',
         modelName: 'gpt-4.1-mini',
-        modelDescription:
+        modelDescription: 'Smaller, faster version of GPT-4.1 with 128K context window. Balances intelligence and efficiency with 3x faster inference than base GPT-4.1. Maintains strong capabilities across text generation, reasoning, and coding while offering better cost-performance ratio for most applications.',
         pricing: {
             prompt: pricing(`$0.80 / 1M tokens`),
             output: pricing(`$3.20 / 1M tokens`),
@@ -2862,7 +2862,7 @@ const OPENAI_MODELS = exportJson({
         modelVariant: 'CHAT',
         modelTitle: 'gpt-4.1-nano',
         modelName: 'gpt-4.1-nano',
-        modelDescription:
+        modelDescription: 'Fastest, most cost-efficient version of GPT-4.1 with 128K context window. Optimized for high-throughput applications requiring good quality at minimal cost. Features 5x faster inference than GPT-4.1 while maintaining adequate performance for most general-purpose tasks.',
         pricing: {
             prompt: pricing(`$0.20 / 1M tokens`),
             output: pricing(`$0.80 / 1M tokens`),
@@ -2874,7 +2874,7 @@ const OPENAI_MODELS = exportJson({
         modelVariant: 'CHAT',
         modelTitle: 'o3',
         modelName: 'o3',
-        modelDescription:
+        modelDescription: 'Advanced reasoning model with 128K context window specializing in complex logical, mathematical, and analytical tasks. Successor to o1 with enhanced step-by-step problem-solving capabilities and superior performance on STEM-focused problems. Ideal for professional applications requiring deep analytical thinking and precise reasoning.',
         pricing: {
             prompt: pricing(`$15.00 / 1M tokens`),
             output: pricing(`$60.00 / 1M tokens`),
@@ -2886,7 +2886,7 @@ const OPENAI_MODELS = exportJson({
         modelVariant: 'CHAT',
         modelTitle: 'o3-pro',
         modelName: 'o3-pro',
-        modelDescription:
+        modelDescription: 'Enhanced version of o3 with more compute allocated for better responses on the most challenging problems. Features extended reasoning time and improved accuracy on complex analytical tasks. Designed for applications where maximum reasoning quality is more important than response speed.',
         pricing: {
             prompt: pricing(`$30.00 / 1M tokens`),
             output: pricing(`$120.00 / 1M tokens`),
@@ -2898,7 +2898,7 @@ const OPENAI_MODELS = exportJson({
         modelVariant: 'CHAT',
         modelTitle: 'o4-mini',
         modelName: 'o4-mini',
-        modelDescription:
+        modelDescription: 'Fast, cost-efficient reasoning model with 128K context window. Successor to o1-mini with improved analytical capabilities while maintaining speed advantages. Features enhanced mathematical reasoning and logical problem-solving at significantly lower cost than full reasoning models.',
         pricing: {
             prompt: pricing(`$4.00 / 1M tokens`),
             output: pricing(`$16.00 / 1M tokens`),
@@ -2910,7 +2910,7 @@ const OPENAI_MODELS = exportJson({
         modelVariant: 'CHAT',
         modelTitle: 'o3-deep-research',
         modelName: 'o3-deep-research',
-        modelDescription:
+        modelDescription: 'Most powerful deep research model with 128K context window. Specialized for comprehensive research tasks, literature analysis, and complex information synthesis. Features advanced citation capabilities and enhanced factual accuracy for academic and professional research applications.',
         pricing: {
             prompt: pricing(`$25.00 / 1M tokens`),
             output: pricing(`$100.00 / 1M tokens`),
@@ -2922,7 +2922,7 @@ const OPENAI_MODELS = exportJson({
         modelVariant: 'CHAT',
         modelTitle: 'o4-mini-deep-research',
         modelName: 'o4-mini-deep-research',
-        modelDescription:
+        modelDescription: 'Faster, more affordable deep research model with 128K context window. Balances research capabilities with cost efficiency, offering good performance on literature review, fact-checking, and information synthesis tasks at a more accessible price point.',
         pricing: {
             prompt: pricing(`$12.00 / 1M tokens`),
             output: pricing(`$48.00 / 1M tokens`),
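Note: the pricing strings above are parsed by the package's `pricing()` helper; the cost arithmetic itself is plain per-million-token multiplication. A minimal sketch (the `estimateCost` helper is hypothetical, not part of the package):

    function estimateCost(promptTokens, outputTokens, promptRatePerMillion, outputRatePerMillion) {
        // Rates are quoted per 1M tokens, e.g. gpt-5-mini: $0.25 prompt / $2.00 output
        return (promptTokens / 1e6) * promptRatePerMillion + (outputTokens / 1e6) * outputRatePerMillion;
    }

    // 10,000 prompt tokens + 2,000 output tokens on gpt-5-mini:
    // 0.01 * $0.25 + 0.002 * $2.00 = $0.0065
    console.log(estimateCost(10000, 2000, 0.25, 2.0)); // 0.0065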
@@ -4622,6 +4622,62 @@ resultContent, rawResponse) {
  * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
  */

+/**
+ * Parses an OpenAI error message to identify which parameter is unsupported
+ *
+ * @param errorMessage The error message from OpenAI API
+ * @returns The parameter name that is unsupported, or null if not an unsupported parameter error
+ * @private utility of LLM Tools
+ */
+function parseUnsupportedParameterError(errorMessage) {
+    // Pattern to match "Unsupported value: 'parameter' does not support ..."
+    const unsupportedValueMatch = errorMessage.match(/Unsupported value:\s*'([^']+)'\s*does not support/i);
+    if (unsupportedValueMatch === null || unsupportedValueMatch === void 0 ? void 0 : unsupportedValueMatch[1]) {
+        return unsupportedValueMatch[1];
+    }
+    // Pattern to match "'parameter' of type ... is not supported with this model"
+    const parameterTypeMatch = errorMessage.match(/'([^']+)'\s*of type.*is not supported with this model/i);
+    if (parameterTypeMatch === null || parameterTypeMatch === void 0 ? void 0 : parameterTypeMatch[1]) {
+        return parameterTypeMatch[1];
+    }
+    return null;
+}
+/**
+ * Creates a copy of model requirements with the specified parameter removed
+ *
+ * @param modelRequirements Original model requirements
+ * @param unsupportedParameter The parameter to remove
+ * @returns New model requirements without the unsupported parameter
+ * @private utility of LLM Tools
+ */
+function removeUnsupportedModelRequirement(modelRequirements, unsupportedParameter) {
+    const newRequirements = { ...modelRequirements };
+    // Map of parameter names that might appear in error messages to ModelRequirements properties
+    const parameterMap = {
+        temperature: 'temperature',
+        max_tokens: 'maxTokens',
+        maxTokens: 'maxTokens',
+        seed: 'seed',
+    };
+    const propertyToRemove = parameterMap[unsupportedParameter];
+    if (propertyToRemove && propertyToRemove in newRequirements) {
+        delete newRequirements[propertyToRemove];
+    }
+    return newRequirements;
+}
+/**
+ * Checks if an error is an "Unsupported value" error from OpenAI
+ * @param error The error to check
+ * @returns true if this is an unsupported parameter error
+ * @private utility of LLM Tools
+ */
+function isUnsupportedParameterError(error) {
+    const errorMessage = error.message.toLowerCase();
+    return (errorMessage.includes('unsupported value:') ||
+        errorMessage.includes('is not supported with this model') ||
+        errorMessage.includes('does not support'));
+}
+
 /**
  * Execution Tools for calling OpenAI API or other OpenAI compatible provider
  *
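Note: the two regexes in `parseUnsupportedParameterError` cover the two error shapes OpenAI-compatible servers commonly return for rejected sampling parameters. Sample inputs (illustrative messages; exact provider wording may differ):

    // Matches the "Unsupported value: ..." shape:
    parseUnsupportedParameterError("Unsupported value: 'temperature' does not support 0.7 with this model.");
    // → 'temperature'

    // Matches the "'param' of type ... is not supported with this model" shape:
    parseUnsupportedParameterError("'max_tokens' of type integer is not supported with this model.");
    // → 'max_tokens'

    // Anything else is not treated as an unsupported-parameter error:
    parseUnsupportedParameterError('Rate limit exceeded');
    // → null

`removeUnsupportedModelRequirement` then maps the wire-level name (`max_tokens`) back to the `ModelRequirements` property (`maxTokens`) before deleting it.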
@@ -4639,6 +4695,10 @@ class OpenAiCompatibleExecutionTools {
          * OpenAI API client.
          */
         this.client = null;
+        /**
+         * Tracks models and parameters that have already been retried to prevent infinite loops
+         */
+        this.retriedUnsupportedParameters = new Set();
         // TODO: Allow configuring rate limits via options
         this.limiter = new Bottleneck({
             minTime: 60000 / (this.options.maxRequestsPerMinute || DEFAULT_MAX_REQUESTS_PER_MINUTE),
@@ -4700,21 +4760,27 @@ class OpenAiCompatibleExecutionTools {
      * Calls OpenAI compatible API to use a chat model.
      */
     async callChatModel(prompt) {
+        return this.callChatModelWithRetry(prompt, prompt.modelRequirements);
+    }
+    /**
+     * Internal method that handles parameter retry for chat model calls
+     */
+    async callChatModelWithRetry(prompt, currentModelRequirements) {
         var _a;
         if (this.options.isVerbose) {
-            console.info(`💬 ${this.title} callChatModel call`, { prompt });
+            console.info(`💬 ${this.title} callChatModel call`, { prompt, currentModelRequirements });
         }
-        const { content, parameters,
+        const { content, parameters, format } = prompt;
         const client = await this.getClient();
         // TODO: [☂] Use here more modelRequirements
-        if (
+        if (currentModelRequirements.modelVariant !== 'CHAT') {
             throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
         }
-        const modelName =
+        const modelName = currentModelRequirements.modelName || this.getDefaultChatModel().modelName;
         const modelSettings = {
             model: modelName,
-            max_tokens:
-            temperature:
+            max_tokens: currentModelRequirements.maxTokens,
+            temperature: currentModelRequirements.temperature,
             // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
             // <- Note: [🧆]
         }; // <- TODO: [💩] Guard here types better
@@ -4729,12 +4795,12 @@ class OpenAiCompatibleExecutionTools {
         const rawRequest = {
             ...modelSettings,
             messages: [
-                ...(
+                ...(currentModelRequirements.systemMessage === undefined
                     ? []
                     : [
                         {
                             role: 'system',
-                            content:
+                            content: currentModelRequirements.systemMessage,
                         },
                     ]),
                 {
@@ -4748,69 +4814,110 @@ class OpenAiCompatibleExecutionTools {
         if (this.options.isVerbose) {
             console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
         }
-
-
-
-
+        try {
+            const rawResponse = await this.limiter
+                .schedule(() => this.makeRequestWithNetworkRetry(() => client.chat.completions.create(rawRequest)))
+                .catch((error) => {
+                assertsError(error);
+                if (this.options.isVerbose) {
+                    console.info(colors.bgRed('error'), error);
+                }
+                throw error;
+            });
             if (this.options.isVerbose) {
-                console.info(colors.
+                console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
             }
-
-
-
-
-
-
-
-
-
-
-
-
+            const complete = $getCurrentDate();
+            if (!rawResponse.choices[0]) {
+                throw new PipelineExecutionError(`No choises from ${this.title}`);
+            }
+            if (rawResponse.choices.length > 1) {
+                // TODO: This should be maybe only warning
+                throw new PipelineExecutionError(`More than one choise from ${this.title}`);
+            }
+            const resultContent = rawResponse.choices[0].message.content;
+            const usage = this.computeUsage(content || '', resultContent || '', rawResponse);
+            if (resultContent === null) {
+                throw new PipelineExecutionError(`No response message from ${this.title}`);
+            }
+            return exportJson({
+                name: 'promptResult',
+                message: `Result of \`OpenAiCompatibleExecutionTools.callChatModel\``,
+                order: [],
+                value: {
+                    content: resultContent,
+                    modelName: rawResponse.model || modelName,
+                    timing: {
+                        start,
+                        complete,
+                    },
+                    usage,
+                    rawPromptContent,
+                    rawRequest,
+                    rawResponse,
+                    // <- [🗯]
+                },
+            });
         }
-
-
-
-
+        catch (error) {
+            assertsError(error);
+            // Check if this is an unsupported parameter error
+            if (!isUnsupportedParameterError(error)) {
+                throw error;
+            }
+            // Parse which parameter is unsupported
+            const unsupportedParameter = parseUnsupportedParameterError(error.message);
+            if (!unsupportedParameter) {
+                if (this.options.isVerbose) {
+                    console.warn(colors.bgYellow('Warning'), 'Could not parse unsupported parameter from error:', error.message);
+                }
+                throw error;
+            }
+            // Create a unique key for this model + parameter combination to prevent infinite loops
+            const retryKey = `${modelName}-${unsupportedParameter}`;
+            if (this.retriedUnsupportedParameters.has(retryKey)) {
+                // Already retried this parameter, throw the error
+                if (this.options.isVerbose) {
+                    console.warn(colors.bgRed('Error'), `Parameter '${unsupportedParameter}' for model '${modelName}' already retried once, throwing error:`, error.message);
+                }
+                throw error;
+            }
+            // Mark this parameter as retried
+            this.retriedUnsupportedParameters.add(retryKey);
+            // Log warning in verbose mode
+            if (this.options.isVerbose) {
+                console.warn(colors.bgYellow('Warning'), `Removing unsupported parameter '${unsupportedParameter}' for model '${modelName}' and retrying request`);
+            }
+            // Remove the unsupported parameter and retry
+            const modifiedModelRequirements = removeUnsupportedModelRequirement(currentModelRequirements, unsupportedParameter);
+            return this.callChatModelWithRetry(prompt, modifiedModelRequirements);
         }
-        return exportJson({
-            name: 'promptResult',
-            message: `Result of \`OpenAiCompatibleExecutionTools.callChatModel\``,
-            order: [],
-            value: {
-                content: resultContent,
-                modelName: rawResponse.model || modelName,
-                timing: {
-                    start,
-                    complete,
-                },
-                usage,
-                rawPromptContent,
-                rawRequest,
-                rawResponse,
-                // <- [🗯]
-            },
-        });
     }
     /**
      * Calls OpenAI API to use a complete model.
      */
     async callCompletionModel(prompt) {
+        return this.callCompletionModelWithRetry(prompt, prompt.modelRequirements);
+    }
+    /**
+     * Internal method that handles parameter retry for completion model calls
+     */
+    async callCompletionModelWithRetry(prompt, currentModelRequirements) {
         var _a;
         if (this.options.isVerbose) {
-            console.info(`🖋 ${this.title} callCompletionModel call`, { prompt });
+            console.info(`🖋 ${this.title} callCompletionModel call`, { prompt, currentModelRequirements });
         }
-        const { content, parameters
+        const { content, parameters } = prompt;
         const client = await this.getClient();
         // TODO: [☂] Use here more modelRequirements
-        if (
+        if (currentModelRequirements.modelVariant !== 'COMPLETION') {
             throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
         }
-        const modelName =
+        const modelName = currentModelRequirements.modelName || this.getDefaultCompletionModel().modelName;
         const modelSettings = {
             model: modelName,
-            max_tokens:
-            temperature:
+            max_tokens: currentModelRequirements.maxTokens,
+            temperature: currentModelRequirements.temperature,
             // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
             // <- Note: [🧆]
         };
@@ -4824,46 +4931,81 @@ class OpenAiCompatibleExecutionTools {
         if (this.options.isVerbose) {
             console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
         }
-
-
-
-
+        try {
+            const rawResponse = await this.limiter
+                .schedule(() => this.makeRequestWithNetworkRetry(() => client.completions.create(rawRequest)))
+                .catch((error) => {
+                assertsError(error);
+                if (this.options.isVerbose) {
+                    console.info(colors.bgRed('error'), error);
+                }
+                throw error;
+            });
             if (this.options.isVerbose) {
-                console.info(colors.
+                console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
             }
-
-
-
-
-
-
-
-
+            const complete = $getCurrentDate();
+            if (!rawResponse.choices[0]) {
+                throw new PipelineExecutionError(`No choises from ${this.title}`);
+            }
+            if (rawResponse.choices.length > 1) {
+                // TODO: This should be maybe only warning
+                throw new PipelineExecutionError(`More than one choise from ${this.title}`);
+            }
+            const resultContent = rawResponse.choices[0].text;
+            const usage = this.computeUsage(content || '', resultContent || '', rawResponse);
+            return exportJson({
+                name: 'promptResult',
+                message: `Result of \`OpenAiCompatibleExecutionTools.callCompletionModel\``,
+                order: [],
+                value: {
+                    content: resultContent,
+                    modelName: rawResponse.model || modelName,
+                    timing: {
+                        start,
+                        complete,
+                    },
+                    usage,
+                    rawPromptContent,
+                    rawRequest,
+                    rawResponse,
+                    // <- [🗯]
+                },
+            });
         }
-
-
-
+        catch (error) {
+            assertsError(error);
+            // Check if this is an unsupported parameter error
+            if (!isUnsupportedParameterError(error)) {
+                throw error;
+            }
+            // Parse which parameter is unsupported
+            const unsupportedParameter = parseUnsupportedParameterError(error.message);
+            if (!unsupportedParameter) {
+                if (this.options.isVerbose) {
+                    console.warn(colors.bgYellow('Warning'), 'Could not parse unsupported parameter from error:', error.message);
+                }
+                throw error;
+            }
+            // Create a unique key for this model + parameter combination to prevent infinite loops
+            const retryKey = `${modelName}-${unsupportedParameter}`;
+            if (this.retriedUnsupportedParameters.has(retryKey)) {
+                // Already retried this parameter, throw the error
+                if (this.options.isVerbose) {
+                    console.warn(colors.bgRed('Error'), `Parameter '${unsupportedParameter}' for model '${modelName}' already retried once, throwing error:`, error.message);
+                }
+                throw error;
+            }
+            // Mark this parameter as retried
+            this.retriedUnsupportedParameters.add(retryKey);
+            // Log warning in verbose mode
+            if (this.options.isVerbose) {
+                console.warn(colors.bgYellow('Warning'), `Removing unsupported parameter '${unsupportedParameter}' for model '${modelName}' and retrying request`);
+            }
+            // Remove the unsupported parameter and retry
+            const modifiedModelRequirements = removeUnsupportedModelRequirement(currentModelRequirements, unsupportedParameter);
+            return this.callCompletionModelWithRetry(prompt, modifiedModelRequirements);
         }
-        const resultContent = rawResponse.choices[0].text;
-        const usage = this.computeUsage(content || '', resultContent || '', rawResponse);
-        return exportJson({
-            name: 'promptResult',
-            message: `Result of \`OpenAiCompatibleExecutionTools.callCompletionModel\``,
-            order: [],
-            value: {
-                content: resultContent,
-                modelName: rawResponse.model || modelName,
-                timing: {
-                    start,
-                    complete,
-                },
-                usage,
-                rawPromptContent,
-                rawRequest,
-                rawResponse,
-                // <- [🗯]
-            },
-        });
     }
     /**
      * Calls OpenAI compatible API to use a embedding model
@@ -4889,7 +5031,7 @@ class OpenAiCompatibleExecutionTools {
             console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
         }
         const rawResponse = await this.limiter
-            .schedule(() => this.
+            .schedule(() => this.makeRequestWithNetworkRetry(() => client.embeddings.create(rawRequest)))
             .catch((error) => {
             assertsError(error);
             if (this.options.isVerbose) {
@@ -4951,7 +5093,7 @@ class OpenAiCompatibleExecutionTools {
     /**
      * Makes a request with retry logic for network errors like ECONNRESET
      */
-    async
+    async makeRequestWithNetworkRetry(requestFn) {
         let lastError;
         for (let attempt = 1; attempt <= CONNECTION_RETRIES_LIMIT; attempt++) {
             try {
@@ -4963,8 +5105,8 @@ class OpenAiCompatibleExecutionTools {
                 // Check if this is a retryable network error
                 const isRetryableError = this.isRetryableNetworkError(error);
                 if (!isRetryableError || attempt === CONNECTION_RETRIES_LIMIT) {
-                    if (this.options.isVerbose) {
-                        console.info(colors.bgRed('Final error after retries'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}:`, error);
+                    if (this.options.isVerbose && this.isRetryableNetworkError(error)) {
+                        console.info(colors.bgRed('Final network error after retries'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}:`, error);
                     }
                     throw error;
                 }
@@ -4974,7 +5116,7 @@ class OpenAiCompatibleExecutionTools {
                 const jitterDelay = Math.random() * 500; // Add some randomness
                 const totalDelay = backoffDelay + jitterDelay;
                 if (this.options.isVerbose) {
-                    console.info(colors.bgYellow('Retrying request'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}, waiting ${Math.round(totalDelay)}ms:`, error.message);
+                    console.info(colors.bgYellow('Retrying network request'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}, waiting ${Math.round(totalDelay)}ms:`, error.message);
                 }
                 // Wait before retrying
                 await new Promise((resolve) => setTimeout(resolve, totalDelay));
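Note: the retry delay combines an exponential backoff with up to 500 ms of random jitter, as the hunks above show. The computation of `backoffDelay` is outside this diff; assuming a conventional `base * 2^(attempt - 1)` schedule with a 1000 ms base (an assumption, not confirmed by the diff), the delays look like:

    function totalDelayForAttempt(attempt) {
        const backoffDelay = 1000 * 2 ** (attempt - 1); // assumed exponential schedule; base not shown in this diff
        const jitterDelay = Math.random() * 500;        // matches the jitter in the code above
        return backoffDelay + jitterDelay;
    }
    // attempt 1 → ~1000-1500 ms, attempt 2 → ~2000-2500 ms, attempt 3 → ~4000-4500 ms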
@@ -6355,11 +6497,12 @@ async function getScraperIntermediateSource(source, options) {
     catch (error) {
         // Note: If we can't create cache directory, continue without it
         // This handles read-only filesystems, permission issues, and missing parent directories
-        if (error instanceof Error &&
-            error.message.includes('
-
-
-
+        if (error instanceof Error &&
+            (error.message.includes('EROFS') ||
+                error.message.includes('read-only') ||
+                error.message.includes('EACCES') ||
+                error.message.includes('EPERM') ||
+                error.message.includes('ENOENT'))) ;
         else {
             // Re-throw other unexpected errors
             throw error;
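Note: the same five-way filesystem guard (`EROFS`, `read-only`, `EACCES`, `EPERM`, `ENOENT`) now appears at every best-effort cache-write site in this release (see the `MarkitdownScraper`, `WebsiteScraper`, `FileCacheStorage`, and `$getCompiledBook` hunks below). As a usage sketch of the pattern (the `tryWriteCache` helper is hypothetical, not part of the package):

    import { promises as fs } from 'fs';

    async function tryWriteCache(path, data) {
        try {
            await fs.writeFile(path, data);
        }
        catch (error) {
            if (error instanceof Error &&
                (error.message.includes('EROFS') ||
                    error.message.includes('read-only') ||
                    error.message.includes('EACCES') ||
                    error.message.includes('EPERM') ||
                    error.message.includes('ENOENT'))) {
                return; // Caching is optional - swallow filesystem errors (e.g. on read-only hosts like Vercel)
            }
            throw error; // Re-throw anything unexpected
        }
    }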
@@ -10555,13 +10698,13 @@ function createPipelineExecutor(options) {
         // Calculate and update tldr based on pipeline progress
         const cv = newOngoingResult;
         // Calculate progress based on parameters resolved vs total parameters
-        const totalParameters = pipeline.parameters.filter(p => !p.isInput).length;
+        const totalParameters = pipeline.parameters.filter((p) => !p.isInput).length;
         let resolvedParameters = 0;
         let currentTaskTitle = '';
         // Get the resolved parameters from output parameters
         if (cv === null || cv === void 0 ? void 0 : cv.outputParameters) {
             // Count how many output parameters have non-empty values
-            resolvedParameters = Object.values(cv.outputParameters).filter(value => value !== undefined && value !== null && String(value).trim() !== '').length;
+            resolvedParameters = Object.values(cv.outputParameters).filter((value) => value !== undefined && value !== null && String(value).trim() !== '').length;
         }
         // Try to determine current task from execution report
         if (((_a = cv === null || cv === void 0 ? void 0 : cv.executionReport) === null || _a === void 0 ? void 0 : _a.promptExecutions) && cv.executionReport.promptExecutions.length > 0) {
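Note: pipeline progress (the tldr) is derived from how many non-input parameters already have non-empty output values. A small worked example using the same filters as above (sample data, not from the package):

    const parameters = [
        { name: 'topic', isInput: true },
        { name: 'title', isInput: false },
        { name: 'summary', isInput: false },
    ];
    const outputParameters = { title: 'Promptbook', summary: '' };
    const totalParameters = parameters.filter((p) => !p.isInput).length; // 2
    const resolvedParameters = Object.values(outputParameters).filter((value) => value !== undefined && value !== null && String(value).trim() !== '').length; // 1 (the empty string is not counted)
    console.log(`${resolvedParameters}/${totalParameters}`); // "1/2"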
@@ -11450,11 +11593,12 @@ class MarkitdownScraper {
     catch (error) {
         // Note: If we can't write to cache, we'll continue without caching
         // This handles read-only filesystems like Vercel
-        if (error instanceof Error &&
-            error.message.includes('
-
-
-
+        if (error instanceof Error &&
+            (error.message.includes('EROFS') ||
+                error.message.includes('read-only') ||
+                error.message.includes('EACCES') ||
+                error.message.includes('EPERM') ||
+                error.message.includes('ENOENT'))) ;
         else {
             // Re-throw other unexpected errors
             throw error;
@@ -11754,11 +11898,12 @@ class WebsiteScraper {
     catch (error) {
         // Note: If we can't write to cache, we'll continue without caching
         // This handles read-only filesystems like Vercel
-        if (error instanceof Error &&
-            error.message.includes('
-
-
-
+        if (error instanceof Error &&
+            (error.message.includes('EROFS') ||
+                error.message.includes('read-only') ||
+                error.message.includes('EACCES') ||
+                error.message.includes('EPERM') ||
+                error.message.includes('ENOENT'))) ;
         else {
             // Re-throw other unexpected errors
             throw error;
@@ -12512,11 +12657,12 @@ class FileCacheStorage {
     catch (error) {
         // Note: If we can't write to cache, silently ignore the error
         // This handles read-only filesystems, permission issues, and missing parent directories
-        if (error instanceof Error &&
-            error.message.includes('
-
-
-
+        if (error instanceof Error &&
+            (error.message.includes('EROFS') ||
+                error.message.includes('read-only') ||
+                error.message.includes('EACCES') ||
+                error.message.includes('EPERM') ||
+                error.message.includes('ENOENT'))) {
             // Silently ignore filesystem errors - caching is optional
             return;
         }
@@ -17400,11 +17546,12 @@ async function $getCompiledBook(tools, pipelineSource, options) {
     catch (error) {
         // Note: Ignore filesystem errors (like EROFS on read-only systems like Vercel)
         // The compiled book can still be used even if it can't be cached
-        if (error instanceof Error &&
-            error.message.includes('
-
-
-
+        if (error instanceof Error &&
+            (error.message.includes('EROFS') ||
+                error.message.includes('read-only') ||
+                error.message.includes('EACCES') ||
+                error.message.includes('EPERM') ||
+                error.message.includes('ENOENT'))) ;
         else {
             // Re-throw other unexpected errors
             throw error;