@promptbook/wizard 0.101.0-15 → 0.101.0-16

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (25):
  1. package/esm/index.es.js +49 -44
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/components.index.d.ts +4 -0
  4. package/esm/typings/src/_packages/core.index.d.ts +2 -0
  5. package/esm/typings/src/_packages/types.index.d.ts +2 -0
  6. package/esm/typings/src/book-2.0/agent-source/AgentModelRequirements.d.ts +3 -0
  7. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirements.d.ts +0 -20
  8. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.d.ts +1 -26
  9. package/esm/typings/src/book-2.0/commitments/MODEL/MODEL.d.ts +23 -2
  10. package/esm/typings/src/book-2.0/commitments/NOTE/NOTE.d.ts +14 -2
  11. package/esm/typings/src/book-2.0/commitments/index.d.ts +1 -1
  12. package/esm/typings/src/book-components/BookEditor/BookEditor.d.ts +5 -0
  13. package/esm/typings/src/book-components/BookEditor/BookEditorInner.d.ts +1 -0
  14. package/esm/typings/src/book-components/Chat/utils/parseMessageButtons.d.ts +22 -0
  15. package/esm/typings/src/formats/csv/CsvFormatError.d.ts +1 -1
  16. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsConfigurationFromEnv.d.ts +1 -1
  17. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForTestingAndScriptsAndPlayground.d.ts +1 -1
  18. package/esm/typings/src/remote-server/openapi-types.d.ts +31 -31
  19. package/esm/typings/src/types/ModelRequirements.d.ts +2 -4
  20. package/esm/typings/src/utils/color/utils/colorSaturation.d.ts +1 -1
  21. package/esm/typings/src/utils/editable/edit-pipeline-string/addPipelineCommand.d.ts +1 -1
  22. package/esm/typings/src/version.d.ts +1 -1
  23. package/package.json +2 -2
  24. package/umd/index.umd.js +49 -44
  25. package/umd/index.umd.js.map +1 -1
package/esm/index.es.js CHANGED
@@ -36,7 +36,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
36
36
  * @generated
37
37
  * @see https://github.com/webgptorg/promptbook
38
38
  */
39
- const PROMPTBOOK_ENGINE_VERSION = '0.101.0-15';
39
+ const PROMPTBOOK_ENGINE_VERSION = '0.101.0-16';
40
40
  /**
41
41
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
42
42
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -2814,7 +2814,7 @@ const OPENAI_MODELS = exportJson({
2814
2814
  modelVariant: 'CHAT',
2815
2815
  modelTitle: 'gpt-5-mini',
2816
2816
  modelName: 'gpt-5-mini',
2817
- modelDescription: "A faster, cost-efficient version of GPT-5 for well-defined tasks with 200K context window. Maintains core GPT-5 capabilities while offering 5x faster inference and significantly lower costs. Features enhanced instruction following and reduced latency for production applications requiring quick responses with high quality.",
2817
+ modelDescription: 'A faster, cost-efficient version of GPT-5 for well-defined tasks with 200K context window. Maintains core GPT-5 capabilities while offering 5x faster inference and significantly lower costs. Features enhanced instruction following and reduced latency for production applications requiring quick responses with high quality.',
2818
2818
  pricing: {
2819
2819
  prompt: pricing(`$0.25 / 1M tokens`),
2820
2820
  output: pricing(`$2.00 / 1M tokens`),
@@ -2826,7 +2826,7 @@ const OPENAI_MODELS = exportJson({
2826
2826
  modelVariant: 'CHAT',
2827
2827
  modelTitle: 'gpt-5-nano',
2828
2828
  modelName: 'gpt-5-nano',
2829
- modelDescription: "The fastest, most cost-efficient version of GPT-5 with 200K context window. Optimized for summarization, classification, and simple reasoning tasks. Features 10x faster inference than base GPT-5 while maintaining good quality for straightforward applications. Ideal for high-volume, cost-sensitive deployments.",
2829
+ modelDescription: 'The fastest, most cost-efficient version of GPT-5 with 200K context window. Optimized for summarization, classification, and simple reasoning tasks. Features 10x faster inference than base GPT-5 while maintaining good quality for straightforward applications. Ideal for high-volume, cost-sensitive deployments.',
2830
2830
  pricing: {
2831
2831
  prompt: pricing(`$0.05 / 1M tokens`),
2832
2832
  output: pricing(`$0.40 / 1M tokens`),
@@ -2838,7 +2838,7 @@ const OPENAI_MODELS = exportJson({
2838
2838
  modelVariant: 'CHAT',
2839
2839
  modelTitle: 'gpt-4.1',
2840
2840
  modelName: 'gpt-4.1',
2841
- modelDescription: "Smartest non-reasoning model with 128K context window. Enhanced version of GPT-4 with improved instruction following, better factual accuracy, and reduced hallucinations. Features advanced function calling capabilities and superior performance on coding tasks. Ideal for applications requiring high intelligence without reasoning overhead.",
2841
+ modelDescription: 'Smartest non-reasoning model with 128K context window. Enhanced version of GPT-4 with improved instruction following, better factual accuracy, and reduced hallucinations. Features advanced function calling capabilities and superior performance on coding tasks. Ideal for applications requiring high intelligence without reasoning overhead.',
2842
2842
  pricing: {
2843
2843
  prompt: pricing(`$3.00 / 1M tokens`),
2844
2844
  output: pricing(`$12.00 / 1M tokens`),
@@ -2850,7 +2850,7 @@ const OPENAI_MODELS = exportJson({
2850
2850
  modelVariant: 'CHAT',
2851
2851
  modelTitle: 'gpt-4.1-mini',
2852
2852
  modelName: 'gpt-4.1-mini',
2853
- modelDescription: "Smaller, faster version of GPT-4.1 with 128K context window. Balances intelligence and efficiency with 3x faster inference than base GPT-4.1. Maintains strong capabilities across text generation, reasoning, and coding while offering better cost-performance ratio for most applications.",
2853
+ modelDescription: 'Smaller, faster version of GPT-4.1 with 128K context window. Balances intelligence and efficiency with 3x faster inference than base GPT-4.1. Maintains strong capabilities across text generation, reasoning, and coding while offering better cost-performance ratio for most applications.',
2854
2854
  pricing: {
2855
2855
  prompt: pricing(`$0.80 / 1M tokens`),
2856
2856
  output: pricing(`$3.20 / 1M tokens`),
@@ -2862,7 +2862,7 @@ const OPENAI_MODELS = exportJson({
2862
2862
  modelVariant: 'CHAT',
2863
2863
  modelTitle: 'gpt-4.1-nano',
2864
2864
  modelName: 'gpt-4.1-nano',
2865
- modelDescription: "Fastest, most cost-efficient version of GPT-4.1 with 128K context window. Optimized for high-throughput applications requiring good quality at minimal cost. Features 5x faster inference than GPT-4.1 while maintaining adequate performance for most general-purpose tasks.",
2865
+ modelDescription: 'Fastest, most cost-efficient version of GPT-4.1 with 128K context window. Optimized for high-throughput applications requiring good quality at minimal cost. Features 5x faster inference than GPT-4.1 while maintaining adequate performance for most general-purpose tasks.',
2866
2866
  pricing: {
2867
2867
  prompt: pricing(`$0.20 / 1M tokens`),
2868
2868
  output: pricing(`$0.80 / 1M tokens`),
@@ -2874,7 +2874,7 @@ const OPENAI_MODELS = exportJson({
2874
2874
  modelVariant: 'CHAT',
2875
2875
  modelTitle: 'o3',
2876
2876
  modelName: 'o3',
2877
- modelDescription: "Advanced reasoning model with 128K context window specializing in complex logical, mathematical, and analytical tasks. Successor to o1 with enhanced step-by-step problem-solving capabilities and superior performance on STEM-focused problems. Ideal for professional applications requiring deep analytical thinking and precise reasoning.",
2877
+ modelDescription: 'Advanced reasoning model with 128K context window specializing in complex logical, mathematical, and analytical tasks. Successor to o1 with enhanced step-by-step problem-solving capabilities and superior performance on STEM-focused problems. Ideal for professional applications requiring deep analytical thinking and precise reasoning.',
2878
2878
  pricing: {
2879
2879
  prompt: pricing(`$15.00 / 1M tokens`),
2880
2880
  output: pricing(`$60.00 / 1M tokens`),
@@ -2886,7 +2886,7 @@ const OPENAI_MODELS = exportJson({
2886
2886
  modelVariant: 'CHAT',
2887
2887
  modelTitle: 'o3-pro',
2888
2888
  modelName: 'o3-pro',
2889
- modelDescription: "Enhanced version of o3 with more compute allocated for better responses on the most challenging problems. Features extended reasoning time and improved accuracy on complex analytical tasks. Designed for applications where maximum reasoning quality is more important than response speed.",
2889
+ modelDescription: 'Enhanced version of o3 with more compute allocated for better responses on the most challenging problems. Features extended reasoning time and improved accuracy on complex analytical tasks. Designed for applications where maximum reasoning quality is more important than response speed.',
2890
2890
  pricing: {
2891
2891
  prompt: pricing(`$30.00 / 1M tokens`),
2892
2892
  output: pricing(`$120.00 / 1M tokens`),
@@ -2898,7 +2898,7 @@ const OPENAI_MODELS = exportJson({
2898
2898
  modelVariant: 'CHAT',
2899
2899
  modelTitle: 'o4-mini',
2900
2900
  modelName: 'o4-mini',
2901
- modelDescription: "Fast, cost-efficient reasoning model with 128K context window. Successor to o1-mini with improved analytical capabilities while maintaining speed advantages. Features enhanced mathematical reasoning and logical problem-solving at significantly lower cost than full reasoning models.",
2901
+ modelDescription: 'Fast, cost-efficient reasoning model with 128K context window. Successor to o1-mini with improved analytical capabilities while maintaining speed advantages. Features enhanced mathematical reasoning and logical problem-solving at significantly lower cost than full reasoning models.',
2902
2902
  pricing: {
2903
2903
  prompt: pricing(`$4.00 / 1M tokens`),
2904
2904
  output: pricing(`$16.00 / 1M tokens`),
@@ -2910,7 +2910,7 @@ const OPENAI_MODELS = exportJson({
2910
2910
  modelVariant: 'CHAT',
2911
2911
  modelTitle: 'o3-deep-research',
2912
2912
  modelName: 'o3-deep-research',
2913
- modelDescription: "Most powerful deep research model with 128K context window. Specialized for comprehensive research tasks, literature analysis, and complex information synthesis. Features advanced citation capabilities and enhanced factual accuracy for academic and professional research applications.",
2913
+ modelDescription: 'Most powerful deep research model with 128K context window. Specialized for comprehensive research tasks, literature analysis, and complex information synthesis. Features advanced citation capabilities and enhanced factual accuracy for academic and professional research applications.',
2914
2914
  pricing: {
2915
2915
  prompt: pricing(`$25.00 / 1M tokens`),
2916
2916
  output: pricing(`$100.00 / 1M tokens`),
@@ -2922,7 +2922,7 @@ const OPENAI_MODELS = exportJson({
2922
2922
  modelVariant: 'CHAT',
2923
2923
  modelTitle: 'o4-mini-deep-research',
2924
2924
  modelName: 'o4-mini-deep-research',
2925
- modelDescription: "Faster, more affordable deep research model with 128K context window. Balances research capabilities with cost efficiency, offering good performance on literature review, fact-checking, and information synthesis tasks at a more accessible price point.",
2925
+ modelDescription: 'Faster, more affordable deep research model with 128K context window. Balances research capabilities with cost efficiency, offering good performance on literature review, fact-checking, and information synthesis tasks at a more accessible price point.',
2926
2926
  pricing: {
2927
2927
  prompt: pricing(`$12.00 / 1M tokens`),
2928
2928
  output: pricing(`$48.00 / 1M tokens`),
@@ -4654,10 +4654,10 @@ function removeUnsupportedModelRequirement(modelRequirements, unsupportedParamet
4654
4654
  const newRequirements = { ...modelRequirements };
4655
4655
  // Map of parameter names that might appear in error messages to ModelRequirements properties
4656
4656
  const parameterMap = {
4657
- 'temperature': 'temperature',
4658
- 'max_tokens': 'maxTokens',
4659
- 'maxTokens': 'maxTokens',
4660
- 'seed': 'seed',
4657
+ temperature: 'temperature',
4658
+ max_tokens: 'maxTokens',
4659
+ maxTokens: 'maxTokens',
4660
+ seed: 'seed',
4661
4661
  };
4662
4662
  const propertyToRemove = parameterMap[unsupportedParameter];
4663
4663
  if (propertyToRemove && propertyToRemove in newRequirements) {
@@ -4673,9 +4673,9 @@ function removeUnsupportedModelRequirement(modelRequirements, unsupportedParamet
4673
4673
  */
4674
4674
  function isUnsupportedParameterError(error) {
4675
4675
  const errorMessage = error.message.toLowerCase();
4676
- return errorMessage.includes('unsupported value:') ||
4676
+ return (errorMessage.includes('unsupported value:') ||
4677
4677
  errorMessage.includes('is not supported with this model') ||
4678
- errorMessage.includes('does not support');
4678
+ errorMessage.includes('does not support'));
4679
4679
  }
4680
4680
 
4681
4681
  /**
@@ -6497,11 +6497,12 @@ async function getScraperIntermediateSource(source, options) {
6497
6497
  catch (error) {
6498
6498
  // Note: If we can't create cache directory, continue without it
6499
6499
  // This handles read-only filesystems, permission issues, and missing parent directories
6500
- if (error instanceof Error && (error.message.includes('EROFS') ||
6501
- error.message.includes('read-only') ||
6502
- error.message.includes('EACCES') ||
6503
- error.message.includes('EPERM') ||
6504
- error.message.includes('ENOENT'))) ;
6500
+ if (error instanceof Error &&
6501
+ (error.message.includes('EROFS') ||
6502
+ error.message.includes('read-only') ||
6503
+ error.message.includes('EACCES') ||
6504
+ error.message.includes('EPERM') ||
6505
+ error.message.includes('ENOENT'))) ;
6505
6506
  else {
6506
6507
  // Re-throw other unexpected errors
6507
6508
  throw error;
@@ -10697,13 +10698,13 @@ function createPipelineExecutor(options) {
10697
10698
  // Calculate and update tldr based on pipeline progress
10698
10699
  const cv = newOngoingResult;
10699
10700
  // Calculate progress based on parameters resolved vs total parameters
10700
- const totalParameters = pipeline.parameters.filter(p => !p.isInput).length;
10701
+ const totalParameters = pipeline.parameters.filter((p) => !p.isInput).length;
10701
10702
  let resolvedParameters = 0;
10702
10703
  let currentTaskTitle = '';
10703
10704
  // Get the resolved parameters from output parameters
10704
10705
  if (cv === null || cv === void 0 ? void 0 : cv.outputParameters) {
10705
10706
  // Count how many output parameters have non-empty values
10706
- resolvedParameters = Object.values(cv.outputParameters).filter(value => value !== undefined && value !== null && String(value).trim() !== '').length;
10707
+ resolvedParameters = Object.values(cv.outputParameters).filter((value) => value !== undefined && value !== null && String(value).trim() !== '').length;
10707
10708
  }
10708
10709
  // Try to determine current task from execution report
10709
10710
  if (((_a = cv === null || cv === void 0 ? void 0 : cv.executionReport) === null || _a === void 0 ? void 0 : _a.promptExecutions) && cv.executionReport.promptExecutions.length > 0) {
@@ -11592,11 +11593,12 @@ class MarkitdownScraper {
11592
11593
  catch (error) {
11593
11594
  // Note: If we can't write to cache, we'll continue without caching
11594
11595
  // This handles read-only filesystems like Vercel
11595
- if (error instanceof Error && (error.message.includes('EROFS') ||
11596
- error.message.includes('read-only') ||
11597
- error.message.includes('EACCES') ||
11598
- error.message.includes('EPERM') ||
11599
- error.message.includes('ENOENT'))) ;
11596
+ if (error instanceof Error &&
11597
+ (error.message.includes('EROFS') ||
11598
+ error.message.includes('read-only') ||
11599
+ error.message.includes('EACCES') ||
11600
+ error.message.includes('EPERM') ||
11601
+ error.message.includes('ENOENT'))) ;
11600
11602
  else {
11601
11603
  // Re-throw other unexpected errors
11602
11604
  throw error;
@@ -11896,11 +11898,12 @@ class WebsiteScraper {
11896
11898
  catch (error) {
11897
11899
  // Note: If we can't write to cache, we'll continue without caching
11898
11900
  // This handles read-only filesystems like Vercel
11899
- if (error instanceof Error && (error.message.includes('EROFS') ||
11900
- error.message.includes('read-only') ||
11901
- error.message.includes('EACCES') ||
11902
- error.message.includes('EPERM') ||
11903
- error.message.includes('ENOENT'))) ;
11901
+ if (error instanceof Error &&
11902
+ (error.message.includes('EROFS') ||
11903
+ error.message.includes('read-only') ||
11904
+ error.message.includes('EACCES') ||
11905
+ error.message.includes('EPERM') ||
11906
+ error.message.includes('ENOENT'))) ;
11904
11907
  else {
11905
11908
  // Re-throw other unexpected errors
11906
11909
  throw error;
@@ -12654,11 +12657,12 @@ class FileCacheStorage {
12654
12657
  catch (error) {
12655
12658
  // Note: If we can't write to cache, silently ignore the error
12656
12659
  // This handles read-only filesystems, permission issues, and missing parent directories
12657
- if (error instanceof Error && (error.message.includes('EROFS') ||
12658
- error.message.includes('read-only') ||
12659
- error.message.includes('EACCES') ||
12660
- error.message.includes('EPERM') ||
12661
- error.message.includes('ENOENT'))) {
12660
+ if (error instanceof Error &&
12661
+ (error.message.includes('EROFS') ||
12662
+ error.message.includes('read-only') ||
12663
+ error.message.includes('EACCES') ||
12664
+ error.message.includes('EPERM') ||
12665
+ error.message.includes('ENOENT'))) {
12662
12666
  // Silently ignore filesystem errors - caching is optional
12663
12667
  return;
12664
12668
  }
@@ -17542,11 +17546,12 @@ async function $getCompiledBook(tools, pipelineSource, options) {
17542
17546
  catch (error) {
17543
17547
  // Note: Ignore filesystem errors (like EROFS on read-only systems like Vercel)
17544
17548
  // The compiled book can still be used even if it can't be cached
17545
- if (error instanceof Error && (error.message.includes('EROFS') ||
17546
- error.message.includes('read-only') ||
17547
- error.message.includes('EACCES') ||
17548
- error.message.includes('EPERM') ||
17549
- error.message.includes('ENOENT'))) ;
17549
+ if (error instanceof Error &&
17550
+ (error.message.includes('EROFS') ||
17551
+ error.message.includes('read-only') ||
17552
+ error.message.includes('EACCES') ||
17553
+ error.message.includes('EPERM') ||
17554
+ error.message.includes('ENOENT'))) ;
17550
17555
  else {
17551
17556
  // Re-throw other unexpected errors
17552
17557
  throw error;