@promptbook/wizard 0.101.0-15 → 0.101.0-17

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (28)
  1. package/esm/index.es.js +49 -44
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/components.index.d.ts +4 -0
  4. package/esm/typings/src/_packages/core.index.d.ts +2 -0
  5. package/esm/typings/src/_packages/types.index.d.ts +2 -0
  6. package/esm/typings/src/book-2.0/agent-source/AgentModelRequirements.d.ts +3 -0
  7. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirements.d.ts +0 -20
  8. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.d.ts +1 -26
  9. package/esm/typings/src/book-2.0/commitments/MODEL/MODEL.d.ts +23 -2
  10. package/esm/typings/src/book-2.0/commitments/NOTE/NOTE.d.ts +14 -2
  11. package/esm/typings/src/book-2.0/commitments/index.d.ts +1 -1
  12. package/esm/typings/src/book-components/AvatarProfile/AvatarChip/AvatarChip.d.ts +2 -2
  13. package/esm/typings/src/book-components/BookEditor/BookEditor.d.ts +5 -0
  14. package/esm/typings/src/book-components/BookEditor/BookEditorInner.d.ts +1 -0
  15. package/esm/typings/src/book-components/Chat/utils/parseMessageButtons.d.ts +22 -0
  16. package/esm/typings/src/formats/csv/CsvFormatError.d.ts +1 -1
  17. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsConfigurationFromEnv.d.ts +1 -1
  18. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForTestingAndScriptsAndPlayground.d.ts +1 -1
  19. package/esm/typings/src/remote-server/openapi-types.d.ts +31 -31
  20. package/esm/typings/src/types/ModelRequirements.d.ts +2 -4
  21. package/esm/typings/src/utils/color/utils/colorSaturation.d.ts +1 -1
  22. package/esm/typings/src/utils/editable/edit-pipeline-string/addPipelineCommand.d.ts +1 -1
  23. package/esm/typings/src/version.d.ts +1 -1
  24. package/package.json +2 -2
  25. package/umd/index.umd.js +49 -44
  26. package/umd/index.umd.js.map +1 -1
  27. package/esm/typings/src/book-2.0/utils/extractAgentMetadata.d.ts +0 -17
  28. package/esm/typings/src/book-2.0/utils/extractProfileImageFromSystemMessage.d.ts +0 -12
package/umd/index.umd.js CHANGED
@@ -48,7 +48,7 @@
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
- const PROMPTBOOK_ENGINE_VERSION = '0.101.0-15';
+ const PROMPTBOOK_ENGINE_VERSION = '0.101.0-17';
  /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -2826,7 +2826,7 @@
  modelVariant: 'CHAT',
  modelTitle: 'gpt-5-mini',
  modelName: 'gpt-5-mini',
- modelDescription: "A faster, cost-efficient version of GPT-5 for well-defined tasks with 200K context window. Maintains core GPT-5 capabilities while offering 5x faster inference and significantly lower costs. Features enhanced instruction following and reduced latency for production applications requiring quick responses with high quality.",
+ modelDescription: 'A faster, cost-efficient version of GPT-5 for well-defined tasks with 200K context window. Maintains core GPT-5 capabilities while offering 5x faster inference and significantly lower costs. Features enhanced instruction following and reduced latency for production applications requiring quick responses with high quality.',
  pricing: {
  prompt: pricing(`$0.25 / 1M tokens`),
  output: pricing(`$2.00 / 1M tokens`),
@@ -2838,7 +2838,7 @@
  modelVariant: 'CHAT',
  modelTitle: 'gpt-5-nano',
  modelName: 'gpt-5-nano',
- modelDescription: "The fastest, most cost-efficient version of GPT-5 with 200K context window. Optimized for summarization, classification, and simple reasoning tasks. Features 10x faster inference than base GPT-5 while maintaining good quality for straightforward applications. Ideal for high-volume, cost-sensitive deployments.",
+ modelDescription: 'The fastest, most cost-efficient version of GPT-5 with 200K context window. Optimized for summarization, classification, and simple reasoning tasks. Features 10x faster inference than base GPT-5 while maintaining good quality for straightforward applications. Ideal for high-volume, cost-sensitive deployments.',
  pricing: {
  prompt: pricing(`$0.05 / 1M tokens`),
  output: pricing(`$0.40 / 1M tokens`),
@@ -2850,7 +2850,7 @@
  modelVariant: 'CHAT',
  modelTitle: 'gpt-4.1',
  modelName: 'gpt-4.1',
- modelDescription: "Smartest non-reasoning model with 128K context window. Enhanced version of GPT-4 with improved instruction following, better factual accuracy, and reduced hallucinations. Features advanced function calling capabilities and superior performance on coding tasks. Ideal for applications requiring high intelligence without reasoning overhead.",
+ modelDescription: 'Smartest non-reasoning model with 128K context window. Enhanced version of GPT-4 with improved instruction following, better factual accuracy, and reduced hallucinations. Features advanced function calling capabilities and superior performance on coding tasks. Ideal for applications requiring high intelligence without reasoning overhead.',
  pricing: {
  prompt: pricing(`$3.00 / 1M tokens`),
  output: pricing(`$12.00 / 1M tokens`),
@@ -2862,7 +2862,7 @@
  modelVariant: 'CHAT',
  modelTitle: 'gpt-4.1-mini',
  modelName: 'gpt-4.1-mini',
- modelDescription: "Smaller, faster version of GPT-4.1 with 128K context window. Balances intelligence and efficiency with 3x faster inference than base GPT-4.1. Maintains strong capabilities across text generation, reasoning, and coding while offering better cost-performance ratio for most applications.",
+ modelDescription: 'Smaller, faster version of GPT-4.1 with 128K context window. Balances intelligence and efficiency with 3x faster inference than base GPT-4.1. Maintains strong capabilities across text generation, reasoning, and coding while offering better cost-performance ratio for most applications.',
  pricing: {
  prompt: pricing(`$0.80 / 1M tokens`),
  output: pricing(`$3.20 / 1M tokens`),
@@ -2874,7 +2874,7 @@
  modelVariant: 'CHAT',
  modelTitle: 'gpt-4.1-nano',
  modelName: 'gpt-4.1-nano',
- modelDescription: "Fastest, most cost-efficient version of GPT-4.1 with 128K context window. Optimized for high-throughput applications requiring good quality at minimal cost. Features 5x faster inference than GPT-4.1 while maintaining adequate performance for most general-purpose tasks.",
+ modelDescription: 'Fastest, most cost-efficient version of GPT-4.1 with 128K context window. Optimized for high-throughput applications requiring good quality at minimal cost. Features 5x faster inference than GPT-4.1 while maintaining adequate performance for most general-purpose tasks.',
  pricing: {
  prompt: pricing(`$0.20 / 1M tokens`),
  output: pricing(`$0.80 / 1M tokens`),
@@ -2886,7 +2886,7 @@
  modelVariant: 'CHAT',
  modelTitle: 'o3',
  modelName: 'o3',
- modelDescription: "Advanced reasoning model with 128K context window specializing in complex logical, mathematical, and analytical tasks. Successor to o1 with enhanced step-by-step problem-solving capabilities and superior performance on STEM-focused problems. Ideal for professional applications requiring deep analytical thinking and precise reasoning.",
+ modelDescription: 'Advanced reasoning model with 128K context window specializing in complex logical, mathematical, and analytical tasks. Successor to o1 with enhanced step-by-step problem-solving capabilities and superior performance on STEM-focused problems. Ideal for professional applications requiring deep analytical thinking and precise reasoning.',
  pricing: {
  prompt: pricing(`$15.00 / 1M tokens`),
  output: pricing(`$60.00 / 1M tokens`),
@@ -2898,7 +2898,7 @@
  modelVariant: 'CHAT',
  modelTitle: 'o3-pro',
  modelName: 'o3-pro',
- modelDescription: "Enhanced version of o3 with more compute allocated for better responses on the most challenging problems. Features extended reasoning time and improved accuracy on complex analytical tasks. Designed for applications where maximum reasoning quality is more important than response speed.",
+ modelDescription: 'Enhanced version of o3 with more compute allocated for better responses on the most challenging problems. Features extended reasoning time and improved accuracy on complex analytical tasks. Designed for applications where maximum reasoning quality is more important than response speed.',
  pricing: {
  prompt: pricing(`$30.00 / 1M tokens`),
  output: pricing(`$120.00 / 1M tokens`),
@@ -2910,7 +2910,7 @@
  modelVariant: 'CHAT',
  modelTitle: 'o4-mini',
  modelName: 'o4-mini',
- modelDescription: "Fast, cost-efficient reasoning model with 128K context window. Successor to o1-mini with improved analytical capabilities while maintaining speed advantages. Features enhanced mathematical reasoning and logical problem-solving at significantly lower cost than full reasoning models.",
+ modelDescription: 'Fast, cost-efficient reasoning model with 128K context window. Successor to o1-mini with improved analytical capabilities while maintaining speed advantages. Features enhanced mathematical reasoning and logical problem-solving at significantly lower cost than full reasoning models.',
  pricing: {
  prompt: pricing(`$4.00 / 1M tokens`),
  output: pricing(`$16.00 / 1M tokens`),
@@ -2922,7 +2922,7 @@
  modelVariant: 'CHAT',
  modelTitle: 'o3-deep-research',
  modelName: 'o3-deep-research',
- modelDescription: "Most powerful deep research model with 128K context window. Specialized for comprehensive research tasks, literature analysis, and complex information synthesis. Features advanced citation capabilities and enhanced factual accuracy for academic and professional research applications.",
+ modelDescription: 'Most powerful deep research model with 128K context window. Specialized for comprehensive research tasks, literature analysis, and complex information synthesis. Features advanced citation capabilities and enhanced factual accuracy for academic and professional research applications.',
  pricing: {
  prompt: pricing(`$25.00 / 1M tokens`),
  output: pricing(`$100.00 / 1M tokens`),
@@ -2934,7 +2934,7 @@
  modelVariant: 'CHAT',
  modelTitle: 'o4-mini-deep-research',
  modelName: 'o4-mini-deep-research',
- modelDescription: "Faster, more affordable deep research model with 128K context window. Balances research capabilities with cost efficiency, offering good performance on literature review, fact-checking, and information synthesis tasks at a more accessible price point.",
+ modelDescription: 'Faster, more affordable deep research model with 128K context window. Balances research capabilities with cost efficiency, offering good performance on literature review, fact-checking, and information synthesis tasks at a more accessible price point.',
  pricing: {
  prompt: pricing(`$12.00 / 1M tokens`),
  output: pricing(`$48.00 / 1M tokens`),
@@ -4666,10 +4666,10 @@
  const newRequirements = { ...modelRequirements };
  // Map of parameter names that might appear in error messages to ModelRequirements properties
  const parameterMap = {
- 'temperature': 'temperature',
- 'max_tokens': 'maxTokens',
- 'maxTokens': 'maxTokens',
- 'seed': 'seed',
+ temperature: 'temperature',
+ max_tokens: 'maxTokens',
+ maxTokens: 'maxTokens',
+ seed: 'seed',
  };
  const propertyToRemove = parameterMap[unsupportedParameter];
  if (propertyToRemove && propertyToRemove in newRequirements) {
@@ -4685,9 +4685,9 @@
  */
  function isUnsupportedParameterError(error) {
  const errorMessage = error.message.toLowerCase();
- return errorMessage.includes('unsupported value:') ||
+ return (errorMessage.includes('unsupported value:') ||
  errorMessage.includes('is not supported with this model') ||
- errorMessage.includes('does not support');
+ errorMessage.includes('does not support'));
  }

  /**
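
The two hunks above are purely stylistic (unquoted object keys, explicit parentheses), but they belong to Promptbook's unsupported-parameter fallback: when a provider rejects a request because a parameter such as temperature or max_tokens is not supported, the error is recognized via isUnsupportedParameterError and the offending property is stripped from the model requirements before retrying. A minimal TypeScript sketch of that pattern, assuming a hypothetical callChat provider call and a naive way of reading the parameter name out of the error message; only parameterMap and isUnsupportedParameterError appear verbatim in the bundle:

type ModelRequirements = { temperature?: number; maxTokens?: number; seed?: number };

const parameterMap: Record<string, keyof ModelRequirements> = {
    temperature: 'temperature',
    max_tokens: 'maxTokens',
    maxTokens: 'maxTokens',
    seed: 'seed',
};

function isUnsupportedParameterError(error: Error): boolean {
    const errorMessage = error.message.toLowerCase();
    return (
        errorMessage.includes('unsupported value:') ||
        errorMessage.includes('is not supported with this model') ||
        errorMessage.includes('does not support')
    );
}

// Hypothetical wrapper (not in the bundle): drop the offending parameter and retry once.
async function callWithFallback(
    requirements: ModelRequirements,
    callChat: (requirements: ModelRequirements) => Promise<string>,
): Promise<string> {
    try {
        return await callChat(requirements);
    } catch (error) {
        if (!(error instanceof Error) || !isUnsupportedParameterError(error)) {
            throw error;
        }
        const newRequirements = { ...requirements };
        // Assumption: the parameter name appears verbatim in the provider's error message
        const unsupportedParameter = Object.keys(parameterMap).find((key) =>
            error.message.includes(key),
        );
        const propertyToRemove = unsupportedParameter ? parameterMap[unsupportedParameter] : undefined;
        if (propertyToRemove && propertyToRemove in newRequirements) {
            delete newRequirements[propertyToRemove];
        }
        return await callChat(newRequirements);
    }
}
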
@@ -6509,11 +6509,12 @@
  catch (error) {
  // Note: If we can't create cache directory, continue without it
  // This handles read-only filesystems, permission issues, and missing parent directories
- if (error instanceof Error && (error.message.includes('EROFS') ||
- error.message.includes('read-only') ||
- error.message.includes('EACCES') ||
- error.message.includes('EPERM') ||
- error.message.includes('ENOENT'))) ;
+ if (error instanceof Error &&
+ (error.message.includes('EROFS') ||
+ error.message.includes('read-only') ||
+ error.message.includes('EACCES') ||
+ error.message.includes('EPERM') ||
+ error.message.includes('ENOENT'))) ;
  else {
  // Re-throw other unexpected errors
  throw error;
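
This hunk, like the similar ones at 11604, 11908, 12666 and 17554 below, only re-wraps an existing condition across lines. The behaviour behind all of them is best-effort caching: cache-directory creation and cache writes swallow filesystem errors on read-only or restricted environments (such as Vercel) and re-throw anything unexpected. A minimal sketch of that pattern, assuming Node's fs/promises; the helper name and the actual Promptbook cache paths are illustrative, not from the bundle:

import { writeFile } from 'fs/promises';

// Error markers treated as "filesystem is restricted, skip caching"
const FILESYSTEM_ERROR_MARKERS = ['EROFS', 'read-only', 'EACCES', 'EPERM', 'ENOENT'];

async function tryWriteCache(filePath: string, content: string): Promise<void> {
    try {
        await writeFile(filePath, content, 'utf-8');
    } catch (error) {
        // Caching is optional: silently skip on read-only or permission errors,
        // re-throw other unexpected errors.
        if (
            error instanceof Error &&
            FILESYSTEM_ERROR_MARKERS.some((marker) => error.message.includes(marker))
        ) {
            return;
        }
        throw error;
    }
}
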
@@ -10709,13 +10710,13 @@
  // Calculate and update tldr based on pipeline progress
  const cv = newOngoingResult;
  // Calculate progress based on parameters resolved vs total parameters
- const totalParameters = pipeline.parameters.filter(p => !p.isInput).length;
+ const totalParameters = pipeline.parameters.filter((p) => !p.isInput).length;
  let resolvedParameters = 0;
  let currentTaskTitle = '';
  // Get the resolved parameters from output parameters
  if (cv === null || cv === void 0 ? void 0 : cv.outputParameters) {
  // Count how many output parameters have non-empty values
- resolvedParameters = Object.values(cv.outputParameters).filter(value => value !== undefined && value !== null && String(value).trim() !== '').length;
+ resolvedParameters = Object.values(cv.outputParameters).filter((value) => value !== undefined && value !== null && String(value).trim() !== '').length;
  }
  // Try to determine current task from execution report
  if (((_a = cv === null || cv === void 0 ? void 0 : cv.executionReport) === null || _a === void 0 ? void 0 : _a.promptExecutions) && cv.executionReport.promptExecutions.length > 0) {
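
The only change in this hunk is parenthesizing arrow-function parameters, but the surrounding code is the progress estimate used for the wizard's tldr: non-input pipeline parameters form the denominator and non-empty output parameters the numerator. Roughly, with a hypothetical helper name and return shape that are not in the bundle:

function calculateProgress(
    parameters: ReadonlyArray<{ isInput: boolean }>,
    outputParameters: Record<string, unknown> | undefined,
): number {
    const totalParameters = parameters.filter((p) => !p.isInput).length;
    const resolvedParameters = Object.values(outputParameters ?? {}).filter(
        (value) => value !== undefined && value !== null && String(value).trim() !== '',
    ).length;
    return totalParameters === 0 ? 0 : resolvedParameters / totalParameters;
}
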
@@ -11604,11 +11605,12 @@
  catch (error) {
  // Note: If we can't write to cache, we'll continue without caching
  // This handles read-only filesystems like Vercel
- if (error instanceof Error && (error.message.includes('EROFS') ||
- error.message.includes('read-only') ||
- error.message.includes('EACCES') ||
- error.message.includes('EPERM') ||
- error.message.includes('ENOENT'))) ;
+ if (error instanceof Error &&
+ (error.message.includes('EROFS') ||
+ error.message.includes('read-only') ||
+ error.message.includes('EACCES') ||
+ error.message.includes('EPERM') ||
+ error.message.includes('ENOENT'))) ;
  else {
  // Re-throw other unexpected errors
  throw error;
@@ -11908,11 +11910,12 @@
  catch (error) {
  // Note: If we can't write to cache, we'll continue without caching
  // This handles read-only filesystems like Vercel
- if (error instanceof Error && (error.message.includes('EROFS') ||
- error.message.includes('read-only') ||
- error.message.includes('EACCES') ||
- error.message.includes('EPERM') ||
- error.message.includes('ENOENT'))) ;
+ if (error instanceof Error &&
+ (error.message.includes('EROFS') ||
+ error.message.includes('read-only') ||
+ error.message.includes('EACCES') ||
+ error.message.includes('EPERM') ||
+ error.message.includes('ENOENT'))) ;
  else {
  // Re-throw other unexpected errors
  throw error;
@@ -12666,11 +12669,12 @@
  catch (error) {
  // Note: If we can't write to cache, silently ignore the error
  // This handles read-only filesystems, permission issues, and missing parent directories
- if (error instanceof Error && (error.message.includes('EROFS') ||
- error.message.includes('read-only') ||
- error.message.includes('EACCES') ||
- error.message.includes('EPERM') ||
- error.message.includes('ENOENT'))) {
+ if (error instanceof Error &&
+ (error.message.includes('EROFS') ||
+ error.message.includes('read-only') ||
+ error.message.includes('EACCES') ||
+ error.message.includes('EPERM') ||
+ error.message.includes('ENOENT'))) {
  // Silently ignore filesystem errors - caching is optional
  return;
  }
@@ -17554,11 +17558,12 @@
  catch (error) {
  // Note: Ignore filesystem errors (like EROFS on read-only systems like Vercel)
  // The compiled book can still be used even if it can't be cached
- if (error instanceof Error && (error.message.includes('EROFS') ||
- error.message.includes('read-only') ||
- error.message.includes('EACCES') ||
- error.message.includes('EPERM') ||
- error.message.includes('ENOENT'))) ;
+ if (error instanceof Error &&
+ (error.message.includes('EROFS') ||
+ error.message.includes('read-only') ||
+ error.message.includes('EACCES') ||
+ error.message.includes('EPERM') ||
+ error.message.includes('ENOENT'))) ;
  else {
  // Re-throw other unexpected errors
  throw error;