@promptbook/wizard 0.101.0-14 → 0.101.0-15

package/esm/index.es.js CHANGED
@@ -36,7 +36,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
- const PROMPTBOOK_ENGINE_VERSION = '0.101.0-14';
+ const PROMPTBOOK_ENGINE_VERSION = '0.101.0-15';
  /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -4622,6 +4622,62 @@ resultContent, rawResponse) {
  * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
  */

+ /**
+ * Parses an OpenAI error message to identify which parameter is unsupported
+ *
+ * @param errorMessage The error message from OpenAI API
+ * @returns The parameter name that is unsupported, or null if not an unsupported parameter error
+ * @private utility of LLM Tools
+ */
+ function parseUnsupportedParameterError(errorMessage) {
+ // Pattern to match "Unsupported value: 'parameter' does not support ..."
+ const unsupportedValueMatch = errorMessage.match(/Unsupported value:\s*'([^']+)'\s*does not support/i);
+ if (unsupportedValueMatch === null || unsupportedValueMatch === void 0 ? void 0 : unsupportedValueMatch[1]) {
+ return unsupportedValueMatch[1];
+ }
+ // Pattern to match "'parameter' of type ... is not supported with this model"
+ const parameterTypeMatch = errorMessage.match(/'([^']+)'\s*of type.*is not supported with this model/i);
+ if (parameterTypeMatch === null || parameterTypeMatch === void 0 ? void 0 : parameterTypeMatch[1]) {
+ return parameterTypeMatch[1];
+ }
+ return null;
+ }
+ /**
+ * Creates a copy of model requirements with the specified parameter removed
+ *
+ * @param modelRequirements Original model requirements
+ * @param unsupportedParameter The parameter to remove
+ * @returns New model requirements without the unsupported parameter
+ * @private utility of LLM Tools
+ */
+ function removeUnsupportedModelRequirement(modelRequirements, unsupportedParameter) {
+ const newRequirements = { ...modelRequirements };
+ // Map of parameter names that might appear in error messages to ModelRequirements properties
+ const parameterMap = {
+ 'temperature': 'temperature',
+ 'max_tokens': 'maxTokens',
+ 'maxTokens': 'maxTokens',
+ 'seed': 'seed',
+ };
+ const propertyToRemove = parameterMap[unsupportedParameter];
+ if (propertyToRemove && propertyToRemove in newRequirements) {
+ delete newRequirements[propertyToRemove];
+ }
+ return newRequirements;
+ }
+ /**
+ * Checks if an error is an "Unsupported value" error from OpenAI
+ * @param error The error to check
+ * @returns true if this is an unsupported parameter error
+ * @private utility of LLM Tools
+ */
+ function isUnsupportedParameterError(error) {
+ const errorMessage = error.message.toLowerCase();
+ return errorMessage.includes('unsupported value:') ||
+ errorMessage.includes('is not supported with this model') ||
+ errorMessage.includes('does not support');
+ }
+
  /**
  * Execution Tools for calling OpenAI API or other OpenAI compatible provider
  *
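The three helpers above are additive and self-contained. For orientation, this minimal sketch runs them against an illustrative input — the error string and the `o1-mini` model name are examples shaped like real OpenAI 400 responses, not verbatim output from the package or the API:

```js
// Illustrative input, shaped like OpenAI's "Unsupported value" 400 errors:
const error = new Error(
    "Unsupported value: 'temperature' does not support 0.7 with this model. Only the default (1) value is supported.",
);

isUnsupportedParameterError(error); // -> true ('unsupported value:' is present)
parseUnsupportedParameterError(error.message); // -> 'temperature'

// Stripping the offending parameter yields requirements that are safe to retry with:
removeUnsupportedModelRequirement(
    { modelVariant: 'CHAT', modelName: 'o1-mini', temperature: 0.7, maxTokens: 1024 },
    'temperature',
); // -> { modelVariant: 'CHAT', modelName: 'o1-mini', maxTokens: 1024 }
```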
@@ -4639,6 +4695,10 @@ class OpenAiCompatibleExecutionTools {
  * OpenAI API client.
  */
  this.client = null;
+ /**
+ * Tracks models and parameters that have already been retried to prevent infinite loops
+ */
+ this.retriedUnsupportedParameters = new Set();
  // TODO: Allow configuring rate limits via options
  this.limiter = new Bottleneck({
  minTime: 60000 / (this.options.maxRequestsPerMinute || DEFAULT_MAX_REQUESTS_PER_MINUTE),
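`retriedUnsupportedParameters` is the guard used by the new retry paths further down: each model + parameter combination may be stripped and retried at most once per tools instance. A trivial sketch of the bookkeeping, with the key scheme taken from the catch blocks below:

```js
// Keys follow the `${modelName}-${unsupportedParameter}` scheme used in the catch blocks:
const retriedUnsupportedParameters = new Set();

retriedUnsupportedParameters.has('o1-mini-temperature'); // -> false: first failure triggers a retry
retriedUnsupportedParameters.add('o1-mini-temperature');
retriedUnsupportedParameters.has('o1-mini-temperature'); // -> true: the same failure now rethrows
```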
@@ -4700,21 +4760,27 @@ class OpenAiCompatibleExecutionTools {
  * Calls OpenAI compatible API to use a chat model.
  */
  async callChatModel(prompt) {
+ return this.callChatModelWithRetry(prompt, prompt.modelRequirements);
+ }
+ /**
+ * Internal method that handles parameter retry for chat model calls
+ */
+ async callChatModelWithRetry(prompt, currentModelRequirements) {
  var _a;
  if (this.options.isVerbose) {
- console.info(`💬 ${this.title} callChatModel call`, { prompt });
+ console.info(`💬 ${this.title} callChatModel call`, { prompt, currentModelRequirements });
  }
- const { content, parameters, modelRequirements, format } = prompt;
+ const { content, parameters, format } = prompt;
  const client = await this.getClient();
  // TODO: [☂] Use here more modelRequirements
- if (modelRequirements.modelVariant !== 'CHAT') {
+ if (currentModelRequirements.modelVariant !== 'CHAT') {
  throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
  }
- const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
+ const modelName = currentModelRequirements.modelName || this.getDefaultChatModel().modelName;
  const modelSettings = {
  model: modelName,
- max_tokens: modelRequirements.maxTokens,
- temperature: modelRequirements.temperature,
+ max_tokens: currentModelRequirements.maxTokens,
+ temperature: currentModelRequirements.temperature,
  // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
  // <- Note: [🧆]
  }; // <- TODO: [💩] Guard here types better
@@ -4729,12 +4795,12 @@ class OpenAiCompatibleExecutionTools {
  const rawRequest = {
  ...modelSettings,
  messages: [
- ...(modelRequirements.systemMessage === undefined
+ ...(currentModelRequirements.systemMessage === undefined
  ? []
  : [
  {
  role: 'system',
- content: modelRequirements.systemMessage,
+ content: currentModelRequirements.systemMessage,
  },
  ]),
  {
@@ -4748,69 +4814,110 @@ class OpenAiCompatibleExecutionTools {
  if (this.options.isVerbose) {
  console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
  }
- const rawResponse = await this.limiter
- .schedule(() => this.makeRequestWithRetry(() => client.chat.completions.create(rawRequest)))
- .catch((error) => {
- assertsError(error);
+ try {
+ const rawResponse = await this.limiter
+ .schedule(() => this.makeRequestWithNetworkRetry(() => client.chat.completions.create(rawRequest)))
+ .catch((error) => {
+ assertsError(error);
+ if (this.options.isVerbose) {
+ console.info(colors.bgRed('error'), error);
+ }
+ throw error;
+ });
  if (this.options.isVerbose) {
- console.info(colors.bgRed('error'), error);
+ console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
  }
- throw error;
- });
- if (this.options.isVerbose) {
- console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
- }
- const complete = $getCurrentDate();
- if (!rawResponse.choices[0]) {
- throw new PipelineExecutionError(`No choises from ${this.title}`);
- }
- if (rawResponse.choices.length > 1) {
- // TODO: This should be maybe only warning
- throw new PipelineExecutionError(`More than one choise from ${this.title}`);
+ const complete = $getCurrentDate();
+ if (!rawResponse.choices[0]) {
+ throw new PipelineExecutionError(`No choises from ${this.title}`);
+ }
+ if (rawResponse.choices.length > 1) {
+ // TODO: This should be maybe only warning
+ throw new PipelineExecutionError(`More than one choise from ${this.title}`);
+ }
+ const resultContent = rawResponse.choices[0].message.content;
+ const usage = this.computeUsage(content || '', resultContent || '', rawResponse);
+ if (resultContent === null) {
+ throw new PipelineExecutionError(`No response message from ${this.title}`);
+ }
+ return exportJson({
+ name: 'promptResult',
+ message: `Result of \`OpenAiCompatibleExecutionTools.callChatModel\``,
+ order: [],
+ value: {
+ content: resultContent,
+ modelName: rawResponse.model || modelName,
+ timing: {
+ start,
+ complete,
+ },
+ usage,
+ rawPromptContent,
+ rawRequest,
+ rawResponse,
+ // <- [🗯]
+ },
+ });
  }
- const resultContent = rawResponse.choices[0].message.content;
- const usage = this.computeUsage(content || '', resultContent || '', rawResponse);
- if (resultContent === null) {
- throw new PipelineExecutionError(`No response message from ${this.title}`);
+ catch (error) {
+ assertsError(error);
+ // Check if this is an unsupported parameter error
+ if (!isUnsupportedParameterError(error)) {
+ throw error;
+ }
+ // Parse which parameter is unsupported
+ const unsupportedParameter = parseUnsupportedParameterError(error.message);
+ if (!unsupportedParameter) {
+ if (this.options.isVerbose) {
+ console.warn(colors.bgYellow('Warning'), 'Could not parse unsupported parameter from error:', error.message);
+ }
+ throw error;
+ }
+ // Create a unique key for this model + parameter combination to prevent infinite loops
+ const retryKey = `${modelName}-${unsupportedParameter}`;
+ if (this.retriedUnsupportedParameters.has(retryKey)) {
+ // Already retried this parameter, throw the error
+ if (this.options.isVerbose) {
+ console.warn(colors.bgRed('Error'), `Parameter '${unsupportedParameter}' for model '${modelName}' already retried once, throwing error:`, error.message);
+ }
+ throw error;
+ }
+ // Mark this parameter as retried
+ this.retriedUnsupportedParameters.add(retryKey);
+ // Log warning in verbose mode
+ if (this.options.isVerbose) {
+ console.warn(colors.bgYellow('Warning'), `Removing unsupported parameter '${unsupportedParameter}' for model '${modelName}' and retrying request`);
+ }
+ // Remove the unsupported parameter and retry
+ const modifiedModelRequirements = removeUnsupportedModelRequirement(currentModelRequirements, unsupportedParameter);
+ return this.callChatModelWithRetry(prompt, modifiedModelRequirements);
  }
- return exportJson({
- name: 'promptResult',
- message: `Result of \`OpenAiCompatibleExecutionTools.callChatModel\``,
- order: [],
- value: {
- content: resultContent,
- modelName: rawResponse.model || modelName,
- timing: {
- start,
- complete,
- },
- usage,
- rawPromptContent,
- rawRequest,
- rawResponse,
- // <- [🗯]
- },
- });
  }
  /**
  * Calls OpenAI API to use a complete model.
  */
  async callCompletionModel(prompt) {
+ return this.callCompletionModelWithRetry(prompt, prompt.modelRequirements);
+ }
+ /**
+ * Internal method that handles parameter retry for completion model calls
+ */
+ async callCompletionModelWithRetry(prompt, currentModelRequirements) {
  var _a;
  if (this.options.isVerbose) {
- console.info(`🖋 ${this.title} callCompletionModel call`, { prompt });
+ console.info(`🖋 ${this.title} callCompletionModel call`, { prompt, currentModelRequirements });
  }
- const { content, parameters, modelRequirements } = prompt;
+ const { content, parameters } = prompt;
  const client = await this.getClient();
  // TODO: [☂] Use here more modelRequirements
- if (modelRequirements.modelVariant !== 'COMPLETION') {
+ if (currentModelRequirements.modelVariant !== 'COMPLETION') {
  throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
  }
- const modelName = modelRequirements.modelName || this.getDefaultCompletionModel().modelName;
+ const modelName = currentModelRequirements.modelName || this.getDefaultCompletionModel().modelName;
  const modelSettings = {
  model: modelName,
- max_tokens: modelRequirements.maxTokens,
- temperature: modelRequirements.temperature,
+ max_tokens: currentModelRequirements.maxTokens,
+ temperature: currentModelRequirements.temperature,
  // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
  // <- Note: [🧆]
  };
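Both `callChatModelWithRetry` and `callCompletionModelWithRetry` share the same catch-path shape; they differ only in the endpoint called and the field the result is read from. The following self-contained sketch condenses that shape using the helpers added above — `fakeOpenAiCall` is a hypothetical stand-in for `client.chat.completions.create`, not package code:

```js
const retriedUnsupportedParameters = new Set();

// Stand-in for the real API call; rejects `temperature` the way some OpenAI models do.
async function fakeOpenAiCall(requirements) {
    if (requirements.temperature !== undefined) {
        throw new Error("Unsupported value: 'temperature' does not support 0.7 with this model.");
    }
    return { content: 'ok', usedRequirements: requirements };
}

async function callWithRetry(requirements) {
    try {
        return await fakeOpenAiCall(requirements);
    } catch (error) {
        if (!isUnsupportedParameterError(error)) throw error; // unrelated errors propagate unchanged
        const unsupportedParameter = parseUnsupportedParameterError(error.message);
        if (!unsupportedParameter) throw error; // recognized shape but unparseable -> give up
        const retryKey = `${requirements.modelName}-${unsupportedParameter}`;
        if (retriedUnsupportedParameters.has(retryKey)) throw error; // retried once already
        retriedUnsupportedParameters.add(retryKey);
        // Strip the offending parameter and recurse with the reduced requirements:
        return callWithRetry(removeUnsupportedModelRequirement(requirements, unsupportedParameter));
    }
}

// First attempt fails on `temperature`; it is stripped and the retry succeeds:
await callWithRetry({ modelVariant: 'CHAT', modelName: 'o1-mini', temperature: 0.7 });
// -> { content: 'ok', usedRequirements: { modelVariant: 'CHAT', modelName: 'o1-mini' } }
```

Note the recursion is bounded: every retry removes one property from the requirements and records the model + parameter key in the set, so the call depth can never exceed the number of strippable parameters.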
@@ -4824,46 +4931,81 @@ class OpenAiCompatibleExecutionTools {
  if (this.options.isVerbose) {
  console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
  }
- const rawResponse = await this.limiter
- .schedule(() => this.makeRequestWithRetry(() => client.completions.create(rawRequest)))
- .catch((error) => {
- assertsError(error);
+ try {
+ const rawResponse = await this.limiter
+ .schedule(() => this.makeRequestWithNetworkRetry(() => client.completions.create(rawRequest)))
+ .catch((error) => {
+ assertsError(error);
+ if (this.options.isVerbose) {
+ console.info(colors.bgRed('error'), error);
+ }
+ throw error;
+ });
  if (this.options.isVerbose) {
- console.info(colors.bgRed('error'), error);
+ console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
  }
- throw error;
- });
- if (this.options.isVerbose) {
- console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
- }
- const complete = $getCurrentDate();
- if (!rawResponse.choices[0]) {
- throw new PipelineExecutionError(`No choises from ${this.title}`);
+ const complete = $getCurrentDate();
+ if (!rawResponse.choices[0]) {
+ throw new PipelineExecutionError(`No choises from ${this.title}`);
+ }
+ if (rawResponse.choices.length > 1) {
+ // TODO: This should be maybe only warning
+ throw new PipelineExecutionError(`More than one choise from ${this.title}`);
+ }
+ const resultContent = rawResponse.choices[0].text;
+ const usage = this.computeUsage(content || '', resultContent || '', rawResponse);
+ return exportJson({
+ name: 'promptResult',
+ message: `Result of \`OpenAiCompatibleExecutionTools.callCompletionModel\``,
+ order: [],
+ value: {
+ content: resultContent,
+ modelName: rawResponse.model || modelName,
+ timing: {
+ start,
+ complete,
+ },
+ usage,
+ rawPromptContent,
+ rawRequest,
+ rawResponse,
+ // <- [🗯]
+ },
+ });
  }
- if (rawResponse.choices.length > 1) {
- // TODO: This should be maybe only warning
- throw new PipelineExecutionError(`More than one choise from ${this.title}`);
+ catch (error) {
+ assertsError(error);
+ // Check if this is an unsupported parameter error
+ if (!isUnsupportedParameterError(error)) {
+ throw error;
+ }
+ // Parse which parameter is unsupported
+ const unsupportedParameter = parseUnsupportedParameterError(error.message);
+ if (!unsupportedParameter) {
+ if (this.options.isVerbose) {
+ console.warn(colors.bgYellow('Warning'), 'Could not parse unsupported parameter from error:', error.message);
+ }
+ throw error;
+ }
+ // Create a unique key for this model + parameter combination to prevent infinite loops
+ const retryKey = `${modelName}-${unsupportedParameter}`;
+ if (this.retriedUnsupportedParameters.has(retryKey)) {
+ // Already retried this parameter, throw the error
+ if (this.options.isVerbose) {
+ console.warn(colors.bgRed('Error'), `Parameter '${unsupportedParameter}' for model '${modelName}' already retried once, throwing error:`, error.message);
+ }
+ throw error;
+ }
+ // Mark this parameter as retried
+ this.retriedUnsupportedParameters.add(retryKey);
+ // Log warning in verbose mode
+ if (this.options.isVerbose) {
+ console.warn(colors.bgYellow('Warning'), `Removing unsupported parameter '${unsupportedParameter}' for model '${modelName}' and retrying request`);
+ }
+ // Remove the unsupported parameter and retry
+ const modifiedModelRequirements = removeUnsupportedModelRequirement(currentModelRequirements, unsupportedParameter);
+ return this.callCompletionModelWithRetry(prompt, modifiedModelRequirements);
  }
- const resultContent = rawResponse.choices[0].text;
- const usage = this.computeUsage(content || '', resultContent || '', rawResponse);
- return exportJson({
- name: 'promptResult',
- message: `Result of \`OpenAiCompatibleExecutionTools.callCompletionModel\``,
- order: [],
- value: {
- content: resultContent,
- modelName: rawResponse.model || modelName,
- timing: {
- start,
- complete,
- },
- usage,
- rawPromptContent,
- rawRequest,
- rawResponse,
- // <- [🗯]
- },
- });
  }
  /**
  * Calls OpenAI compatible API to use a embedding model
@@ -4889,7 +5031,7 @@ class OpenAiCompatibleExecutionTools {
  console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
  }
  const rawResponse = await this.limiter
- .schedule(() => this.makeRequestWithRetry(() => client.embeddings.create(rawRequest)))
+ .schedule(() => this.makeRequestWithNetworkRetry(() => client.embeddings.create(rawRequest)))
  .catch((error) => {
  assertsError(error);
  if (this.options.isVerbose) {
@@ -4951,7 +5093,7 @@ class OpenAiCompatibleExecutionTools {
  /**
  * Makes a request with retry logic for network errors like ECONNRESET
  */
- async makeRequestWithRetry(requestFn) {
+ async makeRequestWithNetworkRetry(requestFn) {
  let lastError;
  for (let attempt = 1; attempt <= CONNECTION_RETRIES_LIMIT; attempt++) {
  try {
@@ -4963,8 +5105,8 @@ class OpenAiCompatibleExecutionTools {
  // Check if this is a retryable network error
  const isRetryableError = this.isRetryableNetworkError(error);
  if (!isRetryableError || attempt === CONNECTION_RETRIES_LIMIT) {
- if (this.options.isVerbose) {
- console.info(colors.bgRed('Final error after retries'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}:`, error);
+ if (this.options.isVerbose && this.isRetryableNetworkError(error)) {
+ console.info(colors.bgRed('Final network error after retries'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}:`, error);
  }
  throw error;
  }
@@ -4974,7 +5116,7 @@ class OpenAiCompatibleExecutionTools {
  const jitterDelay = Math.random() * 500; // Add some randomness
  const totalDelay = backoffDelay + jitterDelay;
  if (this.options.isVerbose) {
- console.info(colors.bgYellow('Retrying request'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}, waiting ${Math.round(totalDelay)}ms:`, error.message);
+ console.info(colors.bgYellow('Retrying network request'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}, waiting ${Math.round(totalDelay)}ms:`, error.message);
  }
  // Wait before retrying
  await new Promise((resolve) => setTimeout(resolve, totalDelay));
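The wait computed here combines `backoffDelay` with up to 500 ms of jitter. The diff does not show how `backoffDelay` is derived — that line sits above the hunk and is unchanged — so the exponential form and both constants in this sketch are assumptions for illustration only:

```js
const RETRY_BASE_DELAY_MS = 1000; // hypothetical constant, not shown in the diff

function delayForAttempt(attempt) {
    const backoffDelay = RETRY_BASE_DELAY_MS * 2 ** (attempt - 1); // assumed exponential: 1s, 2s, 4s, ...
    const jitterDelay = Math.random() * 500; // from the diff: up to 500 ms of randomness
    return backoffDelay + jitterDelay; // from the diff: totalDelay = backoffDelay + jitterDelay
}

// Under these assumptions, attempt 3 waits roughly 4000-4500 ms before the next network retry.
```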