@promptbook/wizard 0.98.0-5 → 0.98.0-8

This diff compares the contents of two publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
package/esm/index.es.js CHANGED
@@ -38,7 +38,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.98.0-5';
+const PROMPTBOOK_ENGINE_VERSION = '0.98.0-8';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -232,7 +232,7 @@ const DEFAULT_MAX_PARALLEL_COUNT = 5; // <- TODO: [🤹‍♂️]
  *
  * @public exported from `@promptbook/core`
  */
-const DEFAULT_MAX_EXECUTION_ATTEMPTS = 3; // <- TODO: [🤹‍♂️]
+const DEFAULT_MAX_EXECUTION_ATTEMPTS = 7; // <- TODO: [🤹‍♂️]
 // <- TODO: [๐Ÿ]
 /**
  * Where to store your books
@@ -4345,7 +4345,7 @@ resultContent, rawResponse) {
  */

 /**
- * Execution Tools for calling OpenAI API or other OpeenAI compatible provider
+ * Execution Tools for calling OpenAI API or other OpenAI compatible provider
  *
  * @public exported from `@promptbook/openai`
  */
@@ -4915,6 +4915,7 @@ class OllamaExecutionTools extends OpenAiCompatibleExecutionTools {
             baseURL: DEFAULT_OLLAMA_BASE_URL,
             ...ollamaOptions,
             apiKey: 'ollama',
+            isProxied: false, // <- Note: Ollama is always local
         };
         super(openAiCompatibleOptions);
     }
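The Ollama subclass now pins `isProxied: false` after spreading the caller's options, so proxy mode can never be enabled for a local Ollama instance. A minimal sketch of what this means in practice (assuming the class is exported from `@promptbook/ollama`; the base URL literal stands in for the `DEFAULT_OLLAMA_BASE_URL` constant):

```js
// Hypothetical usage sketch, not part of the diff.
import { OllamaExecutionTools } from '@promptbook/ollama';

const ollama = new OllamaExecutionTools({
    baseURL: 'http://localhost:11434/v1', // <- assumed value of DEFAULT_OLLAMA_BASE_URL
    isProxied: true, // <- silently overridden: the constructor spreads options first, then forces `isProxied: false`
});
```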
@@ -5101,7 +5102,7 @@ const _OpenAiCompatibleMetadataRegistration = $llmToolsMetadataRegister.register
     title: 'Open AI Compatible',
     packageName: '@promptbook/openai',
     className: 'OpenAiCompatibleExecutionTools',
-    envVariables: ['OPENAI_API_KEY'],
+    envVariables: ['OPENAI_API_KEY', 'OPENAI_BASE_URL'],
     trustLevel: 'CLOSED',
     order: MODEL_ORDERS.TOP_TIER,
     getBoilerplateConfiguration() {
@@ -5111,11 +5112,35 @@ const _OpenAiCompatibleMetadataRegistration = $llmToolsMetadataRegister.register
             className: 'OpenAiCompatibleExecutionTools',
             options: {
                 apiKey: 'sk-',
+                baseURL: 'https://api.openai.com/v1',
+                isProxied: false,
+                remoteServerUrl: DEFAULT_REMOTE_SERVER_URL,
                 maxRequestsPerMinute: DEFAULT_MAX_REQUESTS_PER_MINUTE,
             },
         };
     },
     createConfigurationFromEnv(env) {
+        // Note: OpenAiCompatibleExecutionTools is an abstract class and cannot be instantiated directly
+        // However, we can provide configuration for users who want to manually instantiate it
+        if (typeof env.OPENAI_API_KEY === 'string') {
+            const options = {
+                apiKey: env.OPENAI_API_KEY,
+                isProxied: false,
+                remoteServerUrl: DEFAULT_REMOTE_SERVER_URL,
+                maxRequestsPerMinute: DEFAULT_MAX_REQUESTS_PER_MINUTE,
+                defaultModelName: 'gpt-4-turbo',
+            };
+            // Add baseURL if provided in environment
+            if (typeof env.OPENAI_BASE_URL === 'string') {
+                options.baseURL = env.OPENAI_BASE_URL;
+            }
+            return {
+                title: 'Open AI Compatible (from env)',
+                packageName: '@promptbook/openai',
+                className: 'OpenAiCompatibleExecutionTools',
+                options,
+            };
+        }
         return null;
     },
 });
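With `OPENAI_BASE_URL` added to `envVariables` and the new `createConfigurationFromEnv` branch, an OpenAI-compatible provider can now be configured entirely from the environment; the branch returns `null` unless `OPENAI_API_KEY` is set. A rough sketch of the equivalent manual call, using only option names that appear in the diff:

```js
// Hypothetical sketch mirroring the env branch above, not part of the diff.
import { createOpenAiCompatibleExecutionTools } from '@promptbook/openai';

const tools = createOpenAiCompatibleExecutionTools({
    apiKey: process.env.OPENAI_API_KEY,   // required; without it the env branch yields null
    baseURL: process.env.OPENAI_BASE_URL, // optional; only set when the variable exists
    defaultModelName: 'gpt-4-turbo',      // the default the env branch hardcodes
    isProxied: false,
});
```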
@@ -5168,7 +5193,7 @@ class OpenAiExecutionTools extends OpenAiCompatibleExecutionTools {
      * Default model for chat variant.
      */
     getDefaultChatModel() {
-        return this.getDefaultModel('gpt-4o');
+        return this.getDefaultModel('gpt-4-turbo');
     }
     /**
      * Default model for completion variant.
@@ -5198,6 +5223,9 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
      * @param options which are relevant are directly passed to the OpenAI client
      */
     constructor(options) {
+        if (options.isProxied) {
+            throw new NotYetImplementedError(`Proxy mode is not yet implemented for OpenAI assistants`);
+        }
         super(options);
         this.assistantId = options.assistantId;
         // TODO: [👱] Make limiter same as in `OpenAiExecutionTools`
@@ -5379,14 +5407,97 @@ const createOpenAiAssistantExecutionTools = Object.assign((options) => {
  * @public exported from `@promptbook/openai`
  */
 const createOpenAiCompatibleExecutionTools = Object.assign((options) => {
+    if (options.isProxied) {
+        return new RemoteLlmExecutionTools({
+            ...options,
+            identification: {
+                isAnonymous: true,
+                llmToolsConfiguration: [
+                    {
+                        title: 'OpenAI Compatible (proxied)',
+                        packageName: '@promptbook/openai',
+                        className: 'OpenAiCompatibleExecutionTools',
+                        options: {
+                            ...options,
+                            isProxied: false,
+                        },
+                    },
+                ],
+            },
+        });
+    }
     if (($isRunningInBrowser() || $isRunningInWebWorker()) && !options.dangerouslyAllowBrowser) {
         options = { ...options, dangerouslyAllowBrowser: true };
     }
-    return new OpenAiExecutionTools(options);
+    return new HardcodedOpenAiCompatibleExecutionTools(options.defaultModelName, options);
 }, {
     packageName: '@promptbook/openai',
     className: 'OpenAiCompatibleExecutionTools',
 });
+/**
+ * Execution Tools for calling ONE SPECIFIC PRECONFIGURED OpenAI compatible provider
+ *
+ * @private for `createOpenAiCompatibleExecutionTools`
+ */
+class HardcodedOpenAiCompatibleExecutionTools extends OpenAiCompatibleExecutionTools {
+    /**
+     * Creates OpenAI compatible Execution Tools.
+     *
+     * @param options which are relevant are directly passed to the OpenAI compatible client
+     */
+    constructor(defaultModelName, options) {
+        super(options);
+        this.defaultModelName = defaultModelName;
+        this.options = options;
+    }
+    get title() {
+        return `${this.defaultModelName} on ${this.options.baseURL}`;
+    }
+    get description() {
+        return `OpenAI compatible connected to "${this.options.baseURL}" model "${this.defaultModelName}"`;
+    }
+    /**
+     * List all available models (non dynamically)
+     *
+     * Note: Purpose of this is to provide more information about models than standard listing from API
+     */
+    get HARDCODED_MODELS() {
+        return [
+            {
+                modelName: this.defaultModelName,
+                modelVariant: 'CHAT',
+                modelDescription: '', // <- TODO: What is the best value here, maybe `this.description`?
+            },
+        ];
+    }
+    /**
+     * Computes the usage
+     */
+    computeUsage(...args) {
+        return {
+            ...computeOpenAiUsage(...args),
+            price: UNCERTAIN_ZERO_VALUE, // <- TODO: Maybe in future pass this counting mechanism, but for now, we dont know
+        };
+    }
+    /**
+     * Default model for chat variant.
+     */
+    getDefaultChatModel() {
+        return this.getDefaultModel(this.defaultModelName);
+    }
+    /**
+     * Default model for completion variant.
+     */
+    getDefaultCompletionModel() {
+        throw new PipelineExecutionError(`${this.title} does not support COMPLETION model variant`);
+    }
+    /**
+     * Default model for completion variant.
+     */
+    getDefaultEmbeddingModel() {
+        throw new PipelineExecutionError(`${this.title} does not support EMBEDDING model variant`);
+    }
+}
 /**
  * TODO: [🦺] Is there some way how to put `packageName` and `className` on top and function definition on bottom?
  * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
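Two behavioral changes land in this hunk: `isProxied: true` now short-circuits into `RemoteLlmExecutionTools`, forwarding the provider options to the remote server with `isProxied: false` so they are not proxied twice, and the direct path returns the new `HardcodedOpenAiCompatibleExecutionTools` instead of plain `OpenAiExecutionTools`, so the tools advertise exactly one preconfigured chat model. A hedged sketch of both call shapes (URLs and model name are illustrative):

```js
// Hypothetical sketch, not part of the diff.

// Direct mode: a single hardcoded chat model.
const direct = createOpenAiCompatibleExecutionTools({
    apiKey: 'sk-...',
    baseURL: 'https://my-provider.example.com/v1',
    defaultModelName: 'my-model', // <- becomes the single HARDCODED_MODELS entry
    isProxied: false,
});
// direct.title === 'my-model on https://my-provider.example.com/v1'

// Proxied mode: calls are routed through a remote Promptbook server.
const proxied = createOpenAiCompatibleExecutionTools({
    apiKey: 'sk-...',
    baseURL: 'https://my-provider.example.com/v1',
    defaultModelName: 'my-model',
    remoteServerUrl: 'https://promptbook.example.com', // <- illustrative; the boilerplate uses DEFAULT_REMOTE_SERVER_URL
    isProxied: true, // <- returns RemoteLlmExecutionTools instead of a direct client
});
```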
@@ -5403,6 +5514,9 @@ const createOpenAiExecutionTools = Object.assign((options) => {
     if (($isRunningInBrowser() || $isRunningInWebWorker()) && !options.dangerouslyAllowBrowser) {
         options = { ...options, dangerouslyAllowBrowser: true };
     }
+    if (options.isProxied) {
+        throw new NotYetImplementedError(`Proxy mode is not yet implemented in createOpenAiExecutionTools`);
+    }
     return new OpenAiExecutionTools(options);
 }, {
     packageName: '@promptbook/openai',
@@ -6782,7 +6896,7 @@ function jsonParse(value) {
         throw new Error(spaceTrim((block) => `
             ${block(error.message)}

-            The JSON text:
+            The expected JSON text:
             ${block(value)}
         `));
     }
@@ -8691,6 +8805,68 @@ function checkExpectations(expectations, value) {
  * Note: [💝] and [🤠] are interconnected together
  */

+/**
+ * Validates a prompt result against expectations and format requirements.
+ * This function provides a common abstraction for result validation that can be used
+ * by both execution logic and caching logic to ensure consistency.
+ *
+ * @param options - The validation options including result string, expectations, and format
+ * @returns Validation result with processed string and validity status
+ * @private internal function of `createPipelineExecutor` and `cacheLlmTools`
+ */
+function validatePromptResult(options) {
+    const { resultString, expectations, format } = options;
+    let processedResultString = resultString;
+    let validationError;
+    try {
+        // TODO: [💝] Unite object for expecting amount and format
+        if (format) {
+            if (format === 'JSON') {
+                if (!isValidJsonString(processedResultString)) {
+                    // TODO: [🐢] Do more universally via `FormatParser`
+                    try {
+                        processedResultString = extractJsonBlock(processedResultString);
+                    }
+                    catch (error) {
+                        keepUnused(error);
+                        throw new ExpectError(spaceTrim$1((block) => `
+                            Expected valid JSON string
+
+                            The expected JSON text:
+                            ${block(processedResultString)}
+                        `));
+                    }
+                }
+            }
+            else {
+                throw new UnexpectedError(`Unknown format "${format}"`);
+            }
+        }
+        // TODO: [💝] Unite object for expecting amount and format
+        if (expectations) {
+            checkExpectations(expectations, processedResultString);
+        }
+        return {
+            isValid: true,
+            processedResultString,
+        };
+    }
+    catch (error) {
+        if (error instanceof ExpectError) {
+            validationError = error;
+        }
+        else {
+            // Re-throw non-ExpectError errors (like UnexpectedError)
+            throw error;
+        }
+        return {
+            isValid: false,
+            processedResultString,
+            error: validationError,
+        };
+    }
+}
+
 /**
  * Executes a pipeline task with multiple attempts, including joker and retry logic. Handles different task types
  * (prompt, script, dialog, etc.), applies postprocessing, checks expectations, and updates the execution report.
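The new `validatePromptResult` helper centralizes logic that was previously duplicated between `executeAttempts` and `cacheLlmTools`: JSON format checking (with fallback extraction of an embedded JSON block) followed by `checkExpectations`. It returns rather than throws on `ExpectError`, which is what lets the cache layer reuse it without its own try/catch. An illustrative call, assuming the `{ [unit]: { min, max } }` expectations shape used elsewhere in Promptbook:

```js
// Hypothetical sketch of the helper's contract; it is marked @private, so this
// is illustration rather than public API.
const { isValid, processedResultString, error } = validatePromptResult({
    resultString: '{"answer": 42}',
    expectations: { words: { min: 1 } }, // <- assumed expectations shape
    format: 'JSON',
});
// Expected outcome: isValid === true and processedResultString === '{"answer": 42}'.
// When the string is not valid JSON, the helper tries extractJsonBlock before giving up;
// on a failed expectation it returns isValid: false with the ExpectError in `error`
// instead of throwing.
```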
@@ -8713,13 +8889,13 @@ async function executeAttempts(options) {
     // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
     const _llms = arrayableToArray(tools.llm);
     const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
-    attempts: for (let attempt = -jokerParameterNames.length; attempt < maxAttempts; attempt++) {
-        const isJokerAttempt = attempt < 0;
-        const jokerParameterName = jokerParameterNames[jokerParameterNames.length + attempt];
+    attempts: for (let attemptIndex = -jokerParameterNames.length; attemptIndex < maxAttempts; attemptIndex++) {
+        const isJokerAttempt = attemptIndex < 0;
+        const jokerParameterName = jokerParameterNames[jokerParameterNames.length + attemptIndex];
         // TODO: [🧠][🍭] JOKERS, EXPECTATIONS, POSTPROCESSING and FOREACH
         if (isJokerAttempt && !jokerParameterName) {
             throw new UnexpectedError(spaceTrim$1((block) => `
-                Joker not found in attempt ${attempt}
+                Joker not found in attempt ${attemptIndex}

                 ${block(pipelineIdentification)}
             `));
@@ -8917,35 +9093,18 @@ async function executeAttempts(options) {
             }
         }
         // TODO: [💝] Unite object for expecting amount and format
-        if (task.format) {
-            if (task.format === 'JSON') {
-                if (!isValidJsonString($ongoingTaskResult.$resultString || '')) {
-                    // TODO: [🐢] Do more universally via `FormatParser`
-                    try {
-                        $ongoingTaskResult.$resultString = extractJsonBlock($ongoingTaskResult.$resultString || '');
-                    }
-                    catch (error) {
-                        keepUnused(error);
-                        throw new ExpectError(spaceTrim$1((block) => `
-                            Expected valid JSON string
-
-                            ${block(
-                            /*<- Note: No need for `pipelineIdentification`, it will be catched and added later */ '')}
-                        `));
-                    }
-                }
-            }
-            else {
-                throw new UnexpectedError(spaceTrim$1((block) => `
-                    Unknown format "${task.format}"
-
-                    ${block(pipelineIdentification)}
-                `));
+        // Use the common validation function for both format and expectations
+        if (task.format || task.expectations) {
+            const validationResult = validatePromptResult({
+                resultString: $ongoingTaskResult.$resultString || '',
+                expectations: task.expectations,
+                format: task.format,
+            });
+            if (!validationResult.isValid) {
+                throw validationResult.error;
             }
-        }
-        // TODO: [💝] Unite object for expecting amount and format
-        if (task.expectations) {
-            checkExpectations(task.expectations, $ongoingTaskResult.$resultString || '');
+            // Update the result string in case format processing modified it (e.g., JSON extraction)
+            $ongoingTaskResult.$resultString = validationResult.processedResultString;
         }
         break attempts;
     }
@@ -8959,6 +9118,7 @@ async function executeAttempts(options) {
             $ongoingTaskResult.$failedResults = [];
         }
         $ongoingTaskResult.$failedResults.push({
+            attemptIndex,
             result: $ongoingTaskResult.$resultString,
             error: error,
         });
@@ -8983,19 +9143,13 @@ async function executeAttempts(options) {
             });
         }
     }
-    if ($ongoingTaskResult.$expectError !== null && attempt === maxAttempts - 1) {
-        // Store the current failure before throwing
-        $ongoingTaskResult.$failedResults = $ongoingTaskResult.$failedResults || [];
-        $ongoingTaskResult.$failedResults.push({
-            result: $ongoingTaskResult.$resultString,
-            error: $ongoingTaskResult.$expectError,
-        });
-        // Create a summary of all failures
+    if ($ongoingTaskResult.$expectError !== null && attemptIndex === maxAttempts - 1) {
+        // Note: Create a summary of all failures
         const failuresSummary = $ongoingTaskResult.$failedResults
-            .map((failure, index) => spaceTrim$1((block) => {
+            .map((failure) => spaceTrim$1((block) => {
             var _a, _b;
             return `
-                Attempt ${index + 1}:
+                Attempt ${failure.attemptIndex + 1}:
                 Error ${((_a = failure.error) === null || _a === void 0 ? void 0 : _a.name) || ''}:
                 ${block((_b = failure.error) === null || _b === void 0 ? void 0 : _b.message.split('\n').map((line) => `> ${line}`).join('\n'))}

@@ -11802,6 +11956,7 @@ function cacheLlmTools(llmTools, options = {}) {
         },
     };
     const callCommonModel = async (prompt) => {
+        var _a;
         const { parameters, content, modelRequirements } = prompt;
         // <- Note: These are relevant things from the prompt that the cache key should depend on.
         // TODO: Maybe some standalone function for normalization of content for cache
@@ -11857,11 +12012,42 @@ function cacheLlmTools(llmTools, options = {}) {
         // 1. It has a content property that is null or undefined
         // 2. It has an error property that is truthy
         // 3. It has a success property that is explicitly false
-        const isFailedResult = promptResult.content === null ||
+        // 4. It doesn't meet the prompt's expectations or format requirements
+        const isBasicFailedResult = promptResult.content === null ||
             promptResult.content === undefined ||
             promptResult.error ||
             promptResult.success === false;
-        if (!isFailedResult) {
+        let shouldCache = !isBasicFailedResult;
+        // If the basic result is valid, check against expectations and format
+        if (shouldCache && promptResult.content) {
+            try {
+                const validationResult = validatePromptResult({
+                    resultString: promptResult.content,
+                    expectations: prompt.expectations,
+                    format: prompt.format,
+                });
+                shouldCache = validationResult.isValid;
+                if (!shouldCache && isVerbose) {
+                    console.info('Not caching result that fails expectations/format validation for key:', key, {
+                        content: promptResult.content,
+                        expectations: prompt.expectations,
+                        format: prompt.format,
+                        validationError: (_a = validationResult.error) === null || _a === void 0 ? void 0 : _a.message,
+                    });
+                }
+            }
+            catch (error) {
+                // If validation throws an unexpected error, don't cache
+                shouldCache = false;
+                if (isVerbose) {
+                    console.info('Not caching result due to validation error for key:', key, {
+                        content: promptResult.content,
+                        validationError: error instanceof Error ? error.message : String(error),
+                    });
+                }
+            }
+        }
+        if (shouldCache) {
             await storage.setItem(key, {
                 date: $getCurrentDate(),
                 promptbookVersion: PROMPTBOOK_ENGINE_VERSION,
@@ -11878,7 +12064,7 @@ function cacheLlmTools(llmTools, options = {}) {
                 promptResult,
             });
         }
-        else if (isVerbose) {
+        else if (isVerbose && isBasicFailedResult) {
             console.info('Not caching failed result for key:', key, {
                 content: promptResult.content,
                 error: promptResult.error,
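Taken together with the `validatePromptResult` reuse above, the cache now refuses to store results that fail the prompt's expectations or format, and the verbose 'Not caching failed result' log fires only for basic failures (`isBasicFailedResult`), since validation failures already log their own diagnostics. A minimal wiring sketch (assuming the usual `@promptbook/core` exports; the storage class name is an assumption):

```js
// Hypothetical sketch, not part of the diff.
import { cacheLlmTools, MemoryStorage } from '@promptbook/core';

const cachedLlm = cacheLlmTools(llmTools, {
    storage: new MemoryStorage(), // <- assumed in-memory storage implementation
    isVerbose: true,              // <- surfaces the 'Not caching ...' diagnostics shown above
});
// A response that violates prompt.expectations or prompt.format is now recomputed
// on the next call instead of being served stale from the cache.
```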