@promptbook/wizard 0.100.0-25 → 0.100.0-28

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/esm/index.es.js CHANGED
@@ -38,7 +38,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.100.0-25';
+const PROMPTBOOK_ENGINE_VERSION = '0.100.0-28';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -2423,8 +2423,7 @@ class AnthropicClaudeExecutionTools {
         const rawPromptContent = templateParameters(content, { ...parameters, modelName });
         const rawRequest = {
             model: modelRequirements.modelName || this.getDefaultChatModel().modelName,
-            max_tokens: modelRequirements.maxTokens || 4096,
-            // <- TODO: [🌾] Make some global max cap for maxTokens
+            max_tokens: modelRequirements.maxTokens || 8192,
             temperature: modelRequirements.temperature,
             system: modelRequirements.systemMessage,
             messages: [
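
The only behavioral change in this hunk is the fallback output cap for Anthropic chat calls, which doubles from 4096 to 8192 tokens. A minimal sketch of how the fallback resolves; the `modelRequirements` values below are hypothetical, and only the `|| 8192` expression comes from the diff:

```js
// Hypothetical inputs; only the `|| 8192` fallback is taken from the diff.
const unpinned = { modelVariant: 'CHAT' }; // caller did not set maxTokens
const pinned = { modelVariant: 'CHAT', maxTokens: 1024 };

// Unset maxTokens now falls back to 8192 output tokens (previously 4096):
console.log(unpinned.maxTokens || 8192); // -> 8192

// An explicit value still wins over the fallback:
console.log(pinned.maxTokens || 8192); // -> 1024
```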
@@ -2483,59 +2482,6 @@ class AnthropicClaudeExecutionTools {
             },
         });
     }
-    /**
-     * Calls Anthropic Claude API to use a completion model.
-     */
-    async callCompletionModel(prompt) {
-        if (this.options.isVerbose) {
-            console.info('🖋 Anthropic Claude callCompletionModel call');
-        }
-        const { content, parameters, modelRequirements } = prompt;
-        if (modelRequirements.modelVariant !== 'COMPLETION') {
-            throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
-        }
-        const client = await this.getClient();
-        const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
-        const rawPromptContent = templateParameters(content, { ...parameters, modelName });
-        const rawRequest = {
-            model: modelName,
-            max_tokens_to_sample: modelRequirements.maxTokens || 2000,
-            temperature: modelRequirements.temperature,
-            prompt: rawPromptContent,
-        };
-        const start = $getCurrentDate();
-        const rawResponse = await this.limiter
-            .schedule(() => client.completions.create(rawRequest))
-            .catch((error) => {
-            if (this.options.isVerbose) {
-                console.info(colors.bgRed('error'), error);
-            }
-            throw error;
-        });
-        if (this.options.isVerbose) {
-            console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
-        }
-        if (!rawResponse.completion) {
-            throw new PipelineExecutionError('No completion from Anthropic Claude');
-        }
-        const resultContent = rawResponse.completion;
-        const complete = $getCurrentDate();
-        const usage = computeAnthropicClaudeUsage(rawPromptContent, resultContent, rawResponse);
-        return exportJson({
-            name: 'promptResult',
-            message: `Result of \`AnthropicClaudeExecutionTools.callCompletionModel\``,
-            order: [],
-            value: {
-                content: resultContent,
-                modelName: rawResponse.model || modelName,
-                timing: { start, complete },
-                usage,
-                rawPromptContent,
-                rawRequest,
-                rawResponse,
-            },
-        });
-    }
     // <- Note: [🤖] callXxxModel
     /**
      * Get the model that should be used as default
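
This hunk deletes `callCompletionModel` from `AnthropicClaudeExecutionTools` entirely, so the class no longer serves the `COMPLETION` model variant. The removed code targeted Anthropic's legacy Text Completions endpoint (`client.completions.create` with `max_tokens_to_sample`), which Anthropic has deprecated in favor of the Messages API. A hedged migration sketch, reusing only the prompt field names visible in the deleted code (`content`, `parameters`, `modelRequirements`, `modelVariant`); how the engine now fails on a `COMPLETION` prompt is not shown in this diff:

```js
// A prompt that previously went through the removed COMPLETION path...
const completionPrompt = {
    content: 'Write a haiku about {topic}',
    parameters: { topic: 'rivers' },
    modelRequirements: { modelVariant: 'COMPLETION' },
};

// ...can be re-expressed for the surviving chat path (callChatModel):
const chatPrompt = {
    ...completionPrompt,
    modelRequirements: { ...completionPrompt.modelRequirements, modelVariant: 'CHAT' },
};
```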
@@ -3241,7 +3187,6 @@ class AzureOpenAiExecutionTools {
             const modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
             const modelSettings = {
                 maxTokens: modelRequirements.maxTokens,
-                // <- TODO: [🌾] Make some global max cap for maxTokens
                 temperature: modelRequirements.temperature,
                 user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString(),
                 // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
@@ -3347,8 +3292,7 @@ class AzureOpenAiExecutionTools {
         try {
             const modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
             const modelSettings = {
-                maxTokens: modelRequirements.maxTokens || 2000,
-                // <- TODO: [🌾] Make some global max cap for maxTokens
+                maxTokens: modelRequirements.maxTokens,
                 temperature: modelRequirements.temperature,
                 user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString(),
                 // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
@@ -4437,7 +4381,6 @@ class OpenAiCompatibleExecutionTools {
         const modelSettings = {
             model: modelName,
             max_tokens: modelRequirements.maxTokens,
-            // <- TODO: [🌾] Make some global max cap for maxTokens
             temperature: modelRequirements.temperature,
             // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
             // <- Note: [🧆]
@@ -4533,8 +4476,7 @@
         const modelName = modelRequirements.modelName || this.getDefaultCompletionModel().modelName;
         const modelSettings = {
             model: modelName,
-            max_tokens: modelRequirements.maxTokens || 2000,
-            // <- TODO: [🌾] Make some global max cap for maxTokens
+            max_tokens: modelRequirements.maxTokens,
             temperature: modelRequirements.temperature,
             // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
             // <- Note: [🧆]
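
Here (as in the Azure hunk above) the hard-coded `|| 2000` fallback disappears, so an unset `maxTokens` reaches the request as `undefined` rather than as a silent 2000-token cap. Assuming the settings object is ultimately serialized to JSON for the HTTP call (an assumption; the transport is outside this diff), `undefined` keys are dropped and the provider's own default applies:

```js
// Sketch of the changed behavior; 'example-model' is a placeholder name.
const modelRequirements = {}; // caller did not specify maxTokens

const modelSettings = {
    model: 'example-model',
    max_tokens: modelRequirements.maxTokens, // undefined when unset
};

// JSON.stringify omits undefined-valued keys, so no cap is sent:
console.log(JSON.stringify(modelSettings)); // -> {"model":"example-model"}
```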
@@ -5253,8 +5195,6 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
         const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
         const modelSettings = {
             model: modelName,
-            max_tokens: modelRequirements.maxTokens,
-            // <- TODO: [🌾] Make some global max cap for maxTokens
 
             temperature: modelRequirements.temperature,
 
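
For the assistant tools, `max_tokens` is dropped from the settings altogether rather than passed through. A plausible reason (an assumption, not stated in the diff) is that OpenAI's Assistants API names its token limits differently, e.g. `max_completion_tokens` on a run, so a chat-completions-style `max_tokens` key would not apply there:

```js
// Hypothetical illustration; neither key below appears in this diff.
const runOptions = {
    // max_tokens: 2048,          // chat-completions style; not used for runs
    max_completion_tokens: 2048,  // Assistants-style output cap (assumption)
};
```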
@@ -8932,7 +8872,7 @@ function validatePromptResult(options) {
  */
 async function executeAttempts(options) {
     const { jokerParameterNames, priority, maxAttempts, // <- Note: [💂]
-    preparedContent, parameters, task, preparedPipeline, tools, $executionReport, pipelineIdentification, maxExecutionAttempts, } = options;
+    preparedContent, parameters, task, preparedPipeline, tools, $executionReport, pipelineIdentification, maxExecutionAttempts, onProgress, } = options;
     const $ongoingTaskResult = {
         $result: null,
         $resultString: null,
@@ -9176,6 +9116,10 @@
                 result: $ongoingTaskResult.$resultString,
                 error: error,
             });
+            // Report failed attempt
+            onProgress({
+                errors: [error],
+            });
         }
         finally {
             if (!isJokerAttempt &&
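
The last two hunks wire a new `onProgress` callback through `executeAttempts`: it is destructured from `options` and invoked with the attempt's errors whenever an attempt fails. A minimal sketch of a caller observing failed attempts; everything about `options` beyond the fields named in the diff is an assumption:

```js
const options = {
    // ...task, tools, parameters, etc. as before (shapes not shown here)...
    onProgress(progress) {
        // The diff shows failed attempts reported as { errors: [error] }:
        if (progress.errors) {
            for (const error of progress.errors) {
                console.warn('Attempt failed:', error.message);
            }
        }
    },
};
```

Note that the failure path calls `onProgress` unconditionally, which suggests callers are expected to supply it (or that a default is provided elsewhere, outside this diff).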