@promptbook/wizard 0.100.0-24 → 0.100.0-26

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/esm/index.es.js CHANGED
@@ -38,7 +38,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
- const PROMPTBOOK_ENGINE_VERSION = '0.100.0-24';
+ const PROMPTBOOK_ENGINE_VERSION = '0.100.0-26';
  /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -183,6 +183,12 @@ const VALUE_STRINGS = {
  unserializable: '(unserializable value)',
  circular: '(circular JSON)',
  };
+ /**
+ * Default cap for the number of tokens in a single request to the LLM
+ *
+ * @public exported from `@promptbook/utils`
+ */
+ const MAX_TOKENS = 1048576;
  /**
  * Small number limit
  *
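Every provider hunk below applies the same substitution: the caller's `modelRequirements.maxTokens` still wins when set, and the new `MAX_TOKENS` constant (1048576, i.e. 2^20) replaces the assorted hardcoded fallbacks (4096, 2000, or no cap at all), resolving the repeated TODO [🌾] about a global max cap. A minimal standalone sketch of the pattern, with `modelRequirements` as an illustrative input rather than the package's actual type:

    // Default cap mirroring the new constant (2 ** 20 tokens)
    const MAX_TOKENS = 1048576;

    // Hypothetical caller-supplied requirements; maxTokens may be left unset
    const modelRequirements = { temperature: 0.7 };

    // The pattern each hunk now uses: the caller's value wins, the constant is the fallback
    const maxTokens = modelRequirements.maxTokens || MAX_TOKENS;

    console.log(maxTokens); // -> 1048576, since no explicit cap was given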
@@ -2423,8 +2429,7 @@ class AnthropicClaudeExecutionTools {
  const rawPromptContent = templateParameters(content, { ...parameters, modelName });
  const rawRequest = {
  model: modelRequirements.modelName || this.getDefaultChatModel().modelName,
- max_tokens: modelRequirements.maxTokens || 4096,
- // <- TODO: [🌾] Make some global max cap for maxTokens
+ max_tokens: modelRequirements.maxTokens || MAX_TOKENS,
  temperature: modelRequirements.temperature,
  system: modelRequirements.systemMessage,
  messages: [
@@ -2499,7 +2504,7 @@ class AnthropicClaudeExecutionTools {
  const rawPromptContent = templateParameters(content, { ...parameters, modelName });
  const rawRequest = {
  model: modelName,
- max_tokens_to_sample: modelRequirements.maxTokens || 2000,
+ max_tokens_to_sample: modelRequirements.maxTokens || MAX_TOKENS,
  temperature: modelRequirements.temperature,
  prompt: rawPromptContent,
  };
@@ -3240,8 +3245,7 @@ class AzureOpenAiExecutionTools {
  try {
  const modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
  const modelSettings = {
- maxTokens: modelRequirements.maxTokens,
- // <- TODO: [🌾] Make some global max cap for maxTokens
+ maxTokens: modelRequirements.maxTokens || MAX_TOKENS,
  temperature: modelRequirements.temperature,
  user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString(),
  // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
@@ -3347,8 +3351,7 @@ class AzureOpenAiExecutionTools {
  try {
  const modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
  const modelSettings = {
- maxTokens: modelRequirements.maxTokens || 2000,
- // <- TODO: [🌾] Make some global max cap for maxTokens
+ maxTokens: modelRequirements.maxTokens || MAX_TOKENS,
  temperature: modelRequirements.temperature,
  user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString(),
  // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
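Note a behavioral shift in the first Azure hunk (and in the chat hunks of the OpenAI-compatible tools below): `maxTokens` previously forwarded `undefined` when the caller set no cap, deferring the limit to the service, whereas now every request carries an explicit cap. Whether a 1048576-token cap is accepted depends on the deployed model. A sketch of the before/after resolution, with the empty `modelRequirements` as an illustrative case:

    const MAX_TOKENS = 1048576;
    const modelRequirements = {}; // caller sets no maxTokens

    // Before: undefined was forwarded, leaving the cap to the service
    const before = modelRequirements.maxTokens; // -> undefined

    // After: the request always carries an explicit cap
    const after = modelRequirements.maxTokens || MAX_TOKENS; // -> 1048576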
@@ -4436,8 +4439,7 @@ class OpenAiCompatibleExecutionTools {
  const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
  const modelSettings = {
  model: modelName,
- max_tokens: modelRequirements.maxTokens,
- // <- TODO: [🌾] Make some global max cap for maxTokens
+ max_tokens: modelRequirements.maxTokens || MAX_TOKENS,
  temperature: modelRequirements.temperature,
  // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
  // <- Note: [🧆]
@@ -4533,8 +4535,7 @@ class OpenAiCompatibleExecutionTools {
  const modelName = modelRequirements.modelName || this.getDefaultCompletionModel().modelName;
  const modelSettings = {
  model: modelName,
- max_tokens: modelRequirements.maxTokens || 2000,
- // <- TODO: [🌾] Make some global max cap for maxTokens
+ max_tokens: modelRequirements.maxTokens || MAX_TOKENS,
  temperature: modelRequirements.temperature,
  // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
  // <- Note: [🧆]
@@ -5253,8 +5254,7 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
  const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
  const modelSettings = {
  model: modelName,
- max_tokens: modelRequirements.maxTokens,
- // <- TODO: [🌾] Make some global max cap for maxTokens
+ max_tokens: MAX_TOKENS,

  temperature: modelRequirements.temperature,

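One caveat that applies to all of these `||` fallbacks: `||` treats every falsy value as missing, so a caller passing `maxTokens: 0` silently receives the full `MAX_TOKENS` cap. If the target runtimes support it, nullish coalescing (`??`) would fall back only on `null`/`undefined`; a two-line comparison:

    const MAX_TOKENS = 1048576;

    console.log(0 || MAX_TOKENS); // -> 1048576 (0 is falsy, so the fallback applies)
    console.log(0 ?? MAX_TOKENS); // -> 0 (?? falls back only on null/undefined)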
@@ -8932,7 +8932,7 @@ function validatePromptResult(options) {
  */
  async function executeAttempts(options) {
  const { jokerParameterNames, priority, maxAttempts, // <- Note: [💂]
- preparedContent, parameters, task, preparedPipeline, tools, $executionReport, pipelineIdentification, maxExecutionAttempts, } = options;
+ preparedContent, parameters, task, preparedPipeline, tools, $executionReport, pipelineIdentification, maxExecutionAttempts, onProgress, } = options;
  const $ongoingTaskResult = {
  $result: null,
  $resultString: null,
@@ -9176,6 +9176,8 @@ async function executeAttempts(options) {
  result: $ongoingTaskResult.$resultString,
  error: error,
  });
+ // Note: Calling void function to signal progress (mutation of `$ongoingTaskResult`) - TODO: !!!! Is this working
+ onProgress({});
  }
  finally {
  if (!isJokerAttempt &&
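The two hunks above thread a new `onProgress` callback through `executeAttempts` and invoke it with an empty object after a failed attempt is recorded; the package's own inline TODO questions whether this is sufficient. A hypothetical caller, purely illustrative of how the signal could be observed (every option name other than `onProgress` is elided):

    // Hypothetical wiring: count how many attempts have errored so far
    let failedAttempts = 0;

    const options = {
        // ...the other executeAttempts options go here...
        onProgress(partial) {
            // The diff passes {} as the payload, so callers can only
            // count invocations, not inspect the failure itself
            failedAttempts += 1;
            console.log(`attempt failed (${failedAttempts} so far)`, partial);
        },
    };

    // Simulate what the diff does after a failed attempt:
    options.onProgress({}); // logs: attempt failed (1 so far) {}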