@promptbook/cli 0.100.0-25 → 0.100.0-26

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/esm/index.es.js CHANGED
@@ -47,7 +47,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
47
47
  * @generated
48
48
  * @see https://github.com/webgptorg/promptbook
49
49
  */
50
- const PROMPTBOOK_ENGINE_VERSION = '0.100.0-25';
50
+ const PROMPTBOOK_ENGINE_VERSION = '0.100.0-26';
51
51
  /**
52
52
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
53
53
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -212,6 +212,12 @@ const VALUE_STRINGS = {
212
212
  unserializable: '(unserializable value)',
213
213
  circular: '(circular JSON)',
214
214
  };
215
+ /**
216
+ * Default cap for the number of tokens in a single request to the LLM
217
+ *
218
+ * @public exported from `@promptbook/utils`
219
+ */
220
+ const MAX_TOKENS = 1048576;
215
221
  /**
216
222
  * Small number limit
217
223
  *
@@ -6393,7 +6399,7 @@ function templateParameters(template, parameters) {
6393
6399
  */
6394
6400
  async function executeAttempts(options) {
6395
6401
  const { jokerParameterNames, priority, maxAttempts, // <- Note: [💂]
6396
- preparedContent, parameters, task, preparedPipeline, tools, $executionReport, pipelineIdentification, maxExecutionAttempts, } = options;
6402
+ preparedContent, parameters, task, preparedPipeline, tools, $executionReport, pipelineIdentification, maxExecutionAttempts, onProgress, } = options;
6397
6403
  const $ongoingTaskResult = {
6398
6404
  $result: null,
6399
6405
  $resultString: null,
@@ -6637,6 +6643,8 @@ async function executeAttempts(options) {
6637
6643
  result: $ongoingTaskResult.$resultString,
6638
6644
  error: error,
6639
6645
  });
6646
+ // Note: Invoke the progress callback to report the attempt's failure state (mutation of `$ongoingTaskResult`) - TODO: verify this reports progress correctly
6647
+ onProgress({});
6640
6648
  }
6641
6649
  finally {
6642
6650
  if (!isJokerAttempt &&
@@ -15613,8 +15621,7 @@ class AnthropicClaudeExecutionTools {
15613
15621
  const rawPromptContent = templateParameters(content, { ...parameters, modelName });
15614
15622
  const rawRequest = {
15615
15623
  model: modelRequirements.modelName || this.getDefaultChatModel().modelName,
15616
- max_tokens: modelRequirements.maxTokens || 4096,
15617
- // <- TODO: [🌾] Make some global max cap for maxTokens
15624
+ max_tokens: modelRequirements.maxTokens || MAX_TOKENS,
15618
15625
  temperature: modelRequirements.temperature,
15619
15626
  system: modelRequirements.systemMessage,
15620
15627
  messages: [
@@ -15689,7 +15696,7 @@ class AnthropicClaudeExecutionTools {
15689
15696
  const rawPromptContent = templateParameters(content, { ...parameters, modelName });
15690
15697
  const rawRequest = {
15691
15698
  model: modelName,
15692
- max_tokens_to_sample: modelRequirements.maxTokens || 2000,
15699
+ max_tokens_to_sample: modelRequirements.maxTokens || MAX_TOKENS,
15693
15700
  temperature: modelRequirements.temperature,
15694
15701
  prompt: rawPromptContent,
15695
15702
  };
@@ -16430,8 +16437,7 @@ class AzureOpenAiExecutionTools {
16430
16437
  try {
16431
16438
  const modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
16432
16439
  const modelSettings = {
16433
- maxTokens: modelRequirements.maxTokens,
16434
- // <- TODO: [🌾] Make some global max cap for maxTokens
16440
+ maxTokens: modelRequirements.maxTokens || MAX_TOKENS,
16435
16441
  temperature: modelRequirements.temperature,
16436
16442
  user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString(),
16437
16443
  // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
@@ -16537,8 +16543,7 @@ class AzureOpenAiExecutionTools {
16537
16543
  try {
16538
16544
  const modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
16539
16545
  const modelSettings = {
16540
- maxTokens: modelRequirements.maxTokens || 2000,
16541
- // <- TODO: [🌾] Make some global max cap for maxTokens
16546
+ maxTokens: modelRequirements.maxTokens || MAX_TOKENS,
16542
16547
  temperature: modelRequirements.temperature,
16543
16548
  user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString(),
16544
16549
  // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
@@ -17568,8 +17573,7 @@ class OpenAiCompatibleExecutionTools {
17568
17573
  const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
17569
17574
  const modelSettings = {
17570
17575
  model: modelName,
17571
- max_tokens: modelRequirements.maxTokens,
17572
- // <- TODO: [🌾] Make some global max cap for maxTokens
17576
+ max_tokens: modelRequirements.maxTokens || MAX_TOKENS,
17573
17577
  temperature: modelRequirements.temperature,
17574
17578
  // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
17575
17579
  // <- Note: [🧆]
@@ -17665,8 +17669,7 @@ class OpenAiCompatibleExecutionTools {
17665
17669
  const modelName = modelRequirements.modelName || this.getDefaultCompletionModel().modelName;
17666
17670
  const modelSettings = {
17667
17671
  model: modelName,
17668
- max_tokens: modelRequirements.maxTokens || 2000,
17669
- // <- TODO: [🌾] Make some global max cap for maxTokens
17672
+ max_tokens: modelRequirements.maxTokens || MAX_TOKENS,
17670
17673
  temperature: modelRequirements.temperature,
17671
17674
  // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
17672
17675
  // <- Note: [🧆]
@@ -18385,8 +18388,7 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
18385
18388
  const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
18386
18389
  const modelSettings = {
18387
18390
  model: modelName,
18388
- max_tokens: modelRequirements.maxTokens,
18389
- // <- TODO: [🌾] Make some global max cap for maxTokens
18391
+ max_tokens: modelRequirements.maxTokens || MAX_TOKENS,
18390
18392
 
18391
18393
  temperature: modelRequirements.temperature,
18392
18394