@promptbook/core 0.112.0-12 → 0.112.0-15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. package/esm/index.es.js +275 -194
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/src/cli/cli-commands/coder/{find-fresh-emoji-tag.d.ts → find-fresh-emoji-tags.d.ts} +1 -1
  4. package/esm/src/cli/cli-commands/coder.d.ts +1 -1
  5. package/esm/src/commitments/USE_BROWSER/resolveRunBrowserToolForNode.d.ts +1 -1
  6. package/esm/src/commitments/USE_TIMEOUT/TimeoutToolNames.d.ts +1 -0
  7. package/esm/src/commitments/USE_TIMEOUT/TimeoutToolRuntimeAdapter.d.ts +51 -2
  8. package/esm/src/commitments/USE_TIMEOUT/USE_TIMEOUT.d.ts +2 -2
  9. package/esm/src/commitments/USE_TIMEOUT/getTimeoutToolRuntimeAdapterOrDisabledResult.d.ts +2 -2
  10. package/esm/src/commitments/USE_TIMEOUT/parseTimeoutToolArgs.d.ts +14 -1
  11. package/esm/src/execution/createPipelineExecutor/30-executeFormatSubvalues.d.ts +1 -1
  12. package/esm/src/llm-providers/anthropic-claude/anthropic-claude-models.d.ts +1 -1
  13. package/esm/src/llm-providers/deepseek/deepseek-models.d.ts +1 -1
  14. package/esm/src/llm-providers/google/google-models.d.ts +1 -1
  15. package/esm/src/llm-providers/openai/openai-models.d.ts +1 -1
  16. package/esm/src/scrapers/_boilerplate/BoilerplateScraper.d.ts +1 -2
  17. package/esm/src/scrapers/document/DocumentScraper.d.ts +1 -2
  18. package/esm/src/scrapers/document-legacy/LegacyDocumentScraper.d.ts +1 -2
  19. package/esm/src/scripting/javascript/postprocessing-functions.d.ts +1 -1
  20. package/esm/src/utils/parameters/mapAvailableToExpectedParameters.d.ts +1 -2
  21. package/esm/src/version.d.ts +1 -1
  22. package/package.json +1 -1
  23. package/umd/index.umd.js +472 -392
  24. package/umd/index.umd.js.map +1 -1
  25. package/umd/src/cli/cli-commands/coder/{find-fresh-emoji-tag.d.ts → find-fresh-emoji-tags.d.ts} +1 -1
  26. package/umd/src/cli/cli-commands/coder.d.ts +1 -1
  27. package/umd/src/commitments/USE_BROWSER/resolveRunBrowserToolForNode.d.ts +1 -1
  28. package/umd/src/commitments/USE_TIMEOUT/TimeoutToolNames.d.ts +1 -0
  29. package/umd/src/commitments/USE_TIMEOUT/TimeoutToolRuntimeAdapter.d.ts +51 -2
  30. package/umd/src/commitments/USE_TIMEOUT/USE_TIMEOUT.d.ts +2 -2
  31. package/umd/src/commitments/USE_TIMEOUT/getTimeoutToolRuntimeAdapterOrDisabledResult.d.ts +2 -2
  32. package/umd/src/commitments/USE_TIMEOUT/parseTimeoutToolArgs.d.ts +14 -1
  33. package/umd/src/execution/createPipelineExecutor/30-executeFormatSubvalues.d.ts +1 -1
  34. package/umd/src/llm-providers/anthropic-claude/anthropic-claude-models.d.ts +1 -1
  35. package/umd/src/llm-providers/deepseek/deepseek-models.d.ts +1 -1
  36. package/umd/src/llm-providers/google/google-models.d.ts +1 -1
  37. package/umd/src/llm-providers/openai/openai-models.d.ts +1 -1
  38. package/umd/src/scrapers/_boilerplate/BoilerplateScraper.d.ts +1 -2
  39. package/umd/src/scrapers/document/DocumentScraper.d.ts +1 -2
  40. package/umd/src/scrapers/document-legacy/LegacyDocumentScraper.d.ts +1 -2
  41. package/umd/src/scripting/javascript/postprocessing-functions.d.ts +1 -1
  42. package/umd/src/utils/parameters/mapAvailableToExpectedParameters.d.ts +1 -2
  43. package/umd/src/version.d.ts +1 -1
package/esm/index.es.js CHANGED
@@ -1,6 +1,6 @@
1
1
  import { SHA256 } from 'crypto-js';
2
2
  import hexEncoder from 'crypto-js/enc-hex';
3
- import spaceTrim$2, { spaceTrim as spaceTrim$1 } from 'spacetrim';
3
+ import { spaceTrim as spaceTrim$1 } from 'spacetrim';
4
4
  import { randomBytes } from 'crypto';
5
5
  import { Subject, BehaviorSubject } from 'rxjs';
6
6
  import { forTime } from 'waitasecond';
@@ -28,7 +28,7 @@ const BOOK_LANGUAGE_VERSION = '2.0.0';
28
28
  * @generated
29
29
  * @see https://github.com/webgptorg/promptbook
30
30
  */
31
- const PROMPTBOOK_ENGINE_VERSION = '0.112.0-12';
31
+ const PROMPTBOOK_ENGINE_VERSION = '0.112.0-15';
32
32
  /**
33
33
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
34
34
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -1353,7 +1353,7 @@ false);
1353
1353
  function getErrorReportUrl(error) {
1354
1354
  const report = {
1355
1355
  title: `🐜 Error report from ${NAME}`,
1356
- body: spaceTrim$2((block) => `
1356
+ body: spaceTrim$1((block) => `
1357
1357
 
1358
1358
 
1359
1359
  \`${error.name || 'Error'}\` has occurred in the [${NAME}], please look into it @${ADMIN_GITHUB_NAME}.
@@ -1548,7 +1548,7 @@ function valueToString(value) {
1548
1548
  * @public exported from `@promptbook/utils`
1549
1549
  */
1550
1550
  function computeHash(value) {
1551
- return SHA256(hexEncoder.parse(spaceTrim$2(valueToString(value)))).toString( /* hex */);
1551
+ return SHA256(hexEncoder.parse(spaceTrim$1(valueToString(value)))).toString( /* hex */);
1552
1552
  }
1553
1553
  /**
1554
1554
  * TODO: [🥬][🥬] Use this ACRY
@@ -1870,7 +1870,7 @@ function pipelineJsonToString(pipelineJson) {
1870
1870
  pipelineString += '\n\n';
1871
1871
  pipelineString += '```' + contentLanguage;
1872
1872
  pipelineString += '\n';
1873
- pipelineString += spaceTrim$2(content);
1873
+ pipelineString += spaceTrim$1(content);
1874
1874
  // <- TODO: [main] !!3 Escape
1875
1875
  // <- TODO: [🧠] Some clear strategy how to spaceTrim the blocks
1876
1876
  pipelineString += '\n';
@@ -1991,7 +1991,7 @@ function checkSerializableAsJson(options) {
1991
1991
  }
1992
1992
  else if (typeof value === 'object') {
1993
1993
  if (value instanceof Date) {
1994
- throw new UnexpectedError(spaceTrim$2((block) => `
1994
+ throw new UnexpectedError(spaceTrim$1((block) => `
1995
1995
  \`${name}\` is Date
1996
1996
 
1997
1997
  Use \`string_date_iso8601\` instead
@@ -2010,7 +2010,7 @@ function checkSerializableAsJson(options) {
2010
2010
  throw new UnexpectedError(`${name} is RegExp`);
2011
2011
  }
2012
2012
  else if (value instanceof Error) {
2013
- throw new UnexpectedError(spaceTrim$2((block) => `
2013
+ throw new UnexpectedError(spaceTrim$1((block) => `
2014
2014
  \`${name}\` is unserialized Error
2015
2015
 
2016
2016
  Use function \`serializeError\`
@@ -2033,7 +2033,7 @@ function checkSerializableAsJson(options) {
2033
2033
  }
2034
2034
  catch (error) {
2035
2035
  assertsError(error);
2036
- throw new UnexpectedError(spaceTrim$2((block) => `
2036
+ throw new UnexpectedError(spaceTrim$1((block) => `
2037
2037
  \`${name}\` is not serializable
2038
2038
 
2039
2039
  ${block(error.stack || error.message)}
@@ -2065,7 +2065,7 @@ function checkSerializableAsJson(options) {
2065
2065
  }
2066
2066
  }
2067
2067
  else {
2068
- throw new UnexpectedError(spaceTrim$2((block) => `
2068
+ throw new UnexpectedError(spaceTrim$1((block) => `
2069
2069
  \`${name}\` is unknown type
2070
2070
 
2071
2071
  Additional message for \`${name}\`:
@@ -3314,7 +3314,7 @@ function serializeError(error) {
3314
3314
  const { name, message, stack } = error;
3315
3315
  const { id } = error;
3316
3316
  if (!Object.keys(ALL_ERRORS).includes(name)) {
3317
- console.error(spaceTrim$2((block) => `
3317
+ console.error(spaceTrim$1((block) => `
3318
3318
 
3319
3319
  Cannot serialize error with name "${name}"
3320
3320
 
@@ -3347,7 +3347,7 @@ function jsonParse(value) {
3347
3347
  }
3348
3348
  else if (typeof value !== 'string') {
3349
3349
  console.error('Can not parse JSON from non-string value.', { text: value });
3350
- throw new Error(spaceTrim$2(`
3350
+ throw new Error(spaceTrim$1(`
3351
3351
  Can not parse JSON from non-string value.
3352
3352
 
3353
3353
  The value type: ${typeof value}
@@ -3361,7 +3361,7 @@ function jsonParse(value) {
3361
3361
  if (!(error instanceof Error)) {
3362
3362
  throw error;
3363
3363
  }
3364
- throw new Error(spaceTrim$2((block) => `
3364
+ throw new Error(spaceTrim$1((block) => `
3365
3365
  ${block(error.message)}
3366
3366
 
3367
3367
  The expected JSON text:
@@ -3414,7 +3414,7 @@ function deserializeError(error, isStackAddedToMessage = true) {
3414
3414
  message = `${name}: ${message}`;
3415
3415
  }
3416
3416
  if (isStackAddedToMessage && stack !== undefined && stack !== '') {
3417
- message = spaceTrim$2((block) => `
3417
+ message = spaceTrim$1((block) => `
3418
3418
  ${block(message)}
3419
3419
 
3420
3420
  Original stack trace:
@@ -3964,14 +3964,14 @@ class MultipleLlmExecutionTools {
3964
3964
  if (description === undefined) {
3965
3965
  return headLine;
3966
3966
  }
3967
- return spaceTrim$2((block) => `
3967
+ return spaceTrim$1((block) => `
3968
3968
  ${headLine}
3969
3969
 
3970
3970
  ${ /* <- Note: Indenting the description: */block(description)}
3971
3971
  `);
3972
3972
  })
3973
3973
  .join('\n\n');
3974
- return spaceTrim$2((block) => `
3974
+ return spaceTrim$1((block) => `
3975
3975
  Multiple LLM Providers:
3976
3976
 
3977
3977
  ${block(innerModelsTitlesAndDescriptions)}
@@ -4073,7 +4073,7 @@ class MultipleLlmExecutionTools {
4073
4073
  // 1) OpenAI throw PipelineExecutionError: Parameter `{knowledge}` is not defined
4074
4074
  // 2) AnthropicClaude throw PipelineExecutionError: Parameter `{knowledge}` is not defined
4075
4075
  // 3) ...
4076
- spaceTrim$2((block) => `
4076
+ spaceTrim$1((block) => `
4077
4077
  All execution tools of ${this.title} failed:
4078
4078
 
4079
4079
  ${block(errors
@@ -4086,7 +4086,7 @@ class MultipleLlmExecutionTools {
4086
4086
  throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\` into ${this.title}`);
4087
4087
  }
4088
4088
  else {
4089
- throw new PipelineExecutionError(spaceTrim$2((block) => `
4089
+ throw new PipelineExecutionError(spaceTrim$1((block) => `
4090
4090
  You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}" into ${this.title}
4091
4091
 
4092
4092
  Available \`LlmExecutionTools\`:
@@ -4123,7 +4123,7 @@ class MultipleLlmExecutionTools {
4123
4123
  */
4124
4124
  function joinLlmExecutionTools(title, ...llmExecutionTools) {
4125
4125
  if (llmExecutionTools.length === 0) {
4126
- const warningMessage = spaceTrim$2(`
4126
+ const warningMessage = spaceTrim$1(`
4127
4127
  You have not provided any \`LlmExecutionTools\`
4128
4128
  This means that you won't be able to execute any prompts that require large language models like GPT-4 or Anthropic's Claude.
4129
4129
 
@@ -4378,14 +4378,14 @@ function $registeredScrapersMessage(availableScrapers) {
4378
4378
  return { ...metadata, isMetadataAviailable, isInstalled, isAvailableInTools };
4379
4379
  });
4380
4380
  if (metadata.length === 0) {
4381
- return spaceTrim$2(`
4381
+ return spaceTrim$1(`
4382
4382
  **No scrapers are available**
4383
4383
 
4384
4384
  This is a unexpected behavior, you are probably using some broken version of Promptbook
4385
4385
  At least there should be available the metadata of the scrapers
4386
4386
  `);
4387
4387
  }
4388
- return spaceTrim$2((block) => `
4388
+ return spaceTrim$1((block) => `
4389
4389
  Available scrapers are:
4390
4390
  ${block(metadata
4391
4391
  .map(({ packageName, className, isMetadataAviailable, isInstalled, mimeTypes, isAvailableInBrowser, isAvailableInTools, }, i) => {
@@ -4891,7 +4891,7 @@ const promptbookFetch = async (urlOrRequest, init) => {
4891
4891
  else if (urlOrRequest instanceof Request) {
4892
4892
  url = urlOrRequest.url;
4893
4893
  }
4894
- throw new PromptbookFetchError(spaceTrim$2((block) => `
4894
+ throw new PromptbookFetchError(spaceTrim$1((block) => `
4895
4895
  Can not fetch "${url}"
4896
4896
 
4897
4897
  Fetch error:
@@ -5051,7 +5051,7 @@ async function makeKnowledgeSourceHandler(knowledgeSource, tools, options) {
5051
5051
  const fileExtension = getFileExtension(filename);
5052
5052
  const mimeType = extensionToMimeType(fileExtension || '');
5053
5053
  if (!(await isFileExisting(filename, tools.fs))) {
5054
- throw new NotFoundError(spaceTrim$2((block) => `
5054
+ throw new NotFoundError(spaceTrim$1((block) => `
5055
5055
  Can not make source handler for file which does not exist:
5056
5056
 
5057
5057
  File:
@@ -5144,7 +5144,7 @@ async function prepareKnowledgePieces(knowledgeSources, tools, options) {
5144
5144
  // <- TODO: [🪓] Here should be no need for spreading new array, just `partialPieces = partialPiecesUnchecked`
5145
5145
  break;
5146
5146
  }
5147
- console.warn(spaceTrim$2((block) => `
5147
+ console.warn(spaceTrim$1((block) => `
5148
5148
  Cannot scrape knowledge from source despite the scraper \`${scraper.metadata.className}\` supports the mime type "${sourceHandler.mimeType}".
5149
5149
 
5150
5150
  The source:
@@ -5160,7 +5160,7 @@ async function prepareKnowledgePieces(knowledgeSources, tools, options) {
5160
5160
  // <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
5161
5161
  }
5162
5162
  if (partialPieces === null) {
5163
- throw new KnowledgeScrapeError(spaceTrim$2((block) => `
5163
+ throw new KnowledgeScrapeError(spaceTrim$1((block) => `
5164
5164
  Cannot scrape knowledge
5165
5165
 
5166
5166
  The source:
@@ -5654,7 +5654,7 @@ const CsvFormatParser = {
5654
5654
  const { value, outputParameterName, settings, mapCallback, onProgress } = options;
5655
5655
  const csv = csvParse(value, settings);
5656
5656
  if (csv.errors.length !== 0) {
5657
- throw new CsvFormatError(spaceTrim$2((block) => `
5657
+ throw new CsvFormatError(spaceTrim$1((block) => `
5658
5658
  CSV parsing error
5659
5659
 
5660
5660
  Error(s) from CSV parsing:
@@ -5699,7 +5699,7 @@ const CsvFormatParser = {
5699
5699
  const { value, settings, mapCallback, onProgress } = options;
5700
5700
  const csv = csvParse(value, settings);
5701
5701
  if (csv.errors.length !== 0) {
5702
- throw new CsvFormatError(spaceTrim$2((block) => `
5702
+ throw new CsvFormatError(spaceTrim$1((block) => `
5703
5703
  CSV parsing error
5704
5704
 
5705
5705
  Error(s) from CSV parsing:
@@ -5909,7 +5909,7 @@ function mapAvailableToExpectedParameters(options) {
5909
5909
  }
5910
5910
  // Phase 2️⃣: Non-matching mapping
5911
5911
  if (expectedParameterNames.size !== availableParametersNames.size) {
5912
- throw new PipelineExecutionError(spaceTrim$2((block) => `
5912
+ throw new PipelineExecutionError(spaceTrim$1((block) => `
5913
5913
  Can not map available parameters to expected parameters
5914
5914
 
5915
5915
  Mapped parameters:
@@ -6734,7 +6734,7 @@ async function executeFormatSubvalues(options) {
6734
6734
  return /* not await */ executeAttempts({ ...options, logLlmCall });
6735
6735
  }
6736
6736
  if (jokerParameterNames.length !== 0) {
6737
- throw new UnexpectedError(spaceTrim$2((block) => `
6737
+ throw new UnexpectedError(spaceTrim$1((block) => `
6738
6738
  JOKER parameters are not supported together with FOREACH command
6739
6739
 
6740
6740
  [🧞‍♀️] This should be prevented in \`validatePipeline\`
@@ -6747,7 +6747,7 @@ async function executeFormatSubvalues(options) {
6747
6747
  if (formatDefinition === undefined) {
6748
6748
  throw new UnexpectedError(
6749
6749
  // <- TODO: [🧠][🧐] Should be formats fixed per promptbook version or behave as plugins (=> change UnexpectedError)
6750
- spaceTrim$2((block) => `
6750
+ spaceTrim$1((block) => `
6751
6751
  Unsupported format "${task.foreach.formatName}"
6752
6752
 
6753
6753
  Available formats:
@@ -6764,7 +6764,7 @@ async function executeFormatSubvalues(options) {
6764
6764
  if (subvalueParser === undefined) {
6765
6765
  throw new UnexpectedError(
6766
6766
  // <- TODO: [🧠][🧐] Should be formats fixed per promptbook version or behave as plugins (=> change UnexpectedError)
6767
- spaceTrim$2((block) => `
6767
+ spaceTrim$1((block) => `
6768
6768
  Unsupported subformat name "${task.foreach.subformatName}" for format "${task.foreach.formatName}"
6769
6769
 
6770
6770
  Available subformat names for format "${formatDefinition.formatName}":
@@ -6804,7 +6804,7 @@ async function executeFormatSubvalues(options) {
6804
6804
  if (!(error instanceof PipelineExecutionError)) {
6805
6805
  throw error;
6806
6806
  }
6807
- const highLevelError = new PipelineExecutionError(spaceTrim$2((block) => `
6807
+ const highLevelError = new PipelineExecutionError(spaceTrim$1((block) => `
6808
6808
  ${error.message}
6809
6809
 
6810
6810
  This is error in FOREACH command when mapping ${formatDefinition.formatName} ${subvalueParser.subvalueName} data (${index + 1}/${length})
@@ -6828,7 +6828,7 @@ async function executeFormatSubvalues(options) {
6828
6828
  ...options,
6829
6829
  priority: priority + index,
6830
6830
  parameters: allSubparameters,
6831
- pipelineIdentification: spaceTrim$2((block) => `
6831
+ pipelineIdentification: spaceTrim$1((block) => `
6832
6832
  ${block(pipelineIdentification)}
6833
6833
  Subparameter index: ${index}
6834
6834
  `),
@@ -6837,7 +6837,7 @@ async function executeFormatSubvalues(options) {
6837
6837
  }
6838
6838
  catch (error) {
6839
6839
  if (length > BIG_DATASET_TRESHOLD) {
6840
- console.error(spaceTrim$2((block) => `
6840
+ console.error(spaceTrim$1((block) => `
6841
6841
  ${error.message}
6842
6842
 
6843
6843
  This is error in FOREACH command when processing ${formatDefinition.formatName} ${subvalueParser.subvalueName} data (${index + 1}/${length})
@@ -8834,7 +8834,7 @@ function buildParametersSection(items) {
8834
8834
  const entries = items
8835
8835
  .flatMap((item) => formatParameterListItem(item).split(/\r?\n/))
8836
8836
  .filter((line) => line !== '');
8837
- return spaceTrim$2((block) => `
8837
+ return spaceTrim$1((block) => `
8838
8838
  **Parameters:**
8839
8839
  ${block(entries.join('\n'))}
8840
8840
 
@@ -8907,7 +8907,7 @@ function isPromptString(value) {
8907
8907
  */
8908
8908
  function prompt(strings, ...values) {
8909
8909
  if (values.length === 0) {
8910
- return new PromptString(spaceTrim$2(strings.join('')));
8910
+ return new PromptString(spaceTrim$1(strings.join('')));
8911
8911
  }
8912
8912
  const stringsWithHiddenParameters = strings.map((stringsItem) => ParameterEscaping.hideBrackets(stringsItem));
8913
8913
  const parameterMetadata = values.map((value) => {
@@ -8948,7 +8948,7 @@ function prompt(strings, ...values) {
8948
8948
  ? `${result}${stringsItem}`
8949
8949
  : `${result}${stringsItem}${ParameterSection.formatParameterPlaceholder(parameterName)}`;
8950
8950
  }, '');
8951
- pipelineString = spaceTrim$2(pipelineString);
8951
+ pipelineString = spaceTrim$1(pipelineString);
8952
8952
  try {
8953
8953
  pipelineString = templateParameters(pipelineString, parameters);
8954
8954
  }
@@ -8957,7 +8957,7 @@ function prompt(strings, ...values) {
8957
8957
  throw error;
8958
8958
  }
8959
8959
  console.error({ pipelineString, parameters, parameterNames: parameterNamesOrdered, error });
8960
- throw new UnexpectedError(spaceTrim$2((block) => `
8960
+ throw new UnexpectedError(spaceTrim$1((block) => `
8961
8961
  Internal error in prompt template literal
8962
8962
 
8963
8963
  ${block(JSON.stringify({ strings, values }, null, 4))}}
@@ -19795,9 +19795,9 @@ function createTimeoutSystemMessage(extraInstructions) {
19795
19795
  return spaceTrim$1((block) => `
19796
19796
  Timeout scheduling:
19797
19797
  - Use "set_timeout" to wake this same chat thread in the future.
19798
- - Timers are thread-scoped, not global for the whole agent.
19798
+ - Use "list_timeouts" to review timeouts across all chats for the same user+agent scope.
19799
+ - "cancel_timeout" accepts a timeout id from any chat in this same user+agent scope.
19799
19800
  - When one timeout elapses, you will receive a new user-like message that explicitly says it is a timeout wake-up and includes the \`timeoutId\`.
19800
- - Use "cancel_timeout" when a previously scheduled timeout is no longer relevant.
19801
19801
  - Do not claim a timer was set or cancelled unless the tool confirms it.
19802
19802
  ${block(extraInstructions)}
19803
19803
  `);
@@ -19858,13 +19858,6 @@ function parseToolExecutionEnvelope(rawValue) {
19858
19858
  * @private internal utility of USE TIMEOUT
19859
19859
  */
19860
19860
  function createDisabledTimeoutResult(action, message) {
19861
- if (action === 'set') {
19862
- return {
19863
- action,
19864
- status: 'disabled',
19865
- message,
19866
- };
19867
- }
19868
19861
  return {
19869
19862
  action,
19870
19863
  status: 'disabled',
@@ -19891,6 +19884,18 @@ function getTimeoutToolRuntimeAdapterOrDisabledResult(action, runtimeContext) {
19891
19884
  }
19892
19885
  }
19893
19886
 
19887
+ /**
19888
+ * Default number of rows returned by `list_timeouts`.
19889
+ *
19890
+ * @private internal USE TIMEOUT constant
19891
+ */
19892
+ const DEFAULT_LIST_TIMEOUTS_LIMIT = 20;
19893
+ /**
19894
+ * Hard cap for `list_timeouts` page size.
19895
+ *
19896
+ * @private internal USE TIMEOUT constant
19897
+ */
19898
+ const MAX_LIST_TIMEOUTS_LIMIT = 100;
19894
19899
  /**
19895
19900
  * Parses and validates `USE TIMEOUT` tool arguments.
19896
19901
  *
@@ -19925,6 +19930,31 @@ const parseTimeoutToolArgs = {
19925
19930
  }
19926
19931
  return { timeoutId };
19927
19932
  },
19933
+ /**
19934
+ * Parses `list_timeouts` input.
19935
+ */
19936
+ list(args) {
19937
+ if (args.includeFinished !== undefined && typeof args.includeFinished !== 'boolean') {
19938
+ throw new PipelineExecutionError(spaceTrim$1(`
19939
+ Timeout \`includeFinished\` must be a boolean when provided.
19940
+ `));
19941
+ }
19942
+ const parsedLimit = args.limit === undefined ? DEFAULT_LIST_TIMEOUTS_LIMIT : Math.floor(Number(args.limit));
19943
+ if (!Number.isFinite(parsedLimit) || parsedLimit <= 0) {
19944
+ throw new PipelineExecutionError(spaceTrim$1(`
19945
+ Timeout \`limit\` must be a positive number.
19946
+ `));
19947
+ }
19948
+ if (parsedLimit > MAX_LIST_TIMEOUTS_LIMIT) {
19949
+ throw new PipelineExecutionError(spaceTrim$1(`
19950
+ Timeout \`limit\` must be at most \`${MAX_LIST_TIMEOUTS_LIMIT}\`.
19951
+ `));
19952
+ }
19953
+ return {
19954
+ includeFinished: args.includeFinished === true,
19955
+ limit: parsedLimit,
19956
+ };
19957
+ },
19928
19958
  };
19929
19959
 
19930
19960
  /**
@@ -19935,6 +19965,7 @@ const parseTimeoutToolArgs = {
19935
19965
  const TimeoutToolNames = {
19936
19966
  set: 'set_timeout',
19937
19967
  cancel: 'cancel_timeout',
19968
+ list: 'list_timeouts',
19938
19969
  };
19939
19970
 
19940
19971
  /**
@@ -20034,6 +20065,35 @@ function createTimeoutToolFunctions() {
20034
20065
  return JSON.stringify(result);
20035
20066
  }
20036
20067
  },
20068
+ async [TimeoutToolNames.list](args) {
20069
+ const runtimeContext = resolveTimeoutRuntimeContext(args);
20070
+ const { adapter, disabledResult } = getTimeoutToolRuntimeAdapterOrDisabledResult('list', runtimeContext);
20071
+ if (!adapter || disabledResult) {
20072
+ return JSON.stringify(disabledResult);
20073
+ }
20074
+ try {
20075
+ const parsedArgs = parseTimeoutToolArgs.list(args);
20076
+ const listedTimeouts = await adapter.listTimeouts(parsedArgs, runtimeContext);
20077
+ const result = {
20078
+ action: 'list',
20079
+ status: 'listed',
20080
+ items: listedTimeouts.items,
20081
+ total: listedTimeouts.total,
20082
+ };
20083
+ return createToolExecutionEnvelope({
20084
+ assistantMessage: listedTimeouts.total === 1 ? 'Found 1 timeout.' : `Found ${listedTimeouts.total} timeouts.`,
20085
+ toolResult: result,
20086
+ });
20087
+ }
20088
+ catch (error) {
20089
+ const result = {
20090
+ action: 'list',
20091
+ status: 'error',
20092
+ message: error instanceof Error ? error.message : String(error),
20093
+ };
20094
+ return JSON.stringify(result);
20095
+ }
20096
+ },
20037
20097
  };
20038
20098
  }
20039
20099
 
@@ -20067,26 +20127,45 @@ function createTimeoutTools(existingTools = []) {
20067
20127
  if (!tools.some((tool) => tool.name === TimeoutToolNames.cancel)) {
20068
20128
  tools.push({
20069
20129
  name: TimeoutToolNames.cancel,
20070
- description: 'Cancel one previously scheduled timeout in the current chat thread.',
20130
+ description: 'Cancel one previously scheduled timeout within the same user+agent scope, even if it was set in another chat.',
20071
20131
  parameters: {
20072
20132
  type: 'object',
20073
20133
  properties: {
20074
20134
  timeoutId: {
20075
20135
  type: 'string',
20076
- description: 'Identifier returned earlier by `set_timeout`.',
20136
+ description: 'Identifier returned earlier by `set_timeout` or `list_timeouts`.',
20077
20137
  },
20078
20138
  },
20079
20139
  required: ['timeoutId'],
20080
20140
  },
20081
20141
  });
20082
20142
  }
20143
+ if (!tools.some((tool) => tool.name === TimeoutToolNames.list)) {
20144
+ tools.push({
20145
+ name: TimeoutToolNames.list,
20146
+ description: 'List scheduled timeouts across all chats for this same user+agent scope so they can be reviewed or cancelled.',
20147
+ parameters: {
20148
+ type: 'object',
20149
+ properties: {
20150
+ includeFinished: {
20151
+ type: 'boolean',
20152
+ description: 'When true, include completed, failed, and cancelled rows in addition to active timeouts.',
20153
+ },
20154
+ limit: {
20155
+ type: 'number',
20156
+ description: 'Maximum number of rows to return (default 20, max 100).',
20157
+ },
20158
+ },
20159
+ },
20160
+ });
20161
+ }
20083
20162
  return tools;
20084
20163
  }
20085
20164
 
20086
20165
  /**
20087
20166
  * `USE TIMEOUT` commitment definition.
20088
20167
  *
20089
- * The `USE TIMEOUT` commitment enables thread-scoped timers that wake the same chat later.
20168
+ * The `USE TIMEOUT` commitment enables timeout wake-ups and scoped timeout management.
20090
20169
  *
20091
20170
  * @private [🪔] Maybe export the commitments through some package
20092
20171
  */
@@ -20101,7 +20180,7 @@ class UseTimeoutCommitmentDefinition extends BaseCommitmentDefinition {
20101
20180
  * Short one-line description of `USE TIMEOUT`.
20102
20181
  */
20103
20182
  get description() {
20104
- return 'Enable thread-scoped timers that can wake the same chat in the future.';
20183
+ return 'Enable timeout wake-ups plus scoped timeout listing/cancellation across chats.';
20105
20184
  }
20106
20185
  /**
20107
20186
  * Icon for this commitment.
@@ -20116,14 +20195,15 @@ class UseTimeoutCommitmentDefinition extends BaseCommitmentDefinition {
20116
20195
  return spaceTrim$1(`
20117
20196
  # USE TIMEOUT
20118
20197
 
20119
- Enables the agent to schedule thread-scoped timeout wake-ups.
20198
+ Enables timeout wake-ups and timeout management for the same user+agent scope.
20120
20199
 
20121
20200
  ## Key aspects
20122
20201
 
20123
20202
  - The agent uses \`set_timeout\` to schedule a future wake-up in the same chat thread.
20124
20203
  - The tool returns immediately while the timeout is stored and executed by the runtime later.
20125
20204
  - The wake-up arrives as a new user-like timeout message in the same conversation.
20126
- - The agent can cancel an existing timeout by \`timeoutId\` via \`cancel_timeout\`.
20205
+ - The agent can inspect known timeouts via \`list_timeouts\`.
20206
+ - The agent can cancel an existing timeout by \`timeoutId\` via \`cancel_timeout\`, including timeouts created in another chat.
20127
20207
  - Commitment content is treated as optional timeout policy instructions.
20128
20208
 
20129
20209
  ## Examples
@@ -20152,6 +20232,7 @@ class UseTimeoutCommitmentDefinition extends BaseCommitmentDefinition {
20152
20232
  return {
20153
20233
  [TimeoutToolNames.set]: 'Set timer',
20154
20234
  [TimeoutToolNames.cancel]: 'Cancel timer',
20235
+ [TimeoutToolNames.list]: 'List timers',
20155
20236
  };
20156
20237
  }
20157
20238
  /**
@@ -21555,7 +21636,7 @@ function hasHttpProtocol(value) {
21555
21636
  * @public exported from `@promptbook/core`
21556
21637
  */
21557
21638
  function normalizeAgentName(rawAgentName) {
21558
- return titleToName(spaceTrim$2(rawAgentName));
21639
+ return titleToName(spaceTrim$1(rawAgentName));
21559
21640
  }
21560
21641
 
21561
21642
  /**
@@ -21730,7 +21811,7 @@ function parseAgentSource(agentSource) {
21730
21811
  continue;
21731
21812
  }
21732
21813
  if (commitment.type === 'FROM') {
21733
- const content = spaceTrim$2(commitment.content).split(/\r?\n/)[0] || '';
21814
+ const content = spaceTrim$1(commitment.content).split(/\r?\n/)[0] || '';
21734
21815
  if (content === 'Adam' || content === '' /* <- Note: Adam is implicit */) {
21735
21816
  continue;
21736
21817
  }
@@ -21753,7 +21834,7 @@ function parseAgentSource(agentSource) {
21753
21834
  continue;
21754
21835
  }
21755
21836
  if (commitment.type === 'IMPORT') {
21756
- const content = spaceTrim$2(commitment.content).split(/\r?\n/)[0] || '';
21837
+ const content = spaceTrim$1(commitment.content).split(/\r?\n/)[0] || '';
21757
21838
  let label = content;
21758
21839
  let iconName = 'ExternalLink'; // Import remote
21759
21840
  try {
@@ -21791,7 +21872,7 @@ function parseAgentSource(agentSource) {
21791
21872
  continue;
21792
21873
  }
21793
21874
  if (commitment.type === 'KNOWLEDGE') {
21794
- const content = spaceTrim$2(commitment.content);
21875
+ const content = spaceTrim$1(commitment.content);
21795
21876
  const extractedUrls = extractUrlsFromText(content);
21796
21877
  let label = content;
21797
21878
  let iconName = 'Book';
@@ -21850,7 +21931,7 @@ function parseAgentSource(agentSource) {
21850
21931
  continue;
21851
21932
  }
21852
21933
  if (commitment.type === 'META LINK') {
21853
- const linkValue = spaceTrim$2(commitment.content);
21934
+ const linkValue = spaceTrim$1(commitment.content);
21854
21935
  links.push(linkValue);
21855
21936
  meta.link = linkValue;
21856
21937
  continue;
@@ -21860,11 +21941,11 @@ function parseAgentSource(agentSource) {
21860
21941
  continue;
21861
21942
  }
21862
21943
  if (commitment.type === 'META IMAGE') {
21863
- meta.image = spaceTrim$2(commitment.content);
21944
+ meta.image = spaceTrim$1(commitment.content);
21864
21945
  continue;
21865
21946
  }
21866
21947
  if (commitment.type === 'META DESCRIPTION') {
21867
- meta.description = spaceTrim$2(commitment.content);
21948
+ meta.description = spaceTrim$1(commitment.content);
21868
21949
  continue;
21869
21950
  }
21870
21951
  if (commitment.type === 'META DISCLAIMER') {
@@ -21872,7 +21953,7 @@ function parseAgentSource(agentSource) {
21872
21953
  continue;
21873
21954
  }
21874
21955
  if (commitment.type === 'META INPUT PLACEHOLDER') {
21875
- meta.inputPlaceholder = spaceTrim$2(commitment.content);
21956
+ meta.inputPlaceholder = spaceTrim$1(commitment.content);
21876
21957
  continue;
21877
21958
  }
21878
21959
  if (commitment.type === 'MESSAGE SUFFIX') {
@@ -21888,7 +21969,7 @@ function parseAgentSource(agentSource) {
21888
21969
  continue;
21889
21970
  }
21890
21971
  if (commitment.type === 'META VOICE') {
21891
- meta.voice = spaceTrim$2(commitment.content);
21972
+ meta.voice = spaceTrim$1(commitment.content);
21892
21973
  continue;
21893
21974
  }
21894
21975
  if (commitment.type !== 'META') {
@@ -21897,10 +21978,10 @@ function parseAgentSource(agentSource) {
21897
21978
  // Parse META commitments - format is "META TYPE content"
21898
21979
  const metaTypeRaw = commitment.content.split(' ')[0] || 'NONE';
21899
21980
  if (metaTypeRaw === 'LINK') {
21900
- links.push(spaceTrim$2(commitment.content.substring(metaTypeRaw.length)));
21981
+ links.push(spaceTrim$1(commitment.content.substring(metaTypeRaw.length)));
21901
21982
  }
21902
21983
  const metaType = normalizeTo_camelCase(metaTypeRaw);
21903
- meta[metaType] = spaceTrim$2(commitment.content.substring(metaTypeRaw.length));
21984
+ meta[metaType] = spaceTrim$1(commitment.content.substring(metaTypeRaw.length));
21904
21985
  }
21905
21986
  // Generate fullname fallback if no meta fullname specified
21906
21987
  if (!meta.fullname) {
@@ -21931,7 +22012,7 @@ function parseAgentSource(agentSource) {
21931
22012
  * @returns The content with normalized separators
21932
22013
  */
21933
22014
  function normalizeSeparator(content) {
21934
- const trimmed = spaceTrim$2(content);
22015
+ const trimmed = spaceTrim$1(content);
21935
22016
  if (trimmed.includes(',')) {
21936
22017
  return trimmed;
21937
22018
  }
@@ -21944,7 +22025,7 @@ function normalizeSeparator(content) {
21944
22025
  * @returns Normalized domain or a trimmed fallback.
21945
22026
  */
21946
22027
  function normalizeMetaDomain(content) {
21947
- const trimmed = spaceTrim$2(content);
22028
+ const trimmed = spaceTrim$1(content);
21948
22029
  return normalizeDomainForMatching(trimmed) || trimmed.toLowerCase();
21949
22030
  }
21950
22031
  /**
@@ -22103,7 +22184,7 @@ function validateBook(source) {
22103
22184
  * @deprecated Use `$generateBookBoilerplate` instead
22104
22185
  * @public exported from `@promptbook/core`
22105
22186
  */
22106
- const DEFAULT_BOOK = padBook(validateBook(spaceTrim$2(`
22187
+ const DEFAULT_BOOK = padBook(validateBook(spaceTrim$1(`
22107
22188
  AI Avatar
22108
22189
 
22109
22190
  PERSONA A friendly AI assistant that helps you with your tasks
@@ -22935,7 +23016,7 @@ const knowledgeCommandParser = {
22935
23016
  */
22936
23017
  parse(input) {
22937
23018
  const { args } = input;
22938
- const knowledgeSourceContent = spaceTrim$2(args[0] || '');
23019
+ const knowledgeSourceContent = spaceTrim$1(args[0] || '');
22939
23020
  if (knowledgeSourceContent === '') {
22940
23021
  throw new ParseError(`Source is not defined`);
22941
23022
  }
@@ -23079,7 +23160,7 @@ const sectionCommandParser = {
23079
23160
  normalized = normalized.split('DIALOGUE').join('DIALOG');
23080
23161
  const taskTypes = SectionTypes.filter((sectionType) => normalized.includes(sectionType.split('_TASK').join('')));
23081
23162
  if (taskTypes.length !== 1) {
23082
- throw new ParseError(spaceTrim$2((block) => `
23163
+ throw new ParseError(spaceTrim$1((block) => `
23083
23164
  Unknown section type "${normalized}"
23084
23165
 
23085
23166
  Supported section types are:
@@ -23099,7 +23180,7 @@ const sectionCommandParser = {
23099
23180
  */
23100
23181
  $applyToTaskJson(command, $taskJson, $pipelineJson) {
23101
23182
  if ($taskJson.isSectionTypeSet === true) {
23102
- throw new ParseError(spaceTrim$2(`
23183
+ throw new ParseError(spaceTrim$1(`
23103
23184
  Section type is already defined in the section.
23104
23185
  It can be defined only once.
23105
23186
  `));
@@ -23379,7 +23460,7 @@ const expectCommandParser = {
23379
23460
  /**
23380
23461
  * Description of the FORMAT command
23381
23462
  */
23382
- description: spaceTrim$2(`
23463
+ description: spaceTrim$1(`
23383
23464
  Expect command describes the desired output of the task *(after post-processing)*
23384
23465
  It can set limits for the maximum/minimum length of the output, measured in characters, words, sentences, paragraphs or some other shape of the output.
23385
23466
  `),
@@ -23453,7 +23534,7 @@ const expectCommandParser = {
23453
23534
  }
23454
23535
  catch (error) {
23455
23536
  assertsError(error);
23456
- throw new ParseError(spaceTrim$2((block) => `
23537
+ throw new ParseError(spaceTrim$1((block) => `
23457
23538
  Invalid FORMAT command
23458
23539
  ${block(error.message)}:
23459
23540
  `));
@@ -23565,7 +23646,7 @@ function validateParameterName(parameterName) {
23565
23646
  if (!(error instanceof ParseError)) {
23566
23647
  throw error;
23567
23648
  }
23568
- throw new ParseError(spaceTrim$2((block) => `
23649
+ throw new ParseError(spaceTrim$1((block) => `
23569
23650
  ${block(error.message)}
23570
23651
 
23571
23652
  Tried to validate parameter name:
@@ -23624,7 +23705,7 @@ const foreachCommandParser = {
23624
23705
  const assignSign = args[3];
23625
23706
  const formatDefinition = FORMAT_DEFINITIONS.find((formatDefinition) => [formatDefinition.formatName, ...(formatDefinition.aliases || [])].includes(formatName));
23626
23707
  if (formatDefinition === undefined) {
23627
- throw new ParseError(spaceTrim$2((block) => `
23708
+ throw new ParseError(spaceTrim$1((block) => `
23628
23709
  Unsupported format "${formatName}"
23629
23710
 
23630
23711
  Available formats:
@@ -23636,7 +23717,7 @@ const foreachCommandParser = {
23636
23717
  }
23637
23718
  const subvalueParser = formatDefinition.subvalueParsers.find((subvalueParser) => [subvalueParser.subvalueName, ...(subvalueParser.aliases || [])].includes(subformatName));
23638
23719
  if (subvalueParser === undefined) {
23639
- throw new ParseError(spaceTrim$2((block) => `
23720
+ throw new ParseError(spaceTrim$1((block) => `
23640
23721
  Unsupported subformat name "${subformatName}" for format "${formatName}"
23641
23722
 
23642
23723
  Available subformat names for format "${formatDefinition.formatName}":
@@ -23684,7 +23765,7 @@ const foreachCommandParser = {
23684
23765
  outputSubparameterName = 'newLine';
23685
23766
  }
23686
23767
  else {
23687
- throw new ParseError(spaceTrim$2(`
23768
+ throw new ParseError(spaceTrim$1(`
23688
23769
  FOREACH ${formatName} ${subformatName} must specify output subparameter
23689
23770
 
23690
23771
  Correct example:
@@ -23760,7 +23841,7 @@ const formatCommandParser = {
23760
23841
  /**
23761
23842
  * Description of the FORMAT command
23762
23843
  */
23763
- description: spaceTrim$2(`
23844
+ description: spaceTrim$1(`
23764
23845
  Format command describes the desired output of the task (after post-processing)
23765
23846
  It can set limits for the maximum/minimum length of the output, measured in characters, words, sentences, paragraphs or some other shape of the output.
23766
23847
  `),
@@ -24132,7 +24213,7 @@ const formfactorCommandParser = {
24132
24213
  const formfactorNameCandidate = args[0].toUpperCase();
24133
24214
  const formfactor = FORMFACTOR_DEFINITIONS.find((definition) => [definition.name, ...{ aliasNames: [], ...definition }.aliasNames].includes(formfactorNameCandidate));
24134
24215
  if (formfactor === undefined) {
24135
- throw new ParseError(spaceTrim$2((block) => `
24216
+ throw new ParseError(spaceTrim$1((block) => `
24136
24217
  Unknown formfactor name "${formfactorNameCandidate}"
24137
24218
 
24138
24219
  Available formfactors:
@@ -24151,7 +24232,7 @@ const formfactorCommandParser = {
24151
24232
  */
24152
24233
  $applyToPipelineJson(command, $pipelineJson) {
24153
24234
  if ($pipelineJson.formfactorName !== undefined && $pipelineJson.formfactorName !== command.formfactorName) {
24154
- throw new ParseError(spaceTrim$2(`
24235
+ throw new ParseError(spaceTrim$1(`
24155
24236
  Redefinition of \`FORMFACTOR\` in the pipeline head
24156
24237
 
24157
24238
  You have used:
@@ -24299,7 +24380,7 @@ const modelCommandParser = {
24299
24380
  */
24300
24381
  parse(input) {
24301
24382
  const { args, normalized } = input;
24302
- const availableVariantsMessage = spaceTrim$2((block) => `
24383
+ const availableVariantsMessage = spaceTrim$1((block) => `
24303
24384
  Available variants are:
24304
24385
  ${block(MODEL_VARIANTS.map((variantName) => `- ${variantName}${variantName !== 'EMBEDDING' ? '' : ' (Not available in pipeline)'}`).join('\n'))}
24305
24386
  `);
@@ -24321,14 +24402,14 @@ const modelCommandParser = {
24321
24402
  // <- Note: [🤖]
24322
24403
  }
24323
24404
  else if (normalized.startsWith('MODEL_VARIANT_EMBED')) {
24324
- spaceTrim$2((block) => `
24405
+ spaceTrim$1((block) => `
24325
24406
  Embedding model can not be used in pipeline
24326
24407
 
24327
24408
  ${block(availableVariantsMessage)}
24328
24409
  `);
24329
24410
  }
24330
24411
  else {
24331
- throw new ParseError(spaceTrim$2((block) => `
24412
+ throw new ParseError(spaceTrim$1((block) => `
24332
24413
  Unknown model variant in command:
24333
24414
 
24334
24415
  ${block(availableVariantsMessage)}
@@ -24343,7 +24424,7 @@ const modelCommandParser = {
24343
24424
  };
24344
24425
  }
24345
24426
  else {
24346
- throw new ParseError(spaceTrim$2((block) => `
24427
+ throw new ParseError(spaceTrim$1((block) => `
24347
24428
  Unknown model key in command.
24348
24429
 
24349
24430
  Supported model keys are:
@@ -24370,7 +24451,7 @@ const modelCommandParser = {
24370
24451
  // <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
24371
24452
  }
24372
24453
  else {
24373
- throw new ParseError(spaceTrim$2(`
24454
+ throw new ParseError(spaceTrim$1(`
24374
24455
  Redefinition of \`MODEL ${command.key}\` in the pipeline head
24375
24456
 
24376
24457
  You have used:
@@ -24398,7 +24479,7 @@ const modelCommandParser = {
24398
24479
  // <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
24399
24480
  }
24400
24481
  else {
24401
- throw new ParseError(spaceTrim$2(`
24482
+ throw new ParseError(spaceTrim$1(`
24402
24483
  Redefinition of MODEL \`${command.key}\` in the task "${$taskJson.title || $taskJson.name}"
24403
24484
 
24404
24485
  You have used:
@@ -24408,7 +24489,7 @@ const modelCommandParser = {
24408
24489
  }
24409
24490
  }
24410
24491
  if (command.value === ($pipelineJson.defaultModelRequirements || {})[command.key]) {
24411
- console.log(spaceTrim$2(`
24492
+ console.log(spaceTrim$1(`
24412
24493
  Setting MODEL \`${command.key}\` in the task "${$taskJson.title || $taskJson.name}" to the same value as in the pipeline head
24413
24494
 
24414
24495
  In pipeline head:
@@ -24491,7 +24572,7 @@ const parameterCommandParser = {
24491
24572
  // <- TODO: When [🥶] fixed, change to:
24492
24573
  // > const parameterDescriptionRaw = rawArgs.split(parameterNameRaw).join('').trim();
24493
24574
  if (parameterDescriptionRaw && parameterDescriptionRaw.match(/\{(?<embeddedParameterName>[a-z0-9_]+)\}/im)) {
24494
- throw new ParseError(spaceTrim$2((block) => `
24575
+ throw new ParseError(spaceTrim$1((block) => `
24495
24576
  Parameter \`{${parameterNameRaw}}\` can not contain another parameter in description
24496
24577
 
24497
24578
  The description:
@@ -24673,7 +24754,7 @@ function $applyToTaskJson(command, $taskJson, $pipelineJson) {
24673
24754
  persona.description = personaDescription;
24674
24755
  return;
24675
24756
  }
24676
- console.warn(spaceTrim$2(`
24757
+ console.warn(spaceTrim$1(`
24677
24758
 
24678
24759
  Persona "${personaName}" is defined multiple times with different description:
24679
24760
 
@@ -24684,7 +24765,7 @@ function $applyToTaskJson(command, $taskJson, $pipelineJson) {
24684
24765
  ${personaDescription}
24685
24766
 
24686
24767
  `));
24687
- persona.description += spaceTrim$2('\n\n' + personaDescription);
24768
+ persona.description += spaceTrim$1('\n\n' + personaDescription);
24688
24769
  }
24689
24770
 
24690
24771
  /**
@@ -25483,7 +25564,7 @@ function removeMarkdownComments(content) {
25483
25564
  */
25484
25565
  function isFlatPipeline(pipelineString) {
25485
25566
  pipelineString = removeMarkdownComments(pipelineString);
25486
- pipelineString = spaceTrim$2(pipelineString);
25567
+ pipelineString = spaceTrim$1(pipelineString);
25487
25568
  const isMarkdownBeginningWithHeadline = pipelineString.startsWith('# ');
25488
25569
  //const isLastLineReturnStatement = pipelineString.split(/\r?\n/).pop()!.split('`').join('').startsWith('->');
25489
25570
  const isBacktickBlockUsed = pipelineString.includes('```');
@@ -25509,7 +25590,7 @@ function deflatePipeline(pipelineString) {
25509
25590
  if (!isFlatPipeline(pipelineString)) {
25510
25591
  return pipelineString;
25511
25592
  }
25512
- pipelineString = spaceTrim$2(pipelineString);
25593
+ pipelineString = spaceTrim$1(pipelineString);
25513
25594
  const pipelineStringLines = pipelineString.split(/\r?\n/);
25514
25595
  const potentialReturnStatement = pipelineStringLines.pop();
25515
25596
  let returnStatement;
@@ -25522,19 +25603,19 @@ function deflatePipeline(pipelineString) {
25522
25603
  returnStatement = `-> {${DEFAULT_BOOK_OUTPUT_PARAMETER_NAME}}`;
25523
25604
  pipelineStringLines.push(potentialReturnStatement);
25524
25605
  }
25525
- const prompt = spaceTrim$2(pipelineStringLines.join('\n'));
25606
+ const prompt = spaceTrim$1(pipelineStringLines.join('\n'));
25526
25607
  let quotedPrompt;
25527
25608
  if (prompt.split(/\r?\n/).length <= 1) {
25528
25609
  quotedPrompt = `> ${prompt}`;
25529
25610
  }
25530
25611
  else {
25531
- quotedPrompt = spaceTrim$2((block) => `
25612
+ quotedPrompt = spaceTrim$1((block) => `
25532
25613
  \`\`\`
25533
25614
  ${block(prompt.split('`').join('\\`'))}
25534
25615
  \`\`\`
25535
25616
  `);
25536
25617
  }
25537
- pipelineString = validatePipelineString(spaceTrim$2((block) => `
25618
+ pipelineString = validatePipelineString(spaceTrim$1((block) => `
25538
25619
  # ${DEFAULT_BOOK_TITLE}
25539
25620
 
25540
25621
  ## Prompt
@@ -25598,7 +25679,7 @@ function extractAllListItemsFromMarkdown(markdown) {
25598
25679
  function extractOneBlockFromMarkdown(markdown) {
25599
25680
  const codeBlocks = extractAllBlocksFromMarkdown(markdown);
25600
25681
  if (codeBlocks.length !== 1) {
25601
- throw new ParseError(spaceTrim$2((block) => `
25682
+ throw new ParseError(spaceTrim$1((block) => `
25602
25683
  There should be exactly 1 code block in task section, found ${codeBlocks.length} code blocks
25603
25684
 
25604
25685
  ${block(codeBlocks.map((block, i) => `Block ${i + 1}:\n${block.content}`).join('\n\n\n'))}
@@ -25623,7 +25704,7 @@ function parseMarkdownSection(value) {
25623
25704
  }
25624
25705
  const title = lines[0].replace(/^#+\s*/, '');
25625
25706
  const level = (_b = (_a = lines[0].match(/^#+/)) === null || _a === void 0 ? void 0 : _a[0].length) !== null && _b !== void 0 ? _b : 0;
25626
- const content = spaceTrim$2(lines.slice(1).join('\n'));
25707
+ const content = spaceTrim$1(lines.slice(1).join('\n'));
25627
25708
  if (level < 1 || level > 6) {
25628
25709
  throw new ParseError('Markdown section must have heading level between 1 and 6');
25629
25710
  }
@@ -25651,7 +25732,7 @@ function splitMarkdownIntoSections(markdown) {
25651
25732
  if (buffer.length === 0) {
25652
25733
  return;
25653
25734
  }
25654
- let section = spaceTrim$2(buffer.join('\n'));
25735
+ let section = spaceTrim$1(buffer.join('\n'));
25655
25736
  if (section === '') {
25656
25737
  return;
25657
25738
  }
@@ -25726,7 +25807,7 @@ function flattenMarkdown(markdown) {
25726
25807
  flattenedMarkdown += `## ${title}` + `\n\n`;
25727
25808
  flattenedMarkdown += content + `\n\n`; // <- [🧠] Maybe 3 new lines?
25728
25809
  }
25729
- return spaceTrim$2(flattenedMarkdown);
25810
+ return spaceTrim$1(flattenedMarkdown);
25730
25811
  }
25731
25812
  /**
25732
25813
  * TODO: [🏛] This can be part of markdown builder
@@ -26681,7 +26762,7 @@ function usageToHuman(usage) {
26681
26762
  // Note: For negligible usage, we report at least something
26682
26763
  reportItems.push('Negligible');
26683
26764
  }
26684
- return spaceTrim$2((block) => `
26765
+ return spaceTrim$1((block) => `
26685
26766
  Usage:
26686
26767
  ${block(reportItems.map((item) => `- ${item}`).join('\n'))}
26687
26768
  `);
@@ -27010,13 +27091,13 @@ function $registeredLlmToolsMessage() {
27010
27091
  });
27011
27092
  const usedEnvMessage = `Unknown \`.env\` file` ;
27012
27093
  if (metadata.length === 0) {
27013
- return spaceTrim$2((block) => `
27094
+ return spaceTrim$1((block) => `
27014
27095
  No LLM providers are available.
27015
27096
 
27016
27097
  ${block(usedEnvMessage)}
27017
27098
  `);
27018
27099
  }
27019
- return spaceTrim$2((block) => `
27100
+ return spaceTrim$1((block) => `
27020
27101
 
27021
27102
  ${block(usedEnvMessage)}
27022
27103
 
@@ -27062,7 +27143,7 @@ function $registeredLlmToolsMessage() {
27062
27143
  morePieces.push(`Not configured`); // <- Note: Can not be configured via environment variables
27063
27144
  }
27064
27145
  }
27065
- let providerMessage = spaceTrim$2(`
27146
+ let providerMessage = spaceTrim$1(`
27066
27147
  ${i + 1}) **${title}** \`${className}\` from \`${packageName}\`
27067
27148
  ${morePieces.join('; ')}
27068
27149
  `);
@@ -27108,7 +27189,7 @@ function createLlmToolsFromConfiguration(configuration, options = {}) {
27108
27189
  .find(({ packageName, className }) => llmConfiguration.packageName === packageName && llmConfiguration.className === className);
27109
27190
  if (registeredItem === undefined) {
27110
27191
  // console.log('$llmToolsRegister.list()', $llmToolsRegister.list());
27111
- throw new Error(spaceTrim$2((block) => `
27192
+ throw new Error(spaceTrim$1((block) => `
27112
27193
  There is no constructor for LLM provider \`${llmConfiguration.className}\` from \`${llmConfiguration.packageName}\`
27113
27194
  Running in ${!$isRunningInBrowser() ? '' : 'browser environment'}${!$isRunningInNode() ? '' : 'node environment'}${!$isRunningInWebWorker() ? '' : 'worker environment'}
27114
27195
 
@@ -27216,7 +27297,7 @@ function cacheLlmTools(llmTools, options = {}) {
27216
27297
  let normalizedContent = content;
27217
27298
  normalizedContent = normalizedContent.replace(/\s+/g, ' ');
27218
27299
  normalizedContent = normalizedContent.split('\r\n').join('\n');
27219
- normalizedContent = spaceTrim$2(normalizedContent);
27300
+ normalizedContent = spaceTrim$1(normalizedContent);
27220
27301
  // Note: Do not need to save everything in the cache, just the relevant parameters
27221
27302
  const relevantParameterNames = extractParameterNames(content);
27222
27303
  const relevantParameters = Object.fromEntries(Object.entries(parameters).filter(([key]) => relevantParameterNames.has(key)));
@@ -28159,7 +28240,7 @@ function pricing(value) {
28159
28240
  /**
28160
28241
  * List of available OpenAI models with pricing
28161
28242
  *
28162
- * Note: Synced with official API docs at 2025-11-19
28243
+ * Note: Synced with official API docs at 2026-03-22
28163
28244
  *
28164
28245
  * @see https://platform.openai.com/docs/models/
28165
28246
  * @see https://openai.com/api/pricing/
@@ -28281,8 +28362,8 @@ const OPENAI_MODELS = exportJson({
28281
28362
  modelName: 'gpt-4.1',
28282
28363
  modelDescription: 'Smartest non-reasoning model with 128K context window. Enhanced version of GPT-4 with improved instruction following, better factual accuracy, and reduced hallucinations. Features advanced function calling capabilities and superior performance on coding tasks. Ideal for applications requiring high intelligence without reasoning overhead.',
28283
28364
  pricing: {
28284
- prompt: pricing(`$3.00 / 1M tokens`),
28285
- output: pricing(`$12.00 / 1M tokens`),
28365
+ prompt: pricing(`$2.00 / 1M tokens`),
28366
+ output: pricing(`$8.00 / 1M tokens`),
28286
28367
  },
28287
28368
  },
28288
28369
  /**/
@@ -28293,8 +28374,8 @@ const OPENAI_MODELS = exportJson({
28293
28374
  modelName: 'gpt-4.1-mini',
28294
28375
  modelDescription: 'Smaller, faster version of GPT-4.1 with 128K context window. Balances intelligence and efficiency with 3x faster inference than base GPT-4.1. Maintains strong capabilities across text generation, reasoning, and coding while offering better cost-performance ratio for most applications.',
28295
28376
  pricing: {
28296
- prompt: pricing(`$0.80 / 1M tokens`),
28297
- output: pricing(`$3.20 / 1M tokens`),
28377
+ prompt: pricing(`$0.40 / 1M tokens`),
28378
+ output: pricing(`$1.60 / 1M tokens`),
28298
28379
  },
28299
28380
  },
28300
28381
  /**/
@@ -28305,8 +28386,8 @@ const OPENAI_MODELS = exportJson({
28305
28386
  modelName: 'gpt-4.1-nano',
28306
28387
  modelDescription: 'Fastest, most cost-efficient version of GPT-4.1 with 128K context window. Optimized for high-throughput applications requiring good quality at minimal cost. Features 5x faster inference than GPT-4.1 while maintaining adequate performance for most general-purpose tasks.',
28307
28388
  pricing: {
28308
- prompt: pricing(`$0.20 / 1M tokens`),
28309
- output: pricing(`$0.80 / 1M tokens`),
28389
+ prompt: pricing(`$0.10 / 1M tokens`),
28390
+ output: pricing(`$0.40 / 1M tokens`),
28310
28391
  },
28311
28392
  },
28312
28393
  /**/
@@ -28317,8 +28398,8 @@ const OPENAI_MODELS = exportJson({
28317
28398
  modelName: 'o3',
28318
28399
  modelDescription: 'Advanced reasoning model with 128K context window specializing in complex logical, mathematical, and analytical tasks. Successor to o1 with enhanced step-by-step problem-solving capabilities and superior performance on STEM-focused problems. Ideal for professional applications requiring deep analytical thinking and precise reasoning.',
28319
28400
  pricing: {
28320
- prompt: pricing(`$15.00 / 1M tokens`),
28321
- output: pricing(`$60.00 / 1M tokens`),
28401
+ prompt: pricing(`$2.00 / 1M tokens`),
28402
+ output: pricing(`$8.00 / 1M tokens`),
28322
28403
  },
28323
28404
  },
28324
28405
  /**/
@@ -28329,8 +28410,8 @@ const OPENAI_MODELS = exportJson({
28329
28410
  modelName: 'o3-pro',
28330
28411
  modelDescription: 'Enhanced version of o3 with more compute allocated for better responses on the most challenging problems. Features extended reasoning time and improved accuracy on complex analytical tasks. Designed for applications where maximum reasoning quality is more important than response speed.',
28331
28412
  pricing: {
28332
- prompt: pricing(`$30.00 / 1M tokens`),
28333
- output: pricing(`$120.00 / 1M tokens`),
28413
+ prompt: pricing(`$20.00 / 1M tokens`),
28414
+ output: pricing(`$80.00 / 1M tokens`),
28334
28415
  },
28335
28416
  },
28336
28417
  /**/
@@ -28341,8 +28422,8 @@ const OPENAI_MODELS = exportJson({
28341
28422
  modelName: 'o4-mini',
28342
28423
  modelDescription: 'Fast, cost-efficient reasoning model with 128K context window. Successor to o1-mini with improved analytical capabilities while maintaining speed advantages. Features enhanced mathematical reasoning and logical problem-solving at significantly lower cost than full reasoning models.',
28343
28424
  pricing: {
28344
- prompt: pricing(`$4.00 / 1M tokens`),
28345
- output: pricing(`$16.00 / 1M tokens`),
28425
+ prompt: pricing(`$1.10 / 1M tokens`),
28426
+ output: pricing(`$4.40 / 1M tokens`),
28346
28427
  },
28347
28428
  },
28348
28429
  /**/
@@ -28700,8 +28781,8 @@ const OPENAI_MODELS = exportJson({
28700
28781
  modelName: 'gpt-4o-2024-05-13',
28701
28782
  modelDescription: 'May 2024 version of GPT-4o with 128K context window. Features enhanced multimodal capabilities including superior image understanding (up to 20MP), audio processing, and improved reasoning. Optimized for 2x lower latency than GPT-4 Turbo while maintaining high performance. Includes knowledge up to October 2023. Ideal for production applications requiring reliable multimodal capabilities.',
28702
28783
  pricing: {
28703
- prompt: pricing(`$5.00 / 1M tokens`),
28704
- output: pricing(`$15.00 / 1M tokens`),
28784
+ prompt: pricing(`$2.50 / 1M tokens`),
28785
+ output: pricing(`$10.00 / 1M tokens`),
28705
28786
  },
28706
28787
  },
28707
28788
  /**/
@@ -28712,8 +28793,8 @@ const OPENAI_MODELS = exportJson({
28712
28793
  modelName: 'gpt-4o',
28713
28794
  modelDescription: "OpenAI's most advanced general-purpose multimodal model with 128K context window. Optimized for balanced performance, speed, and cost with 2x faster responses than GPT-4 Turbo. Features excellent vision processing, audio understanding, reasoning, and text generation quality. Represents optimal balance of capability and efficiency for most advanced applications.",
28714
28795
  pricing: {
28715
- prompt: pricing(`$5.00 / 1M tokens`),
28716
- output: pricing(`$15.00 / 1M tokens`),
28796
+ prompt: pricing(`$2.50 / 1M tokens`),
28797
+ output: pricing(`$10.00 / 1M tokens`),
28717
28798
  },
28718
28799
  },
28719
28800
  /**/
@@ -28784,8 +28865,8 @@ const OPENAI_MODELS = exportJson({
28784
28865
  modelName: 'o3-mini',
28785
28866
  modelDescription: 'Cost-effective reasoning model with 128K context window optimized for academic and scientific problem-solving. Features efficient performance on STEM tasks with specialized capabilities in mathematics, physics, chemistry, and computer science. Offers 80% of O1 performance on technical domains at significantly lower cost. Ideal for educational applications and research support.',
28786
28867
  pricing: {
28787
- prompt: pricing(`$3.00 / 1M tokens`),
28788
- output: pricing(`$12.00 / 1M tokens`),
28868
+ prompt: pricing(`$1.10 / 1M tokens`),
28869
+ output: pricing(`$4.40 / 1M tokens`),
28789
28870
  },
28790
28871
  },
28791
28872
  /**/
@@ -28885,53 +28966,6 @@ resultContent, rawResponse, duration = ZERO_VALUE) {
28885
28966
  * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
28886
28967
  */
28887
28968
 
28888
- /**
28889
- * Maps Promptbook tools to OpenAI tools.
28890
- *
28891
- * @private
28892
- */
28893
- function mapToolsToOpenAi(tools) {
28894
- return tools.map((tool) => ({
28895
- type: 'function',
28896
- function: {
28897
- name: tool.name,
28898
- description: tool.description,
28899
- parameters: tool.parameters,
28900
- },
28901
- }));
28902
- }
28903
-
28904
- /**
28905
- * Builds a tool invocation script that injects hidden runtime context into tool args.
28906
- *
28907
- * @private utility of OpenAI tool execution wrappers
28908
- */
28909
- function buildToolInvocationScript(options) {
28910
- const { functionName, functionArgsExpression } = options;
28911
- return `
28912
- const args = ${functionArgsExpression};
28913
- const runtimeContextRaw =
28914
- typeof ${TOOL_RUNTIME_CONTEXT_PARAMETER} === 'undefined'
28915
- ? undefined
28916
- : ${TOOL_RUNTIME_CONTEXT_PARAMETER};
28917
-
28918
- if (runtimeContextRaw !== undefined && args && typeof args === 'object' && !Array.isArray(args)) {
28919
- args.${TOOL_RUNTIME_CONTEXT_ARGUMENT} = runtimeContextRaw;
28920
- }
28921
-
28922
- const toolProgressTokenRaw =
28923
- typeof ${TOOL_PROGRESS_TOKEN_PARAMETER} === 'undefined'
28924
- ? undefined
28925
- : ${TOOL_PROGRESS_TOKEN_PARAMETER};
28926
-
28927
- if (toolProgressTokenRaw !== undefined && args && typeof args === 'object' && !Array.isArray(args)) {
28928
- args.${TOOL_PROGRESS_TOKEN_ARGUMENT} = toolProgressTokenRaw;
28929
- }
28930
-
28931
- return await ${functionName}(args);
28932
- `;
28933
- }
28934
-
28935
28969
  /**
28936
28970
  * Parses an OpenAI error message to identify which parameter is unsupported
28937
28971
  *
@@ -28988,6 +29022,53 @@ function isUnsupportedParameterError(error) {
28988
29022
  errorMessage.includes('does not support'));
28989
29023
  }
28990
29024
 
29025
+ /**
29026
+ * Builds a tool invocation script that injects hidden runtime context into tool args.
29027
+ *
29028
+ * @private utility of OpenAI tool execution wrappers
29029
+ */
29030
+ function buildToolInvocationScript(options) {
29031
+ const { functionName, functionArgsExpression } = options;
29032
+ return `
29033
+ const args = ${functionArgsExpression};
29034
+ const runtimeContextRaw =
29035
+ typeof ${TOOL_RUNTIME_CONTEXT_PARAMETER} === 'undefined'
29036
+ ? undefined
29037
+ : ${TOOL_RUNTIME_CONTEXT_PARAMETER};
29038
+
29039
+ if (runtimeContextRaw !== undefined && args && typeof args === 'object' && !Array.isArray(args)) {
29040
+ args.${TOOL_RUNTIME_CONTEXT_ARGUMENT} = runtimeContextRaw;
29041
+ }
29042
+
29043
+ const toolProgressTokenRaw =
29044
+ typeof ${TOOL_PROGRESS_TOKEN_PARAMETER} === 'undefined'
29045
+ ? undefined
29046
+ : ${TOOL_PROGRESS_TOKEN_PARAMETER};
29047
+
29048
+ if (toolProgressTokenRaw !== undefined && args && typeof args === 'object' && !Array.isArray(args)) {
29049
+ args.${TOOL_PROGRESS_TOKEN_ARGUMENT} = toolProgressTokenRaw;
29050
+ }
29051
+
29052
+ return await ${functionName}(args);
29053
+ `;
29054
+ }
29055
+
29056
+ /**
29057
+ * Maps Promptbook tools to OpenAI tools.
29058
+ *
29059
+ * @private
29060
+ */
29061
+ function mapToolsToOpenAi(tools) {
29062
+ return tools.map((tool) => ({
29063
+ type: 'function',
29064
+ function: {
29065
+ name: tool.name,
29066
+ description: tool.description,
29067
+ parameters: tool.parameters,
29068
+ },
29069
+ }));
29070
+ }
29071
+
28991
29072
  /**
28992
29073
  * Provides access to the structured clone implementation when available.
28993
29074
  */
@@ -29954,7 +30035,7 @@ class OpenAiCompatibleExecutionTools {
29954
30035
  // Note: Match exact or prefix for model families
29955
30036
  const model = this.HARDCODED_MODELS.find(({ modelName }) => modelName === defaultModelName || modelName.startsWith(defaultModelName));
29956
30037
  if (model === undefined) {
29957
- throw new PipelineExecutionError(spaceTrim$2((block) => `
30038
+ throw new PipelineExecutionError(spaceTrim$1((block) => `
29958
30039
  Cannot find model in ${this.title} models with name "${defaultModelName}" which should be used as default.
29959
30040
 
29960
30041
  Available models:
@@ -30880,7 +30961,7 @@ class OpenAiVectorStoreHandler extends OpenAiExecutionTools {
30880
30961
  }
30881
30962
  }
30882
30963
 
30883
- const DEFAULT_AGENT_KIT_MODEL_NAME = 'gpt-5-mini-2025-08-07';
30964
+ const DEFAULT_AGENT_KIT_MODEL_NAME = 'gpt-5.4-nano';
30884
30965
  /**
30885
30966
  * Creates one structured log entry for streamed tool-call updates.
30886
30967
  *
@@ -31375,7 +31456,7 @@ class OpenAiAgentKitExecutionTools extends OpenAiVectorStoreHandler {
31375
31456
  }),
31376
31457
  ],
31377
31458
  };
31378
- const errorMessage = spaceTrim$2((block) => `
31459
+ const errorMessage = spaceTrim$1((block) => `
31379
31460
 
31380
31461
  The invoked tool \`${functionName}\` failed with error:
31381
31462
 
@@ -32093,7 +32174,7 @@ class OpenAiAssistantExecutionTools extends OpenAiVectorStoreHandler {
32093
32174
  assertsError(error);
32094
32175
  const serializedError = serializeError(error);
32095
32176
  errors = [serializedError];
32096
- functionResponse = spaceTrim$2((block) => `
32177
+ functionResponse = spaceTrim$1((block) => `
32097
32178
 
32098
32179
  The invoked tool \`${functionName}\` failed with error:
32099
32180
 
@@ -33101,7 +33182,7 @@ class SelfLearningManager {
33101
33182
  if (isJsonSchemaResponseFormat(responseFormat)) {
33102
33183
  const jsonSchema = responseFormat.json_schema;
33103
33184
  const schemaJson = JSON.stringify(jsonSchema, null, 4);
33104
- userMessageContent = spaceTrim$2((block) => `
33185
+ userMessageContent = spaceTrim$1((block) => `
33105
33186
  ${block(prompt.content)}
33106
33187
 
33107
33188
  NOTE Request was made through OpenAI Compatible API with \`response_format\` of type \`json_schema\` with the following schema:
@@ -33132,12 +33213,12 @@ class SelfLearningManager {
33132
33213
  const formattedAgentMessage = formatAgentMessageForJsonMode(result.content, usesJsonSchemaMode);
33133
33214
  const teacherInstructions = extractOpenTeacherInstructions(agentSource);
33134
33215
  const teacherInstructionsSection = teacherInstructions
33135
- ? spaceTrim$2((block) => `
33216
+ ? spaceTrim$1((block) => `
33136
33217
  **Teacher instructions:**
33137
33218
  ${block(teacherInstructions)}
33138
33219
  `)
33139
33220
  : '';
33140
- const teacherPromptContent = spaceTrim$2((block) => `
33221
+ const teacherPromptContent = spaceTrim$1((block) => `
33141
33222
 
33142
33223
  You are a teacher agent helping another agent to learn from its interactions.
33143
33224
 
@@ -33170,7 +33251,7 @@ class SelfLearningManager {
33170
33251
  ? '- This interaction used JSON mode, so the agent answer should stay as a formatted JSON code block.'
33171
33252
  : ''}
33172
33253
  ${block(isInitialMessageMissing
33173
- ? spaceTrim$2(`
33254
+ ? spaceTrim$1(`
33174
33255
  - The agent source does not have an INITIAL MESSAGE defined, generate one.
33175
33256
  - The INITIAL MESSAGE should be welcoming, informative about the agent capabilities and also should give some quick options to start the conversation with the agent.
33176
33257
  - The quick option looks like \`[👋 Hello](?message=Hello, how are you?)\`
@@ -33213,7 +33294,7 @@ class SelfLearningManager {
33213
33294
  */
33214
33295
  appendToAgentSource(section) {
33215
33296
  const currentSource = this.options.getAgentSource();
33216
- const newSource = padBook(validateBook(spaceTrim$2(currentSource) + section));
33297
+ const newSource = padBook(validateBook(spaceTrim$1(currentSource) + section));
33217
33298
  this.options.updateAgentSource(newSource);
33218
33299
  }
33219
33300
  }
@@ -33241,13 +33322,13 @@ function formatAgentMessageForJsonMode(content, isJsonMode) {
33241
33322
  }
33242
33323
  const parsedJson = tryParseJson(content);
33243
33324
  if (parsedJson === null) {
33244
- return spaceTrim$2((block) => `
33325
+ return spaceTrim$1((block) => `
33245
33326
  \`\`\`json
33246
33327
  ${block(content)}
33247
33328
  \`\`\`
33248
33329
  `);
33249
33330
  }
33250
- return spaceTrim$2((block) => `
33331
+ return spaceTrim$1((block) => `
33251
33332
  \`\`\`json
33252
33333
  ${block(JSON.stringify(parsedJson, null, 4))}
33253
33334
  \`\`\`
@@ -33279,7 +33360,7 @@ function formatSelfLearningSample(options) {
33279
33360
  const internalMessagesSection = options.internalMessages
33280
33361
  .map((internalMessage) => formatInternalLearningMessage(internalMessage))
33281
33362
  .join('\n\n');
33282
- return spaceTrim$2((block) => `
33363
+ return spaceTrim$1((block) => `
33283
33364
 
33284
33365
  USER MESSAGE
33285
33366
  ${block(options.userMessageContent)}
@@ -33297,7 +33378,7 @@ function formatSelfLearningSample(options) {
33297
33378
  * @private function of Agent
33298
33379
  */
33299
33380
  function formatInternalLearningMessage(internalMessage) {
33300
- return spaceTrim$2((block) => `
33381
+ return spaceTrim$1((block) => `
33301
33382
  INTERNAL MESSAGE
33302
33383
  ${block(stringifyInternalLearningPayload(internalMessage))}
33303
33384
  `);
@@ -33801,7 +33882,7 @@ function book(strings, ...values) {
33801
33882
  const bookString = prompt(strings, ...values).toString();
33802
33883
  if (!isValidPipelineString(bookString)) {
33803
33884
  // TODO: Make the CustomError for this
33804
- throw new Error(spaceTrim$2(`
33885
+ throw new Error(spaceTrim$1(`
33805
33886
  The string is not a valid pipeline string
33806
33887
 
33807
33888
  book\`
@@ -33811,7 +33892,7 @@ function book(strings, ...values) {
33811
33892
  }
33812
33893
  if (!isValidBook(bookString)) {
33813
33894
  // TODO: Make the CustomError for this
33814
- throw new Error(spaceTrim$2(`
33895
+ throw new Error(spaceTrim$1(`
33815
33896
  The string is not a valid book
33816
33897
 
33817
33898
  book\`
@@ -34138,7 +34219,7 @@ function buildRemoteAgentSource(profile, meta) {
34138
34219
  .filter((line) => Boolean(line))
34139
34220
  .join('\n');
34140
34221
  const personaBlock = profile.personaDescription
34141
- ? spaceTrim$2((block) => `
34222
+ ? spaceTrim$1((block) => `
34142
34223
  PERSONA
34143
34224
  ${block(profile.personaDescription || '')}
34144
34225
  `)
@@ -34174,7 +34255,7 @@ class RemoteAgent extends Agent {
34174
34255
  // <- TODO: [🐱‍🚀] What about closed-source agents?
34175
34256
  // <- TODO: [🐱‍🚀] Maybe use promptbookFetch
34176
34257
  if (!profileResponse.ok) {
34177
- throw new Error(spaceTrim$2((block) => `
34258
+ throw new Error(spaceTrim$1((block) => `
34178
34259
  Failed to fetch remote agent profile:
34179
34260
 
34180
34261
  Agent URL:
@@ -35408,7 +35489,7 @@ const OpenAiSdkTranspiler = {
35408
35489
  }
35409
35490
  const KNOWLEDGE_THRESHOLD = 1000;
35410
35491
  if (directKnowledge.join('\n').length > KNOWLEDGE_THRESHOLD || knowledgeSources.length > 0) {
35411
- return spaceTrim$2((block) => `
35492
+ return spaceTrim$1((block) => `
35412
35493
  #!/usr/bin/env node
35413
35494
 
35414
35495
  import * as dotenv from 'dotenv';
@@ -35483,7 +35564,7 @@ const OpenAiSdkTranspiler = {
35483
35564
 
35484
35565
  if (context) {
35485
35566
  question = spaceTrim(\`
35486
- ${block(spaceTrim$2(`
35567
+ ${block(spaceTrim$1(`
35487
35568
  Here is some additional context to help you answer the question:
35488
35569
  \${context}
35489
35570
 
@@ -35564,7 +35645,7 @@ const OpenAiSdkTranspiler = {
35564
35645
  })();
35565
35646
  `);
35566
35647
  }
35567
- const source = spaceTrim$2((block) => `
35648
+ const source = spaceTrim$1((block) => `
35568
35649
 
35569
35650
  #!/usr/bin/env node
35570
35651
 
@@ -35756,7 +35837,7 @@ const PUBLIC_AGENTS_SERVERS = [
35756
35837
  function aboutPromptbookInformation(options) {
35757
35838
  const { isServersInfoIncluded = true, isRuntimeEnvironmentInfoIncluded = true } = options || {};
35758
35839
  const fullInfoPieces = [];
35759
- const basicInfo = spaceTrim$2(`
35840
+ const basicInfo = spaceTrim$1(`
35760
35841
 
35761
35842
  # ${NAME}
35762
35843
 
@@ -35768,7 +35849,7 @@ function aboutPromptbookInformation(options) {
35768
35849
  `);
35769
35850
  fullInfoPieces.push(basicInfo);
35770
35851
  if (isServersInfoIncluded) {
35771
- const serversInfo = spaceTrim$2((block) => `
35852
+ const serversInfo = spaceTrim$1((block) => `
35772
35853
 
35773
35854
  ## Servers
35774
35855
 
@@ -35782,7 +35863,7 @@ function aboutPromptbookInformation(options) {
35782
35863
  ...runtimeEnvironment,
35783
35864
  isCostPrevented: IS_COST_PREVENTED,
35784
35865
  };
35785
- const environmentInfo = spaceTrim$2((block) => `
35866
+ const environmentInfo = spaceTrim$1((block) => `
35786
35867
 
35787
35868
  ## Environment
35788
35869
 
@@ -35792,7 +35873,7 @@ function aboutPromptbookInformation(options) {
35792
35873
  `);
35793
35874
  fullInfoPieces.push(environmentInfo);
35794
35875
  }
35795
- const fullInfo = spaceTrim$2(fullInfoPieces.join('\n\n'));
35876
+ const fullInfo = spaceTrim$1(fullInfoPieces.join('\n\n'));
35796
35877
  return fullInfo;
35797
35878
  }
35798
35879
  /**
@@ -36115,7 +36196,7 @@ function $generateBookBoilerplate(options) {
36115
36196
  if (initialRules.length === 0) {
36116
36197
  initialRules.push($randomAgentRule(namePool));
36117
36198
  }
36118
- const agentSource = validateBook(spaceTrim$2((block) => `
36199
+ const agentSource = validateBook(spaceTrim$1((block) => `
36119
36200
  ${agentName}
36120
36201
 
36121
36202