@promptbook/components 0.112.0-13 → 0.112.0-16

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31)
  1. package/esm/index.es.js +192 -192
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/src/cli/cli-commands/coder/{find-fresh-emoji-tag.d.ts → find-fresh-emoji-tags.d.ts} +1 -1
  4. package/esm/src/cli/cli-commands/coder.d.ts +1 -1
  5. package/esm/src/execution/createPipelineExecutor/30-executeFormatSubvalues.d.ts +1 -1
  6. package/esm/src/llm-providers/anthropic-claude/anthropic-claude-models.d.ts +1 -1
  7. package/esm/src/llm-providers/deepseek/deepseek-models.d.ts +1 -1
  8. package/esm/src/llm-providers/google/google-models.d.ts +1 -1
  9. package/esm/src/llm-providers/openai/openai-models.d.ts +1 -1
  10. package/esm/src/scrapers/_boilerplate/BoilerplateScraper.d.ts +1 -2
  11. package/esm/src/scrapers/document/DocumentScraper.d.ts +1 -2
  12. package/esm/src/scrapers/document-legacy/LegacyDocumentScraper.d.ts +1 -2
  13. package/esm/src/scripting/javascript/postprocessing-functions.d.ts +1 -1
  14. package/esm/src/utils/parameters/mapAvailableToExpectedParameters.d.ts +1 -2
  15. package/esm/src/version.d.ts +1 -1
  16. package/package.json +1 -1
  17. package/umd/index.umd.js +362 -363
  18. package/umd/index.umd.js.map +1 -1
  19. package/umd/src/cli/cli-commands/coder/{find-fresh-emoji-tag.d.ts → find-fresh-emoji-tags.d.ts} +1 -1
  20. package/umd/src/cli/cli-commands/coder.d.ts +1 -1
  21. package/umd/src/execution/createPipelineExecutor/30-executeFormatSubvalues.d.ts +1 -1
  22. package/umd/src/llm-providers/anthropic-claude/anthropic-claude-models.d.ts +1 -1
  23. package/umd/src/llm-providers/deepseek/deepseek-models.d.ts +1 -1
  24. package/umd/src/llm-providers/google/google-models.d.ts +1 -1
  25. package/umd/src/llm-providers/openai/openai-models.d.ts +1 -1
  26. package/umd/src/scrapers/_boilerplate/BoilerplateScraper.d.ts +1 -2
  27. package/umd/src/scrapers/document/DocumentScraper.d.ts +1 -2
  28. package/umd/src/scrapers/document-legacy/LegacyDocumentScraper.d.ts +1 -2
  29. package/umd/src/scripting/javascript/postprocessing-functions.d.ts +1 -1
  30. package/umd/src/utils/parameters/mapAvailableToExpectedParameters.d.ts +1 -2
  31. package/umd/src/version.d.ts +1 -1
package/esm/index.es.js CHANGED
@@ -1,6 +1,6 @@
1
1
  import { jsxs, jsx, Fragment } from 'react/jsx-runtime';
2
2
  import { useMemo, useId, useState, useRef, useEffect, createContext, useContext, useCallback, forwardRef, memo } from 'react';
3
- import spaceTrim$2, { spaceTrim as spaceTrim$1 } from 'spacetrim';
3
+ import { spaceTrim as spaceTrim$1 } from 'spacetrim';
4
4
  import { SHA256 } from 'crypto-js';
5
5
  import hexEncoder from 'crypto-js/enc-hex';
6
6
  import { basename, join, dirname, isAbsolute } from 'path';
@@ -40,7 +40,7 @@ const BOOK_LANGUAGE_VERSION = '2.0.0';
40
40
  * @generated
41
41
  * @see https://github.com/webgptorg/promptbook
42
42
  */
43
- const PROMPTBOOK_ENGINE_VERSION = '0.112.0-13';
43
+ const PROMPTBOOK_ENGINE_VERSION = '0.112.0-16';
44
44
  /**
45
45
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
46
46
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -446,42 +446,6 @@ function normalizeTo_camelCase(text, _isFirstLetterCapital = false) {
446
446
  * TODO: [🌺] Use some intermediate util splitWords
447
447
  */
448
448
 
449
- /**
450
- * Normalizes a domain-like string into a comparable hostname form.
451
- *
452
- * The returned value is lowercased and stripped to hostname only
453
- * (protocol, path, query, hash, and port are removed).
454
- *
455
- * @param rawDomain - Raw domain value (for example `my-agent.com` or `https://my-agent.com/path`).
456
- * @returns Normalized hostname or `null` when the value cannot be normalized.
457
- * @private utility for host/domain matching
458
- */
459
- function normalizeDomainForMatching(rawDomain) {
460
- const trimmedDomain = rawDomain.trim();
461
- if (!trimmedDomain) {
462
- return null;
463
- }
464
- const candidateUrl = hasHttpProtocol(trimmedDomain) ? trimmedDomain : `https://${trimmedDomain}`;
465
- try {
466
- const parsedUrl = new URL(candidateUrl);
467
- const normalizedHostname = parsedUrl.hostname.trim().toLowerCase();
468
- return normalizedHostname || null;
469
- }
470
- catch (_a) {
471
- return null;
472
- }
473
- }
474
- /**
475
- * Checks whether the value already includes an HTTP(S) protocol prefix.
476
- *
477
- * @param value - Raw value to inspect.
478
- * @returns True when the value starts with `http://` or `https://`.
479
- * @private utility for host/domain matching
480
- */
481
- function hasHttpProtocol(value) {
482
- return value.startsWith('http://') || value.startsWith('https://');
483
- }
484
-
485
449
  /**
486
450
  * Tests if given string is valid URL.
487
451
  *
@@ -598,6 +562,42 @@ function countOccurrences(value, searchedChar) {
598
562
  return count;
599
563
  }
600
564
 
565
+ /**
566
+ * Normalizes a domain-like string into a comparable hostname form.
567
+ *
568
+ * The returned value is lowercased and stripped to hostname only
569
+ * (protocol, path, query, hash, and port are removed).
570
+ *
571
+ * @param rawDomain - Raw domain value (for example `my-agent.com` or `https://my-agent.com/path`).
572
+ * @returns Normalized hostname or `null` when the value cannot be normalized.
573
+ * @private utility for host/domain matching
574
+ */
575
+ function normalizeDomainForMatching(rawDomain) {
576
+ const trimmedDomain = rawDomain.trim();
577
+ if (!trimmedDomain) {
578
+ return null;
579
+ }
580
+ const candidateUrl = hasHttpProtocol(trimmedDomain) ? trimmedDomain : `https://${trimmedDomain}`;
581
+ try {
582
+ const parsedUrl = new URL(candidateUrl);
583
+ const normalizedHostname = parsedUrl.hostname.trim().toLowerCase();
584
+ return normalizedHostname || null;
585
+ }
586
+ catch (_a) {
587
+ return null;
588
+ }
589
+ }
590
+ /**
591
+ * Checks whether the value already includes an HTTP(S) protocol prefix.
592
+ *
593
+ * @param value - Raw value to inspect.
594
+ * @returns True when the value starts with `http://` or `https://`.
595
+ * @private utility for host/domain matching
596
+ */
597
+ function hasHttpProtocol(value) {
598
+ return value.startsWith('http://') || value.startsWith('https://');
599
+ }
600
+
601
601
  /**
602
602
  * Trims string from all 4 sides
603
603
  *
@@ -1796,7 +1796,7 @@ false);
1796
1796
  function getErrorReportUrl(error) {
1797
1797
  const report = {
1798
1798
  title: `🐜 Error report from ${NAME}`,
1799
- body: spaceTrim$2((block) => `
1799
+ body: spaceTrim$1((block) => `
1800
1800
 
1801
1801
 
1802
1802
  \`${error.name || 'Error'}\` has occurred in the [${NAME}], please look into it @${ADMIN_GITHUB_NAME}.
@@ -1991,7 +1991,7 @@ function valueToString(value) {
1991
1991
  * @public exported from `@promptbook/utils`
1992
1992
  */
1993
1993
  function computeHash(value) {
1994
- return SHA256(hexEncoder.parse(spaceTrim$2(valueToString(value)))).toString( /* hex */);
1994
+ return SHA256(hexEncoder.parse(spaceTrim$1(valueToString(value)))).toString( /* hex */);
1995
1995
  }
1996
1996
  /**
1997
1997
  * TODO: [🥬][🥬] Use this ACRY
@@ -2098,7 +2098,7 @@ function checkSerializableAsJson(options) {
2098
2098
  }
2099
2099
  else if (typeof value === 'object') {
2100
2100
  if (value instanceof Date) {
2101
- throw new UnexpectedError(spaceTrim$2((block) => `
2101
+ throw new UnexpectedError(spaceTrim$1((block) => `
2102
2102
  \`${name}\` is Date
2103
2103
 
2104
2104
  Use \`string_date_iso8601\` instead
@@ -2117,7 +2117,7 @@ function checkSerializableAsJson(options) {
2117
2117
  throw new UnexpectedError(`${name} is RegExp`);
2118
2118
  }
2119
2119
  else if (value instanceof Error) {
2120
- throw new UnexpectedError(spaceTrim$2((block) => `
2120
+ throw new UnexpectedError(spaceTrim$1((block) => `
2121
2121
  \`${name}\` is unserialized Error
2122
2122
 
2123
2123
  Use function \`serializeError\`
@@ -2140,7 +2140,7 @@ function checkSerializableAsJson(options) {
2140
2140
  }
2141
2141
  catch (error) {
2142
2142
  assertsError(error);
2143
- throw new UnexpectedError(spaceTrim$2((block) => `
2143
+ throw new UnexpectedError(spaceTrim$1((block) => `
2144
2144
  \`${name}\` is not serializable
2145
2145
 
2146
2146
  ${block(error.stack || error.message)}
@@ -2172,7 +2172,7 @@ function checkSerializableAsJson(options) {
2172
2172
  }
2173
2173
  }
2174
2174
  else {
2175
- throw new UnexpectedError(spaceTrim$2((block) => `
2175
+ throw new UnexpectedError(spaceTrim$1((block) => `
2176
2176
  \`${name}\` is unknown type
2177
2177
 
2178
2178
  Additional message for \`${name}\`:
@@ -3174,7 +3174,7 @@ function deserializeError(error, isStackAddedToMessage = true) {
3174
3174
  message = `${name}: ${message}`;
3175
3175
  }
3176
3176
  if (isStackAddedToMessage && stack !== undefined && stack !== '') {
3177
- message = spaceTrim$2((block) => `
3177
+ message = spaceTrim$1((block) => `
3178
3178
  ${block(message)}
3179
3179
 
3180
3180
  Original stack trace:
@@ -3195,7 +3195,7 @@ function serializeError(error) {
3195
3195
  const { name, message, stack } = error;
3196
3196
  const { id } = error;
3197
3197
  if (!Object.keys(ALL_ERRORS).includes(name)) {
3198
- console.error(spaceTrim$2((block) => `
3198
+ console.error(spaceTrim$1((block) => `
3199
3199
 
3200
3200
  Cannot serialize error with name "${name}"
3201
3201
 
@@ -3301,7 +3301,7 @@ function jsonParse(value) {
3301
3301
  }
3302
3302
  else if (typeof value !== 'string') {
3303
3303
  console.error('Can not parse JSON from non-string value.', { text: value });
3304
- throw new Error(spaceTrim$2(`
3304
+ throw new Error(spaceTrim$1(`
3305
3305
  Can not parse JSON from non-string value.
3306
3306
 
3307
3307
  The value type: ${typeof value}
@@ -3315,7 +3315,7 @@ function jsonParse(value) {
3315
3315
  if (!(error instanceof Error)) {
3316
3316
  throw error;
3317
3317
  }
3318
- throw new Error(spaceTrim$2((block) => `
3318
+ throw new Error(spaceTrim$1((block) => `
3319
3319
  ${block(error.message)}
3320
3320
 
3321
3321
  The expected JSON text:
@@ -3675,7 +3675,7 @@ function buildParametersSection(items) {
3675
3675
  const entries = items
3676
3676
  .flatMap((item) => formatParameterListItem(item).split(/\r?\n/))
3677
3677
  .filter((line) => line !== '');
3678
- return spaceTrim$2((block) => `
3678
+ return spaceTrim$1((block) => `
3679
3679
  **Parameters:**
3680
3680
  ${block(entries.join('\n'))}
3681
3681
 
@@ -3748,7 +3748,7 @@ function isPromptString(value) {
3748
3748
  */
3749
3749
  function prompt(strings, ...values) {
3750
3750
  if (values.length === 0) {
3751
- return new PromptString(spaceTrim$2(strings.join('')));
3751
+ return new PromptString(spaceTrim$1(strings.join('')));
3752
3752
  }
3753
3753
  const stringsWithHiddenParameters = strings.map((stringsItem) => ParameterEscaping.hideBrackets(stringsItem));
3754
3754
  const parameterMetadata = values.map((value) => {
@@ -3789,7 +3789,7 @@ function prompt(strings, ...values) {
3789
3789
  ? `${result}${stringsItem}`
3790
3790
  : `${result}${stringsItem}${ParameterSection.formatParameterPlaceholder(parameterName)}`;
3791
3791
  }, '');
3792
- pipelineString = spaceTrim$2(pipelineString);
3792
+ pipelineString = spaceTrim$1(pipelineString);
3793
3793
  try {
3794
3794
  pipelineString = templateParameters(pipelineString, parameters);
3795
3795
  }
@@ -3798,7 +3798,7 @@ function prompt(strings, ...values) {
3798
3798
  throw error;
3799
3799
  }
3800
3800
  console.error({ pipelineString, parameters, parameterNames: parameterNamesOrdered, error });
3801
- throw new UnexpectedError(spaceTrim$2((block) => `
3801
+ throw new UnexpectedError(spaceTrim$1((block) => `
3802
3802
  Internal error in prompt template literal
3803
3803
 
3804
3804
  ${block(JSON.stringify({ strings, values }, null, 4))}}
@@ -5911,7 +5911,7 @@ function serializeToPromptbookJavascript(value) {
5911
5911
  imports.push(`import { Color } from '@promptbook/color';`);
5912
5912
  }
5913
5913
  else if (typeof value === 'string') {
5914
- const trimmed = spaceTrim$2(value);
5914
+ const trimmed = spaceTrim$1(value);
5915
5915
  if (trimmed.includes('\n')) {
5916
5916
  // Multiline string -> use `spaceTrim`
5917
5917
  serializedValue = `spaceTrim(\`\n${value.replace(/`/g, '\\`')}\n\`)`;
@@ -6168,7 +6168,7 @@ function isValidPipelineUrl(url) {
6168
6168
  * @public exported from `@promptbook/core`
6169
6169
  */
6170
6170
  function normalizeAgentName(rawAgentName) {
6171
- return titleToName(spaceTrim$2(rawAgentName));
6171
+ return titleToName(spaceTrim$1(rawAgentName));
6172
6172
  }
6173
6173
 
6174
6174
  /**
@@ -17075,7 +17075,7 @@ function parseAgentSource(agentSource) {
17075
17075
  continue;
17076
17076
  }
17077
17077
  if (commitment.type === 'FROM') {
17078
- const content = spaceTrim$2(commitment.content).split(/\r?\n/)[0] || '';
17078
+ const content = spaceTrim$1(commitment.content).split(/\r?\n/)[0] || '';
17079
17079
  if (content === 'Adam' || content === '' /* <- Note: Adam is implicit */) {
17080
17080
  continue;
17081
17081
  }
@@ -17098,7 +17098,7 @@ function parseAgentSource(agentSource) {
17098
17098
  continue;
17099
17099
  }
17100
17100
  if (commitment.type === 'IMPORT') {
17101
- const content = spaceTrim$2(commitment.content).split(/\r?\n/)[0] || '';
17101
+ const content = spaceTrim$1(commitment.content).split(/\r?\n/)[0] || '';
17102
17102
  let label = content;
17103
17103
  let iconName = 'ExternalLink'; // Import remote
17104
17104
  try {
@@ -17136,7 +17136,7 @@ function parseAgentSource(agentSource) {
17136
17136
  continue;
17137
17137
  }
17138
17138
  if (commitment.type === 'KNOWLEDGE') {
17139
- const content = spaceTrim$2(commitment.content);
17139
+ const content = spaceTrim$1(commitment.content);
17140
17140
  const extractedUrls = extractUrlsFromText(content);
17141
17141
  let label = content;
17142
17142
  let iconName = 'Book';
@@ -17195,7 +17195,7 @@ function parseAgentSource(agentSource) {
17195
17195
  continue;
17196
17196
  }
17197
17197
  if (commitment.type === 'META LINK') {
17198
- const linkValue = spaceTrim$2(commitment.content);
17198
+ const linkValue = spaceTrim$1(commitment.content);
17199
17199
  links.push(linkValue);
17200
17200
  meta.link = linkValue;
17201
17201
  continue;
@@ -17205,11 +17205,11 @@ function parseAgentSource(agentSource) {
17205
17205
  continue;
17206
17206
  }
17207
17207
  if (commitment.type === 'META IMAGE') {
17208
- meta.image = spaceTrim$2(commitment.content);
17208
+ meta.image = spaceTrim$1(commitment.content);
17209
17209
  continue;
17210
17210
  }
17211
17211
  if (commitment.type === 'META DESCRIPTION') {
17212
- meta.description = spaceTrim$2(commitment.content);
17212
+ meta.description = spaceTrim$1(commitment.content);
17213
17213
  continue;
17214
17214
  }
17215
17215
  if (commitment.type === 'META DISCLAIMER') {
@@ -17217,7 +17217,7 @@ function parseAgentSource(agentSource) {
17217
17217
  continue;
17218
17218
  }
17219
17219
  if (commitment.type === 'META INPUT PLACEHOLDER') {
17220
- meta.inputPlaceholder = spaceTrim$2(commitment.content);
17220
+ meta.inputPlaceholder = spaceTrim$1(commitment.content);
17221
17221
  continue;
17222
17222
  }
17223
17223
  if (commitment.type === 'MESSAGE SUFFIX') {
@@ -17233,7 +17233,7 @@ function parseAgentSource(agentSource) {
17233
17233
  continue;
17234
17234
  }
17235
17235
  if (commitment.type === 'META VOICE') {
17236
- meta.voice = spaceTrim$2(commitment.content);
17236
+ meta.voice = spaceTrim$1(commitment.content);
17237
17237
  continue;
17238
17238
  }
17239
17239
  if (commitment.type !== 'META') {
@@ -17242,10 +17242,10 @@ function parseAgentSource(agentSource) {
17242
17242
  // Parse META commitments - format is "META TYPE content"
17243
17243
  const metaTypeRaw = commitment.content.split(' ')[0] || 'NONE';
17244
17244
  if (metaTypeRaw === 'LINK') {
17245
- links.push(spaceTrim$2(commitment.content.substring(metaTypeRaw.length)));
17245
+ links.push(spaceTrim$1(commitment.content.substring(metaTypeRaw.length)));
17246
17246
  }
17247
17247
  const metaType = normalizeTo_camelCase(metaTypeRaw);
17248
- meta[metaType] = spaceTrim$2(commitment.content.substring(metaTypeRaw.length));
17248
+ meta[metaType] = spaceTrim$1(commitment.content.substring(metaTypeRaw.length));
17249
17249
  }
17250
17250
  // Generate fullname fallback if no meta fullname specified
17251
17251
  if (!meta.fullname) {
@@ -17276,7 +17276,7 @@ function parseAgentSource(agentSource) {
17276
17276
  * @returns The content with normalized separators
17277
17277
  */
17278
17278
  function normalizeSeparator(content) {
17279
- const trimmed = spaceTrim$2(content);
17279
+ const trimmed = spaceTrim$1(content);
17280
17280
  if (trimmed.includes(',')) {
17281
17281
  return trimmed;
17282
17282
  }
@@ -17289,7 +17289,7 @@ function normalizeSeparator(content) {
17289
17289
  * @returns Normalized domain or a trimmed fallback.
17290
17290
  */
17291
17291
  function normalizeMetaDomain(content) {
17292
- const trimmed = spaceTrim$2(content);
17292
+ const trimmed = spaceTrim$1(content);
17293
17293
  return normalizeDomainForMatching(trimmed) || trimmed.toLowerCase();
17294
17294
  }
17295
17295
  /**
@@ -17430,7 +17430,7 @@ function validateBook(source) {
17430
17430
  * @deprecated Use `$generateBookBoilerplate` instead
17431
17431
  * @public exported from `@promptbook/core`
17432
17432
  */
17433
- const DEFAULT_BOOK = padBook(validateBook(spaceTrim$2(`
17433
+ const DEFAULT_BOOK = padBook(validateBook(spaceTrim$1(`
17434
17434
  AI Avatar
17435
17435
 
17436
17436
  PERSONA A friendly AI assistant that helps you with your tasks
@@ -18225,7 +18225,7 @@ const PUBLIC_AGENTS_SERVERS = [
18225
18225
  function aboutPromptbookInformation(options) {
18226
18226
  const { isServersInfoIncluded = true, isRuntimeEnvironmentInfoIncluded = true } = options || {};
18227
18227
  const fullInfoPieces = [];
18228
- const basicInfo = spaceTrim$2(`
18228
+ const basicInfo = spaceTrim$1(`
18229
18229
 
18230
18230
  # ${NAME}
18231
18231
 
@@ -18237,7 +18237,7 @@ function aboutPromptbookInformation(options) {
18237
18237
  `);
18238
18238
  fullInfoPieces.push(basicInfo);
18239
18239
  if (isServersInfoIncluded) {
18240
- const serversInfo = spaceTrim$2((block) => `
18240
+ const serversInfo = spaceTrim$1((block) => `
18241
18241
 
18242
18242
  ## Servers
18243
18243
 
@@ -18251,7 +18251,7 @@ function aboutPromptbookInformation(options) {
18251
18251
  ...runtimeEnvironment,
18252
18252
  isCostPrevented: IS_COST_PREVENTED,
18253
18253
  };
18254
- const environmentInfo = spaceTrim$2((block) => `
18254
+ const environmentInfo = spaceTrim$1((block) => `
18255
18255
 
18256
18256
  ## Environment
18257
18257
 
@@ -18261,7 +18261,7 @@ function aboutPromptbookInformation(options) {
18261
18261
  `);
18262
18262
  fullInfoPieces.push(environmentInfo);
18263
18263
  }
18264
- const fullInfo = spaceTrim$2(fullInfoPieces.join('\n\n'));
18264
+ const fullInfo = spaceTrim$1(fullInfoPieces.join('\n\n'));
18265
18265
  return fullInfo;
18266
18266
  }
18267
18267
  /**
@@ -21544,7 +21544,7 @@ function getTextColor(bgColor) {
21544
21544
  const luminance = 0.299 * r + 0.587 * g + 0.114 * b;
21545
21545
  return luminance > 186 ? '#0f172a' : '#f8fafc';
21546
21546
  }
21547
- const HERO_ILLUSTRATION_SVG = spaceTrim$2(() => `
21547
+ const HERO_ILLUSTRATION_SVG = spaceTrim$1(() => `
21548
21548
  <svg width="320" height="220" viewBox="0 0 320 220" fill="none" xmlns="http://www.w3.org/2000/svg">
21549
21549
  <defs>
21550
21550
  <linearGradient id="heroGradient" x1="0" y1="0" x2="320" y2="220">
@@ -21562,7 +21562,7 @@ const HERO_ILLUSTRATION_SVG = spaceTrim$2(() => `
21562
21562
  <rect x="62" y="130" width="196" height="20" rx="10" fill="rgba(255,255,255,0.15)" />
21563
21563
  </svg>
21564
21564
  `);
21565
- const BRAND_MARK_SVG = spaceTrim$2(() => `
21565
+ const BRAND_MARK_SVG = spaceTrim$1(() => `
21566
21566
  <svg width="92" height="92" viewBox="0 0 92 92" fill="none" xmlns="http://www.w3.org/2000/svg">
21567
21567
  <defs>
21568
21568
  <linearGradient id="badgeGradient" x1="0" y1="0" x2="92" y2="92">
@@ -21712,7 +21712,7 @@ function buildAttachmentsMarkup(message) {
21712
21712
  const href = hasUrl ? ` href="${escapeHtml$1((_a = attachment.url) !== null && _a !== void 0 ? _a : '#')}" target="_blank" rel="noopener"` : '';
21713
21713
  const name = escapeHtml$1(attachment.name || 'Attachment');
21714
21714
  const meta = escapeHtml$1(attachment.type || 'file');
21715
- return spaceTrim$2(`
21715
+ return spaceTrim$1(`
21716
21716
  <${tag} class="attachment-chip"${href}>
21717
21717
  <span class="attachment-icon">📎</span>
21718
21718
  <span class="attachment-name">${name}</span>
@@ -21737,7 +21737,7 @@ function buildCitationsMarkup(message) {
21737
21737
  const urlLink = citation.url
21738
21738
  ? `<a class="citation-link" href="${escapeHtml$1(citation.url)}" target="_blank" rel="noopener">Open source</a>`
21739
21739
  : '';
21740
- return spaceTrim$2(`
21740
+ return spaceTrim$1(`
21741
21741
  <article class="citation-chip">
21742
21742
  <div class="citation-header">
21743
21743
  <span class="citation-badge">${escapeHtml$1(citation.id)}</span>
@@ -21769,7 +21769,7 @@ function renderMessageBlock(message, participants) {
21769
21769
  const avatarMarkup = visuals.avatarSrc
21770
21770
  ? `<img class="message-avatar-img" src="${escapeHtml$1(visuals.avatarSrc)}" alt="${escapeHtml$1(visuals.displayName)}" />`
21771
21771
  : `<span class="message-avatar-fallback" style="background:${visuals.accentColor};color:${bubbleTextColor};">${escapeHtml$1(visuals.avatarLabel)}</span>`;
21772
- return spaceTrim$2(`
21772
+ return spaceTrim$1(`
21773
21773
  <article class="message-block ${alignmentClass}">
21774
21774
  <div class="message-avatar">${avatarMarkup}</div>
21775
21775
  <div class="message-card" style="--bubble-color:${visuals.accentColor};--bubble-text:${bubbleTextColor};">
@@ -21820,7 +21820,7 @@ const htmlSaveFormatDefinition = {
21820
21820
  const messageMarkup = messages.length > 0
21821
21821
  ? messages.map((message) => renderMessageBlock(message, participantLookup)).join('')
21822
21822
  : '<div class="empty-state">No messages yet. Send a note to capture this chat.</div>';
21823
- return spaceTrim$2(`
21823
+ return spaceTrim$1(`
21824
21824
  <!DOCTYPE html>
21825
21825
  <html lang="en">
21826
21826
  <head>
@@ -22225,7 +22225,7 @@ const htmlSaveFormatDefinition = {
22225
22225
  <p class="hero-subtitle">${escapeHtml$1(heroSubtitle)}</p>
22226
22226
  <div class="stat-grid">
22227
22227
  ${statCards
22228
- .map((stat) => spaceTrim$2(`
22228
+ .map((stat) => spaceTrim$1(`
22229
22229
  <div class="stat-card">
22230
22230
  <span class="stat-value">${escapeHtml$1(stat.value)}</span>
22231
22231
  <span class="stat-label">${escapeHtml$1(stat.label)}</span>
@@ -22480,7 +22480,7 @@ const reactSaveFormatDefinition = {
22480
22480
  const { imports: participantsImports, value: participantsValue } = serializeToPromptbookJavascript(participants);
22481
22481
  const { imports: messagesImports, value: messagesValue } = serializeToPromptbookJavascript(messages);
22482
22482
  const uniqueImports = Array.from(new Set([`import { Chat } from '@promptbook/components';`, ...participantsImports, ...messagesImports])).filter((imp) => !!imp && imp.trim().length > 0);
22483
- return spaceTrim$2((block) => `
22483
+ return spaceTrim$1((block) => `
22484
22484
  "use client";
22485
22485
 
22486
22486
  ${block(uniqueImports.join('\n'))}
@@ -23976,7 +23976,7 @@ function ChatInputArea(props) {
23976
23976
  type: uploadedFile.file.type,
23977
23977
  url: uploadedFile.content,
23978
23978
  }));
23979
- if (spaceTrim$2(messageContent) === '' && attachments.length === 0) {
23979
+ if (spaceTrim$1(messageContent) === '' && attachments.length === 0) {
23980
23980
  throw new Error(`You need to write some text or upload a file`);
23981
23981
  }
23982
23982
  if (soundSystem) {
@@ -24038,7 +24038,7 @@ function ChatInputArea(props) {
24038
24038
  }
24039
24039
  const resolvedAction = resolveChatEnterAction(resolvedBehavior, false);
24040
24040
  if (resolvedAction === 'SEND') {
24041
- const hasTextToSend = spaceTrim$2(snapshot.value) !== '' || snapshot.attachmentIds.length > 0;
24041
+ const hasTextToSend = spaceTrim$1(snapshot.value) !== '' || snapshot.attachmentIds.length > 0;
24042
24042
  if (!hasTextToSend) {
24043
24043
  return;
24044
24044
  }
@@ -24315,7 +24315,7 @@ function pipelineJsonToString(pipelineJson) {
24315
24315
  pipelineString += '\n\n';
24316
24316
  pipelineString += '```' + contentLanguage;
24317
24317
  pipelineString += '\n';
24318
- pipelineString += spaceTrim$2(content);
24318
+ pipelineString += spaceTrim$1(content);
24319
24319
  // <- TODO: [main] !!3 Escape
24320
24320
  // <- TODO: [🧠] Some clear strategy how to spaceTrim the blocks
24321
24321
  pipelineString += '\n';
@@ -25339,14 +25339,14 @@ class MultipleLlmExecutionTools {
25339
25339
  if (description === undefined) {
25340
25340
  return headLine;
25341
25341
  }
25342
- return spaceTrim$2((block) => `
25342
+ return spaceTrim$1((block) => `
25343
25343
  ${headLine}
25344
25344
 
25345
25345
  ${ /* <- Note: Indenting the description: */block(description)}
25346
25346
  `);
25347
25347
  })
25348
25348
  .join('\n\n');
25349
- return spaceTrim$2((block) => `
25349
+ return spaceTrim$1((block) => `
25350
25350
  Multiple LLM Providers:
25351
25351
 
25352
25352
  ${block(innerModelsTitlesAndDescriptions)}
@@ -25448,7 +25448,7 @@ class MultipleLlmExecutionTools {
25448
25448
  // 1) OpenAI throw PipelineExecutionError: Parameter `{knowledge}` is not defined
25449
25449
  // 2) AnthropicClaude throw PipelineExecutionError: Parameter `{knowledge}` is not defined
25450
25450
  // 3) ...
25451
- spaceTrim$2((block) => `
25451
+ spaceTrim$1((block) => `
25452
25452
  All execution tools of ${this.title} failed:
25453
25453
 
25454
25454
  ${block(errors
@@ -25461,7 +25461,7 @@ class MultipleLlmExecutionTools {
25461
25461
  throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\` into ${this.title}`);
25462
25462
  }
25463
25463
  else {
25464
- throw new PipelineExecutionError(spaceTrim$2((block) => `
25464
+ throw new PipelineExecutionError(spaceTrim$1((block) => `
25465
25465
  You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}" into ${this.title}
25466
25466
 
25467
25467
  Available \`LlmExecutionTools\`:
@@ -25498,7 +25498,7 @@ class MultipleLlmExecutionTools {
25498
25498
  */
25499
25499
  function joinLlmExecutionTools(title, ...llmExecutionTools) {
25500
25500
  if (llmExecutionTools.length === 0) {
25501
- const warningMessage = spaceTrim$2(`
25501
+ const warningMessage = spaceTrim$1(`
25502
25502
  You have not provided any \`LlmExecutionTools\`
25503
25503
  This means that you won't be able to execute any prompts that require large language models like GPT-4 or Anthropic's Claude.
25504
25504
 
@@ -25675,14 +25675,14 @@ function $registeredScrapersMessage(availableScrapers) {
25675
25675
  return { ...metadata, isMetadataAviailable, isInstalled, isAvailableInTools };
25676
25676
  });
25677
25677
  if (metadata.length === 0) {
25678
- return spaceTrim$2(`
25678
+ return spaceTrim$1(`
25679
25679
  **No scrapers are available**
25680
25680
 
25681
25681
  This is a unexpected behavior, you are probably using some broken version of Promptbook
25682
25682
  At least there should be available the metadata of the scrapers
25683
25683
  `);
25684
25684
  }
25685
- return spaceTrim$2((block) => `
25685
+ return spaceTrim$1((block) => `
25686
25686
  Available scrapers are:
25687
25687
  ${block(metadata
25688
25688
  .map(({ packageName, className, isMetadataAviailable, isInstalled, mimeTypes, isAvailableInBrowser, isAvailableInTools, }, i) => {
@@ -25809,7 +25809,7 @@ const promptbookFetch = async (urlOrRequest, init) => {
25809
25809
  else if (urlOrRequest instanceof Request) {
25810
25810
  url = urlOrRequest.url;
25811
25811
  }
25812
- throw new PromptbookFetchError(spaceTrim$2((block) => `
25812
+ throw new PromptbookFetchError(spaceTrim$1((block) => `
25813
25813
  Can not fetch "${url}"
25814
25814
 
25815
25815
  Fetch error:
@@ -25969,7 +25969,7 @@ async function makeKnowledgeSourceHandler(knowledgeSource, tools, options) {
25969
25969
  const fileExtension = getFileExtension(filename);
25970
25970
  const mimeType = extensionToMimeType(fileExtension || '');
25971
25971
  if (!(await isFileExisting(filename, tools.fs))) {
25972
- throw new NotFoundError(spaceTrim$2((block) => `
25972
+ throw new NotFoundError(spaceTrim$1((block) => `
25973
25973
  Can not make source handler for file which does not exist:
25974
25974
 
25975
25975
  File:
@@ -26062,7 +26062,7 @@ async function prepareKnowledgePieces(knowledgeSources, tools, options) {
26062
26062
  // <- TODO: [🪓] Here should be no need for spreading new array, just `partialPieces = partialPiecesUnchecked`
26063
26063
  break;
26064
26064
  }
26065
- console.warn(spaceTrim$2((block) => `
26065
+ console.warn(spaceTrim$1((block) => `
26066
26066
  Cannot scrape knowledge from source despite the scraper \`${scraper.metadata.className}\` supports the mime type "${sourceHandler.mimeType}".
26067
26067
 
26068
26068
  The source:
@@ -26078,7 +26078,7 @@ async function prepareKnowledgePieces(knowledgeSources, tools, options) {
26078
26078
  // <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
26079
26079
  }
26080
26080
  if (partialPieces === null) {
26081
- throw new KnowledgeScrapeError(spaceTrim$2((block) => `
26081
+ throw new KnowledgeScrapeError(spaceTrim$1((block) => `
26082
26082
  Cannot scrape knowledge
26083
26083
 
26084
26084
  The source:
@@ -26515,7 +26515,7 @@ const CsvFormatParser = {
26515
26515
  const { value, outputParameterName, settings, mapCallback, onProgress } = options;
26516
26516
  const csv = csvParse(value, settings);
26517
26517
  if (csv.errors.length !== 0) {
26518
- throw new CsvFormatError(spaceTrim$2((block) => `
26518
+ throw new CsvFormatError(spaceTrim$1((block) => `
26519
26519
  CSV parsing error
26520
26520
 
26521
26521
  Error(s) from CSV parsing:
@@ -26560,7 +26560,7 @@ const CsvFormatParser = {
26560
26560
  const { value, settings, mapCallback, onProgress } = options;
26561
26561
  const csv = csvParse(value, settings);
26562
26562
  if (csv.errors.length !== 0) {
26563
- throw new CsvFormatError(spaceTrim$2((block) => `
26563
+ throw new CsvFormatError(spaceTrim$1((block) => `
26564
26564
  CSV parsing error
26565
26565
 
26566
26566
  Error(s) from CSV parsing:
@@ -26746,7 +26746,7 @@ function mapAvailableToExpectedParameters(options) {
26746
26746
  }
26747
26747
  // Phase 2️⃣: Non-matching mapping
26748
26748
  if (expectedParameterNames.size !== availableParametersNames.size) {
26749
- throw new PipelineExecutionError(spaceTrim$2((block) => `
26749
+ throw new PipelineExecutionError(spaceTrim$1((block) => `
26750
26750
  Can not map available parameters to expected parameters
26751
26751
 
26752
26752
  Mapped parameters:
@@ -27319,7 +27319,7 @@ async function executeFormatSubvalues(options) {
27319
27319
  return /* not await */ executeAttempts({ ...options, logLlmCall });
27320
27320
  }
27321
27321
  if (jokerParameterNames.length !== 0) {
27322
- throw new UnexpectedError(spaceTrim$2((block) => `
27322
+ throw new UnexpectedError(spaceTrim$1((block) => `
27323
27323
  JOKER parameters are not supported together with FOREACH command
27324
27324
 
27325
27325
  [🧞‍♀️] This should be prevented in \`validatePipeline\`
@@ -27332,7 +27332,7 @@ async function executeFormatSubvalues(options) {
27332
27332
  if (formatDefinition === undefined) {
27333
27333
  throw new UnexpectedError(
27334
27334
  // <- TODO: [🧠][🧐] Should be formats fixed per promptbook version or behave as plugins (=> change UnexpectedError)
27335
- spaceTrim$2((block) => `
27335
+ spaceTrim$1((block) => `
27336
27336
  Unsupported format "${task.foreach.formatName}"
27337
27337
 
27338
27338
  Available formats:
@@ -27349,7 +27349,7 @@ async function executeFormatSubvalues(options) {
27349
27349
  if (subvalueParser === undefined) {
27350
27350
  throw new UnexpectedError(
27351
27351
  // <- TODO: [🧠][🧐] Should be formats fixed per promptbook version or behave as plugins (=> change UnexpectedError)
27352
- spaceTrim$2((block) => `
27352
+ spaceTrim$1((block) => `
27353
27353
  Unsupported subformat name "${task.foreach.subformatName}" for format "${task.foreach.formatName}"
27354
27354
 
27355
27355
  Available subformat names for format "${formatDefinition.formatName}":
@@ -27389,7 +27389,7 @@ async function executeFormatSubvalues(options) {
27389
27389
  if (!(error instanceof PipelineExecutionError)) {
27390
27390
  throw error;
27391
27391
  }
27392
- const highLevelError = new PipelineExecutionError(spaceTrim$2((block) => `
27392
+ const highLevelError = new PipelineExecutionError(spaceTrim$1((block) => `
27393
27393
  ${error.message}
27394
27394
 
27395
27395
  This is error in FOREACH command when mapping ${formatDefinition.formatName} ${subvalueParser.subvalueName} data (${index + 1}/${length})
@@ -27413,7 +27413,7 @@ async function executeFormatSubvalues(options) {
27413
27413
  ...options,
27414
27414
  priority: priority + index,
27415
27415
  parameters: allSubparameters,
27416
- pipelineIdentification: spaceTrim$2((block) => `
27416
+ pipelineIdentification: spaceTrim$1((block) => `
27417
27417
  ${block(pipelineIdentification)}
27418
27418
  Subparameter index: ${index}
27419
27419
  `),
@@ -27422,7 +27422,7 @@ async function executeFormatSubvalues(options) {
27422
27422
  }
27423
27423
  catch (error) {
27424
27424
  if (length > BIG_DATASET_TRESHOLD) {
27425
- console.error(spaceTrim$2((block) => `
27425
+ console.error(spaceTrim$1((block) => `
27426
27426
  ${error.message}
27427
27427
 
27428
27428
  This is error in FOREACH command when processing ${formatDefinition.formatName} ${subvalueParser.subvalueName} data (${index + 1}/${length})
@@ -28853,7 +28853,7 @@ import { assertsError } from '../../errors/assertsError';
28853
28853
  /**
28854
28854
  * Description of the FORMAT command
28855
28855
  */
28856
- description: spaceTrim$2(`
28856
+ description: spaceTrim$1(`
28857
28857
  Expect command describes the desired output of the task *(after post-processing)*
28858
28858
  It can set limits for the maximum/minimum length of the output, measured in characters, words, sentences, paragraphs or some other shape of the output.
28859
28859
  `),
@@ -28927,7 +28927,7 @@ import { assertsError } from '../../errors/assertsError';
28927
28927
  }
28928
28928
  catch (error) {
28929
28929
  assertsError(error);
28930
- throw new ParseError(spaceTrim$2((block) => `
28930
+ throw new ParseError(spaceTrim$1((block) => `
28931
28931
  Invalid FORMAT command
28932
28932
  ${block(error.message)}:
28933
28933
  `));
@@ -28993,7 +28993,7 @@ import { assertsError } from '../../errors/assertsError';
28993
28993
  /**
28994
28994
  * Description of the FORMAT command
28995
28995
  */
28996
- description: spaceTrim$2(`
28996
+ description: spaceTrim$1(`
28997
28997
  Format command describes the desired output of the task (after post-processing)
28998
28998
  It can set limits for the maximum/minimum length of the output, measured in characters, words, sentences, paragraphs or some other shape of the output.
28999
28999
  `),
@@ -29583,7 +29583,7 @@ function pricing(value) {
29583
29583
  /**
29584
29584
  * List of available OpenAI models with pricing
29585
29585
  *
29586
- * Note: Synced with official API docs at 2025-11-19
29586
+ * Note: Synced with official API docs at 2026-03-22
29587
29587
  *
29588
29588
  * @see https://platform.openai.com/docs/models/
29589
29589
  * @see https://openai.com/api/pricing/
@@ -29705,8 +29705,8 @@ const OPENAI_MODELS = exportJson({
29705
29705
  modelName: 'gpt-4.1',
29706
29706
  modelDescription: 'Smartest non-reasoning model with 128K context window. Enhanced version of GPT-4 with improved instruction following, better factual accuracy, and reduced hallucinations. Features advanced function calling capabilities and superior performance on coding tasks. Ideal for applications requiring high intelligence without reasoning overhead.',
29707
29707
  pricing: {
29708
- prompt: pricing(`$3.00 / 1M tokens`),
29709
- output: pricing(`$12.00 / 1M tokens`),
29708
+ prompt: pricing(`$2.00 / 1M tokens`),
29709
+ output: pricing(`$8.00 / 1M tokens`),
29710
29710
  },
29711
29711
  },
29712
29712
  /**/
@@ -29717,8 +29717,8 @@ const OPENAI_MODELS = exportJson({
29717
29717
  modelName: 'gpt-4.1-mini',
29718
29718
  modelDescription: 'Smaller, faster version of GPT-4.1 with 128K context window. Balances intelligence and efficiency with 3x faster inference than base GPT-4.1. Maintains strong capabilities across text generation, reasoning, and coding while offering better cost-performance ratio for most applications.',
29719
29719
  pricing: {
29720
- prompt: pricing(`$0.80 / 1M tokens`),
29721
- output: pricing(`$3.20 / 1M tokens`),
29720
+ prompt: pricing(`$0.40 / 1M tokens`),
29721
+ output: pricing(`$1.60 / 1M tokens`),
29722
29722
  },
29723
29723
  },
29724
29724
  /**/
@@ -29729,8 +29729,8 @@ const OPENAI_MODELS = exportJson({
29729
29729
  modelName: 'gpt-4.1-nano',
29730
29730
  modelDescription: 'Fastest, most cost-efficient version of GPT-4.1 with 128K context window. Optimized for high-throughput applications requiring good quality at minimal cost. Features 5x faster inference than GPT-4.1 while maintaining adequate performance for most general-purpose tasks.',
29731
29731
  pricing: {
29732
- prompt: pricing(`$0.20 / 1M tokens`),
29733
- output: pricing(`$0.80 / 1M tokens`),
29732
+ prompt: pricing(`$0.10 / 1M tokens`),
29733
+ output: pricing(`$0.40 / 1M tokens`),
29734
29734
  },
29735
29735
  },
29736
29736
  /**/
@@ -29741,8 +29741,8 @@ const OPENAI_MODELS = exportJson({
29741
29741
  modelName: 'o3',
29742
29742
  modelDescription: 'Advanced reasoning model with 128K context window specializing in complex logical, mathematical, and analytical tasks. Successor to o1 with enhanced step-by-step problem-solving capabilities and superior performance on STEM-focused problems. Ideal for professional applications requiring deep analytical thinking and precise reasoning.',
29743
29743
  pricing: {
29744
- prompt: pricing(`$15.00 / 1M tokens`),
29745
- output: pricing(`$60.00 / 1M tokens`),
29744
+ prompt: pricing(`$2.00 / 1M tokens`),
29745
+ output: pricing(`$8.00 / 1M tokens`),
29746
29746
  },
29747
29747
  },
29748
29748
  /**/
@@ -29753,8 +29753,8 @@ const OPENAI_MODELS = exportJson({
29753
29753
  modelName: 'o3-pro',
29754
29754
  modelDescription: 'Enhanced version of o3 with more compute allocated for better responses on the most challenging problems. Features extended reasoning time and improved accuracy on complex analytical tasks. Designed for applications where maximum reasoning quality is more important than response speed.',
29755
29755
  pricing: {
29756
- prompt: pricing(`$30.00 / 1M tokens`),
29757
- output: pricing(`$120.00 / 1M tokens`),
29756
+ prompt: pricing(`$20.00 / 1M tokens`),
29757
+ output: pricing(`$80.00 / 1M tokens`),
29758
29758
  },
29759
29759
  },
29760
29760
  /**/
@@ -29765,8 +29765,8 @@ const OPENAI_MODELS = exportJson({
29765
29765
  modelName: 'o4-mini',
29766
29766
  modelDescription: 'Fast, cost-efficient reasoning model with 128K context window. Successor to o1-mini with improved analytical capabilities while maintaining speed advantages. Features enhanced mathematical reasoning and logical problem-solving at significantly lower cost than full reasoning models.',
29767
29767
  pricing: {
29768
- prompt: pricing(`$4.00 / 1M tokens`),
29769
- output: pricing(`$16.00 / 1M tokens`),
29768
+ prompt: pricing(`$1.10 / 1M tokens`),
29769
+ output: pricing(`$4.40 / 1M tokens`),
29770
29770
  },
29771
29771
  },
29772
29772
  /**/
@@ -30124,8 +30124,8 @@ const OPENAI_MODELS = exportJson({
30124
30124
  modelName: 'gpt-4o-2024-05-13',
30125
30125
  modelDescription: 'May 2024 version of GPT-4o with 128K context window. Features enhanced multimodal capabilities including superior image understanding (up to 20MP), audio processing, and improved reasoning. Optimized for 2x lower latency than GPT-4 Turbo while maintaining high performance. Includes knowledge up to October 2023. Ideal for production applications requiring reliable multimodal capabilities.',
30126
30126
  pricing: {
30127
- prompt: pricing(`$5.00 / 1M tokens`),
30128
- output: pricing(`$15.00 / 1M tokens`),
30127
+ prompt: pricing(`$2.50 / 1M tokens`),
30128
+ output: pricing(`$10.00 / 1M tokens`),
30129
30129
  },
30130
30130
  },
30131
30131
  /**/
@@ -30136,8 +30136,8 @@ const OPENAI_MODELS = exportJson({
30136
30136
  modelName: 'gpt-4o',
30137
30137
  modelDescription: "OpenAI's most advanced general-purpose multimodal model with 128K context window. Optimized for balanced performance, speed, and cost with 2x faster responses than GPT-4 Turbo. Features excellent vision processing, audio understanding, reasoning, and text generation quality. Represents optimal balance of capability and efficiency for most advanced applications.",
30138
30138
  pricing: {
30139
- prompt: pricing(`$5.00 / 1M tokens`),
30140
- output: pricing(`$15.00 / 1M tokens`),
30139
+ prompt: pricing(`$2.50 / 1M tokens`),
30140
+ output: pricing(`$10.00 / 1M tokens`),
30141
30141
  },
30142
30142
  },
30143
30143
  /**/
@@ -30208,8 +30208,8 @@ const OPENAI_MODELS = exportJson({
30208
30208
  modelName: 'o3-mini',
30209
30209
  modelDescription: 'Cost-effective reasoning model with 128K context window optimized for academic and scientific problem-solving. Features efficient performance on STEM tasks with specialized capabilities in mathematics, physics, chemistry, and computer science. Offers 80% of O1 performance on technical domains at significantly lower cost. Ideal for educational applications and research support.',
30210
30210
  pricing: {
30211
- prompt: pricing(`$3.00 / 1M tokens`),
30212
- output: pricing(`$12.00 / 1M tokens`),
30211
+ prompt: pricing(`$1.10 / 1M tokens`),
30212
+ output: pricing(`$4.40 / 1M tokens`),
30213
30213
  },
30214
30214
  },
30215
30215
  /**/
@@ -30309,53 +30309,6 @@ resultContent, rawResponse, duration = ZERO_VALUE) {
30309
30309
  * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
30310
30310
  */
30311
30311
 
30312
- /**
30313
- * Maps Promptbook tools to OpenAI tools.
30314
- *
30315
- * @private
30316
- */
30317
- function mapToolsToOpenAi(tools) {
30318
- return tools.map((tool) => ({
30319
- type: 'function',
30320
- function: {
30321
- name: tool.name,
30322
- description: tool.description,
30323
- parameters: tool.parameters,
30324
- },
30325
- }));
30326
- }
30327
-
30328
- /**
30329
- * Builds a tool invocation script that injects hidden runtime context into tool args.
30330
- *
30331
- * @private utility of OpenAI tool execution wrappers
30332
- */
30333
- function buildToolInvocationScript(options) {
30334
- const { functionName, functionArgsExpression } = options;
30335
- return `
30336
- const args = ${functionArgsExpression};
30337
- const runtimeContextRaw =
30338
- typeof ${TOOL_RUNTIME_CONTEXT_PARAMETER} === 'undefined'
30339
- ? undefined
30340
- : ${TOOL_RUNTIME_CONTEXT_PARAMETER};
30341
-
30342
- if (runtimeContextRaw !== undefined && args && typeof args === 'object' && !Array.isArray(args)) {
30343
- args.${TOOL_RUNTIME_CONTEXT_ARGUMENT} = runtimeContextRaw;
30344
- }
30345
-
30346
- const toolProgressTokenRaw =
30347
- typeof ${TOOL_PROGRESS_TOKEN_PARAMETER} === 'undefined'
30348
- ? undefined
30349
- : ${TOOL_PROGRESS_TOKEN_PARAMETER};
30350
-
30351
- if (toolProgressTokenRaw !== undefined && args && typeof args === 'object' && !Array.isArray(args)) {
30352
- args.${TOOL_PROGRESS_TOKEN_ARGUMENT} = toolProgressTokenRaw;
30353
- }
30354
-
30355
- return await ${functionName}(args);
30356
- `;
30357
- }
30358
-
30359
30312
  /**
30360
30313
  * Parses an OpenAI error message to identify which parameter is unsupported
30361
30314
  *
@@ -30412,6 +30365,53 @@ function isUnsupportedParameterError(error) {
30412
30365
  errorMessage.includes('does not support'));
30413
30366
  }
30414
30367
 
30368
+ /**
30369
+ * Builds a tool invocation script that injects hidden runtime context into tool args.
30370
+ *
30371
+ * @private utility of OpenAI tool execution wrappers
30372
+ */
30373
+ function buildToolInvocationScript(options) {
30374
+ const { functionName, functionArgsExpression } = options;
30375
+ return `
30376
+ const args = ${functionArgsExpression};
30377
+ const runtimeContextRaw =
30378
+ typeof ${TOOL_RUNTIME_CONTEXT_PARAMETER} === 'undefined'
30379
+ ? undefined
30380
+ : ${TOOL_RUNTIME_CONTEXT_PARAMETER};
30381
+
30382
+ if (runtimeContextRaw !== undefined && args && typeof args === 'object' && !Array.isArray(args)) {
30383
+ args.${TOOL_RUNTIME_CONTEXT_ARGUMENT} = runtimeContextRaw;
30384
+ }
30385
+
30386
+ const toolProgressTokenRaw =
30387
+ typeof ${TOOL_PROGRESS_TOKEN_PARAMETER} === 'undefined'
30388
+ ? undefined
30389
+ : ${TOOL_PROGRESS_TOKEN_PARAMETER};
30390
+
30391
+ if (toolProgressTokenRaw !== undefined && args && typeof args === 'object' && !Array.isArray(args)) {
30392
+ args.${TOOL_PROGRESS_TOKEN_ARGUMENT} = toolProgressTokenRaw;
30393
+ }
30394
+
30395
+ return await ${functionName}(args);
30396
+ `;
30397
+ }
30398
+
30399
+ /**
30400
+ * Maps Promptbook tools to OpenAI tools.
30401
+ *
30402
+ * @private
30403
+ */
30404
+ function mapToolsToOpenAi(tools) {
30405
+ return tools.map((tool) => ({
30406
+ type: 'function',
30407
+ function: {
30408
+ name: tool.name,
30409
+ description: tool.description,
30410
+ parameters: tool.parameters,
30411
+ },
30412
+ }));
30413
+ }
30414
+
30415
30415
  /**
30416
30416
  * Provides access to the structured clone implementation when available.
30417
30417
  */
@@ -31378,7 +31378,7 @@ class OpenAiCompatibleExecutionTools {
31378
31378
  // Note: Match exact or prefix for model families
31379
31379
  const model = this.HARDCODED_MODELS.find(({ modelName }) => modelName === defaultModelName || modelName.startsWith(defaultModelName));
31380
31380
  if (model === undefined) {
31381
- throw new PipelineExecutionError(spaceTrim$2((block) => `
31381
+ throw new PipelineExecutionError(spaceTrim$1((block) => `
31382
31382
  Cannot find model in ${this.title} models with name "${defaultModelName}" which should be used as default.
31383
31383
 
31384
31384
  Available models:
@@ -32304,7 +32304,7 @@ class OpenAiVectorStoreHandler extends OpenAiExecutionTools {
32304
32304
  }
32305
32305
  }
32306
32306
 
32307
- const DEFAULT_AGENT_KIT_MODEL_NAME = 'gpt-5-mini-2025-08-07';
32307
+ const DEFAULT_AGENT_KIT_MODEL_NAME = 'gpt-5.4-nano';
32308
32308
  /**
32309
32309
  * Creates one structured log entry for streamed tool-call updates.
32310
32310
  *
@@ -32799,7 +32799,7 @@ class OpenAiAgentKitExecutionTools extends OpenAiVectorStoreHandler {
32799
32799
  }),
32800
32800
  ],
32801
32801
  };
32802
- const errorMessage = spaceTrim$2((block) => `
32802
+ const errorMessage = spaceTrim$1((block) => `
32803
32803
 
32804
32804
  The invoked tool \`${functionName}\` failed with error:
32805
32805
 
@@ -33517,7 +33517,7 @@ class OpenAiAssistantExecutionTools extends OpenAiVectorStoreHandler {
33517
33517
  assertsError(error);
33518
33518
  const serializedError = serializeError(error);
33519
33519
  errors = [serializedError];
33520
- functionResponse = spaceTrim$2((block) => `
33520
+ functionResponse = spaceTrim$1((block) => `
33521
33521
 
33522
33522
  The invoked tool \`${functionName}\` failed with error:
33523
33523
 
@@ -34525,7 +34525,7 @@ class SelfLearningManager {
34525
34525
  if (isJsonSchemaResponseFormat(responseFormat)) {
34526
34526
  const jsonSchema = responseFormat.json_schema;
34527
34527
  const schemaJson = JSON.stringify(jsonSchema, null, 4);
34528
- userMessageContent = spaceTrim$2((block) => `
34528
+ userMessageContent = spaceTrim$1((block) => `
34529
34529
  ${block(prompt.content)}
34530
34530
 
34531
34531
  NOTE Request was made through OpenAI Compatible API with \`response_format\` of type \`json_schema\` with the following schema:
@@ -34556,12 +34556,12 @@ class SelfLearningManager {
34556
34556
  const formattedAgentMessage = formatAgentMessageForJsonMode(result.content, usesJsonSchemaMode);
34557
34557
  const teacherInstructions = extractOpenTeacherInstructions(agentSource);
34558
34558
  const teacherInstructionsSection = teacherInstructions
34559
- ? spaceTrim$2((block) => `
34559
+ ? spaceTrim$1((block) => `
34560
34560
  **Teacher instructions:**
34561
34561
  ${block(teacherInstructions)}
34562
34562
  `)
34563
34563
  : '';
34564
- const teacherPromptContent = spaceTrim$2((block) => `
34564
+ const teacherPromptContent = spaceTrim$1((block) => `
34565
34565
 
34566
34566
  You are a teacher agent helping another agent to learn from its interactions.
34567
34567
 
@@ -34594,7 +34594,7 @@ class SelfLearningManager {
34594
34594
  ? '- This interaction used JSON mode, so the agent answer should stay as a formatted JSON code block.'
34595
34595
  : ''}
34596
34596
  ${block(isInitialMessageMissing
34597
- ? spaceTrim$2(`
34597
+ ? spaceTrim$1(`
34598
34598
  - The agent source does not have an INITIAL MESSAGE defined, generate one.
34599
34599
  - The INITIAL MESSAGE should be welcoming, informative about the agent capabilities and also should give some quick options to start the conversation with the agent.
34600
34600
  - The quick option looks like \`[👋 Hello](?message=Hello, how are you?)\`
@@ -34637,7 +34637,7 @@ class SelfLearningManager {
34637
34637
  */
34638
34638
  appendToAgentSource(section) {
34639
34639
  const currentSource = this.options.getAgentSource();
34640
- const newSource = padBook(validateBook(spaceTrim$2(currentSource) + section));
34640
+ const newSource = padBook(validateBook(spaceTrim$1(currentSource) + section));
34641
34641
  this.options.updateAgentSource(newSource);
34642
34642
  }
34643
34643
  }
@@ -34665,13 +34665,13 @@ function formatAgentMessageForJsonMode(content, isJsonMode) {
34665
34665
  }
34666
34666
  const parsedJson = tryParseJson(content);
34667
34667
  if (parsedJson === null) {
34668
- return spaceTrim$2((block) => `
34668
+ return spaceTrim$1((block) => `
34669
34669
  \`\`\`json
34670
34670
  ${block(content)}
34671
34671
  \`\`\`
34672
34672
  `);
34673
34673
  }
34674
- return spaceTrim$2((block) => `
34674
+ return spaceTrim$1((block) => `
34675
34675
  \`\`\`json
34676
34676
  ${block(JSON.stringify(parsedJson, null, 4))}
34677
34677
  \`\`\`
@@ -34703,7 +34703,7 @@ function formatSelfLearningSample(options) {
34703
34703
  const internalMessagesSection = options.internalMessages
34704
34704
  .map((internalMessage) => formatInternalLearningMessage(internalMessage))
34705
34705
  .join('\n\n');
34706
- return spaceTrim$2((block) => `
34706
+ return spaceTrim$1((block) => `
34707
34707
 
34708
34708
  USER MESSAGE
34709
34709
  ${block(options.userMessageContent)}
@@ -34721,7 +34721,7 @@ function formatSelfLearningSample(options) {
34721
34721
  * @private function of Agent
34722
34722
  */
34723
34723
  function formatInternalLearningMessage(internalMessage) {
34724
- return spaceTrim$2((block) => `
34724
+ return spaceTrim$1((block) => `
34725
34725
  INTERNAL MESSAGE
34726
34726
  ${block(stringifyInternalLearningPayload(internalMessage))}
34727
34727
  `);
@@ -35225,7 +35225,7 @@ function book(strings, ...values) {
35225
35225
  const bookString = prompt(strings, ...values).toString();
35226
35226
  if (!isValidPipelineString(bookString)) {
35227
35227
  // TODO: Make the CustomError for this
35228
- throw new Error(spaceTrim$2(`
35228
+ throw new Error(spaceTrim$1(`
35229
35229
  The string is not a valid pipeline string
35230
35230
 
35231
35231
  book\`
@@ -35235,7 +35235,7 @@ function book(strings, ...values) {
35235
35235
  }
35236
35236
  if (!isValidBook(bookString)) {
35237
35237
  // TODO: Make the CustomError for this
35238
- throw new Error(spaceTrim$2(`
35238
+ throw new Error(spaceTrim$1(`
35239
35239
  The string is not a valid book
35240
35240
 
35241
35241
  book\`
@@ -35504,7 +35504,7 @@ function buildRemoteAgentSource(profile, meta) {
35504
35504
  .filter((line) => Boolean(line))
35505
35505
  .join('\n');
35506
35506
  const personaBlock = profile.personaDescription
35507
- ? spaceTrim$2((block) => `
35507
+ ? spaceTrim$1((block) => `
35508
35508
  PERSONA
35509
35509
  ${block(profile.personaDescription || '')}
35510
35510
  `)
@@ -35540,7 +35540,7 @@ class RemoteAgent extends Agent {
35540
35540
  // <- TODO: [🐱‍🚀] What about closed-source agents?
35541
35541
  // <- TODO: [🐱‍🚀] Maybe use promptbookFetch
35542
35542
  if (!profileResponse.ok) {
35543
- throw new Error(spaceTrim$2((block) => `
35543
+ throw new Error(spaceTrim$1((block) => `
35544
35544
  Failed to fetch remote agent profile:
35545
35545
 
35546
35546
  Agent URL:
@@ -38587,11 +38587,11 @@ function splitMessageContentByImagePrompts(content) {
38587
38587
  });
38588
38588
  }
38589
38589
  const decodedPrompt = decodePrompt(rawPrompt);
38590
- const prompt = spaceTrim$2(decodedPrompt) || decodedPrompt || 'Generated image';
38590
+ const prompt = spaceTrim$1(decodedPrompt) || decodedPrompt || 'Generated image';
38591
38591
  const decodedAlt = decodePrompt(alt);
38592
38592
  segments.push({
38593
38593
  type: 'image',
38594
- alt: spaceTrim$2(decodedAlt) || 'Generated image',
38594
+ alt: spaceTrim$1(decodedAlt) || 'Generated image',
38595
38595
  prompt,
38596
38596
  });
38597
38597
  lastIndex = start + fullMatch.length;
@@ -39291,7 +39291,7 @@ async function fetchGeneratedImageUrl(filename) {
39291
39291
  * @private internal component of `<ChatMessageItem/>`
39292
39292
  */
39293
39293
  function ImagePromptRenderer({ alt, prompt }) {
39294
- const trimmedPrompt = useMemo(() => spaceTrim$2(prompt), [prompt]);
39294
+ const trimmedPrompt = useMemo(() => spaceTrim$1(prompt), [prompt]);
39295
39295
  const filename = useMemo(() => constructImageFilename({
39296
39296
  prompt: trimmedPrompt,
39297
39297
  }), [trimmedPrompt]);
@@ -42953,7 +42953,7 @@ function AgentChat(props) {
42953
42953
  id: AGENT_CHAT_INITIAL_MESSAGE_ID,
42954
42954
  sender: 'AGENT',
42955
42955
  content: agent.initialMessage ||
42956
- spaceTrim$2(`
42956
+ spaceTrim$1(`
42957
42957
 
42958
42958
  Hello! I am ${agent.meta.fullname || agent.agentName || 'an AI Agent'}.
42959
42959