@promptbook/browser 0.110.0-8 → 0.110.0-9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (24)
  1. package/esm/index.es.js +432 -87
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/components.index.d.ts +2 -0
  4. package/esm/typings/src/_packages/types.index.d.ts +4 -0
  5. package/esm/typings/src/book-2.0/agent-source/AgentModelRequirements.d.ts +22 -21
  6. package/esm/typings/src/book-components/Chat/Chat/ChatInputArea.d.ts +1 -0
  7. package/esm/typings/src/book-components/Chat/Chat/ChatMessageItem.d.ts +4 -0
  8. package/esm/typings/src/book-components/Chat/Chat/ChatMessageList.d.ts +1 -0
  9. package/esm/typings/src/book-components/Chat/Chat/ChatProps.d.ts +15 -0
  10. package/esm/typings/src/book-components/Chat/Chat/ChatSoundToggle.d.ts +19 -0
  11. package/esm/typings/src/book-components/Chat/LlmChat/LlmChatProps.d.ts +6 -0
  12. package/esm/typings/src/commitments/_base/BaseCommitmentDefinition.d.ts +9 -0
  13. package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.test.d.ts +1 -0
  14. package/esm/typings/src/llm-providers/openai/OpenAiAgentKitExecutionTools.d.ts +39 -0
  15. package/esm/typings/src/types/LlmToolDefinition.d.ts +1 -0
  16. package/esm/typings/src/types/ModelRequirements.d.ts +9 -0
  17. package/esm/typings/src/utils/DEFAULT_THINKING_MESSAGES.d.ts +8 -0
  18. package/esm/typings/src/utils/knowledge/inlineKnowledgeSource.d.ts +38 -0
  19. package/esm/typings/src/utils/knowledge/inlineKnowledgeSource.test.d.ts +1 -0
  20. package/esm/typings/src/utils/language/getBrowserPreferredSpeechRecognitionLanguage.d.ts +35 -0
  21. package/esm/typings/src/version.d.ts +1 -1
  22. package/package.json +2 -2
  23. package/umd/index.umd.js +432 -87
  24. package/umd/index.umd.js.map +1 -1
package/esm/index.es.js CHANGED
@@ -29,7 +29,7 @@ const BOOK_LANGUAGE_VERSION = '2.0.0';
29
29
  * @generated
30
30
  * @see https://github.com/webgptorg/promptbook
31
31
  */
32
- const PROMPTBOOK_ENGINE_VERSION = '0.110.0-8';
32
+ const PROMPTBOOK_ENGINE_VERSION = '0.110.0-9';
33
33
  /**
34
34
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
35
35
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -1330,6 +1330,28 @@ class BaseCommitmentDefinition {
1330
1330
  return currentMessage + separator + content;
1331
1331
  });
1332
1332
  }
1333
+ /**
1334
+ * Helper method to create a new requirements object with updated prompt suffix
1335
+ */
1336
+ updatePromptSuffix(requirements, contentUpdate) {
1337
+ const newSuffix = typeof contentUpdate === 'string' ? contentUpdate : contentUpdate(requirements.promptSuffix);
1338
+ return {
1339
+ ...requirements,
1340
+ promptSuffix: newSuffix,
1341
+ };
1342
+ }
1343
+ /**
1344
+ * Helper method to append content to the prompt suffix
1345
+ * Default separator is a single newline for bullet lists.
1346
+ */
1347
+ appendToPromptSuffix(requirements, content, separator = '\n') {
1348
+ return this.updatePromptSuffix(requirements, (currentSuffix) => {
1349
+ if (!currentSuffix.trim()) {
1350
+ return content;
1351
+ }
1352
+ return `${currentSuffix}${separator}${content}`;
1353
+ });
1354
+ }
1333
1355
  /**
1334
1356
  * Helper method to add a comment section to the system message
1335
1357
  * Comments are lines starting with # that will be removed from the final system message
@@ -1524,13 +1546,9 @@ class ClosedCommitmentDefinition extends BaseCommitmentDefinition {
1524
1546
  `);
1525
1547
  }
1526
1548
  applyToAgentModelRequirements(requirements, _content) {
1527
- const updatedMetadata = {
1528
- ...requirements.metadata,
1529
- isClosed: true,
1530
- };
1531
1549
  return {
1532
1550
  ...requirements,
1533
- metadata: updatedMetadata,
1551
+ isClosed: true,
1534
1552
  };
1535
1553
  }
1536
1554
  }
@@ -1808,12 +1826,12 @@ class DictionaryCommitmentDefinition extends BaseCommitmentDefinition {
1808
1826
  return requirements;
1809
1827
  }
1810
1828
  // Get existing dictionary entries from metadata
1811
- const existingDictionary = ((_a = requirements.metadata) === null || _a === void 0 ? void 0 : _a.DICTIONARY) || '';
1829
+ const existingDictionary = ((_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.DICTIONARY) || '';
1812
1830
  // Merge the new dictionary entry with existing entries
1813
1831
  const mergedDictionary = existingDictionary ? `${existingDictionary}\n${trimmedContent}` : trimmedContent;
1814
1832
  // Store the merged dictionary in metadata for debugging and inspection
1815
1833
  const updatedMetadata = {
1816
- ...requirements.metadata,
1834
+ ...requirements._metadata,
1817
1835
  DICTIONARY: mergedDictionary,
1818
1836
  };
1819
1837
  // Create the dictionary section for the system message
@@ -1821,7 +1839,7 @@ class DictionaryCommitmentDefinition extends BaseCommitmentDefinition {
1821
1839
  const dictionarySection = `# DICTIONARY\n${mergedDictionary}`;
1822
1840
  return {
1823
1841
  ...this.appendToSystemMessage(requirements, dictionarySection),
1824
- metadata: updatedMetadata,
1842
+ _metadata: updatedMetadata,
1825
1843
  };
1826
1844
  }
1827
1845
  }
@@ -5966,10 +5984,7 @@ class FromCommitmentDefinition extends BaseCommitmentDefinition {
5966
5984
  applyToAgentModelRequirements(requirements, content) {
5967
5985
  const trimmedContent = content.trim();
5968
5986
  if (!trimmedContent) {
5969
- return {
5970
- ...requirements,
5971
- parentAgentUrl: undefined,
5972
- };
5987
+ return requirements;
5973
5988
  }
5974
5989
  if (trimmedContent.toUpperCase() === 'VOID' ||
5975
5990
  trimmedContent.toUpperCase() === 'NULL' ||
@@ -6183,6 +6198,136 @@ class ImportCommitmentDefinition extends BaseCommitmentDefinition {
6183
6198
  * Note: [💞] Ignore a discrepancy between file name and entity name
6184
6199
  */
6185
6200
 
6201
+ /**
6202
+ * @@@
6203
+ *
6204
+ * @private thing of inline knowledge
6205
+ */
6206
+ const INLINE_KNOWLEDGE_BASE_NAME = 'inline-knowledge';
6207
+ /**
6208
+ * @@@
6209
+ *
6210
+ * @private thing of inline knowledge
6211
+ */
6212
+ const INLINE_KNOWLEDGE_EXTENSION = '.txt';
6213
+ /**
6214
+ * @@@
6215
+ *
6216
+ * @private thing of inline knowledge
6217
+ */
6218
+ const DATA_URL_PREFIX = 'data:';
6219
+ /**
6220
+ * @@@
6221
+ *
6222
+ * @private thing of inline knowledge
6223
+ */
6224
+ function getFirstNonEmptyLine(content) {
6225
+ const lines = content.split(/\r?\n/);
6226
+ for (const line of lines) {
6227
+ const trimmed = line.trim();
6228
+ if (trimmed) {
6229
+ return trimmed;
6230
+ }
6231
+ }
6232
+ return null;
6233
+ }
6234
+ /**
6235
+ * @@@
6236
+ *
6237
+ * @private thing of inline knowledge
6238
+ */
6239
+ function deriveBaseFilename(content) {
6240
+ const firstLine = getFirstNonEmptyLine(content);
6241
+ if (!firstLine) {
6242
+ return INLINE_KNOWLEDGE_BASE_NAME;
6243
+ }
6244
+ const normalized = normalizeToKebabCase(firstLine);
6245
+ return normalized || INLINE_KNOWLEDGE_BASE_NAME;
6246
+ }
6247
+ /**
6248
+ * Creates a data URL that represents the inline knowledge content as a text file.
6249
+ *
6250
+ * @private thing of inline knowledge
6251
+ */
6252
+ function createInlineKnowledgeSourceFile(content) {
6253
+ const trimmedContent = content.trim();
6254
+ const baseName = deriveBaseFilename(trimmedContent);
6255
+ const filename = `${baseName}${INLINE_KNOWLEDGE_EXTENSION}`;
6256
+ const mimeType = 'text/plain';
6257
+ const base64 = Buffer.from(trimmedContent, 'utf-8').toString('base64');
6258
+ const encodedFilename = encodeURIComponent(filename);
6259
+ const url = `${DATA_URL_PREFIX}${mimeType};name=${encodedFilename};charset=utf-8;base64,${base64}`;
6260
+ return {
6261
+ filename,
6262
+ mimeType,
6263
+ url,
6264
+ };
6265
+ }
6266
+ /**
6267
+ * Checks whether the provided source string is a data URL that can be decoded.
6268
+ *
6269
+ * @private thing of inline knowledge
6270
+ */
6271
+ function isDataUrlKnowledgeSource(source) {
6272
+ return typeof source === 'string' && source.startsWith(DATA_URL_PREFIX);
6273
+ }
6274
+ /**
6275
+ * Parses a data URL-based knowledge source into its raw buffer, filename, and MIME type.
6276
+ *
6277
+ * @private thing of inline knowledge
6278
+ */
6279
+ function parseDataUrlKnowledgeSource(source) {
6280
+ if (!isDataUrlKnowledgeSource(source)) {
6281
+ return null;
6282
+ }
6283
+ const commaIndex = source.indexOf(',');
6284
+ if (commaIndex === -1) {
6285
+ return null;
6286
+ }
6287
+ const header = source.slice(DATA_URL_PREFIX.length, commaIndex);
6288
+ const payload = source.slice(commaIndex + 1);
6289
+ const tokens = header.split(';');
6290
+ const mediaType = tokens[0] || 'text/plain';
6291
+ let filename = `${INLINE_KNOWLEDGE_BASE_NAME}${INLINE_KNOWLEDGE_EXTENSION}`;
6292
+ let isBase64 = false;
6293
+ for (let i = 1; i < tokens.length; i++) {
6294
+ const token = tokens[i];
6295
+ if (!token) {
6296
+ continue;
6297
+ }
6298
+ if (token.toLowerCase() === 'base64') {
6299
+ isBase64 = true;
6300
+ continue;
6301
+ }
6302
+ const [key, value] = token.split('=');
6303
+ if (key === 'name' && value !== undefined) {
6304
+ try {
6305
+ filename = decodeURIComponent(value);
6306
+ }
6307
+ catch (_a) {
6308
+ filename = value;
6309
+ }
6310
+ }
6311
+ }
6312
+ if (!isBase64) {
6313
+ return null;
6314
+ }
6315
+ try {
6316
+ const buffer = Buffer.from(payload, 'base64');
6317
+ return {
6318
+ buffer,
6319
+ filename,
6320
+ mimeType: mediaType,
6321
+ };
6322
+ }
6323
+ catch (_b) {
6324
+ return null;
6325
+ }
6326
+ }
6327
+ /**
6328
+ * Note: [💞] Ignore a discrepancy between file name and entity name
6329
+ */
6330
+
6186
6331
  /**
6187
6332
  * KNOWLEDGE commitment definition
6188
6333
  *
@@ -6281,9 +6426,13 @@ class KnowledgeCommitmentDefinition extends BaseCommitmentDefinition {
6281
6426
  return this.appendToSystemMessage(updatedRequirements, knowledgeInfo, '\n\n');
6282
6427
  }
6283
6428
  else {
6284
- // Direct text knowledge - add to system message
6285
- const knowledgeSection = `Knowledge: ${trimmedContent}`;
6286
- return this.appendToSystemMessage(requirements, knowledgeSection, '\n\n');
6429
+ const inlineSource = createInlineKnowledgeSourceFile(trimmedContent);
6430
+ const updatedRequirements = {
6431
+ ...requirements,
6432
+ knowledgeSources: [...(requirements.knowledgeSources || []), inlineSource.url],
6433
+ };
6434
+ const knowledgeInfo = `Knowledge Source Inline: ${inlineSource.filename} (derived from inline content and processed for retrieval during chat)`;
6435
+ return this.appendToSystemMessage(updatedRequirements, knowledgeInfo, '\n\n');
6287
6436
  }
6288
6437
  }
6289
6438
  }
@@ -6530,16 +6679,16 @@ class AgentMessageCommitmentDefinition extends BaseCommitmentDefinition {
6530
6679
  // and typically doesn't need to be added to the system prompt or model requirements directly.
6531
6680
  // It is extracted separately for the chat interface.
6532
6681
  var _a;
6533
- const pendingUserMessage = (_a = requirements.metadata) === null || _a === void 0 ? void 0 : _a.pendingUserMessage;
6682
+ const pendingUserMessage = (_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.pendingUserMessage;
6534
6683
  if (pendingUserMessage) {
6535
6684
  const newSample = { question: pendingUserMessage, answer: content };
6536
6685
  const newSamples = [...(requirements.samples || []), newSample];
6537
- const newMetadata = { ...requirements.metadata };
6686
+ const newMetadata = { ...requirements._metadata };
6538
6687
  delete newMetadata.pendingUserMessage;
6539
6688
  return {
6540
6689
  ...requirements,
6541
6690
  samples: newSamples,
6542
- metadata: newMetadata,
6691
+ _metadata: newMetadata,
6543
6692
  };
6544
6693
  }
6545
6694
  return requirements;
@@ -6787,8 +6936,8 @@ class UserMessageCommitmentDefinition extends BaseCommitmentDefinition {
6787
6936
  applyToAgentModelRequirements(requirements, content) {
6788
6937
  return {
6789
6938
  ...requirements,
6790
- metadata: {
6791
- ...requirements.metadata,
6939
+ _metadata: {
6940
+ ...requirements._metadata,
6792
6941
  pendingUserMessage: content,
6793
6942
  },
6794
6943
  };
@@ -7646,11 +7795,7 @@ class NoteCommitmentDefinition extends BaseCommitmentDefinition {
7646
7795
  if (trimmedContent === '') {
7647
7796
  return requirements;
7648
7797
  }
7649
- // Return requirements with updated notes but no changes to system message
7650
- return {
7651
- ...requirements,
7652
- notes: [...(requirements.notes || []), trimmedContent],
7653
- };
7798
+ return requirements;
7654
7799
  }
7655
7800
  }
7656
7801
  /**
@@ -7712,12 +7857,12 @@ class OpenCommitmentDefinition extends BaseCommitmentDefinition {
7712
7857
  // Since OPEN is default, we can just ensure isClosed is false
7713
7858
  // But to be explicit we can set it
7714
7859
  const updatedMetadata = {
7715
- ...requirements.metadata,
7860
+ ...requirements._metadata,
7716
7861
  isClosed: false,
7717
7862
  };
7718
7863
  return {
7719
7864
  ...requirements,
7720
- metadata: updatedMetadata,
7865
+ _metadata: updatedMetadata,
7721
7866
  };
7722
7867
  }
7723
7868
  }
@@ -7798,7 +7943,7 @@ class PersonaCommitmentDefinition extends BaseCommitmentDefinition {
7798
7943
  return requirements;
7799
7944
  }
7800
7945
  // Get existing persona content from metadata
7801
- const existingPersonaContent = ((_a = requirements.metadata) === null || _a === void 0 ? void 0 : _a.PERSONA) || '';
7946
+ const existingPersonaContent = ((_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.PERSONA) || '';
7802
7947
  // Merge the new content with existing persona content
7803
7948
  // When multiple PERSONA commitments exist, they are merged into one
7804
7949
  const mergedPersonaContent = existingPersonaContent
@@ -7806,12 +7951,12 @@ class PersonaCommitmentDefinition extends BaseCommitmentDefinition {
7806
7951
  : trimmedContent;
7807
7952
  // Store the merged persona content in metadata for debugging and inspection
7808
7953
  const updatedMetadata = {
7809
- ...requirements.metadata,
7954
+ ...requirements._metadata,
7810
7955
  PERSONA: mergedPersonaContent,
7811
7956
  };
7812
7957
  // Get the agent name from metadata (which should contain the first line of agent source)
7813
7958
  // If not available, extract from current system message as fallback
7814
- let agentName = (_b = requirements.metadata) === null || _b === void 0 ? void 0 : _b.agentName;
7959
+ let agentName = (_b = requirements._metadata) === null || _b === void 0 ? void 0 : _b.agentName;
7815
7960
  if (!agentName) {
7816
7961
  // Fallback: extract from current system message
7817
7962
  const currentMessage = requirements.systemMessage.trim();
@@ -7858,7 +8003,7 @@ class PersonaCommitmentDefinition extends BaseCommitmentDefinition {
7858
8003
  return {
7859
8004
  ...requirements,
7860
8005
  systemMessage: newSystemMessage,
7861
- metadata: updatedMetadata,
8006
+ _metadata: updatedMetadata,
7862
8007
  };
7863
8008
  }
7864
8009
  }
@@ -7941,7 +8086,16 @@ class RuleCommitmentDefinition extends BaseCommitmentDefinition {
7941
8086
  }
7942
8087
  // Add rule to the system message
7943
8088
  const ruleSection = `Rule: ${trimmedContent}`;
7944
- return this.appendToSystemMessage(requirements, ruleSection, '\n\n');
8089
+ const requirementsWithRule = this.appendToSystemMessage(requirements, ruleSection, '\n\n');
8090
+ const ruleLines = trimmedContent
8091
+ .split(/\r?\n/)
8092
+ .map((line) => line.trim())
8093
+ .filter(Boolean)
8094
+ .map((line) => `- ${line}`);
8095
+ if (ruleLines.length === 0) {
8096
+ return requirementsWithRule;
8097
+ }
8098
+ return this.appendToPromptSuffix(requirementsWithRule, ruleLines.join('\n'));
7945
8099
  }
7946
8100
  }
7947
8101
  /**
@@ -8447,7 +8601,7 @@ class TeamCommitmentDefinition extends BaseCommitmentDefinition {
8447
8601
  if (teammates.length === 0) {
8448
8602
  return requirements;
8449
8603
  }
8450
- const agentName = ((_a = requirements.metadata) === null || _a === void 0 ? void 0 : _a.agentName) || 'Agent';
8604
+ const agentName = ((_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.agentName) || 'Agent';
8451
8605
  const teamEntries = teammates.map((teammate) => ({
8452
8606
  toolName: createTeamToolName(teammate.url),
8453
8607
  teammate,
@@ -8487,7 +8641,7 @@ class TeamCommitmentDefinition extends BaseCommitmentDefinition {
8487
8641
  },
8488
8642
  });
8489
8643
  }
8490
- const existingTeammates = ((_b = requirements.metadata) === null || _b === void 0 ? void 0 : _b.teammates) || [];
8644
+ const existingTeammates = ((_b = requirements._metadata) === null || _b === void 0 ? void 0 : _b.teammates) || [];
8491
8645
  const updatedTeammates = [...existingTeammates];
8492
8646
  for (const entry of teamEntries) {
8493
8647
  if (updatedTeammates.some((existing) => existing.url === entry.teammate.url)) {
@@ -8516,8 +8670,8 @@ class TeamCommitmentDefinition extends BaseCommitmentDefinition {
8516
8670
  return this.appendToSystemMessage({
8517
8671
  ...requirements,
8518
8672
  tools: updatedTools,
8519
- metadata: {
8520
- ...requirements.metadata,
8673
+ _metadata: {
8674
+ ...requirements._metadata,
8521
8675
  teammates: updatedTeammates,
8522
8676
  },
8523
8677
  }, teamSystemMessage);
@@ -8749,7 +8903,7 @@ class TemplateCommitmentDefinition extends BaseCommitmentDefinition {
8749
8903
  if (!trimmedContent) {
8750
8904
  // Store template mode flag in metadata
8751
8905
  const updatedMetadata = {
8752
- ...requirements.metadata,
8906
+ ...requirements._metadata,
8753
8907
  templateMode: true,
8754
8908
  };
8755
8909
  // Add a general instruction about using structured templates
@@ -8759,21 +8913,21 @@ class TemplateCommitmentDefinition extends BaseCommitmentDefinition {
8759
8913
  `);
8760
8914
  return {
8761
8915
  ...this.appendToSystemMessage(requirements, templateModeInstruction, '\n\n'),
8762
- metadata: updatedMetadata,
8916
+ _metadata: updatedMetadata,
8763
8917
  };
8764
8918
  }
8765
8919
  // If content is provided, add the specific template instructions
8766
8920
  const templateSection = `Response Template: ${trimmedContent}`;
8767
8921
  // Store the template in metadata for potential programmatic access
8768
- const existingTemplates = ((_a = requirements.metadata) === null || _a === void 0 ? void 0 : _a.templates) || [];
8922
+ const existingTemplates = ((_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.templates) || [];
8769
8923
  const updatedMetadata = {
8770
- ...requirements.metadata,
8924
+ ...requirements._metadata,
8771
8925
  templates: [...existingTemplates, trimmedContent],
8772
8926
  templateMode: true,
8773
8927
  };
8774
8928
  return {
8775
8929
  ...this.appendToSystemMessage(requirements, templateSection, '\n\n'),
8776
- metadata: updatedMetadata,
8930
+ _metadata: updatedMetadata,
8777
8931
  };
8778
8932
  }
8779
8933
  }
@@ -9110,8 +9264,8 @@ class UseBrowserCommitmentDefinition extends BaseCommitmentDefinition {
9110
9264
  return this.appendToSystemMessage({
9111
9265
  ...requirements,
9112
9266
  tools: updatedTools,
9113
- metadata: {
9114
- ...requirements.metadata,
9267
+ _metadata: {
9268
+ ...requirements._metadata,
9115
9269
  useBrowser: true,
9116
9270
  },
9117
9271
  }, spaceTrim$1(`
@@ -9340,8 +9494,8 @@ class UseEmailCommitmentDefinition extends BaseCommitmentDefinition {
9340
9494
  return this.appendToSystemMessage({
9341
9495
  ...requirements,
9342
9496
  tools: updatedTools,
9343
- metadata: {
9344
- ...requirements.metadata,
9497
+ _metadata: {
9498
+ ...requirements._metadata,
9345
9499
  useEmail: content || true,
9346
9500
  },
9347
9501
  }, spaceTrim$1((block) => `
@@ -9476,8 +9630,8 @@ class UseImageGeneratorCommitmentDefinition extends BaseCommitmentDefinition {
9476
9630
  return this.appendToSystemMessage({
9477
9631
  ...requirements,
9478
9632
  tools: updatedTools,
9479
- metadata: {
9480
- ...requirements.metadata,
9633
+ _metadata: {
9634
+ ...requirements._metadata,
9481
9635
  useImageGenerator: content || true,
9482
9636
  },
9483
9637
  }, spaceTrim$1(`
@@ -9768,8 +9922,8 @@ class UseSearchEngineCommitmentDefinition extends BaseCommitmentDefinition {
9768
9922
  return this.appendToSystemMessage({
9769
9923
  ...requirements,
9770
9924
  tools: updatedTools,
9771
- metadata: {
9772
- ...requirements.metadata,
9925
+ _metadata: {
9926
+ ...requirements._metadata,
9773
9927
  useSearchEngine: content || true,
9774
9928
  },
9775
9929
  }, spaceTrim$1((block) => `
@@ -9917,8 +10071,8 @@ class UseTimeCommitmentDefinition extends BaseCommitmentDefinition {
9917
10071
  return this.appendToSystemMessage({
9918
10072
  ...requirements,
9919
10073
  tools: updatedTools,
9920
- metadata: {
9921
- ...requirements.metadata,
10074
+ _metadata: {
10075
+ ...requirements._metadata,
9922
10076
  },
9923
10077
  }, spaceTrim$1((block) => `
9924
10078
  Time and date context:
@@ -15648,11 +15802,14 @@ async function preparePersona(personaDescription, tools, options) {
15648
15802
  function createEmptyAgentModelRequirements() {
15649
15803
  return {
15650
15804
  systemMessage: '',
15805
+ promptSuffix: '',
15651
15806
  // modelName: 'gpt-5',
15652
15807
  modelName: 'gemini-2.5-flash-lite',
15653
15808
  temperature: 0.7,
15654
15809
  topP: 0.9,
15655
15810
  topK: 50,
15811
+ parentAgentUrl: null,
15812
+ isClosed: false,
15656
15813
  };
15657
15814
  }
15658
15815
  /**
@@ -15842,8 +15999,8 @@ async function createAgentModelRequirementsWithCommitments(agentSource, modelNam
15842
15999
  // Store the agent name in metadata so commitments can access it
15843
16000
  requirements = {
15844
16001
  ...requirements,
15845
- metadata: {
15846
- ...requirements.metadata,
16002
+ _metadata: {
16003
+ ...requirements._metadata,
15847
16004
  agentName: parseResult.agentName,
15848
16005
  },
15849
16006
  };
@@ -16326,6 +16483,66 @@ const OPENAI_MODELS = exportJson({
16326
16483
  },
16327
16484
  /**/
16328
16485
  /**/
16486
+ {
16487
+ modelVariant: 'CHAT',
16488
+ modelTitle: 'gpt-5.2-codex',
16489
+ modelName: 'gpt-5.2-codex',
16490
+ modelDescription: 'High-capability Codex variant tuned for agentic code generation with large contexts and reasoning effort controls. Ideal for long-horizon coding workflows and multi-step reasoning.',
16491
+ pricing: {
16492
+ prompt: pricing(`$1.75 / 1M tokens`),
16493
+ output: pricing(`$14.00 / 1M tokens`),
16494
+ },
16495
+ },
16496
+ /**/
16497
+ /**/
16498
+ {
16499
+ modelVariant: 'CHAT',
16500
+ modelTitle: 'gpt-5.1-codex-max',
16501
+ modelName: 'gpt-5.1-codex-max',
16502
+ modelDescription: 'Premium GPT-5.1 Codex flavor that mirrors gpt-5.1 in capability and pricing while adding Codex tooling optimizations.',
16503
+ pricing: {
16504
+ prompt: pricing(`$1.25 / 1M tokens`),
16505
+ output: pricing(`$10.00 / 1M tokens`),
16506
+ },
16507
+ },
16508
+ /**/
16509
+ /**/
16510
+ {
16511
+ modelVariant: 'CHAT',
16512
+ modelTitle: 'gpt-5.1-codex',
16513
+ modelName: 'gpt-5.1-codex',
16514
+ modelDescription: 'Core GPT-5.1 Codex model focused on agentic coding tasks with a balanced trade-off between reasoning and cost.',
16515
+ pricing: {
16516
+ prompt: pricing(`$1.25 / 1M tokens`),
16517
+ output: pricing(`$10.00 / 1M tokens`),
16518
+ },
16519
+ },
16520
+ /**/
16521
+ /**/
16522
+ {
16523
+ modelVariant: 'CHAT',
16524
+ modelTitle: 'gpt-5.1-codex-mini',
16525
+ modelName: 'gpt-5.1-codex-mini',
16526
+ modelDescription: 'Compact, cost-effective GPT-5.1 Codex variant with a smaller context window ideal for cheap assistant iterations that still require coding awareness.',
16527
+ pricing: {
16528
+ prompt: pricing(`$0.25 / 1M tokens`),
16529
+ output: pricing(`$2.00 / 1M tokens`),
16530
+ },
16531
+ },
16532
+ /**/
16533
+ /**/
16534
+ {
16535
+ modelVariant: 'CHAT',
16536
+ modelTitle: 'gpt-5-codex',
16537
+ modelName: 'gpt-5-codex',
16538
+ modelDescription: 'Legacy GPT-5 Codex model built for agentic coding workloads with the same pricing as GPT-5 and a focus on stability.',
16539
+ pricing: {
16540
+ prompt: pricing(`$1.25 / 1M tokens`),
16541
+ output: pricing(`$10.00 / 1M tokens`),
16542
+ },
16543
+ },
16544
+ /**/
16545
+ /**/
16329
16546
  {
16330
16547
  modelVariant: 'CHAT',
16331
16548
  modelTitle: 'gpt-5-mini',
@@ -17030,6 +17247,32 @@ function isUnsupportedParameterError(error) {
17030
17247
  errorMessage.includes('does not support'));
17031
17248
  }
17032
17249
 
17250
+ /**
17251
+ * Provides access to the structured clone implementation when available.
17252
+ */
17253
+ function getStructuredCloneFunction() {
17254
+ return globalThis.structuredClone;
17255
+ }
17256
+ /**
17257
+ * Checks whether the prompt is a chat prompt that carries file attachments.
17258
+ */
17259
+ function hasChatPromptFiles(prompt) {
17260
+ return 'files' in prompt && Array.isArray(prompt.files);
17261
+ }
17262
+ /**
17263
+ * Creates a deep copy of the prompt while keeping attached files intact when structured clone is not available.
17264
+ */
17265
+ function clonePromptPreservingFiles(prompt) {
17266
+ const structuredCloneFn = getStructuredCloneFunction();
17267
+ if (typeof structuredCloneFn === 'function') {
17268
+ return structuredCloneFn(prompt);
17269
+ }
17270
+ const clonedPrompt = JSON.parse(JSON.stringify(prompt));
17271
+ if (hasChatPromptFiles(prompt)) {
17272
+ clonedPrompt.files = prompt.files;
17273
+ }
17274
+ return clonedPrompt;
17275
+ }
17033
17276
  /**
17034
17277
  * Execution Tools for calling OpenAI API or other OpenAI compatible provider
17035
17278
  *
@@ -17114,7 +17357,7 @@ class OpenAiCompatibleExecutionTools {
17114
17357
  */
17115
17358
  async callChatModelStream(prompt, onProgress) {
17116
17359
  // Deep clone prompt and modelRequirements to avoid mutation across calls
17117
- const clonedPrompt = JSON.parse(JSON.stringify(prompt));
17360
+ const clonedPrompt = clonePromptPreservingFiles(prompt);
17118
17361
  // Use local Set for retried parameters to ensure independence and thread safety
17119
17362
  const retriedUnsupportedParameters = new Set();
17120
17363
  return this.callChatModelWithRetry(clonedPrompt, clonedPrompt.modelRequirements, [], retriedUnsupportedParameters, onProgress);
@@ -17141,7 +17384,10 @@ class OpenAiCompatibleExecutionTools {
17141
17384
  // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
17142
17385
  // <- Note: [🧆]
17143
17386
  }; // <- TODO: [💩] Guard here types better
17144
- if (format === 'JSON') {
17387
+ if (currentModelRequirements.responseFormat !== undefined) {
17388
+ modelSettings.response_format = currentModelRequirements.responseFormat;
17389
+ }
17390
+ else if (format === 'JSON') {
17145
17391
  modelSettings.response_format = {
17146
17392
  type: 'json_object',
17147
17393
  };
@@ -18622,7 +18868,9 @@ class OpenAiVectorStoreHandler extends OpenAiExecutionTools {
18622
18868
  const processingStartedAtMs = Date.now();
18623
18869
  for (const [index, source] of knowledgeSources.entries()) {
18624
18870
  try {
18625
- const sourceType = source.startsWith('http') || source.startsWith('https') ? 'url' : 'file';
18871
+ const isDataUrl = isDataUrlKnowledgeSource(source);
18872
+ const isHttp = source.startsWith('http://') || source.startsWith('https://');
18873
+ const sourceType = isDataUrl ? 'data_url' : isHttp ? 'url' : 'file';
18626
18874
  if (this.options.isVerbose) {
18627
18875
  console.info('[🤰]', 'Processing knowledge source', {
18628
18876
  index: index + 1,
@@ -18632,8 +18880,27 @@ class OpenAiVectorStoreHandler extends OpenAiExecutionTools {
18632
18880
  logLabel,
18633
18881
  });
18634
18882
  }
18635
- // Check if it's a URL
18636
- if (source.startsWith('http://') || source.startsWith('https://')) {
18883
+ if (isDataUrl) {
18884
+ const parsed = parseDataUrlKnowledgeSource(source);
18885
+ if (!parsed) {
18886
+ skippedSources.push({ source, reason: 'invalid_data_url' });
18887
+ if (this.options.isVerbose) {
18888
+ console.info('[🤰]', 'Skipping knowledge source (invalid data URL)', {
18889
+ source,
18890
+ sourceType,
18891
+ logLabel,
18892
+ });
18893
+ }
18894
+ continue;
18895
+ }
18896
+ const dataUrlFile = new File([parsed.buffer], parsed.filename, {
18897
+ type: parsed.mimeType,
18898
+ });
18899
+ fileStreams.push(dataUrlFile);
18900
+ totalBytes += parsed.buffer.length;
18901
+ continue;
18902
+ }
18903
+ if (isHttp) {
18637
18904
  const downloadResult = await this.downloadKnowledgeSourceFile({
18638
18905
  source,
18639
18906
  timeoutMs: downloadTimeoutMs,
@@ -18735,6 +19002,64 @@ class OpenAiVectorStoreHandler extends OpenAiExecutionTools {
18735
19002
  }
18736
19003
 
18737
19004
  const DEFAULT_AGENT_KIT_MODEL_NAME = 'gpt-5.2';
19005
+ const DEFAULT_JSON_SCHEMA_NAME = 'StructuredOutput';
19006
+ /*
19007
+ TODO: Use or remove
19008
+ const EMPTY_JSON_SCHEMA: JsonSchemaDefinition['schema'] = {
19009
+ type: 'object',
19010
+ properties: {},
19011
+ required: [],
19012
+ additionalProperties: true,
19013
+ };
19014
+ */
19015
+ function buildJsonSchemaDefinition(jsonSchema) {
19016
+ var _a, _b, _c;
19017
+ const schema = (_a = jsonSchema === null || jsonSchema === void 0 ? void 0 : jsonSchema.schema) !== null && _a !== void 0 ? _a : {};
19018
+ return {
19019
+ type: 'json_schema',
19020
+ name: (_b = jsonSchema === null || jsonSchema === void 0 ? void 0 : jsonSchema.name) !== null && _b !== void 0 ? _b : DEFAULT_JSON_SCHEMA_NAME,
19021
+ strict: Boolean(jsonSchema === null || jsonSchema === void 0 ? void 0 : jsonSchema.strict),
19022
+ schema: {
19023
+ type: 'object',
19024
+ properties: ((_c = schema.properties) !== null && _c !== void 0 ? _c : {}),
19025
+ required: Array.isArray(schema.required) ? schema.required : [],
19026
+ additionalProperties: schema.additionalProperties === undefined ? true : Boolean(schema.additionalProperties),
19027
+ description: schema.description,
19028
+ },
19029
+ };
19030
+ }
19031
+ /**
19032
+ * Maps OpenAI `response_format` payloads to AgentKit output types so the runner can forward
19033
+ * structured-output preferences to OpenAI while still reusing the same AgentKit agent instance.
19034
+ *
19035
+ * @param responseFormat - The OpenAI `response_format` payload from the user request.
19036
+ * @returns An Agent output type compatible with the requested schema or `undefined` when no impact is required.
19037
+ * @private utility of Open AI
19038
+ */
19039
+ function mapResponseFormatToAgentOutputType(responseFormat) {
19040
+ if (!responseFormat) {
19041
+ return undefined;
19042
+ }
19043
+ if (typeof responseFormat === 'string') {
19044
+ if (responseFormat === 'text') {
19045
+ return 'text';
19046
+ }
19047
+ if (responseFormat === 'json_schema' || responseFormat === 'json_object') {
19048
+ return buildJsonSchemaDefinition();
19049
+ }
19050
+ return 'text';
19051
+ }
19052
+ switch (responseFormat.type) {
19053
+ case 'text':
19054
+ return 'text';
19055
+ case 'json_schema':
19056
+ return buildJsonSchemaDefinition(responseFormat.json_schema);
19057
+ case 'json_object':
19058
+ return buildJsonSchemaDefinition();
19059
+ default:
19060
+ return undefined;
19061
+ }
19062
+ }
18738
19063
  /**
18739
19064
  * Execution tools for OpenAI AgentKit (Agents SDK).
18740
19065
  *
@@ -18782,6 +19107,7 @@ class OpenAiAgentKitExecutionTools extends OpenAiVectorStoreHandler {
18782
19107
  ...parameters,
18783
19108
  modelName: this.agentKitModelName,
18784
19109
  });
19110
+ const responseFormatOutputType = mapResponseFormatToAgentOutputType(modelRequirements.responseFormat);
18785
19111
  const preparedAgentKitAgent = await this.prepareAgentKitAgent({
18786
19112
  name: (prompt.title || 'Agent'),
18787
19113
  instructions: modelRequirements.systemMessage || '',
@@ -18793,6 +19119,7 @@ class OpenAiAgentKitExecutionTools extends OpenAiVectorStoreHandler {
18793
19119
  prompt,
18794
19120
  rawPromptContent,
18795
19121
  onProgress,
19122
+ responseFormatOutputType,
18796
19123
  });
18797
19124
  }
18798
19125
  /**
@@ -18974,16 +19301,21 @@ class OpenAiAgentKitExecutionTools extends OpenAiVectorStoreHandler {
18974
19301
  ...prompt.parameters,
18975
19302
  modelName: this.agentKitModelName,
18976
19303
  });
19304
+ const agentForRun = options.responseFormatOutputType !== undefined
19305
+ ? openAiAgentKitAgent.clone({
19306
+ outputType: options.responseFormatOutputType,
19307
+ })
19308
+ : openAiAgentKitAgent;
18977
19309
  const start = $getCurrentDate();
18978
19310
  let latestContent = '';
18979
19311
  const toolCalls = [];
18980
19312
  const toolCallIndexById = new Map();
18981
19313
  const inputItems = await this.buildAgentKitInputItems(prompt, rawPromptContent);
18982
19314
  const rawRequest = {
18983
- agentName: openAiAgentKitAgent.name,
19315
+ agentName: agentForRun.name,
18984
19316
  input: inputItems,
18985
19317
  };
18986
- const streamResult = await run(openAiAgentKitAgent, inputItems, {
19318
+ const streamResult = await run(agentForRun, inputItems, {
18987
19319
  stream: true,
18988
19320
  context: { parameters: prompt.parameters },
18989
19321
  });
@@ -19972,22 +20304,28 @@ class AgentLlmExecutionTools {
19972
20304
  throw new Error('AgentLlmExecutionTools only supports chat prompts');
19973
20305
  }
19974
20306
  const modelRequirements = await this.getModelRequirements();
20307
+ const { _metadata, promptSuffix, ...sanitizedRequirements } = modelRequirements;
19975
20308
  const chatPrompt = prompt;
19976
20309
  let underlyingLlmResult;
19977
- // Create modified chat prompt with agent system message
20310
+ const chatPromptContentWithSuffix = promptSuffix
20311
+ ? `${chatPrompt.content}\n\n${promptSuffix}`
20312
+ : chatPrompt.content;
19978
20313
  const promptWithAgentModelRequirements = {
19979
20314
  ...chatPrompt,
20315
+ content: chatPromptContentWithSuffix,
19980
20316
  modelRequirements: {
19981
20317
  ...chatPrompt.modelRequirements,
19982
- ...modelRequirements,
20318
+ ...sanitizedRequirements,
19983
20319
  // Spread tools to convert readonly array to mutable
19984
- tools: modelRequirements.tools ? [...modelRequirements.tools] : chatPrompt.modelRequirements.tools,
20320
+ tools: sanitizedRequirements.tools
20321
+ ? [...sanitizedRequirements.tools]
20322
+ : chatPrompt.modelRequirements.tools,
19985
20323
  // Spread knowledgeSources to convert readonly array to mutable
19986
- knowledgeSources: modelRequirements.knowledgeSources
19987
- ? [...modelRequirements.knowledgeSources]
20324
+ knowledgeSources: sanitizedRequirements.knowledgeSources
20325
+ ? [...sanitizedRequirements.knowledgeSources]
19988
20326
  : undefined,
19989
20327
  // Prepend agent system message to existing system message
19990
- systemMessage: modelRequirements.systemMessage +
20328
+ systemMessage: sanitizedRequirements.systemMessage +
19991
20329
  (chatPrompt.modelRequirements.systemMessage
19992
20330
  ? `\n\n${chatPrompt.modelRequirements.systemMessage}`
19993
20331
  : ''),
@@ -19995,8 +20333,8 @@ class AgentLlmExecutionTools {
19995
20333
  };
19996
20334
  console.log('!!!! promptWithAgentModelRequirements:', promptWithAgentModelRequirements);
19997
20335
  if (OpenAiAgentKitExecutionTools.isOpenAiAgentKitExecutionTools(this.options.llmTools)) {
19998
- const requirementsHash = SHA256(JSON.stringify(modelRequirements)).toString();
19999
- const vectorStoreHash = SHA256(JSON.stringify((_a = modelRequirements.knowledgeSources) !== null && _a !== void 0 ? _a : [])).toString();
20336
+ const requirementsHash = SHA256(JSON.stringify(sanitizedRequirements)).toString();
20337
+ const vectorStoreHash = SHA256(JSON.stringify((_a = sanitizedRequirements.knowledgeSources) !== null && _a !== void 0 ? _a : [])).toString();
20000
20338
  const cachedVectorStore = AgentLlmExecutionTools.vectorStoreCache.get(this.title);
20001
20339
  const cachedAgentKit = AgentLlmExecutionTools.agentKitAgentCache.get(this.title);
20002
20340
  let preparedAgentKit = this.options.assistantPreparationMode === 'external'
@@ -20023,7 +20361,7 @@ class AgentLlmExecutionTools {
20023
20361
  agent: this.title,
20024
20362
  });
20025
20363
  }
20026
- if (!vectorStoreId && ((_b = modelRequirements.knowledgeSources) === null || _b === void 0 ? void 0 : _b.length)) {
20364
+ if (!vectorStoreId && ((_b = sanitizedRequirements.knowledgeSources) === null || _b === void 0 ? void 0 : _b.length)) {
20027
20365
  emitAssistantPreparationProgress({
20028
20366
  onProgress,
20029
20367
  prompt,
@@ -20039,9 +20377,9 @@ class AgentLlmExecutionTools {
20039
20377
  });
20040
20378
  preparedAgentKit = await this.options.llmTools.prepareAgentKitAgent({
20041
20379
  name: this.title,
20042
- instructions: modelRequirements.systemMessage || '',
20043
- knowledgeSources: modelRequirements.knowledgeSources,
20044
- tools: modelRequirements.tools ? [...modelRequirements.tools] : undefined,
20380
+ instructions: sanitizedRequirements.systemMessage || '',
20381
+ knowledgeSources: sanitizedRequirements.knowledgeSources,
20382
+ tools: sanitizedRequirements.tools ? [...sanitizedRequirements.tools] : undefined,
20045
20383
  vectorStoreId,
20046
20384
  });
20047
20385
  }
@@ -20056,15 +20394,17 @@ class AgentLlmExecutionTools {
20056
20394
  requirementsHash,
20057
20395
  vectorStoreId: preparedAgentKit.vectorStoreId,
20058
20396
  });
20397
+ const responseFormatOutputType = mapResponseFormatToAgentOutputType(promptWithAgentModelRequirements.modelRequirements.responseFormat);
20059
20398
  underlyingLlmResult = await this.options.llmTools.callChatModelStreamWithPreparedAgent({
20060
20399
  openAiAgentKitAgent: preparedAgentKit.agent,
20061
20400
  prompt: promptWithAgentModelRequirements,
20062
20401
  onProgress,
20402
+ responseFormatOutputType,
20063
20403
  });
20064
20404
  }
20065
20405
  else if (OpenAiAssistantExecutionTools.isOpenAiAssistantExecutionTools(this.options.llmTools)) {
20066
20406
  // ... deprecated path ...
20067
- const requirementsHash = SHA256(JSON.stringify(modelRequirements)).toString();
20407
+ const requirementsHash = SHA256(JSON.stringify(sanitizedRequirements)).toString();
20068
20408
  const cached = AgentLlmExecutionTools.assistantCache.get(this.title);
20069
20409
  let assistant;
20070
20410
  if (this.options.assistantPreparationMode === 'external') {
@@ -20106,9 +20446,9 @@ class AgentLlmExecutionTools {
20106
20446
  assistant = await this.options.llmTools.updateAssistant({
20107
20447
  assistantId: cached.assistantId,
20108
20448
  name: this.title,
20109
- instructions: modelRequirements.systemMessage,
20110
- knowledgeSources: modelRequirements.knowledgeSources,
20111
- tools: modelRequirements.tools ? [...modelRequirements.tools] : undefined,
20449
+ instructions: sanitizedRequirements.systemMessage,
20450
+ knowledgeSources: sanitizedRequirements.knowledgeSources,
20451
+ tools: sanitizedRequirements.tools ? [...sanitizedRequirements.tools] : undefined,
20112
20452
  });
20113
20453
  AgentLlmExecutionTools.assistantCache.set(this.title, {
20114
20454
  assistantId: assistant.assistantId,
@@ -20131,9 +20471,9 @@ class AgentLlmExecutionTools {
20131
20471
  });
20132
20472
  assistant = await this.options.llmTools.createNewAssistant({
20133
20473
  name: this.title,
20134
- instructions: modelRequirements.systemMessage,
20135
- knowledgeSources: modelRequirements.knowledgeSources,
20136
- tools: modelRequirements.tools ? [...modelRequirements.tools] : undefined,
20474
+ instructions: sanitizedRequirements.systemMessage,
20475
+ knowledgeSources: sanitizedRequirements.knowledgeSources,
20476
+ tools: sanitizedRequirements.tools ? [...sanitizedRequirements.tools] : undefined,
20137
20477
  /*
20138
20478
  !!!
20139
20479
  metadata: {
@@ -20175,13 +20515,19 @@ class AgentLlmExecutionTools {
20175
20515
  }
20176
20516
  }
20177
20517
  let content = underlyingLlmResult.content;
20178
- // Note: Cleanup the AI artifacts from the content
20179
- content = humanizeAiText(content);
20180
- // Note: Make sure the content is Promptbook-like
20181
- content = promptbookifyAiText(content);
20518
+ if (typeof content === 'string') {
20519
+ // Note: Cleanup the AI artifacts from the content
20520
+ content = humanizeAiText(content);
20521
+ // Note: Make sure the content is Promptbook-like
20522
+ content = promptbookifyAiText(content);
20523
+ }
20524
+ else {
20525
+ // TODO: Maybe deep `humanizeAiText` + `promptbookifyAiText` inside of the object
20526
+ content = JSON.stringify(content);
20527
+ }
20182
20528
  const agentResult = {
20183
20529
  ...underlyingLlmResult,
20184
- content,
20530
+ content: content,
20185
20531
  modelName: this.modelName,
20186
20532
  };
20187
20533
  return agentResult;
@@ -20370,7 +20716,6 @@ class Agent extends AgentLlmExecutionTools {
20370
20716
  * Note: This method also implements the learning mechanism
20371
20717
  */
20372
20718
  async callChatModelStream(prompt, onProgress) {
20373
- var _a;
20374
20719
  // [1] Check if the user is asking the same thing as in the samples
20375
20720
  const modelRequirements = await this.getModelRequirements();
20376
20721
  if (modelRequirements.samples) {
@@ -20418,7 +20763,7 @@ class Agent extends AgentLlmExecutionTools {
20418
20763
  if (result.rawResponse && 'sample' in result.rawResponse) {
20419
20764
  return result;
20420
20765
  }
20421
- if ((_a = modelRequirements.metadata) === null || _a === void 0 ? void 0 : _a.isClosed) {
20766
+ if (modelRequirements.isClosed) {
20422
20767
  return result;
20423
20768
  }
20424
20769
  // Note: [0] Notify start of self-learning