@promptbook/node 0.110.0-7 → 0.110.0-9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (26)
  1. package/esm/index.es.js +467 -107
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/components.index.d.ts +2 -0
  4. package/esm/typings/src/_packages/types.index.d.ts +4 -0
  5. package/esm/typings/src/book-2.0/agent-source/AgentModelRequirements.d.ts +22 -21
  6. package/esm/typings/src/book-components/Chat/AgentChip/AgentChip.d.ts +5 -1
  7. package/esm/typings/src/book-components/Chat/Chat/ChatInputArea.d.ts +1 -0
  8. package/esm/typings/src/book-components/Chat/Chat/ChatMessageItem.d.ts +4 -0
  9. package/esm/typings/src/book-components/Chat/Chat/ChatMessageList.d.ts +1 -0
  10. package/esm/typings/src/book-components/Chat/Chat/ChatProps.d.ts +15 -0
  11. package/esm/typings/src/book-components/Chat/Chat/ChatSoundToggle.d.ts +19 -0
  12. package/esm/typings/src/book-components/Chat/LlmChat/LlmChatProps.d.ts +10 -1
  13. package/esm/typings/src/commitments/_base/BaseCommitmentDefinition.d.ts +9 -0
  14. package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.test.d.ts +1 -0
  15. package/esm/typings/src/llm-providers/openai/OpenAiAgentKitExecutionTools.d.ts +39 -0
  16. package/esm/typings/src/types/LlmToolDefinition.d.ts +1 -0
  17. package/esm/typings/src/types/ModelRequirements.d.ts +9 -0
  18. package/esm/typings/src/utils/DEFAULT_THINKING_MESSAGES.d.ts +8 -0
  19. package/esm/typings/src/utils/knowledge/inlineKnowledgeSource.d.ts +38 -0
  20. package/esm/typings/src/utils/knowledge/inlineKnowledgeSource.test.d.ts +1 -0
  21. package/esm/typings/src/utils/language/getBrowserPreferredSpeechRecognitionLanguage.d.ts +35 -0
  22. package/esm/typings/src/utils/toolCalls/getToolCallIdentity.d.ts +10 -0
  23. package/esm/typings/src/version.d.ts +1 -1
  24. package/package.json +2 -2
  25. package/umd/index.umd.js +467 -107
  26. package/umd/index.umd.js.map +1 -1
package/esm/index.es.js CHANGED
@@ -35,7 +35,7 @@ const BOOK_LANGUAGE_VERSION = '2.0.0';
35
35
  * @generated
36
36
  * @see https://github.com/webgptorg/promptbook
37
37
  */
38
- const PROMPTBOOK_ENGINE_VERSION = '0.110.0-7';
38
+ const PROMPTBOOK_ENGINE_VERSION = '0.110.0-9';
39
39
  /**
40
40
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
41
41
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -13710,6 +13710,28 @@ class BaseCommitmentDefinition {
13710
13710
  return currentMessage + separator + content;
13711
13711
  });
13712
13712
  }
13713
+ /**
13714
+ * Helper method to create a new requirements object with updated prompt suffix
13715
+ */
13716
+ updatePromptSuffix(requirements, contentUpdate) {
13717
+ const newSuffix = typeof contentUpdate === 'string' ? contentUpdate : contentUpdate(requirements.promptSuffix);
13718
+ return {
13719
+ ...requirements,
13720
+ promptSuffix: newSuffix,
13721
+ };
13722
+ }
13723
+ /**
13724
+ * Helper method to append content to the prompt suffix
13725
+ * Default separator is a single newline for bullet lists.
13726
+ */
13727
+ appendToPromptSuffix(requirements, content, separator = '\n') {
13728
+ return this.updatePromptSuffix(requirements, (currentSuffix) => {
13729
+ if (!currentSuffix.trim()) {
13730
+ return content;
13731
+ }
13732
+ return `${currentSuffix}${separator}${content}`;
13733
+ });
13734
+ }
13713
13735
  /**
13714
13736
  * Helper method to add a comment section to the system message
13715
13737
  * Comments are lines starting with # that will be removed from the final system message
@@ -13887,13 +13909,9 @@ class ClosedCommitmentDefinition extends BaseCommitmentDefinition {
13887
13909
  `);
13888
13910
  }
13889
13911
  applyToAgentModelRequirements(requirements, _content) {
13890
- const updatedMetadata = {
13891
- ...requirements.metadata,
13892
- isClosed: true,
13893
- };
13894
13912
  return {
13895
13913
  ...requirements,
13896
- metadata: updatedMetadata,
13914
+ isClosed: true,
13897
13915
  };
13898
13916
  }
13899
13917
  }
@@ -14171,12 +14189,12 @@ class DictionaryCommitmentDefinition extends BaseCommitmentDefinition {
14171
14189
  return requirements;
14172
14190
  }
14173
14191
  // Get existing dictionary entries from metadata
14174
- const existingDictionary = ((_a = requirements.metadata) === null || _a === void 0 ? void 0 : _a.DICTIONARY) || '';
14192
+ const existingDictionary = ((_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.DICTIONARY) || '';
14175
14193
  // Merge the new dictionary entry with existing entries
14176
14194
  const mergedDictionary = existingDictionary ? `${existingDictionary}\n${trimmedContent}` : trimmedContent;
14177
14195
  // Store the merged dictionary in metadata for debugging and inspection
14178
14196
  const updatedMetadata = {
14179
- ...requirements.metadata,
14197
+ ...requirements._metadata,
14180
14198
  DICTIONARY: mergedDictionary,
14181
14199
  };
14182
14200
  // Create the dictionary section for the system message
@@ -14184,7 +14202,7 @@ class DictionaryCommitmentDefinition extends BaseCommitmentDefinition {
14184
14202
  const dictionarySection = `# DICTIONARY\n${mergedDictionary}`;
14185
14203
  return {
14186
14204
  ...this.appendToSystemMessage(requirements, dictionarySection),
14187
- metadata: updatedMetadata,
14205
+ _metadata: updatedMetadata,
14188
14206
  };
14189
14207
  }
14190
14208
  }
@@ -14324,10 +14342,7 @@ class FromCommitmentDefinition extends BaseCommitmentDefinition {
14324
14342
  applyToAgentModelRequirements(requirements, content) {
14325
14343
  const trimmedContent = content.trim();
14326
14344
  if (!trimmedContent) {
14327
- return {
14328
- ...requirements,
14329
- parentAgentUrl: undefined,
14330
- };
14345
+ return requirements;
14331
14346
  }
14332
14347
  if (trimmedContent.toUpperCase() === 'VOID' ||
14333
14348
  trimmedContent.toUpperCase() === 'NULL' ||
@@ -14541,6 +14556,136 @@ class ImportCommitmentDefinition extends BaseCommitmentDefinition {
14541
14556
  * Note: [💞] Ignore a discrepancy between file name and entity name
14542
14557
  */
14543
14558
 
14559
+ /**
14560
+ * @@@
14561
+ *
14562
+ * @private thing of inline knowledge
14563
+ */
14564
+ const INLINE_KNOWLEDGE_BASE_NAME = 'inline-knowledge';
14565
+ /**
14566
+ * @@@
14567
+ *
14568
+ * @private thing of inline knowledge
14569
+ */
14570
+ const INLINE_KNOWLEDGE_EXTENSION = '.txt';
14571
+ /**
14572
+ * @@@
14573
+ *
14574
+ * @private thing of inline knowledge
14575
+ */
14576
+ const DATA_URL_PREFIX = 'data:';
14577
+ /**
14578
+ * @@@
14579
+ *
14580
+ * @private thing of inline knowledge
14581
+ */
14582
+ function getFirstNonEmptyLine(content) {
14583
+ const lines = content.split(/\r?\n/);
14584
+ for (const line of lines) {
14585
+ const trimmed = line.trim();
14586
+ if (trimmed) {
14587
+ return trimmed;
14588
+ }
14589
+ }
14590
+ return null;
14591
+ }
14592
+ /**
14593
+ * @@@
14594
+ *
14595
+ * @private thing of inline knowledge
14596
+ */
14597
+ function deriveBaseFilename(content) {
14598
+ const firstLine = getFirstNonEmptyLine(content);
14599
+ if (!firstLine) {
14600
+ return INLINE_KNOWLEDGE_BASE_NAME;
14601
+ }
14602
+ const normalized = normalizeToKebabCase(firstLine);
14603
+ return normalized || INLINE_KNOWLEDGE_BASE_NAME;
14604
+ }
14605
+ /**
14606
+ * Creates a data URL that represents the inline knowledge content as a text file.
14607
+ *
14608
+ * @private thing of inline knowledge
14609
+ */
14610
+ function createInlineKnowledgeSourceFile(content) {
14611
+ const trimmedContent = content.trim();
14612
+ const baseName = deriveBaseFilename(trimmedContent);
14613
+ const filename = `${baseName}${INLINE_KNOWLEDGE_EXTENSION}`;
14614
+ const mimeType = 'text/plain';
14615
+ const base64 = Buffer.from(trimmedContent, 'utf-8').toString('base64');
14616
+ const encodedFilename = encodeURIComponent(filename);
14617
+ const url = `${DATA_URL_PREFIX}${mimeType};name=${encodedFilename};charset=utf-8;base64,${base64}`;
14618
+ return {
14619
+ filename,
14620
+ mimeType,
14621
+ url,
14622
+ };
14623
+ }
14624
+ /**
14625
+ * Checks whether the provided source string is a data URL that can be decoded.
14626
+ *
14627
+ * @private thing of inline knowledge
14628
+ */
14629
+ function isDataUrlKnowledgeSource(source) {
14630
+ return typeof source === 'string' && source.startsWith(DATA_URL_PREFIX);
14631
+ }
14632
+ /**
14633
+ * Parses a data URL-based knowledge source into its raw buffer, filename, and MIME type.
14634
+ *
14635
+ * @private thing of inline knowledge
14636
+ */
14637
+ function parseDataUrlKnowledgeSource(source) {
14638
+ if (!isDataUrlKnowledgeSource(source)) {
14639
+ return null;
14640
+ }
14641
+ const commaIndex = source.indexOf(',');
14642
+ if (commaIndex === -1) {
14643
+ return null;
14644
+ }
14645
+ const header = source.slice(DATA_URL_PREFIX.length, commaIndex);
14646
+ const payload = source.slice(commaIndex + 1);
14647
+ const tokens = header.split(';');
14648
+ const mediaType = tokens[0] || 'text/plain';
14649
+ let filename = `${INLINE_KNOWLEDGE_BASE_NAME}${INLINE_KNOWLEDGE_EXTENSION}`;
14650
+ let isBase64 = false;
14651
+ for (let i = 1; i < tokens.length; i++) {
14652
+ const token = tokens[i];
14653
+ if (!token) {
14654
+ continue;
14655
+ }
14656
+ if (token.toLowerCase() === 'base64') {
14657
+ isBase64 = true;
14658
+ continue;
14659
+ }
14660
+ const [key, value] = token.split('=');
14661
+ if (key === 'name' && value !== undefined) {
14662
+ try {
14663
+ filename = decodeURIComponent(value);
14664
+ }
14665
+ catch (_a) {
14666
+ filename = value;
14667
+ }
14668
+ }
14669
+ }
14670
+ if (!isBase64) {
14671
+ return null;
14672
+ }
14673
+ try {
14674
+ const buffer = Buffer.from(payload, 'base64');
14675
+ return {
14676
+ buffer,
14677
+ filename,
14678
+ mimeType: mediaType,
14679
+ };
14680
+ }
14681
+ catch (_b) {
14682
+ return null;
14683
+ }
14684
+ }
14685
+ /**
14686
+ * Note: [💞] Ignore a discrepancy between file name and entity name
14687
+ */
14688
+
14544
14689
  /**
14545
14690
  * KNOWLEDGE commitment definition
14546
14691
  *
@@ -14639,9 +14784,13 @@ class KnowledgeCommitmentDefinition extends BaseCommitmentDefinition {
14639
14784
  return this.appendToSystemMessage(updatedRequirements, knowledgeInfo, '\n\n');
14640
14785
  }
14641
14786
  else {
14642
- // Direct text knowledge - add to system message
14643
- const knowledgeSection = `Knowledge: ${trimmedContent}`;
14644
- return this.appendToSystemMessage(requirements, knowledgeSection, '\n\n');
14787
+ const inlineSource = createInlineKnowledgeSourceFile(trimmedContent);
14788
+ const updatedRequirements = {
14789
+ ...requirements,
14790
+ knowledgeSources: [...(requirements.knowledgeSources || []), inlineSource.url],
14791
+ };
14792
+ const knowledgeInfo = `Knowledge Source Inline: ${inlineSource.filename} (derived from inline content and processed for retrieval during chat)`;
14793
+ return this.appendToSystemMessage(updatedRequirements, knowledgeInfo, '\n\n');
14645
14794
  }
14646
14795
  }
14647
14796
  }
@@ -14888,16 +15037,16 @@ class AgentMessageCommitmentDefinition extends BaseCommitmentDefinition {
14888
15037
  // and typically doesn't need to be added to the system prompt or model requirements directly.
14889
15038
  // It is extracted separately for the chat interface.
14890
15039
  var _a;
14891
- const pendingUserMessage = (_a = requirements.metadata) === null || _a === void 0 ? void 0 : _a.pendingUserMessage;
15040
+ const pendingUserMessage = (_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.pendingUserMessage;
14892
15041
  if (pendingUserMessage) {
14893
15042
  const newSample = { question: pendingUserMessage, answer: content };
14894
15043
  const newSamples = [...(requirements.samples || []), newSample];
14895
- const newMetadata = { ...requirements.metadata };
15044
+ const newMetadata = { ...requirements._metadata };
14896
15045
  delete newMetadata.pendingUserMessage;
14897
15046
  return {
14898
15047
  ...requirements,
14899
15048
  samples: newSamples,
14900
- metadata: newMetadata,
15049
+ _metadata: newMetadata,
14901
15050
  };
14902
15051
  }
14903
15052
  return requirements;
@@ -15145,8 +15294,8 @@ class UserMessageCommitmentDefinition extends BaseCommitmentDefinition {
15145
15294
  applyToAgentModelRequirements(requirements, content) {
15146
15295
  return {
15147
15296
  ...requirements,
15148
- metadata: {
15149
- ...requirements.metadata,
15297
+ _metadata: {
15298
+ ...requirements._metadata,
15150
15299
  pendingUserMessage: content,
15151
15300
  },
15152
15301
  };
@@ -16004,11 +16153,7 @@ class NoteCommitmentDefinition extends BaseCommitmentDefinition {
16004
16153
  if (trimmedContent === '') {
16005
16154
  return requirements;
16006
16155
  }
16007
- // Return requirements with updated notes but no changes to system message
16008
- return {
16009
- ...requirements,
16010
- notes: [...(requirements.notes || []), trimmedContent],
16011
- };
16156
+ return requirements;
16012
16157
  }
16013
16158
  }
16014
16159
  /**
@@ -16070,12 +16215,12 @@ class OpenCommitmentDefinition extends BaseCommitmentDefinition {
16070
16215
  // Since OPEN is default, we can just ensure isClosed is false
16071
16216
  // But to be explicit we can set it
16072
16217
  const updatedMetadata = {
16073
- ...requirements.metadata,
16218
+ ...requirements._metadata,
16074
16219
  isClosed: false,
16075
16220
  };
16076
16221
  return {
16077
16222
  ...requirements,
16078
- metadata: updatedMetadata,
16223
+ _metadata: updatedMetadata,
16079
16224
  };
16080
16225
  }
16081
16226
  }
@@ -16156,7 +16301,7 @@ class PersonaCommitmentDefinition extends BaseCommitmentDefinition {
16156
16301
  return requirements;
16157
16302
  }
16158
16303
  // Get existing persona content from metadata
16159
- const existingPersonaContent = ((_a = requirements.metadata) === null || _a === void 0 ? void 0 : _a.PERSONA) || '';
16304
+ const existingPersonaContent = ((_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.PERSONA) || '';
16160
16305
  // Merge the new content with existing persona content
16161
16306
  // When multiple PERSONA commitments exist, they are merged into one
16162
16307
  const mergedPersonaContent = existingPersonaContent
@@ -16164,12 +16309,12 @@ class PersonaCommitmentDefinition extends BaseCommitmentDefinition {
16164
16309
  : trimmedContent;
16165
16310
  // Store the merged persona content in metadata for debugging and inspection
16166
16311
  const updatedMetadata = {
16167
- ...requirements.metadata,
16312
+ ...requirements._metadata,
16168
16313
  PERSONA: mergedPersonaContent,
16169
16314
  };
16170
16315
  // Get the agent name from metadata (which should contain the first line of agent source)
16171
16316
  // If not available, extract from current system message as fallback
16172
- let agentName = (_b = requirements.metadata) === null || _b === void 0 ? void 0 : _b.agentName;
16317
+ let agentName = (_b = requirements._metadata) === null || _b === void 0 ? void 0 : _b.agentName;
16173
16318
  if (!agentName) {
16174
16319
  // Fallback: extract from current system message
16175
16320
  const currentMessage = requirements.systemMessage.trim();
@@ -16216,7 +16361,7 @@ class PersonaCommitmentDefinition extends BaseCommitmentDefinition {
16216
16361
  return {
16217
16362
  ...requirements,
16218
16363
  systemMessage: newSystemMessage,
16219
- metadata: updatedMetadata,
16364
+ _metadata: updatedMetadata,
16220
16365
  };
16221
16366
  }
16222
16367
  }
@@ -16299,7 +16444,16 @@ class RuleCommitmentDefinition extends BaseCommitmentDefinition {
16299
16444
  }
16300
16445
  // Add rule to the system message
16301
16446
  const ruleSection = `Rule: ${trimmedContent}`;
16302
- return this.appendToSystemMessage(requirements, ruleSection, '\n\n');
16447
+ const requirementsWithRule = this.appendToSystemMessage(requirements, ruleSection, '\n\n');
16448
+ const ruleLines = trimmedContent
16449
+ .split(/\r?\n/)
16450
+ .map((line) => line.trim())
16451
+ .filter(Boolean)
16452
+ .map((line) => `- ${line}`);
16453
+ if (ruleLines.length === 0) {
16454
+ return requirementsWithRule;
16455
+ }
16456
+ return this.appendToPromptSuffix(requirementsWithRule, ruleLines.join('\n'));
16303
16457
  }
16304
16458
  }
16305
16459
  /**
@@ -16805,7 +16959,7 @@ class TeamCommitmentDefinition extends BaseCommitmentDefinition {
16805
16959
  if (teammates.length === 0) {
16806
16960
  return requirements;
16807
16961
  }
16808
- const agentName = ((_a = requirements.metadata) === null || _a === void 0 ? void 0 : _a.agentName) || 'Agent';
16962
+ const agentName = ((_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.agentName) || 'Agent';
16809
16963
  const teamEntries = teammates.map((teammate) => ({
16810
16964
  toolName: createTeamToolName(teammate.url),
16811
16965
  teammate,
@@ -16845,7 +16999,7 @@ class TeamCommitmentDefinition extends BaseCommitmentDefinition {
16845
16999
  },
16846
17000
  });
16847
17001
  }
16848
- const existingTeammates = ((_b = requirements.metadata) === null || _b === void 0 ? void 0 : _b.teammates) || [];
17002
+ const existingTeammates = ((_b = requirements._metadata) === null || _b === void 0 ? void 0 : _b.teammates) || [];
16849
17003
  const updatedTeammates = [...existingTeammates];
16850
17004
  for (const entry of teamEntries) {
16851
17005
  if (updatedTeammates.some((existing) => existing.url === entry.teammate.url)) {
@@ -16874,8 +17028,8 @@ class TeamCommitmentDefinition extends BaseCommitmentDefinition {
16874
17028
  return this.appendToSystemMessage({
16875
17029
  ...requirements,
16876
17030
  tools: updatedTools,
16877
- metadata: {
16878
- ...requirements.metadata,
17031
+ _metadata: {
17032
+ ...requirements._metadata,
16879
17033
  teammates: updatedTeammates,
16880
17034
  },
16881
17035
  }, teamSystemMessage);
@@ -17107,7 +17261,7 @@ class TemplateCommitmentDefinition extends BaseCommitmentDefinition {
17107
17261
  if (!trimmedContent) {
17108
17262
  // Store template mode flag in metadata
17109
17263
  const updatedMetadata = {
17110
- ...requirements.metadata,
17264
+ ...requirements._metadata,
17111
17265
  templateMode: true,
17112
17266
  };
17113
17267
  // Add a general instruction about using structured templates
@@ -17117,21 +17271,21 @@ class TemplateCommitmentDefinition extends BaseCommitmentDefinition {
17117
17271
  `);
17118
17272
  return {
17119
17273
  ...this.appendToSystemMessage(requirements, templateModeInstruction, '\n\n'),
17120
- metadata: updatedMetadata,
17274
+ _metadata: updatedMetadata,
17121
17275
  };
17122
17276
  }
17123
17277
  // If content is provided, add the specific template instructions
17124
17278
  const templateSection = `Response Template: ${trimmedContent}`;
17125
17279
  // Store the template in metadata for potential programmatic access
17126
- const existingTemplates = ((_a = requirements.metadata) === null || _a === void 0 ? void 0 : _a.templates) || [];
17280
+ const existingTemplates = ((_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.templates) || [];
17127
17281
  const updatedMetadata = {
17128
- ...requirements.metadata,
17282
+ ...requirements._metadata,
17129
17283
  templates: [...existingTemplates, trimmedContent],
17130
17284
  templateMode: true,
17131
17285
  };
17132
17286
  return {
17133
17287
  ...this.appendToSystemMessage(requirements, templateSection, '\n\n'),
17134
- metadata: updatedMetadata,
17288
+ _metadata: updatedMetadata,
17135
17289
  };
17136
17290
  }
17137
17291
  }
@@ -17468,8 +17622,8 @@ class UseBrowserCommitmentDefinition extends BaseCommitmentDefinition {
17468
17622
  return this.appendToSystemMessage({
17469
17623
  ...requirements,
17470
17624
  tools: updatedTools,
17471
- metadata: {
17472
- ...requirements.metadata,
17625
+ _metadata: {
17626
+ ...requirements._metadata,
17473
17627
  useBrowser: true,
17474
17628
  },
17475
17629
  }, spaceTrim$1(`
@@ -17698,8 +17852,8 @@ class UseEmailCommitmentDefinition extends BaseCommitmentDefinition {
17698
17852
  return this.appendToSystemMessage({
17699
17853
  ...requirements,
17700
17854
  tools: updatedTools,
17701
- metadata: {
17702
- ...requirements.metadata,
17855
+ _metadata: {
17856
+ ...requirements._metadata,
17703
17857
  useEmail: content || true,
17704
17858
  },
17705
17859
  }, spaceTrim$1((block) => `
@@ -17834,8 +17988,8 @@ class UseImageGeneratorCommitmentDefinition extends BaseCommitmentDefinition {
17834
17988
  return this.appendToSystemMessage({
17835
17989
  ...requirements,
17836
17990
  tools: updatedTools,
17837
- metadata: {
17838
- ...requirements.metadata,
17991
+ _metadata: {
17992
+ ...requirements._metadata,
17839
17993
  useImageGenerator: content || true,
17840
17994
  },
17841
17995
  }, spaceTrim$1(`
@@ -18126,8 +18280,8 @@ class UseSearchEngineCommitmentDefinition extends BaseCommitmentDefinition {
18126
18280
  return this.appendToSystemMessage({
18127
18281
  ...requirements,
18128
18282
  tools: updatedTools,
18129
- metadata: {
18130
- ...requirements.metadata,
18283
+ _metadata: {
18284
+ ...requirements._metadata,
18131
18285
  useSearchEngine: content || true,
18132
18286
  },
18133
18287
  }, spaceTrim$1((block) => `
@@ -18275,8 +18429,8 @@ class UseTimeCommitmentDefinition extends BaseCommitmentDefinition {
18275
18429
  return this.appendToSystemMessage({
18276
18430
  ...requirements,
18277
18431
  tools: updatedTools,
18278
- metadata: {
18279
- ...requirements.metadata,
18432
+ _metadata: {
18433
+ ...requirements._metadata,
18280
18434
  },
18281
18435
  }, spaceTrim$1((block) => `
18282
18436
  Time and date context:
@@ -20310,6 +20464,40 @@ function isAssistantPreparationToolCall(toolCall) {
20310
20464
  return toolCall.name === ASSISTANT_PREPARATION_TOOL_CALL_NAME;
20311
20465
  }
20312
20466
 
20467
+ /**
20468
+ * Builds a stable identity string for tool calls across partial updates.
20469
+ *
20470
+ * @param toolCall - Tool call entry to identify.
20471
+ * @returns Stable identity string for deduplication.
20472
+ *
20473
+ * @private function of <Chat/>
20474
+ */
20475
+ function getToolCallIdentity(toolCall) {
20476
+ const rawToolCall = toolCall.rawToolCall;
20477
+ const rawId = (rawToolCall === null || rawToolCall === void 0 ? void 0 : rawToolCall.id) || (rawToolCall === null || rawToolCall === void 0 ? void 0 : rawToolCall.callId) || (rawToolCall === null || rawToolCall === void 0 ? void 0 : rawToolCall.call_id);
20478
+ if (rawId) {
20479
+ return `id:${rawId}`;
20480
+ }
20481
+ if (toolCall.createdAt) {
20482
+ return `time:${toolCall.createdAt}:${toolCall.name}`;
20483
+ }
20484
+ const argsKey = (() => {
20485
+ if (typeof toolCall.arguments === 'string') {
20486
+ return toolCall.arguments;
20487
+ }
20488
+ if (!toolCall.arguments) {
20489
+ return '';
20490
+ }
20491
+ try {
20492
+ return JSON.stringify(toolCall.arguments);
20493
+ }
20494
+ catch (_a) {
20495
+ return '';
20496
+ }
20497
+ })();
20498
+ return `fallback:${toolCall.name}:${argsKey}`;
20499
+ }
20500
+
20313
20501
  /*! *****************************************************************************
20314
20502
  Copyright (c) Microsoft Corporation.
20315
20503
 
@@ -20948,11 +21136,14 @@ function asUpdatableSubject(value) {
20948
21136
  function createEmptyAgentModelRequirements() {
20949
21137
  return {
20950
21138
  systemMessage: '',
21139
+ promptSuffix: '',
20951
21140
  // modelName: 'gpt-5',
20952
21141
  modelName: 'gemini-2.5-flash-lite',
20953
21142
  temperature: 0.7,
20954
21143
  topP: 0.9,
20955
21144
  topK: 50,
21145
+ parentAgentUrl: null,
21146
+ isClosed: false,
20956
21147
  };
20957
21148
  }
20958
21149
  /**
@@ -21142,8 +21333,8 @@ async function createAgentModelRequirementsWithCommitments(agentSource, modelNam
21142
21333
  // Store the agent name in metadata so commitments can access it
21143
21334
  requirements = {
21144
21335
  ...requirements,
21145
- metadata: {
21146
- ...requirements.metadata,
21336
+ _metadata: {
21337
+ ...requirements._metadata,
21147
21338
  agentName: parseResult.agentName,
21148
21339
  },
21149
21340
  };
@@ -21626,6 +21817,66 @@ const OPENAI_MODELS = exportJson({
21626
21817
  },
21627
21818
  /**/
21628
21819
  /**/
21820
+ {
21821
+ modelVariant: 'CHAT',
21822
+ modelTitle: 'gpt-5.2-codex',
21823
+ modelName: 'gpt-5.2-codex',
21824
+ modelDescription: 'High-capability Codex variant tuned for agentic code generation with large contexts and reasoning effort controls. Ideal for long-horizon coding workflows and multi-step reasoning.',
21825
+ pricing: {
21826
+ prompt: pricing(`$1.75 / 1M tokens`),
21827
+ output: pricing(`$14.00 / 1M tokens`),
21828
+ },
21829
+ },
21830
+ /**/
21831
+ /**/
21832
+ {
21833
+ modelVariant: 'CHAT',
21834
+ modelTitle: 'gpt-5.1-codex-max',
21835
+ modelName: 'gpt-5.1-codex-max',
21836
+ modelDescription: 'Premium GPT-5.1 Codex flavor that mirrors gpt-5.1 in capability and pricing while adding Codex tooling optimizations.',
21837
+ pricing: {
21838
+ prompt: pricing(`$1.25 / 1M tokens`),
21839
+ output: pricing(`$10.00 / 1M tokens`),
21840
+ },
21841
+ },
21842
+ /**/
21843
+ /**/
21844
+ {
21845
+ modelVariant: 'CHAT',
21846
+ modelTitle: 'gpt-5.1-codex',
21847
+ modelName: 'gpt-5.1-codex',
21848
+ modelDescription: 'Core GPT-5.1 Codex model focused on agentic coding tasks with a balanced trade-off between reasoning and cost.',
21849
+ pricing: {
21850
+ prompt: pricing(`$1.25 / 1M tokens`),
21851
+ output: pricing(`$10.00 / 1M tokens`),
21852
+ },
21853
+ },
21854
+ /**/
21855
+ /**/
21856
+ {
21857
+ modelVariant: 'CHAT',
21858
+ modelTitle: 'gpt-5.1-codex-mini',
21859
+ modelName: 'gpt-5.1-codex-mini',
21860
+ modelDescription: 'Compact, cost-effective GPT-5.1 Codex variant with a smaller context window ideal for cheap assistant iterations that still require coding awareness.',
21861
+ pricing: {
21862
+ prompt: pricing(`$0.25 / 1M tokens`),
21863
+ output: pricing(`$2.00 / 1M tokens`),
21864
+ },
21865
+ },
21866
+ /**/
21867
+ /**/
21868
+ {
21869
+ modelVariant: 'CHAT',
21870
+ modelTitle: 'gpt-5-codex',
21871
+ modelName: 'gpt-5-codex',
21872
+ modelDescription: 'Legacy GPT-5 Codex model built for agentic coding workloads with the same pricing as GPT-5 and a focus on stability.',
21873
+ pricing: {
21874
+ prompt: pricing(`$1.25 / 1M tokens`),
21875
+ output: pricing(`$10.00 / 1M tokens`),
21876
+ },
21877
+ },
21878
+ /**/
21879
+ /**/
21629
21880
  {
21630
21881
  modelVariant: 'CHAT',
21631
21882
  modelTitle: 'gpt-5-mini',
@@ -22330,6 +22581,32 @@ function isUnsupportedParameterError(error) {
22330
22581
  errorMessage.includes('does not support'));
22331
22582
  }
22332
22583
 
22584
+ /**
22585
+ * Provides access to the structured clone implementation when available.
22586
+ */
22587
+ function getStructuredCloneFunction() {
22588
+ return globalThis.structuredClone;
22589
+ }
22590
+ /**
22591
+ * Checks whether the prompt is a chat prompt that carries file attachments.
22592
+ */
22593
+ function hasChatPromptFiles(prompt) {
22594
+ return 'files' in prompt && Array.isArray(prompt.files);
22595
+ }
22596
+ /**
22597
+ * Creates a deep copy of the prompt while keeping attached files intact when structured clone is not available.
22598
+ */
22599
+ function clonePromptPreservingFiles(prompt) {
22600
+ const structuredCloneFn = getStructuredCloneFunction();
22601
+ if (typeof structuredCloneFn === 'function') {
22602
+ return structuredCloneFn(prompt);
22603
+ }
22604
+ const clonedPrompt = JSON.parse(JSON.stringify(prompt));
22605
+ if (hasChatPromptFiles(prompt)) {
22606
+ clonedPrompt.files = prompt.files;
22607
+ }
22608
+ return clonedPrompt;
22609
+ }
22333
22610
  /**
22334
22611
  * Execution Tools for calling OpenAI API or other OpenAI compatible provider
22335
22612
  *
@@ -22414,7 +22691,7 @@ class OpenAiCompatibleExecutionTools {
22414
22691
  */
22415
22692
  async callChatModelStream(prompt, onProgress) {
22416
22693
  // Deep clone prompt and modelRequirements to avoid mutation across calls
22417
- const clonedPrompt = JSON.parse(JSON.stringify(prompt));
22694
+ const clonedPrompt = clonePromptPreservingFiles(prompt);
22418
22695
  // Use local Set for retried parameters to ensure independence and thread safety
22419
22696
  const retriedUnsupportedParameters = new Set();
22420
22697
  return this.callChatModelWithRetry(clonedPrompt, clonedPrompt.modelRequirements, [], retriedUnsupportedParameters, onProgress);
@@ -22441,7 +22718,10 @@ class OpenAiCompatibleExecutionTools {
22441
22718
  // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
22442
22719
  // <- Note: [🧆]
22443
22720
  }; // <- TODO: [💩] Guard here types better
22444
- if (format === 'JSON') {
22721
+ if (currentModelRequirements.responseFormat !== undefined) {
22722
+ modelSettings.response_format = currentModelRequirements.responseFormat;
22723
+ }
22724
+ else if (format === 'JSON') {
22445
22725
  modelSettings.response_format = {
22446
22726
  type: 'json_object',
22447
22727
  };
@@ -23922,7 +24202,9 @@ class OpenAiVectorStoreHandler extends OpenAiExecutionTools {
23922
24202
  const processingStartedAtMs = Date.now();
23923
24203
  for (const [index, source] of knowledgeSources.entries()) {
23924
24204
  try {
23925
- const sourceType = source.startsWith('http') || source.startsWith('https') ? 'url' : 'file';
24205
+ const isDataUrl = isDataUrlKnowledgeSource(source);
24206
+ const isHttp = source.startsWith('http://') || source.startsWith('https://');
24207
+ const sourceType = isDataUrl ? 'data_url' : isHttp ? 'url' : 'file';
23926
24208
  if (this.options.isVerbose) {
23927
24209
  console.info('[🤰]', 'Processing knowledge source', {
23928
24210
  index: index + 1,
@@ -23932,8 +24214,27 @@ class OpenAiVectorStoreHandler extends OpenAiExecutionTools {
23932
24214
  logLabel,
23933
24215
  });
23934
24216
  }
23935
- // Check if it's a URL
23936
- if (source.startsWith('http://') || source.startsWith('https://')) {
24217
+ if (isDataUrl) {
24218
+ const parsed = parseDataUrlKnowledgeSource(source);
24219
+ if (!parsed) {
24220
+ skippedSources.push({ source, reason: 'invalid_data_url' });
24221
+ if (this.options.isVerbose) {
24222
+ console.info('[🤰]', 'Skipping knowledge source (invalid data URL)', {
24223
+ source,
24224
+ sourceType,
24225
+ logLabel,
24226
+ });
24227
+ }
24228
+ continue;
24229
+ }
24230
+ const dataUrlFile = new File([parsed.buffer], parsed.filename, {
24231
+ type: parsed.mimeType,
24232
+ });
24233
+ fileStreams.push(dataUrlFile);
24234
+ totalBytes += parsed.buffer.length;
24235
+ continue;
24236
+ }
24237
+ if (isHttp) {
23937
24238
  const downloadResult = await this.downloadKnowledgeSourceFile({
23938
24239
  source,
23939
24240
  timeoutMs: downloadTimeoutMs,
@@ -24035,6 +24336,64 @@ class OpenAiVectorStoreHandler extends OpenAiExecutionTools {
24035
24336
  }
24036
24337
 
24037
24338
  const DEFAULT_AGENT_KIT_MODEL_NAME = 'gpt-5.2';
24339
const DEFAULT_JSON_SCHEMA_NAME = 'StructuredOutput';
/*
TODO: Use or remove
const EMPTY_JSON_SCHEMA: JsonSchemaDefinition['schema'] = {
    type: 'object',
    properties: {},
    required: [],
    additionalProperties: true,
};
*/
/**
 * Normalizes a (possibly partial) JSON schema payload into the OpenAI `json_schema`
 * output-type shape, supplying safe defaults for every missing field.
 *
 * @param jsonSchema - Optional `{ name, strict, schema }` payload; may be null/undefined.
 * @returns A fully populated `json_schema` output-type object.
 */
function buildJsonSchemaDefinition(jsonSchema) {
    const schema = jsonSchema?.schema ?? {};
    const { properties, required, additionalProperties, description } = schema;
    return {
        type: 'json_schema',
        // Fall back to a generic name when the caller did not provide one
        name: jsonSchema?.name ?? DEFAULT_JSON_SCHEMA_NAME,
        strict: Boolean(jsonSchema?.strict),
        schema: {
            type: 'object',
            properties: properties ?? {},
            // Only a real array is accepted; anything else degrades to "no required keys"
            required: Array.isArray(required) ? required : [],
            // Unspecified means "allow extra keys"; otherwise coerce to a boolean
            additionalProperties: additionalProperties === undefined ? true : Boolean(additionalProperties),
            description,
        },
    };
}
24365
/**
 * Maps OpenAI `response_format` payloads to AgentKit output types so the runner can forward
 * structured-output preferences to OpenAI while still reusing the same AgentKit agent instance.
 *
 * @param responseFormat - The OpenAI `response_format` payload from the user request.
 * @returns An Agent output type compatible with the requested schema, or `undefined` when nothing needs to change.
 * @private utility of Open AI
 */
function mapResponseFormatToAgentOutputType(responseFormat) {
    // No preference expressed → leave the agent's output type untouched
    if (!responseFormat) {
        return undefined;
    }
    // Shorthand string form: only the JSON variants get a schema; everything else is plain text
    if (typeof responseFormat === 'string') {
        const wantsJson = responseFormat === 'json_schema' || responseFormat === 'json_object';
        return wantsJson ? buildJsonSchemaDefinition() : 'text';
    }
    // Object form: dispatch on the declared `type`
    if (responseFormat.type === 'text') {
        return 'text';
    }
    if (responseFormat.type === 'json_schema') {
        return buildJsonSchemaDefinition(responseFormat.json_schema);
    }
    if (responseFormat.type === 'json_object') {
        return buildJsonSchemaDefinition();
    }
    // Unknown payloads are ignored rather than guessed at
    return undefined;
}
24038
24397
  /**
24039
24398
  * Execution tools for OpenAI AgentKit (Agents SDK).
24040
24399
  *
@@ -24082,6 +24441,7 @@ class OpenAiAgentKitExecutionTools extends OpenAiVectorStoreHandler {
24082
24441
  ...parameters,
24083
24442
  modelName: this.agentKitModelName,
24084
24443
  });
24444
+ const responseFormatOutputType = mapResponseFormatToAgentOutputType(modelRequirements.responseFormat);
24085
24445
  const preparedAgentKitAgent = await this.prepareAgentKitAgent({
24086
24446
  name: (prompt.title || 'Agent'),
24087
24447
  instructions: modelRequirements.systemMessage || '',
@@ -24093,6 +24453,7 @@ class OpenAiAgentKitExecutionTools extends OpenAiVectorStoreHandler {
24093
24453
  prompt,
24094
24454
  rawPromptContent,
24095
24455
  onProgress,
24456
+ responseFormatOutputType,
24096
24457
  });
24097
24458
  }
24098
24459
  /**
@@ -24274,16 +24635,21 @@ class OpenAiAgentKitExecutionTools extends OpenAiVectorStoreHandler {
24274
24635
  ...prompt.parameters,
24275
24636
  modelName: this.agentKitModelName,
24276
24637
  });
24638
+ const agentForRun = options.responseFormatOutputType !== undefined
24639
+ ? openAiAgentKitAgent.clone({
24640
+ outputType: options.responseFormatOutputType,
24641
+ })
24642
+ : openAiAgentKitAgent;
24277
24643
  const start = $getCurrentDate();
24278
24644
  let latestContent = '';
24279
24645
  const toolCalls = [];
24280
24646
  const toolCallIndexById = new Map();
24281
24647
  const inputItems = await this.buildAgentKitInputItems(prompt, rawPromptContent);
24282
24648
  const rawRequest = {
24283
- agentName: openAiAgentKitAgent.name,
24649
+ agentName: agentForRun.name,
24284
24650
  input: inputItems,
24285
24651
  };
24286
- const streamResult = await run(openAiAgentKitAgent, inputItems, {
24652
+ const streamResult = await run(agentForRun, inputItems, {
24287
24653
  stream: true,
24288
24654
  context: { parameters: prompt.parameters },
24289
24655
  });
@@ -25272,22 +25638,28 @@ class AgentLlmExecutionTools {
25272
25638
  throw new Error('AgentLlmExecutionTools only supports chat prompts');
25273
25639
  }
25274
25640
  const modelRequirements = await this.getModelRequirements();
25641
+ const { _metadata, promptSuffix, ...sanitizedRequirements } = modelRequirements;
25275
25642
  const chatPrompt = prompt;
25276
25643
  let underlyingLlmResult;
25277
- // Create modified chat prompt with agent system message
25644
+ const chatPromptContentWithSuffix = promptSuffix
25645
+ ? `${chatPrompt.content}\n\n${promptSuffix}`
25646
+ : chatPrompt.content;
25278
25647
  const promptWithAgentModelRequirements = {
25279
25648
  ...chatPrompt,
25649
+ content: chatPromptContentWithSuffix,
25280
25650
  modelRequirements: {
25281
25651
  ...chatPrompt.modelRequirements,
25282
- ...modelRequirements,
25652
+ ...sanitizedRequirements,
25283
25653
  // Spread tools to convert readonly array to mutable
25284
- tools: modelRequirements.tools ? [...modelRequirements.tools] : chatPrompt.modelRequirements.tools,
25654
+ tools: sanitizedRequirements.tools
25655
+ ? [...sanitizedRequirements.tools]
25656
+ : chatPrompt.modelRequirements.tools,
25285
25657
  // Spread knowledgeSources to convert readonly array to mutable
25286
- knowledgeSources: modelRequirements.knowledgeSources
25287
- ? [...modelRequirements.knowledgeSources]
25658
+ knowledgeSources: sanitizedRequirements.knowledgeSources
25659
+ ? [...sanitizedRequirements.knowledgeSources]
25288
25660
  : undefined,
25289
25661
  // Prepend agent system message to existing system message
25290
- systemMessage: modelRequirements.systemMessage +
25662
+ systemMessage: sanitizedRequirements.systemMessage +
25291
25663
  (chatPrompt.modelRequirements.systemMessage
25292
25664
  ? `\n\n${chatPrompt.modelRequirements.systemMessage}`
25293
25665
  : ''),
@@ -25295,8 +25667,8 @@ class AgentLlmExecutionTools {
25295
25667
  };
25296
25668
  console.log('!!!! promptWithAgentModelRequirements:', promptWithAgentModelRequirements);
25297
25669
  if (OpenAiAgentKitExecutionTools.isOpenAiAgentKitExecutionTools(this.options.llmTools)) {
25298
- const requirementsHash = SHA256(JSON.stringify(modelRequirements)).toString();
25299
- const vectorStoreHash = SHA256(JSON.stringify((_a = modelRequirements.knowledgeSources) !== null && _a !== void 0 ? _a : [])).toString();
25670
+ const requirementsHash = SHA256(JSON.stringify(sanitizedRequirements)).toString();
25671
+ const vectorStoreHash = SHA256(JSON.stringify((_a = sanitizedRequirements.knowledgeSources) !== null && _a !== void 0 ? _a : [])).toString();
25300
25672
  const cachedVectorStore = AgentLlmExecutionTools.vectorStoreCache.get(this.title);
25301
25673
  const cachedAgentKit = AgentLlmExecutionTools.agentKitAgentCache.get(this.title);
25302
25674
  let preparedAgentKit = this.options.assistantPreparationMode === 'external'
@@ -25323,7 +25695,7 @@ class AgentLlmExecutionTools {
25323
25695
  agent: this.title,
25324
25696
  });
25325
25697
  }
25326
- if (!vectorStoreId && ((_b = modelRequirements.knowledgeSources) === null || _b === void 0 ? void 0 : _b.length)) {
25698
+ if (!vectorStoreId && ((_b = sanitizedRequirements.knowledgeSources) === null || _b === void 0 ? void 0 : _b.length)) {
25327
25699
  emitAssistantPreparationProgress({
25328
25700
  onProgress,
25329
25701
  prompt,
@@ -25339,9 +25711,9 @@ class AgentLlmExecutionTools {
25339
25711
  });
25340
25712
  preparedAgentKit = await this.options.llmTools.prepareAgentKitAgent({
25341
25713
  name: this.title,
25342
- instructions: modelRequirements.systemMessage || '',
25343
- knowledgeSources: modelRequirements.knowledgeSources,
25344
- tools: modelRequirements.tools ? [...modelRequirements.tools] : undefined,
25714
+ instructions: sanitizedRequirements.systemMessage || '',
25715
+ knowledgeSources: sanitizedRequirements.knowledgeSources,
25716
+ tools: sanitizedRequirements.tools ? [...sanitizedRequirements.tools] : undefined,
25345
25717
  vectorStoreId,
25346
25718
  });
25347
25719
  }
@@ -25356,15 +25728,17 @@ class AgentLlmExecutionTools {
25356
25728
  requirementsHash,
25357
25729
  vectorStoreId: preparedAgentKit.vectorStoreId,
25358
25730
  });
25731
+ const responseFormatOutputType = mapResponseFormatToAgentOutputType(promptWithAgentModelRequirements.modelRequirements.responseFormat);
25359
25732
  underlyingLlmResult = await this.options.llmTools.callChatModelStreamWithPreparedAgent({
25360
25733
  openAiAgentKitAgent: preparedAgentKit.agent,
25361
25734
  prompt: promptWithAgentModelRequirements,
25362
25735
  onProgress,
25736
+ responseFormatOutputType,
25363
25737
  });
25364
25738
  }
25365
25739
  else if (OpenAiAssistantExecutionTools.isOpenAiAssistantExecutionTools(this.options.llmTools)) {
25366
25740
  // ... deprecated path ...
25367
- const requirementsHash = SHA256(JSON.stringify(modelRequirements)).toString();
25741
+ const requirementsHash = SHA256(JSON.stringify(sanitizedRequirements)).toString();
25368
25742
  const cached = AgentLlmExecutionTools.assistantCache.get(this.title);
25369
25743
  let assistant;
25370
25744
  if (this.options.assistantPreparationMode === 'external') {
@@ -25406,9 +25780,9 @@ class AgentLlmExecutionTools {
25406
25780
  assistant = await this.options.llmTools.updateAssistant({
25407
25781
  assistantId: cached.assistantId,
25408
25782
  name: this.title,
25409
- instructions: modelRequirements.systemMessage,
25410
- knowledgeSources: modelRequirements.knowledgeSources,
25411
- tools: modelRequirements.tools ? [...modelRequirements.tools] : undefined,
25783
+ instructions: sanitizedRequirements.systemMessage,
25784
+ knowledgeSources: sanitizedRequirements.knowledgeSources,
25785
+ tools: sanitizedRequirements.tools ? [...sanitizedRequirements.tools] : undefined,
25412
25786
  });
25413
25787
  AgentLlmExecutionTools.assistantCache.set(this.title, {
25414
25788
  assistantId: assistant.assistantId,
@@ -25431,9 +25805,9 @@ class AgentLlmExecutionTools {
25431
25805
  });
25432
25806
  assistant = await this.options.llmTools.createNewAssistant({
25433
25807
  name: this.title,
25434
- instructions: modelRequirements.systemMessage,
25435
- knowledgeSources: modelRequirements.knowledgeSources,
25436
- tools: modelRequirements.tools ? [...modelRequirements.tools] : undefined,
25808
+ instructions: sanitizedRequirements.systemMessage,
25809
+ knowledgeSources: sanitizedRequirements.knowledgeSources,
25810
+ tools: sanitizedRequirements.tools ? [...sanitizedRequirements.tools] : undefined,
25437
25811
  /*
25438
25812
  !!!
25439
25813
  metadata: {
@@ -25475,13 +25849,19 @@ class AgentLlmExecutionTools {
25475
25849
  }
25476
25850
  }
25477
25851
  let content = underlyingLlmResult.content;
25478
- // Note: Cleanup the AI artifacts from the content
25479
- content = humanizeAiText(content);
25480
- // Note: Make sure the content is Promptbook-like
25481
- content = promptbookifyAiText(content);
25852
+ if (typeof content === 'string') {
25853
+ // Note: Cleanup the AI artifacts from the content
25854
+ content = humanizeAiText(content);
25855
+ // Note: Make sure the content is Promptbook-like
25856
+ content = promptbookifyAiText(content);
25857
+ }
25858
+ else {
25859
+ // TODO: Maybe deep `humanizeAiText` + `promptbookifyAiText` inside of the object
25860
+ content = JSON.stringify(content);
25861
+ }
25482
25862
  const agentResult = {
25483
25863
  ...underlyingLlmResult,
25484
- content,
25864
+ content: content,
25485
25865
  modelName: this.modelName,
25486
25866
  };
25487
25867
  return agentResult;
@@ -25670,7 +26050,6 @@ class Agent extends AgentLlmExecutionTools {
25670
26050
  * Note: This method also implements the learning mechanism
25671
26051
  */
25672
26052
  async callChatModelStream(prompt, onProgress) {
25673
- var _a;
25674
26053
  // [1] Check if the user is asking the same thing as in the samples
25675
26054
  const modelRequirements = await this.getModelRequirements();
25676
26055
  if (modelRequirements.samples) {
@@ -25718,7 +26097,7 @@ class Agent extends AgentLlmExecutionTools {
25718
26097
  if (result.rawResponse && 'sample' in result.rawResponse) {
25719
26098
  return result;
25720
26099
  }
25721
- if ((_a = modelRequirements.metadata) === null || _a === void 0 ? void 0 : _a.isClosed) {
26100
+ if (modelRequirements.isClosed) {
25722
26101
  return result;
25723
26102
  }
25724
26103
  // Note: [0] Notify start of self-learning
@@ -26108,26 +26487,7 @@ class RemoteAgent extends Agent {
26108
26487
  };
26109
26488
  };
26110
26489
  const getToolCallKey = (toolCall) => {
26111
- var _a;
26112
- const rawId = (_a = toolCall.rawToolCall) === null || _a === void 0 ? void 0 : _a.id;
26113
- if (rawId) {
26114
- return `id:${rawId}`;
26115
- }
26116
- const argsKey = (() => {
26117
- if (typeof toolCall.arguments === 'string') {
26118
- return toolCall.arguments;
26119
- }
26120
- if (!toolCall.arguments) {
26121
- return '';
26122
- }
26123
- try {
26124
- return JSON.stringify(toolCall.arguments);
26125
- }
26126
- catch (_a) {
26127
- return '';
26128
- }
26129
- })();
26130
- return `${toolCall.name}:${toolCall.createdAt || ''}:${argsKey}`;
26490
+ return getToolCallIdentity(toolCall);
26131
26491
  };
26132
26492
  const mergeToolCall = (existing, incoming) => {
26133
26493
  const incomingResult = incoming.result;