@promptbook/node 0.110.0-8 → 0.110.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39) hide show
  1. package/README.md +0 -4
  2. package/esm/index.es.js +487 -97
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/src/_packages/components.index.d.ts +6 -0
  5. package/esm/typings/src/_packages/core.index.d.ts +2 -2
  6. package/esm/typings/src/_packages/types.index.d.ts +10 -0
  7. package/esm/typings/src/book-2.0/agent-source/AgentModelRequirements.d.ts +22 -21
  8. package/esm/typings/src/book-2.0/agent-source/AgentReferenceResolver.d.ts +18 -0
  9. package/esm/typings/src/book-2.0/agent-source/CreateAgentModelRequirementsOptions.d.ts +12 -0
  10. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirements.d.ts +8 -2
  11. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.agentReferenceResolver.test.d.ts +1 -0
  12. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.d.ts +4 -5
  13. package/esm/typings/src/book-components/BookEditor/BookEditor.d.ts +42 -0
  14. package/esm/typings/src/book-components/Chat/Chat/ChatActionsBar.d.ts +0 -2
  15. package/esm/typings/src/book-components/Chat/Chat/ChatInputArea.d.ts +1 -0
  16. package/esm/typings/src/book-components/Chat/Chat/ChatMessageItem.d.ts +4 -0
  17. package/esm/typings/src/book-components/Chat/Chat/ChatMessageList.d.ts +1 -0
  18. package/esm/typings/src/book-components/Chat/Chat/ChatProps.d.ts +26 -0
  19. package/esm/typings/src/book-components/Chat/Chat/ChatSoundToggle.d.ts +31 -0
  20. package/esm/typings/src/book-components/Chat/LlmChat/LlmChatProps.d.ts +6 -0
  21. package/esm/typings/src/book-components/Chat/hooks/useChatRatings.d.ts +24 -2
  22. package/esm/typings/src/book-components/Chat/utils/getToolCallChipletInfo.d.ts +2 -10
  23. package/esm/typings/src/book-components/Chat/utils/parseCitationMarker.d.ts +75 -0
  24. package/esm/typings/src/book-components/Chat/utils/parseCitationsFromContent.d.ts +3 -1
  25. package/esm/typings/src/book-components/Chat/utils/parseCitationsFromContent.test.d.ts +1 -0
  26. package/esm/typings/src/book-components/icons/ArrowIcon.d.ts +17 -4
  27. package/esm/typings/src/commitments/_base/BaseCommitmentDefinition.d.ts +9 -0
  28. package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.test.d.ts +1 -0
  29. package/esm/typings/src/llm-providers/openai/OpenAiAgentKitExecutionTools.d.ts +39 -0
  30. package/esm/typings/src/types/LlmToolDefinition.d.ts +1 -0
  31. package/esm/typings/src/types/ModelRequirements.d.ts +9 -0
  32. package/esm/typings/src/utils/DEFAULT_THINKING_MESSAGES.d.ts +8 -0
  33. package/esm/typings/src/utils/knowledge/inlineKnowledgeSource.d.ts +38 -0
  34. package/esm/typings/src/utils/knowledge/inlineKnowledgeSource.test.d.ts +1 -0
  35. package/esm/typings/src/utils/language/getBrowserPreferredSpeechRecognitionLanguage.d.ts +35 -0
  36. package/esm/typings/src/version.d.ts +1 -1
  37. package/package.json +2 -2
  38. package/umd/index.umd.js +487 -97
  39. package/umd/index.umd.js.map +1 -1
package/umd/index.umd.js CHANGED
@@ -48,7 +48,7 @@
48
48
  * @generated
49
49
  * @see https://github.com/webgptorg/promptbook
50
50
  */
51
- const PROMPTBOOK_ENGINE_VERSION = '0.110.0-8';
51
+ const PROMPTBOOK_ENGINE_VERSION = '0.110.0';
52
52
  /**
53
53
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
54
54
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -13723,6 +13723,28 @@
13723
13723
  return currentMessage + separator + content;
13724
13724
  });
13725
13725
  }
13726
+ /**
13727
+ * Helper method to create a new requirements object with updated prompt suffix
13728
+ */
13729
+ updatePromptSuffix(requirements, contentUpdate) {
13730
+ const newSuffix = typeof contentUpdate === 'string' ? contentUpdate : contentUpdate(requirements.promptSuffix);
13731
+ return {
13732
+ ...requirements,
13733
+ promptSuffix: newSuffix,
13734
+ };
13735
+ }
13736
+ /**
13737
+ * Helper method to append content to the prompt suffix
13738
+ * Default separator is a single newline for bullet lists.
13739
+ */
13740
+ appendToPromptSuffix(requirements, content, separator = '\n') {
13741
+ return this.updatePromptSuffix(requirements, (currentSuffix) => {
13742
+ if (!currentSuffix.trim()) {
13743
+ return content;
13744
+ }
13745
+ return `${currentSuffix}${separator}${content}`;
13746
+ });
13747
+ }
13726
13748
  /**
13727
13749
  * Helper method to add a comment section to the system message
13728
13750
  * Comments are lines starting with # that will be removed from the final system message
@@ -13900,13 +13922,9 @@
13900
13922
  `);
13901
13923
  }
13902
13924
  applyToAgentModelRequirements(requirements, _content) {
13903
- const updatedMetadata = {
13904
- ...requirements.metadata,
13905
- isClosed: true,
13906
- };
13907
13925
  return {
13908
13926
  ...requirements,
13909
- metadata: updatedMetadata,
13927
+ isClosed: true,
13910
13928
  };
13911
13929
  }
13912
13930
  }
@@ -14184,12 +14202,12 @@
14184
14202
  return requirements;
14185
14203
  }
14186
14204
  // Get existing dictionary entries from metadata
14187
- const existingDictionary = ((_a = requirements.metadata) === null || _a === void 0 ? void 0 : _a.DICTIONARY) || '';
14205
+ const existingDictionary = ((_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.DICTIONARY) || '';
14188
14206
  // Merge the new dictionary entry with existing entries
14189
14207
  const mergedDictionary = existingDictionary ? `${existingDictionary}\n${trimmedContent}` : trimmedContent;
14190
14208
  // Store the merged dictionary in metadata for debugging and inspection
14191
14209
  const updatedMetadata = {
14192
- ...requirements.metadata,
14210
+ ...requirements._metadata,
14193
14211
  DICTIONARY: mergedDictionary,
14194
14212
  };
14195
14213
  // Create the dictionary section for the system message
@@ -14197,7 +14215,7 @@
14197
14215
  const dictionarySection = `# DICTIONARY\n${mergedDictionary}`;
14198
14216
  return {
14199
14217
  ...this.appendToSystemMessage(requirements, dictionarySection),
14200
- metadata: updatedMetadata,
14218
+ _metadata: updatedMetadata,
14201
14219
  };
14202
14220
  }
14203
14221
  }
@@ -14337,10 +14355,7 @@
14337
14355
  applyToAgentModelRequirements(requirements, content) {
14338
14356
  const trimmedContent = content.trim();
14339
14357
  if (!trimmedContent) {
14340
- return {
14341
- ...requirements,
14342
- parentAgentUrl: undefined,
14343
- };
14358
+ return requirements;
14344
14359
  }
14345
14360
  if (trimmedContent.toUpperCase() === 'VOID' ||
14346
14361
  trimmedContent.toUpperCase() === 'NULL' ||
@@ -14554,6 +14569,136 @@
14554
14569
  * Note: [💞] Ignore a discrepancy between file name and entity name
14555
14570
  */
14556
14571
 
14572
+ /**
14573
+ * @@@
14574
+ *
14575
+ * @private thing of inline knowledge
14576
+ */
14577
+ const INLINE_KNOWLEDGE_BASE_NAME = 'inline-knowledge';
14578
+ /**
14579
+ * @@@
14580
+ *
14581
+ * @private thing of inline knowledge
14582
+ */
14583
+ const INLINE_KNOWLEDGE_EXTENSION = '.txt';
14584
+ /**
14585
+ * @@@
14586
+ *
14587
+ * @private thing of inline knowledge
14588
+ */
14589
+ const DATA_URL_PREFIX = 'data:';
14590
+ /**
14591
+ * @@@
14592
+ *
14593
+ * @private thing of inline knowledge
14594
+ */
14595
+ function getFirstNonEmptyLine(content) {
14596
+ const lines = content.split(/\r?\n/);
14597
+ for (const line of lines) {
14598
+ const trimmed = line.trim();
14599
+ if (trimmed) {
14600
+ return trimmed;
14601
+ }
14602
+ }
14603
+ return null;
14604
+ }
14605
+ /**
14606
+ * @@@
14607
+ *
14608
+ * @private thing of inline knowledge
14609
+ */
14610
+ function deriveBaseFilename(content) {
14611
+ const firstLine = getFirstNonEmptyLine(content);
14612
+ if (!firstLine) {
14613
+ return INLINE_KNOWLEDGE_BASE_NAME;
14614
+ }
14615
+ const normalized = normalizeToKebabCase(firstLine);
14616
+ return normalized || INLINE_KNOWLEDGE_BASE_NAME;
14617
+ }
14618
+ /**
14619
+ * Creates a data URL that represents the inline knowledge content as a text file.
14620
+ *
14621
+ * @private thing of inline knowledge
14622
+ */
14623
+ function createInlineKnowledgeSourceFile(content) {
14624
+ const trimmedContent = content.trim();
14625
+ const baseName = deriveBaseFilename(trimmedContent);
14626
+ const filename = `${baseName}${INLINE_KNOWLEDGE_EXTENSION}`;
14627
+ const mimeType = 'text/plain';
14628
+ const base64 = Buffer.from(trimmedContent, 'utf-8').toString('base64');
14629
+ const encodedFilename = encodeURIComponent(filename);
14630
+ const url = `${DATA_URL_PREFIX}${mimeType};name=${encodedFilename};charset=utf-8;base64,${base64}`;
14631
+ return {
14632
+ filename,
14633
+ mimeType,
14634
+ url,
14635
+ };
14636
+ }
14637
+ /**
14638
+ * Checks whether the provided source string is a data URL that can be decoded.
14639
+ *
14640
+ * @private thing of inline knowledge
14641
+ */
14642
+ function isDataUrlKnowledgeSource(source) {
14643
+ return typeof source === 'string' && source.startsWith(DATA_URL_PREFIX);
14644
+ }
14645
+ /**
14646
+ * Parses a data URL-based knowledge source into its raw buffer, filename, and MIME type.
14647
+ *
14648
+ * @private thing of inline knowledge
14649
+ */
14650
+ function parseDataUrlKnowledgeSource(source) {
14651
+ if (!isDataUrlKnowledgeSource(source)) {
14652
+ return null;
14653
+ }
14654
+ const commaIndex = source.indexOf(',');
14655
+ if (commaIndex === -1) {
14656
+ return null;
14657
+ }
14658
+ const header = source.slice(DATA_URL_PREFIX.length, commaIndex);
14659
+ const payload = source.slice(commaIndex + 1);
14660
+ const tokens = header.split(';');
14661
+ const mediaType = tokens[0] || 'text/plain';
14662
+ let filename = `${INLINE_KNOWLEDGE_BASE_NAME}${INLINE_KNOWLEDGE_EXTENSION}`;
14663
+ let isBase64 = false;
14664
+ for (let i = 1; i < tokens.length; i++) {
14665
+ const token = tokens[i];
14666
+ if (!token) {
14667
+ continue;
14668
+ }
14669
+ if (token.toLowerCase() === 'base64') {
14670
+ isBase64 = true;
14671
+ continue;
14672
+ }
14673
+ const [key, value] = token.split('=');
14674
+ if (key === 'name' && value !== undefined) {
14675
+ try {
14676
+ filename = decodeURIComponent(value);
14677
+ }
14678
+ catch (_a) {
14679
+ filename = value;
14680
+ }
14681
+ }
14682
+ }
14683
+ if (!isBase64) {
14684
+ return null;
14685
+ }
14686
+ try {
14687
+ const buffer = Buffer.from(payload, 'base64');
14688
+ return {
14689
+ buffer,
14690
+ filename,
14691
+ mimeType: mediaType,
14692
+ };
14693
+ }
14694
+ catch (_b) {
14695
+ return null;
14696
+ }
14697
+ }
14698
+ /**
14699
+ * Note: [💞] Ignore a discrepancy between file name and entity name
14700
+ */
14701
+
14557
14702
  /**
14558
14703
  * KNOWLEDGE commitment definition
14559
14704
  *
@@ -14652,9 +14797,13 @@
14652
14797
  return this.appendToSystemMessage(updatedRequirements, knowledgeInfo, '\n\n');
14653
14798
  }
14654
14799
  else {
14655
- // Direct text knowledge - add to system message
14656
- const knowledgeSection = `Knowledge: ${trimmedContent}`;
14657
- return this.appendToSystemMessage(requirements, knowledgeSection, '\n\n');
14800
+ const inlineSource = createInlineKnowledgeSourceFile(trimmedContent);
14801
+ const updatedRequirements = {
14802
+ ...requirements,
14803
+ knowledgeSources: [...(requirements.knowledgeSources || []), inlineSource.url],
14804
+ };
14805
+ const knowledgeInfo = `Knowledge Source Inline: ${inlineSource.filename} (derived from inline content and processed for retrieval during chat)`;
14806
+ return this.appendToSystemMessage(updatedRequirements, knowledgeInfo, '\n\n');
14658
14807
  }
14659
14808
  }
14660
14809
  }
@@ -14901,16 +15050,16 @@
14901
15050
  // and typically doesn't need to be added to the system prompt or model requirements directly.
14902
15051
  // It is extracted separately for the chat interface.
14903
15052
  var _a;
14904
- const pendingUserMessage = (_a = requirements.metadata) === null || _a === void 0 ? void 0 : _a.pendingUserMessage;
15053
+ const pendingUserMessage = (_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.pendingUserMessage;
14905
15054
  if (pendingUserMessage) {
14906
15055
  const newSample = { question: pendingUserMessage, answer: content };
14907
15056
  const newSamples = [...(requirements.samples || []), newSample];
14908
- const newMetadata = { ...requirements.metadata };
15057
+ const newMetadata = { ...requirements._metadata };
14909
15058
  delete newMetadata.pendingUserMessage;
14910
15059
  return {
14911
15060
  ...requirements,
14912
15061
  samples: newSamples,
14913
- metadata: newMetadata,
15062
+ _metadata: newMetadata,
14914
15063
  };
14915
15064
  }
14916
15065
  return requirements;
@@ -15158,8 +15307,8 @@
15158
15307
  applyToAgentModelRequirements(requirements, content) {
15159
15308
  return {
15160
15309
  ...requirements,
15161
- metadata: {
15162
- ...requirements.metadata,
15310
+ _metadata: {
15311
+ ...requirements._metadata,
15163
15312
  pendingUserMessage: content,
15164
15313
  },
15165
15314
  };
@@ -16017,11 +16166,7 @@
16017
16166
  if (trimmedContent === '') {
16018
16167
  return requirements;
16019
16168
  }
16020
- // Return requirements with updated notes but no changes to system message
16021
- return {
16022
- ...requirements,
16023
- notes: [...(requirements.notes || []), trimmedContent],
16024
- };
16169
+ return requirements;
16025
16170
  }
16026
16171
  }
16027
16172
  /**
@@ -16083,12 +16228,12 @@
16083
16228
  // Since OPEN is default, we can just ensure isClosed is false
16084
16229
  // But to be explicit we can set it
16085
16230
  const updatedMetadata = {
16086
- ...requirements.metadata,
16231
+ ...requirements._metadata,
16087
16232
  isClosed: false,
16088
16233
  };
16089
16234
  return {
16090
16235
  ...requirements,
16091
- metadata: updatedMetadata,
16236
+ _metadata: updatedMetadata,
16092
16237
  };
16093
16238
  }
16094
16239
  }
@@ -16169,7 +16314,7 @@
16169
16314
  return requirements;
16170
16315
  }
16171
16316
  // Get existing persona content from metadata
16172
- const existingPersonaContent = ((_a = requirements.metadata) === null || _a === void 0 ? void 0 : _a.PERSONA) || '';
16317
+ const existingPersonaContent = ((_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.PERSONA) || '';
16173
16318
  // Merge the new content with existing persona content
16174
16319
  // When multiple PERSONA commitments exist, they are merged into one
16175
16320
  const mergedPersonaContent = existingPersonaContent
@@ -16177,12 +16322,12 @@
16177
16322
  : trimmedContent;
16178
16323
  // Store the merged persona content in metadata for debugging and inspection
16179
16324
  const updatedMetadata = {
16180
- ...requirements.metadata,
16325
+ ...requirements._metadata,
16181
16326
  PERSONA: mergedPersonaContent,
16182
16327
  };
16183
16328
  // Get the agent name from metadata (which should contain the first line of agent source)
16184
16329
  // If not available, extract from current system message as fallback
16185
- let agentName = (_b = requirements.metadata) === null || _b === void 0 ? void 0 : _b.agentName;
16330
+ let agentName = (_b = requirements._metadata) === null || _b === void 0 ? void 0 : _b.agentName;
16186
16331
  if (!agentName) {
16187
16332
  // Fallback: extract from current system message
16188
16333
  const currentMessage = requirements.systemMessage.trim();
@@ -16229,7 +16374,7 @@
16229
16374
  return {
16230
16375
  ...requirements,
16231
16376
  systemMessage: newSystemMessage,
16232
- metadata: updatedMetadata,
16377
+ _metadata: updatedMetadata,
16233
16378
  };
16234
16379
  }
16235
16380
  }
@@ -16312,7 +16457,16 @@
16312
16457
  }
16313
16458
  // Add rule to the system message
16314
16459
  const ruleSection = `Rule: ${trimmedContent}`;
16315
- return this.appendToSystemMessage(requirements, ruleSection, '\n\n');
16460
+ const requirementsWithRule = this.appendToSystemMessage(requirements, ruleSection, '\n\n');
16461
+ const ruleLines = trimmedContent
16462
+ .split(/\r?\n/)
16463
+ .map((line) => line.trim())
16464
+ .filter(Boolean)
16465
+ .map((line) => `- ${line}`);
16466
+ if (ruleLines.length === 0) {
16467
+ return requirementsWithRule;
16468
+ }
16469
+ return this.appendToPromptSuffix(requirementsWithRule, ruleLines.join('\n'));
16316
16470
  }
16317
16471
  }
16318
16472
  /**
@@ -16814,11 +16968,12 @@
16814
16968
  if (!trimmedContent) {
16815
16969
  return requirements;
16816
16970
  }
16817
- const teammates = parseTeamCommitmentContent(trimmedContent, { strict: true });
16971
+ // Keep TEAM resilient: unresolved/malformed teammate entries are skipped, valid ones are still registered.
16972
+ const teammates = parseTeamCommitmentContent(trimmedContent, { strict: false });
16818
16973
  if (teammates.length === 0) {
16819
16974
  return requirements;
16820
16975
  }
16821
- const agentName = ((_a = requirements.metadata) === null || _a === void 0 ? void 0 : _a.agentName) || 'Agent';
16976
+ const agentName = ((_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.agentName) || 'Agent';
16822
16977
  const teamEntries = teammates.map((teammate) => ({
16823
16978
  toolName: createTeamToolName(teammate.url),
16824
16979
  teammate,
@@ -16858,7 +17013,7 @@
16858
17013
  },
16859
17014
  });
16860
17015
  }
16861
- const existingTeammates = ((_b = requirements.metadata) === null || _b === void 0 ? void 0 : _b.teammates) || [];
17016
+ const existingTeammates = ((_b = requirements._metadata) === null || _b === void 0 ? void 0 : _b.teammates) || [];
16862
17017
  const updatedTeammates = [...existingTeammates];
16863
17018
  for (const entry of teamEntries) {
16864
17019
  if (updatedTeammates.some((existing) => existing.url === entry.teammate.url)) {
@@ -16887,8 +17042,8 @@
16887
17042
  return this.appendToSystemMessage({
16888
17043
  ...requirements,
16889
17044
  tools: updatedTools,
16890
- metadata: {
16891
- ...requirements.metadata,
17045
+ _metadata: {
17046
+ ...requirements._metadata,
16892
17047
  teammates: updatedTeammates,
16893
17048
  },
16894
17049
  }, teamSystemMessage);
@@ -17120,7 +17275,7 @@
17120
17275
  if (!trimmedContent) {
17121
17276
  // Store template mode flag in metadata
17122
17277
  const updatedMetadata = {
17123
- ...requirements.metadata,
17278
+ ...requirements._metadata,
17124
17279
  templateMode: true,
17125
17280
  };
17126
17281
  // Add a general instruction about using structured templates
@@ -17130,21 +17285,21 @@
17130
17285
  `);
17131
17286
  return {
17132
17287
  ...this.appendToSystemMessage(requirements, templateModeInstruction, '\n\n'),
17133
- metadata: updatedMetadata,
17288
+ _metadata: updatedMetadata,
17134
17289
  };
17135
17290
  }
17136
17291
  // If content is provided, add the specific template instructions
17137
17292
  const templateSection = `Response Template: ${trimmedContent}`;
17138
17293
  // Store the template in metadata for potential programmatic access
17139
- const existingTemplates = ((_a = requirements.metadata) === null || _a === void 0 ? void 0 : _a.templates) || [];
17294
+ const existingTemplates = ((_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.templates) || [];
17140
17295
  const updatedMetadata = {
17141
- ...requirements.metadata,
17296
+ ...requirements._metadata,
17142
17297
  templates: [...existingTemplates, trimmedContent],
17143
17298
  templateMode: true,
17144
17299
  };
17145
17300
  return {
17146
17301
  ...this.appendToSystemMessage(requirements, templateSection, '\n\n'),
17147
- metadata: updatedMetadata,
17302
+ _metadata: updatedMetadata,
17148
17303
  };
17149
17304
  }
17150
17305
  }
@@ -17481,8 +17636,8 @@
17481
17636
  return this.appendToSystemMessage({
17482
17637
  ...requirements,
17483
17638
  tools: updatedTools,
17484
- metadata: {
17485
- ...requirements.metadata,
17639
+ _metadata: {
17640
+ ...requirements._metadata,
17486
17641
  useBrowser: true,
17487
17642
  },
17488
17643
  }, spaceTrim$1.spaceTrim(`
@@ -17711,8 +17866,8 @@
17711
17866
  return this.appendToSystemMessage({
17712
17867
  ...requirements,
17713
17868
  tools: updatedTools,
17714
- metadata: {
17715
- ...requirements.metadata,
17869
+ _metadata: {
17870
+ ...requirements._metadata,
17716
17871
  useEmail: content || true,
17717
17872
  },
17718
17873
  }, spaceTrim$1.spaceTrim((block) => `
@@ -17847,8 +18002,8 @@
17847
18002
  return this.appendToSystemMessage({
17848
18003
  ...requirements,
17849
18004
  tools: updatedTools,
17850
- metadata: {
17851
- ...requirements.metadata,
18005
+ _metadata: {
18006
+ ...requirements._metadata,
17852
18007
  useImageGenerator: content || true,
17853
18008
  },
17854
18009
  }, spaceTrim$1.spaceTrim(`
@@ -18139,8 +18294,8 @@
18139
18294
  return this.appendToSystemMessage({
18140
18295
  ...requirements,
18141
18296
  tools: updatedTools,
18142
- metadata: {
18143
- ...requirements.metadata,
18297
+ _metadata: {
18298
+ ...requirements._metadata,
18144
18299
  useSearchEngine: content || true,
18145
18300
  },
18146
18301
  }, spaceTrim$1.spaceTrim((block) => `
@@ -18288,8 +18443,8 @@
18288
18443
  return this.appendToSystemMessage({
18289
18444
  ...requirements,
18290
18445
  tools: updatedTools,
18291
- metadata: {
18292
- ...requirements.metadata,
18446
+ _metadata: {
18447
+ ...requirements._metadata,
18293
18448
  },
18294
18449
  }, spaceTrim$1.spaceTrim((block) => `
18295
18450
  Time and date context:
@@ -20995,11 +21150,14 @@
20995
21150
  function createEmptyAgentModelRequirements() {
20996
21151
  return {
20997
21152
  systemMessage: '',
21153
+ promptSuffix: '',
20998
21154
  // modelName: 'gpt-5',
20999
21155
  modelName: 'gemini-2.5-flash-lite',
21000
21156
  temperature: 0.7,
21001
21157
  topP: 0.9,
21002
21158
  topK: 50,
21159
+ parentAgentUrl: null,
21160
+ isClosed: false,
21003
21161
  };
21004
21162
  }
21005
21163
  /**
@@ -21145,14 +21303,42 @@
21145
21303
  }
21146
21304
 
21147
21305
  /**
21148
- * Creates agent model requirements using the new commitment system
21306
+ * Creates agent model requirements using the new commitment system.
21307
+ *
21149
21308
  * This function uses a reduce-like pattern where each commitment applies its changes
21150
- * to build the final requirements starting from a basic empty model
21309
+ * to build the final requirements starting from a basic empty model.
21151
21310
  *
21152
- * @public exported from `@promptbook/core`
21311
+ * @param agentSource - Agent source book to parse.
21312
+ * @param modelName - Optional override for the agent model name.
21313
+ * @param options - Additional options such as the agent reference resolver.
21314
+ *
21315
+ * @private @@@
21316
+ */
21317
+ const COMMITMENTS_WITH_AGENT_REFERENCES = new Set(['FROM', 'IMPORT', 'IMPORTS', 'TEAM']);
21318
+ /**
21319
+ * Returns a safe fallback content when a resolver fails to transform a reference commitment.
21320
+ *
21321
+ * @param commitmentType - Commitment being resolved.
21322
+ * @param originalContent - Original unresolved commitment content.
21323
+ * @returns Fallback content that keeps requirement creation resilient.
21324
+ */
21325
+ function getSafeReferenceCommitmentFallback(commitmentType, originalContent) {
21326
+ if (commitmentType === 'FROM') {
21327
+ return 'VOID';
21328
+ }
21329
+ if (commitmentType === 'IMPORT' || commitmentType === 'IMPORTS' || commitmentType === 'TEAM') {
21330
+ return '';
21331
+ }
21332
+ return originalContent;
21333
+ }
21334
+ /**
21335
+ * @@@
21336
+ *
21337
+ * @private @@@
21153
21338
  */
21154
- async function createAgentModelRequirementsWithCommitments(agentSource, modelName) {
21339
+ async function createAgentModelRequirementsWithCommitments(agentSource, modelName, options) {
21155
21340
  var _a;
21341
+ const agentReferenceResolver = options === null || options === void 0 ? void 0 : options.agentReferenceResolver;
21156
21342
  // Parse the agent source to extract commitments
21157
21343
  const parseResult = parseAgentSourceWithCommitments(agentSource);
21158
21344
  // Apply DELETE filtering: remove prior commitments tagged by parameters targeted by DELETE/CANCEL/DISCARD/REMOVE
@@ -21189,8 +21375,8 @@
21189
21375
  // Store the agent name in metadata so commitments can access it
21190
21376
  requirements = {
21191
21377
  ...requirements,
21192
- metadata: {
21193
- ...requirements.metadata,
21378
+ _metadata: {
21379
+ ...requirements._metadata,
21194
21380
  agentName: parseResult.agentName,
21195
21381
  },
21196
21382
  };
@@ -21204,6 +21390,17 @@
21204
21390
  // Apply each commitment in order using reduce-like pattern
21205
21391
  for (let i = 0; i < filteredCommitments.length; i++) {
21206
21392
  const commitment = filteredCommitments[i];
21393
+ const isReferenceCommitment = Boolean(agentReferenceResolver && COMMITMENTS_WITH_AGENT_REFERENCES.has(commitment.type));
21394
+ let commitmentContent = commitment.content;
21395
+ if (isReferenceCommitment && agentReferenceResolver) {
21396
+ try {
21397
+ commitmentContent = await agentReferenceResolver.resolveCommitmentContent(commitment.type, commitment.content);
21398
+ }
21399
+ catch (error) {
21400
+ console.warn(`Failed to resolve commitment references for ${commitment.type}, falling back to safe defaults:`, error);
21401
+ commitmentContent = getSafeReferenceCommitmentFallback(commitment.type, commitment.content);
21402
+ }
21403
+ }
21207
21404
  // CLOSED commitment should work only if its the last commitment in the book
21208
21405
  if (commitment.type === 'CLOSED' && i !== filteredCommitments.length - 1) {
21209
21406
  continue;
@@ -21211,7 +21408,7 @@
21211
21408
  const definition = getCommitmentDefinition(commitment.type);
21212
21409
  if (definition) {
21213
21410
  try {
21214
- requirements = definition.applyToAgentModelRequirements(requirements, commitment.content);
21411
+ requirements = definition.applyToAgentModelRequirements(requirements, commitmentContent);
21215
21412
  }
21216
21413
  catch (error) {
21217
21414
  console.warn(`Failed to apply commitment ${commitment.type}:`, error);
@@ -21359,23 +21556,28 @@
21359
21556
  }
21360
21557
 
21361
21558
  /**
21362
- * Creates model requirements for an agent based on its source
21559
+ * Creates model requirements for an agent based on its source.
21363
21560
  *
21364
21561
  * There are 2 similar functions:
21365
21562
  * - `parseAgentSource` which is a lightweight parser for agent source, it parses basic information and its purpose is to be quick and synchronous. The commitments there are hardcoded.
21366
21563
  * - `createAgentModelRequirements` which is an asynchronous function that creates model requirements it applies each commitment one by one and works asynchronous.
21367
21564
  *
21565
+ * @param agentSource - Book describing the agent.
21566
+ * @param modelName - Optional override for the agent's model.
21567
+ * @param availableModels - Models that could fulfill the agent.
21568
+ * @param llmTools - Execution tools used when selecting a best model.
21569
+ * @param options - Optional hooks such as the agent reference resolver.
21368
21570
  * @public exported from `@promptbook/core`
21369
21571
  */
21370
- async function createAgentModelRequirements(agentSource, modelName, availableModels, llmTools) {
21572
+ async function createAgentModelRequirements(agentSource, modelName, availableModels, llmTools, options) {
21371
21573
  // If availableModels are provided and no specific modelName is given,
21372
21574
  // use preparePersona to select the best model
21373
21575
  if (availableModels && !modelName && llmTools) {
21374
21576
  const selectedModelName = await selectBestModelUsingPersona(agentSource, llmTools);
21375
- return createAgentModelRequirementsWithCommitments(agentSource, selectedModelName);
21577
+ return createAgentModelRequirementsWithCommitments(agentSource, selectedModelName, options);
21376
21578
  }
21377
21579
  // Use the new commitment-based system with provided or default model
21378
- return createAgentModelRequirementsWithCommitments(agentSource, modelName);
21580
+ return createAgentModelRequirementsWithCommitments(agentSource, modelName, options);
21379
21581
  }
21380
21582
  /**
21381
21583
  * Selects the best model using the preparePersona function
@@ -21673,6 +21875,66 @@
21673
21875
  },
21674
21876
  /**/
21675
21877
  /**/
21878
+ {
21879
+ modelVariant: 'CHAT',
21880
+ modelTitle: 'gpt-5.2-codex',
21881
+ modelName: 'gpt-5.2-codex',
21882
+ modelDescription: 'High-capability Codex variant tuned for agentic code generation with large contexts and reasoning effort controls. Ideal for long-horizon coding workflows and multi-step reasoning.',
21883
+ pricing: {
21884
+ prompt: pricing(`$1.75 / 1M tokens`),
21885
+ output: pricing(`$14.00 / 1M tokens`),
21886
+ },
21887
+ },
21888
+ /**/
21889
+ /**/
21890
+ {
21891
+ modelVariant: 'CHAT',
21892
+ modelTitle: 'gpt-5.1-codex-max',
21893
+ modelName: 'gpt-5.1-codex-max',
21894
+ modelDescription: 'Premium GPT-5.1 Codex flavor that mirrors gpt-5.1 in capability and pricing while adding Codex tooling optimizations.',
21895
+ pricing: {
21896
+ prompt: pricing(`$1.25 / 1M tokens`),
21897
+ output: pricing(`$10.00 / 1M tokens`),
21898
+ },
21899
+ },
21900
+ /**/
21901
+ /**/
21902
+ {
21903
+ modelVariant: 'CHAT',
21904
+ modelTitle: 'gpt-5.1-codex',
21905
+ modelName: 'gpt-5.1-codex',
21906
+ modelDescription: 'Core GPT-5.1 Codex model focused on agentic coding tasks with a balanced trade-off between reasoning and cost.',
21907
+ pricing: {
21908
+ prompt: pricing(`$1.25 / 1M tokens`),
21909
+ output: pricing(`$10.00 / 1M tokens`),
21910
+ },
21911
+ },
21912
+ /**/
21913
+ /**/
21914
+ {
21915
+ modelVariant: 'CHAT',
21916
+ modelTitle: 'gpt-5.1-codex-mini',
21917
+ modelName: 'gpt-5.1-codex-mini',
21918
+ modelDescription: 'Compact, cost-effective GPT-5.1 Codex variant with a smaller context window ideal for cheap assistant iterations that still require coding awareness.',
21919
+ pricing: {
21920
+ prompt: pricing(`$0.25 / 1M tokens`),
21921
+ output: pricing(`$2.00 / 1M tokens`),
21922
+ },
21923
+ },
21924
+ /**/
21925
+ /**/
21926
+ {
21927
+ modelVariant: 'CHAT',
21928
+ modelTitle: 'gpt-5-codex',
21929
+ modelName: 'gpt-5-codex',
21930
+ modelDescription: 'Legacy GPT-5 Codex model built for agentic coding workloads with the same pricing as GPT-5 and a focus on stability.',
21931
+ pricing: {
21932
+ prompt: pricing(`$1.25 / 1M tokens`),
21933
+ output: pricing(`$10.00 / 1M tokens`),
21934
+ },
21935
+ },
21936
+ /**/
21937
+ /**/
21676
21938
  {
21677
21939
  modelVariant: 'CHAT',
21678
21940
  modelTitle: 'gpt-5-mini',
@@ -22377,6 +22639,32 @@
22377
22639
  errorMessage.includes('does not support'));
22378
22640
  }
22379
22641
 
22642
+ /**
22643
+ * Provides access to the structured clone implementation when available.
22644
+ */
22645
+ function getStructuredCloneFunction() {
22646
+ return globalThis.structuredClone;
22647
+ }
22648
+ /**
22649
+ * Checks whether the prompt is a chat prompt that carries file attachments.
22650
+ */
22651
+ function hasChatPromptFiles(prompt) {
22652
+ return 'files' in prompt && Array.isArray(prompt.files);
22653
+ }
22654
+ /**
22655
+ * Creates a deep copy of the prompt while keeping attached files intact when structured clone is not available.
22656
+ */
22657
+ function clonePromptPreservingFiles(prompt) {
22658
+ const structuredCloneFn = getStructuredCloneFunction();
22659
+ if (typeof structuredCloneFn === 'function') {
22660
+ return structuredCloneFn(prompt);
22661
+ }
22662
+ const clonedPrompt = JSON.parse(JSON.stringify(prompt));
22663
+ if (hasChatPromptFiles(prompt)) {
22664
+ clonedPrompt.files = prompt.files;
22665
+ }
22666
+ return clonedPrompt;
22667
+ }
22380
22668
  /**
22381
22669
  * Execution Tools for calling OpenAI API or other OpenAI compatible provider
22382
22670
  *
@@ -22461,7 +22749,7 @@
22461
22749
  */
22462
22750
  async callChatModelStream(prompt, onProgress) {
22463
22751
  // Deep clone prompt and modelRequirements to avoid mutation across calls
22464
- const clonedPrompt = JSON.parse(JSON.stringify(prompt));
22752
+ const clonedPrompt = clonePromptPreservingFiles(prompt);
22465
22753
  // Use local Set for retried parameters to ensure independence and thread safety
22466
22754
  const retriedUnsupportedParameters = new Set();
22467
22755
  return this.callChatModelWithRetry(clonedPrompt, clonedPrompt.modelRequirements, [], retriedUnsupportedParameters, onProgress);
@@ -22488,7 +22776,10 @@
22488
22776
  // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
22489
22777
  // <- Note: [🧆]
22490
22778
  }; // <- TODO: [💩] Guard here types better
22491
- if (format === 'JSON') {
22779
+ if (currentModelRequirements.responseFormat !== undefined) {
22780
+ modelSettings.response_format = currentModelRequirements.responseFormat;
22781
+ }
22782
+ else if (format === 'JSON') {
22492
22783
  modelSettings.response_format = {
22493
22784
  type: 'json_object',
22494
22785
  };
@@ -23969,7 +24260,9 @@
23969
24260
  const processingStartedAtMs = Date.now();
23970
24261
  for (const [index, source] of knowledgeSources.entries()) {
23971
24262
  try {
23972
- const sourceType = source.startsWith('http') || source.startsWith('https') ? 'url' : 'file';
24263
+ const isDataUrl = isDataUrlKnowledgeSource(source);
24264
+ const isHttp = source.startsWith('http://') || source.startsWith('https://');
24265
+ const sourceType = isDataUrl ? 'data_url' : isHttp ? 'url' : 'file';
23973
24266
  if (this.options.isVerbose) {
23974
24267
  console.info('[🤰]', 'Processing knowledge source', {
23975
24268
  index: index + 1,
@@ -23979,8 +24272,27 @@
23979
24272
  logLabel,
23980
24273
  });
23981
24274
  }
23982
- // Check if it's a URL
23983
- if (source.startsWith('http://') || source.startsWith('https://')) {
24275
+ if (isDataUrl) {
24276
+ const parsed = parseDataUrlKnowledgeSource(source);
24277
+ if (!parsed) {
24278
+ skippedSources.push({ source, reason: 'invalid_data_url' });
24279
+ if (this.options.isVerbose) {
24280
+ console.info('[🤰]', 'Skipping knowledge source (invalid data URL)', {
24281
+ source,
24282
+ sourceType,
24283
+ logLabel,
24284
+ });
24285
+ }
24286
+ continue;
24287
+ }
24288
+ const dataUrlFile = new File([parsed.buffer], parsed.filename, {
24289
+ type: parsed.mimeType,
24290
+ });
24291
+ fileStreams.push(dataUrlFile);
24292
+ totalBytes += parsed.buffer.length;
24293
+ continue;
24294
+ }
24295
+ if (isHttp) {
23984
24296
  const downloadResult = await this.downloadKnowledgeSourceFile({
23985
24297
  source,
23986
24298
  timeoutMs: downloadTimeoutMs,
@@ -24082,6 +24394,64 @@
24082
24394
  }
24083
24395
 
24084
24396
  const DEFAULT_AGENT_KIT_MODEL_NAME = 'gpt-5.2';
24397
const DEFAULT_JSON_SCHEMA_NAME = 'StructuredOutput';
// TODO: Use or remove a shared EMPTY_JSON_SCHEMA constant for the no-argument case
/**
 * Normalizes an (optional) user-supplied JSON schema into the full `json_schema`
 * definition shape expected by the AgentKit output-type API, filling in safe defaults.
 *
 * @param jsonSchema - Optional partial schema definition (`name`, `strict`, `schema`).
 * @returns A complete `json_schema` definition with defaulted name, strictness and object schema.
 */
function buildJsonSchemaDefinition(jsonSchema) {
    const schema = jsonSchema?.schema ?? {};
    const required = Array.isArray(schema.required) ? schema.required : [];
    // `additionalProperties` defaults to permissive (`true`) when not specified
    const additionalProperties =
        schema.additionalProperties === undefined ? true : Boolean(schema.additionalProperties);
    return {
        type: 'json_schema',
        name: jsonSchema?.name ?? DEFAULT_JSON_SCHEMA_NAME,
        strict: Boolean(jsonSchema?.strict),
        schema: {
            type: 'object',
            properties: schema.properties ?? {},
            required,
            additionalProperties,
            description: schema.description,
        },
    };
}
24423
/**
 * Maps OpenAI `response_format` payloads to AgentKit output types so the runner can forward
 * structured-output preferences to OpenAI while still reusing the same AgentKit agent instance.
 *
 * Accepts both the shorthand string form (`'text'`, `'json_object'`, `'json_schema'`) and the
 * object form (`{ type, json_schema? }`).
 *
 * @param responseFormat - The OpenAI `response_format` payload from the user request.
 * @returns An Agent output type compatible with the requested schema, or `undefined` when no override is needed.
 * @private utility of OpenAI execution tools
 */
function mapResponseFormatToAgentOutputType(responseFormat) {
    if (!responseFormat) {
        return undefined;
    }
    if (typeof responseFormat === 'string') {
        // Any JSON-flavoured shorthand gets a default schema; everything else falls back to plain text
        const wantsJson = responseFormat === 'json_schema' || responseFormat === 'json_object';
        return wantsJson ? buildJsonSchemaDefinition() : 'text';
    }
    const { type } = responseFormat;
    if (type === 'text') {
        return 'text';
    }
    if (type === 'json_schema') {
        return buildJsonSchemaDefinition(responseFormat.json_schema);
    }
    if (type === 'json_object') {
        return buildJsonSchemaDefinition();
    }
    // Unknown format types do not force any output-type override
    return undefined;
}
24085
24455
  /**
24086
24456
  * Execution tools for OpenAI AgentKit (Agents SDK).
24087
24457
  *
@@ -24129,6 +24499,7 @@
24129
24499
  ...parameters,
24130
24500
  modelName: this.agentKitModelName,
24131
24501
  });
24502
+ const responseFormatOutputType = mapResponseFormatToAgentOutputType(modelRequirements.responseFormat);
24132
24503
  const preparedAgentKitAgent = await this.prepareAgentKitAgent({
24133
24504
  name: (prompt.title || 'Agent'),
24134
24505
  instructions: modelRequirements.systemMessage || '',
@@ -24140,6 +24511,7 @@
24140
24511
  prompt,
24141
24512
  rawPromptContent,
24142
24513
  onProgress,
24514
+ responseFormatOutputType,
24143
24515
  });
24144
24516
  }
24145
24517
  /**
@@ -24321,16 +24693,21 @@
24321
24693
  ...prompt.parameters,
24322
24694
  modelName: this.agentKitModelName,
24323
24695
  });
24696
+ const agentForRun = options.responseFormatOutputType !== undefined
24697
+ ? openAiAgentKitAgent.clone({
24698
+ outputType: options.responseFormatOutputType,
24699
+ })
24700
+ : openAiAgentKitAgent;
24324
24701
  const start = $getCurrentDate();
24325
24702
  let latestContent = '';
24326
24703
  const toolCalls = [];
24327
24704
  const toolCallIndexById = new Map();
24328
24705
  const inputItems = await this.buildAgentKitInputItems(prompt, rawPromptContent);
24329
24706
  const rawRequest = {
24330
- agentName: openAiAgentKitAgent.name,
24707
+ agentName: agentForRun.name,
24331
24708
  input: inputItems,
24332
24709
  };
24333
- const streamResult = await agents.run(openAiAgentKitAgent, inputItems, {
24710
+ const streamResult = await agents.run(agentForRun, inputItems, {
24334
24711
  stream: true,
24335
24712
  context: { parameters: prompt.parameters },
24336
24713
  });
@@ -25319,22 +25696,28 @@
25319
25696
  throw new Error('AgentLlmExecutionTools only supports chat prompts');
25320
25697
  }
25321
25698
  const modelRequirements = await this.getModelRequirements();
25699
+ const { _metadata, promptSuffix, ...sanitizedRequirements } = modelRequirements;
25322
25700
  const chatPrompt = prompt;
25323
25701
  let underlyingLlmResult;
25324
- // Create modified chat prompt with agent system message
25702
+ const chatPromptContentWithSuffix = promptSuffix
25703
+ ? `${chatPrompt.content}\n\n${promptSuffix}`
25704
+ : chatPrompt.content;
25325
25705
  const promptWithAgentModelRequirements = {
25326
25706
  ...chatPrompt,
25707
+ content: chatPromptContentWithSuffix,
25327
25708
  modelRequirements: {
25328
25709
  ...chatPrompt.modelRequirements,
25329
- ...modelRequirements,
25710
+ ...sanitizedRequirements,
25330
25711
  // Spread tools to convert readonly array to mutable
25331
- tools: modelRequirements.tools ? [...modelRequirements.tools] : chatPrompt.modelRequirements.tools,
25712
+ tools: sanitizedRequirements.tools
25713
+ ? [...sanitizedRequirements.tools]
25714
+ : chatPrompt.modelRequirements.tools,
25332
25715
  // Spread knowledgeSources to convert readonly array to mutable
25333
- knowledgeSources: modelRequirements.knowledgeSources
25334
- ? [...modelRequirements.knowledgeSources]
25716
+ knowledgeSources: sanitizedRequirements.knowledgeSources
25717
+ ? [...sanitizedRequirements.knowledgeSources]
25335
25718
  : undefined,
25336
25719
  // Prepend agent system message to existing system message
25337
- systemMessage: modelRequirements.systemMessage +
25720
+ systemMessage: sanitizedRequirements.systemMessage +
25338
25721
  (chatPrompt.modelRequirements.systemMessage
25339
25722
  ? `\n\n${chatPrompt.modelRequirements.systemMessage}`
25340
25723
  : ''),
@@ -25342,8 +25725,8 @@
25342
25725
  };
25343
25726
  console.log('!!!! promptWithAgentModelRequirements:', promptWithAgentModelRequirements);
25344
25727
  if (OpenAiAgentKitExecutionTools.isOpenAiAgentKitExecutionTools(this.options.llmTools)) {
25345
- const requirementsHash = cryptoJs.SHA256(JSON.stringify(modelRequirements)).toString();
25346
- const vectorStoreHash = cryptoJs.SHA256(JSON.stringify((_a = modelRequirements.knowledgeSources) !== null && _a !== void 0 ? _a : [])).toString();
25728
+ const requirementsHash = cryptoJs.SHA256(JSON.stringify(sanitizedRequirements)).toString();
25729
+ const vectorStoreHash = cryptoJs.SHA256(JSON.stringify((_a = sanitizedRequirements.knowledgeSources) !== null && _a !== void 0 ? _a : [])).toString();
25347
25730
  const cachedVectorStore = AgentLlmExecutionTools.vectorStoreCache.get(this.title);
25348
25731
  const cachedAgentKit = AgentLlmExecutionTools.agentKitAgentCache.get(this.title);
25349
25732
  let preparedAgentKit = this.options.assistantPreparationMode === 'external'
@@ -25370,7 +25753,7 @@
25370
25753
  agent: this.title,
25371
25754
  });
25372
25755
  }
25373
- if (!vectorStoreId && ((_b = modelRequirements.knowledgeSources) === null || _b === void 0 ? void 0 : _b.length)) {
25756
+ if (!vectorStoreId && ((_b = sanitizedRequirements.knowledgeSources) === null || _b === void 0 ? void 0 : _b.length)) {
25374
25757
  emitAssistantPreparationProgress({
25375
25758
  onProgress,
25376
25759
  prompt,
@@ -25386,9 +25769,9 @@
25386
25769
  });
25387
25770
  preparedAgentKit = await this.options.llmTools.prepareAgentKitAgent({
25388
25771
  name: this.title,
25389
- instructions: modelRequirements.systemMessage || '',
25390
- knowledgeSources: modelRequirements.knowledgeSources,
25391
- tools: modelRequirements.tools ? [...modelRequirements.tools] : undefined,
25772
+ instructions: sanitizedRequirements.systemMessage || '',
25773
+ knowledgeSources: sanitizedRequirements.knowledgeSources,
25774
+ tools: sanitizedRequirements.tools ? [...sanitizedRequirements.tools] : undefined,
25392
25775
  vectorStoreId,
25393
25776
  });
25394
25777
  }
@@ -25403,15 +25786,17 @@
25403
25786
  requirementsHash,
25404
25787
  vectorStoreId: preparedAgentKit.vectorStoreId,
25405
25788
  });
25789
+ const responseFormatOutputType = mapResponseFormatToAgentOutputType(promptWithAgentModelRequirements.modelRequirements.responseFormat);
25406
25790
  underlyingLlmResult = await this.options.llmTools.callChatModelStreamWithPreparedAgent({
25407
25791
  openAiAgentKitAgent: preparedAgentKit.agent,
25408
25792
  prompt: promptWithAgentModelRequirements,
25409
25793
  onProgress,
25794
+ responseFormatOutputType,
25410
25795
  });
25411
25796
  }
25412
25797
  else if (OpenAiAssistantExecutionTools.isOpenAiAssistantExecutionTools(this.options.llmTools)) {
25413
25798
  // ... deprecated path ...
25414
- const requirementsHash = cryptoJs.SHA256(JSON.stringify(modelRequirements)).toString();
25799
+ const requirementsHash = cryptoJs.SHA256(JSON.stringify(sanitizedRequirements)).toString();
25415
25800
  const cached = AgentLlmExecutionTools.assistantCache.get(this.title);
25416
25801
  let assistant;
25417
25802
  if (this.options.assistantPreparationMode === 'external') {
@@ -25453,9 +25838,9 @@
25453
25838
  assistant = await this.options.llmTools.updateAssistant({
25454
25839
  assistantId: cached.assistantId,
25455
25840
  name: this.title,
25456
- instructions: modelRequirements.systemMessage,
25457
- knowledgeSources: modelRequirements.knowledgeSources,
25458
- tools: modelRequirements.tools ? [...modelRequirements.tools] : undefined,
25841
+ instructions: sanitizedRequirements.systemMessage,
25842
+ knowledgeSources: sanitizedRequirements.knowledgeSources,
25843
+ tools: sanitizedRequirements.tools ? [...sanitizedRequirements.tools] : undefined,
25459
25844
  });
25460
25845
  AgentLlmExecutionTools.assistantCache.set(this.title, {
25461
25846
  assistantId: assistant.assistantId,
@@ -25478,9 +25863,9 @@
25478
25863
  });
25479
25864
  assistant = await this.options.llmTools.createNewAssistant({
25480
25865
  name: this.title,
25481
- instructions: modelRequirements.systemMessage,
25482
- knowledgeSources: modelRequirements.knowledgeSources,
25483
- tools: modelRequirements.tools ? [...modelRequirements.tools] : undefined,
25866
+ instructions: sanitizedRequirements.systemMessage,
25867
+ knowledgeSources: sanitizedRequirements.knowledgeSources,
25868
+ tools: sanitizedRequirements.tools ? [...sanitizedRequirements.tools] : undefined,
25484
25869
  /*
25485
25870
  !!!
25486
25871
  metadata: {
@@ -25522,13 +25907,19 @@
25522
25907
  }
25523
25908
  }
25524
25909
  let content = underlyingLlmResult.content;
25525
- // Note: Cleanup the AI artifacts from the content
25526
- content = humanizeAiText(content);
25527
- // Note: Make sure the content is Promptbook-like
25528
- content = promptbookifyAiText(content);
25910
+ if (typeof content === 'string') {
25911
+ // Note: Cleanup the AI artifacts from the content
25912
+ content = humanizeAiText(content);
25913
+ // Note: Make sure the content is Promptbook-like
25914
+ content = promptbookifyAiText(content);
25915
+ }
25916
+ else {
25917
+ // TODO: Maybe deep `humanizeAiText` + `promptbookifyAiText` inside of the object
25918
+ content = JSON.stringify(content);
25919
+ }
25529
25920
  const agentResult = {
25530
25921
  ...underlyingLlmResult,
25531
- content,
25922
+ content: content,
25532
25923
  modelName: this.modelName,
25533
25924
  };
25534
25925
  return agentResult;
@@ -25717,7 +26108,6 @@
25717
26108
  * Note: This method also implements the learning mechanism
25718
26109
  */
25719
26110
  async callChatModelStream(prompt, onProgress) {
25720
- var _a;
25721
26111
  // [1] Check if the user is asking the same thing as in the samples
25722
26112
  const modelRequirements = await this.getModelRequirements();
25723
26113
  if (modelRequirements.samples) {
@@ -25765,7 +26155,7 @@
25765
26155
  if (result.rawResponse && 'sample' in result.rawResponse) {
25766
26156
  return result;
25767
26157
  }
25768
- if ((_a = modelRequirements.metadata) === null || _a === void 0 ? void 0 : _a.isClosed) {
26158
+ if (modelRequirements.isClosed) {
25769
26159
  return result;
25770
26160
  }
25771
26161
  // Note: [0] Notify start of self-learning