@promptbook/wizard 0.110.0-8 → 0.110.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. package/README.md +0 -4
  2. package/esm/index.es.js +487 -97
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/src/_packages/components.index.d.ts +6 -0
  5. package/esm/typings/src/_packages/core.index.d.ts +2 -2
  6. package/esm/typings/src/_packages/types.index.d.ts +10 -0
  7. package/esm/typings/src/book-2.0/agent-source/AgentModelRequirements.d.ts +22 -21
  8. package/esm/typings/src/book-2.0/agent-source/AgentReferenceResolver.d.ts +18 -0
  9. package/esm/typings/src/book-2.0/agent-source/CreateAgentModelRequirementsOptions.d.ts +12 -0
  10. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirements.d.ts +8 -2
  11. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.agentReferenceResolver.test.d.ts +1 -0
  12. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.d.ts +4 -5
  13. package/esm/typings/src/book-components/BookEditor/BookEditor.d.ts +42 -0
  14. package/esm/typings/src/book-components/Chat/Chat/ChatActionsBar.d.ts +0 -2
  15. package/esm/typings/src/book-components/Chat/Chat/ChatInputArea.d.ts +1 -0
  16. package/esm/typings/src/book-components/Chat/Chat/ChatMessageItem.d.ts +4 -0
  17. package/esm/typings/src/book-components/Chat/Chat/ChatMessageList.d.ts +1 -0
  18. package/esm/typings/src/book-components/Chat/Chat/ChatProps.d.ts +26 -0
  19. package/esm/typings/src/book-components/Chat/Chat/ChatSoundToggle.d.ts +31 -0
  20. package/esm/typings/src/book-components/Chat/LlmChat/LlmChatProps.d.ts +6 -0
  21. package/esm/typings/src/book-components/Chat/hooks/useChatRatings.d.ts +24 -2
  22. package/esm/typings/src/book-components/Chat/utils/getToolCallChipletInfo.d.ts +2 -10
  23. package/esm/typings/src/book-components/Chat/utils/parseCitationMarker.d.ts +75 -0
  24. package/esm/typings/src/book-components/Chat/utils/parseCitationsFromContent.d.ts +3 -1
  25. package/esm/typings/src/book-components/Chat/utils/parseCitationsFromContent.test.d.ts +1 -0
  26. package/esm/typings/src/book-components/icons/ArrowIcon.d.ts +17 -4
  27. package/esm/typings/src/commitments/_base/BaseCommitmentDefinition.d.ts +9 -0
  28. package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.test.d.ts +1 -0
  29. package/esm/typings/src/llm-providers/openai/OpenAiAgentKitExecutionTools.d.ts +39 -0
  30. package/esm/typings/src/types/LlmToolDefinition.d.ts +1 -0
  31. package/esm/typings/src/types/ModelRequirements.d.ts +9 -0
  32. package/esm/typings/src/utils/DEFAULT_THINKING_MESSAGES.d.ts +8 -0
  33. package/esm/typings/src/utils/knowledge/inlineKnowledgeSource.d.ts +38 -0
  34. package/esm/typings/src/utils/knowledge/inlineKnowledgeSource.test.d.ts +1 -0
  35. package/esm/typings/src/utils/language/getBrowserPreferredSpeechRecognitionLanguage.d.ts +35 -0
  36. package/esm/typings/src/version.d.ts +1 -1
  37. package/package.json +2 -2
  38. package/umd/index.umd.js +487 -97
  39. package/umd/index.umd.js.map +1 -1
package/umd/index.umd.js CHANGED
@@ -49,7 +49,7 @@
49
49
  * @generated
50
50
  * @see https://github.com/webgptorg/promptbook
51
51
  */
52
- const PROMPTBOOK_ENGINE_VERSION = '0.110.0-8';
52
+ const PROMPTBOOK_ENGINE_VERSION = '0.110.0';
53
53
  /**
54
54
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
55
55
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -3787,6 +3787,66 @@
3787
3787
  },
3788
3788
  /**/
3789
3789
  /**/
3790
+ {
3791
+ modelVariant: 'CHAT',
3792
+ modelTitle: 'gpt-5.2-codex',
3793
+ modelName: 'gpt-5.2-codex',
3794
+ modelDescription: 'High-capability Codex variant tuned for agentic code generation with large contexts and reasoning effort controls. Ideal for long-horizon coding workflows and multi-step reasoning.',
3795
+ pricing: {
3796
+ prompt: pricing(`$1.75 / 1M tokens`),
3797
+ output: pricing(`$14.00 / 1M tokens`),
3798
+ },
3799
+ },
3800
+ /**/
3801
+ /**/
3802
+ {
3803
+ modelVariant: 'CHAT',
3804
+ modelTitle: 'gpt-5.1-codex-max',
3805
+ modelName: 'gpt-5.1-codex-max',
3806
+ modelDescription: 'Premium GPT-5.1 Codex flavor that mirrors gpt-5.1 in capability and pricing while adding Codex tooling optimizations.',
3807
+ pricing: {
3808
+ prompt: pricing(`$1.25 / 1M tokens`),
3809
+ output: pricing(`$10.00 / 1M tokens`),
3810
+ },
3811
+ },
3812
+ /**/
3813
+ /**/
3814
+ {
3815
+ modelVariant: 'CHAT',
3816
+ modelTitle: 'gpt-5.1-codex',
3817
+ modelName: 'gpt-5.1-codex',
3818
+ modelDescription: 'Core GPT-5.1 Codex model focused on agentic coding tasks with a balanced trade-off between reasoning and cost.',
3819
+ pricing: {
3820
+ prompt: pricing(`$1.25 / 1M tokens`),
3821
+ output: pricing(`$10.00 / 1M tokens`),
3822
+ },
3823
+ },
3824
+ /**/
3825
+ /**/
3826
+ {
3827
+ modelVariant: 'CHAT',
3828
+ modelTitle: 'gpt-5.1-codex-mini',
3829
+ modelName: 'gpt-5.1-codex-mini',
3830
+ modelDescription: 'Compact, cost-effective GPT-5.1 Codex variant with a smaller context window ideal for cheap assistant iterations that still require coding awareness.',
3831
+ pricing: {
3832
+ prompt: pricing(`$0.25 / 1M tokens`),
3833
+ output: pricing(`$2.00 / 1M tokens`),
3834
+ },
3835
+ },
3836
+ /**/
3837
+ /**/
3838
+ {
3839
+ modelVariant: 'CHAT',
3840
+ modelTitle: 'gpt-5-codex',
3841
+ modelName: 'gpt-5-codex',
3842
+ modelDescription: 'Legacy GPT-5 Codex model built for agentic coding workloads with the same pricing as GPT-5 and a focus on stability.',
3843
+ pricing: {
3844
+ prompt: pricing(`$1.25 / 1M tokens`),
3845
+ output: pricing(`$10.00 / 1M tokens`),
3846
+ },
3847
+ },
3848
+ /**/
3849
+ /**/
3790
3850
  {
3791
3851
  modelVariant: 'CHAT',
3792
3852
  modelTitle: 'gpt-5-mini',
@@ -8682,6 +8742,32 @@
8682
8742
  errorMessage.includes('does not support'));
8683
8743
  }
8684
8744
 
8745
/**
 * Looks up the platform-provided structured clone implementation.
 *
 * @returns The global `structuredClone` function, or `undefined` when the runtime does not expose one.
 */
function getStructuredCloneFunction() {
    const { structuredClone } = globalThis;
    return structuredClone;
}
8751
/**
 * Checks whether the prompt is a chat prompt that carries file attachments.
 *
 * @param prompt - Prompt object to inspect.
 * @returns `true` only when the prompt has a `files` property holding an array.
 */
function hasChatPromptFiles(prompt) {
    if (!('files' in prompt)) {
        return false;
    }
    return Array.isArray(prompt.files);
}
8757
/**
 * Creates a deep copy of the prompt while keeping attached files intact when structured clone is not available.
 *
 * Prefers `structuredClone` when the runtime provides it. Otherwise falls back to a JSON
 * round-trip; because JSON serialization cannot represent file attachments, the fallback
 * restores them afterwards. The restored `files` array is shallow-copied so that adding or
 * removing entries on the clone does not mutate the original prompt — which is the stated
 * purpose of cloning here. (The file objects themselves remain shared in the fallback path,
 * since they are not JSON-serializable.)
 *
 * @param prompt - Prompt to clone; may be a chat prompt carrying a `files` array.
 * @returns A structurally independent copy of the prompt.
 */
function clonePromptPreservingFiles(prompt) {
    const structuredCloneFn = getStructuredCloneFunction();
    if (typeof structuredCloneFn === 'function') {
        return structuredCloneFn(prompt);
    }
    const clonedPrompt = JSON.parse(JSON.stringify(prompt));
    if (hasChatPromptFiles(prompt)) {
        // Shallow-copy the array so list-level mutations on the clone do not leak back
        clonedPrompt.files = [...prompt.files];
    }
    return clonedPrompt;
}
8685
8771
  /**
8686
8772
  * Execution Tools for calling OpenAI API or other OpenAI compatible provider
8687
8773
  *
@@ -8766,7 +8852,7 @@
8766
8852
  */
8767
8853
  async callChatModelStream(prompt, onProgress) {
8768
8854
  // Deep clone prompt and modelRequirements to avoid mutation across calls
8769
- const clonedPrompt = JSON.parse(JSON.stringify(prompt));
8855
+ const clonedPrompt = clonePromptPreservingFiles(prompt);
8770
8856
  // Use local Set for retried parameters to ensure independence and thread safety
8771
8857
  const retriedUnsupportedParameters = new Set();
8772
8858
  return this.callChatModelWithRetry(clonedPrompt, clonedPrompt.modelRequirements, [], retriedUnsupportedParameters, onProgress);
@@ -8793,7 +8879,10 @@
8793
8879
  // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
8794
8880
  // <- Note: [🧆]
8795
8881
  }; // <- TODO: [💩] Guard here types better
8796
- if (format === 'JSON') {
8882
+ if (currentModelRequirements.responseFormat !== undefined) {
8883
+ modelSettings.response_format = currentModelRequirements.responseFormat;
8884
+ }
8885
+ else if (format === 'JSON') {
8797
8886
  modelSettings.response_format = {
8798
8887
  type: 'json_object',
8799
8888
  };
@@ -10135,6 +10224,136 @@
10135
10224
  }
10136
10225
  }
10137
10226
 
10227
/**
 * Base filename used when no meaningful name can be derived from inline knowledge content.
 *
 * @private thing of inline knowledge
 */
const INLINE_KNOWLEDGE_BASE_NAME = 'inline-knowledge';
/**
 * File extension appended to generated inline knowledge filenames.
 *
 * @private thing of inline knowledge
 */
const INLINE_KNOWLEDGE_EXTENSION = '.txt';
/**
 * Prefix that identifies a `data:` URL knowledge source.
 *
 * @private thing of inline knowledge
 */
const DATA_URL_PREFIX = 'data:';
10245
/**
 * Returns the first line of `content` that contains non-whitespace characters, trimmed.
 *
 * @param content - Text to scan; lines are split on `\n` or `\r\n`.
 * @returns The trimmed first non-empty line, or `null` when every line is blank.
 *
 * @private thing of inline knowledge
 */
function getFirstNonEmptyLine(content) {
    const firstNonEmpty = content
        .split(/\r?\n/)
        .map((line) => line.trim())
        .find((line) => line !== '');
    return firstNonEmpty ?? null;
}
10260
/**
 * Derives a kebab-case base filename from the first non-empty line of the content.
 * Falls back to the generic inline-knowledge name when no usable line exists or
 * normalization yields an empty string.
 *
 * @param content - Inline knowledge text.
 * @returns Base filename without extension.
 *
 * @private thing of inline knowledge
 */
function deriveBaseFilename(content) {
    const headline = getFirstNonEmptyLine(content);
    if (headline === null) {
        return INLINE_KNOWLEDGE_BASE_NAME;
    }
    return normalizeToKebabCase(headline) || INLINE_KNOWLEDGE_BASE_NAME;
}
10273
/**
 * Creates a data URL that represents the inline knowledge content as a text file.
 *
 * @param content - Raw inline knowledge text; surrounding whitespace is stripped first.
 * @returns Descriptor with the derived filename, MIME type, and a base64 data URL payload.
 *
 * @private thing of inline knowledge
 */
function createInlineKnowledgeSourceFile(content) {
    const mimeType = 'text/plain';
    const trimmedContent = content.trim();
    const filename = `${deriveBaseFilename(trimmedContent)}${INLINE_KNOWLEDGE_EXTENSION}`;
    const payload = Buffer.from(trimmedContent, 'utf-8').toString('base64');
    const url = [
        DATA_URL_PREFIX,
        mimeType,
        `;name=${encodeURIComponent(filename)}`,
        ';charset=utf-8;base64,',
        payload,
    ].join('');
    return { filename, mimeType, url };
}
10292
/**
 * Checks whether the provided source string is a data URL that can be decoded.
 *
 * @param source - Candidate knowledge source value (any type accepted).
 * @returns `true` only for strings starting with the `data:` prefix.
 *
 * @private thing of inline knowledge
 */
function isDataUrlKnowledgeSource(source) {
    if (typeof source !== 'string') {
        return false;
    }
    return source.startsWith(DATA_URL_PREFIX);
}
10300
/**
 * Parses a data URL-based knowledge source into its raw buffer, filename, and MIME type.
 *
 * Expected shape: `data:<media-type>[;name=<encoded-filename>][;...];base64,<payload>`.
 * Only base64-encoded payloads are supported; any other input yields `null`.
 *
 * @param source - Candidate knowledge source string.
 * @returns Parsed `{ buffer, filename, mimeType }`, or `null` when the source is not a decodable data URL.
 *
 * @private thing of inline knowledge
 */
function parseDataUrlKnowledgeSource(source) {
    if (!isDataUrlKnowledgeSource(source)) {
        return null;
    }
    const commaIndex = source.indexOf(',');
    if (commaIndex === -1) {
        return null;
    }
    const header = source.slice(DATA_URL_PREFIX.length, commaIndex);
    const payload = source.slice(commaIndex + 1);
    const tokens = header.split(';');
    const mediaType = tokens[0] || 'text/plain';
    let filename = `${INLINE_KNOWLEDGE_BASE_NAME}${INLINE_KNOWLEDGE_EXTENSION}`;
    let isBase64 = false;
    for (let i = 1; i < tokens.length; i++) {
        const token = tokens[i];
        if (!token) {
            continue;
        }
        if (token.toLowerCase() === 'base64') {
            isBase64 = true;
            continue;
        }
        // Split on the FIRST '=' only, so parameter values containing '=' are preserved intact
        const equalsIndex = token.indexOf('=');
        if (equalsIndex === -1) {
            continue;
        }
        const key = token.slice(0, equalsIndex);
        const value = token.slice(equalsIndex + 1);
        // MIME parameter names are case-insensitive (RFC 2045)
        if (key.toLowerCase() === 'name') {
            try {
                filename = decodeURIComponent(value);
            }
            catch (_a) {
                // Keep the raw value when it is not valid percent-encoding
                filename = value;
            }
        }
    }
    if (!isBase64) {
        return null;
    }
    try {
        return {
            buffer: Buffer.from(payload, 'base64'),
            filename,
            mimeType: mediaType,
        };
    }
    catch (_b) {
        return null;
    }
}
10353
+ /**
10354
+ * Note: [💞] Ignore a discrepancy between file name and entity name
10355
+ */
10356
+
10138
10357
  const DEFAULT_KNOWLEDGE_SOURCE_DOWNLOAD_TIMEOUT_MS = 30000;
10139
10358
  const DEFAULT_KNOWLEDGE_SOURCE_UPLOAD_TIMEOUT_MS = 900000;
10140
10359
  const VECTOR_STORE_PROGRESS_LOG_INTERVAL_MIN_MS = 15000;
@@ -10771,7 +10990,9 @@
10771
10990
  const processingStartedAtMs = Date.now();
10772
10991
  for (const [index, source] of knowledgeSources.entries()) {
10773
10992
  try {
10774
- const sourceType = source.startsWith('http') || source.startsWith('https') ? 'url' : 'file';
10993
+ const isDataUrl = isDataUrlKnowledgeSource(source);
10994
+ const isHttp = source.startsWith('http://') || source.startsWith('https://');
10995
+ const sourceType = isDataUrl ? 'data_url' : isHttp ? 'url' : 'file';
10775
10996
  if (this.options.isVerbose) {
10776
10997
  console.info('[🤰]', 'Processing knowledge source', {
10777
10998
  index: index + 1,
@@ -10781,8 +11002,27 @@
10781
11002
  logLabel,
10782
11003
  });
10783
11004
  }
10784
- // Check if it's a URL
10785
- if (source.startsWith('http://') || source.startsWith('https://')) {
11005
+ if (isDataUrl) {
11006
+ const parsed = parseDataUrlKnowledgeSource(source);
11007
+ if (!parsed) {
11008
+ skippedSources.push({ source, reason: 'invalid_data_url' });
11009
+ if (this.options.isVerbose) {
11010
+ console.info('[🤰]', 'Skipping knowledge source (invalid data URL)', {
11011
+ source,
11012
+ sourceType,
11013
+ logLabel,
11014
+ });
11015
+ }
11016
+ continue;
11017
+ }
11018
+ const dataUrlFile = new File([parsed.buffer], parsed.filename, {
11019
+ type: parsed.mimeType,
11020
+ });
11021
+ fileStreams.push(dataUrlFile);
11022
+ totalBytes += parsed.buffer.length;
11023
+ continue;
11024
+ }
11025
+ if (isHttp) {
10786
11026
  const downloadResult = await this.downloadKnowledgeSourceFile({
10787
11027
  source,
10788
11028
  timeoutMs: downloadTimeoutMs,
@@ -17186,11 +17426,14 @@
17186
17426
/**
 * Builds the baseline agent model requirements that commitments then refine.
 *
 * @returns A fresh requirements object with an empty system message, empty prompt
 * suffix, default model and sampling settings, no parent agent, and open access.
 */
function createEmptyAgentModelRequirements() {
    const defaults = {
        systemMessage: '',
        promptSuffix: '',
        // modelName: 'gpt-5',
        modelName: 'gemini-2.5-flash-lite',
        temperature: 0.7,
        topP: 0.9,
        topK: 50,
        parentAgentUrl: null,
        isClosed: false,
    };
    return defaults;
}
17196
17439
  /**
@@ -17307,6 +17550,28 @@
17307
17550
  return currentMessage + separator + content;
17308
17551
  });
17309
17552
  }
17553
+ /**
17554
+ * Helper method to create a new requirements object with updated prompt suffix
17555
+ */
17556
+ updatePromptSuffix(requirements, contentUpdate) {
17557
+ const newSuffix = typeof contentUpdate === 'string' ? contentUpdate : contentUpdate(requirements.promptSuffix);
17558
+ return {
17559
+ ...requirements,
17560
+ promptSuffix: newSuffix,
17561
+ };
17562
+ }
17563
+ /**
17564
+ * Helper method to append content to the prompt suffix
17565
+ * Default separator is a single newline for bullet lists.
17566
+ */
17567
+ appendToPromptSuffix(requirements, content, separator = '\n') {
17568
+ return this.updatePromptSuffix(requirements, (currentSuffix) => {
17569
+ if (!currentSuffix.trim()) {
17570
+ return content;
17571
+ }
17572
+ return `${currentSuffix}${separator}${content}`;
17573
+ });
17574
+ }
17310
17575
  /**
17311
17576
  * Helper method to add a comment section to the system message
17312
17577
  * Comments are lines starting with # that will be removed from the final system message
@@ -17484,13 +17749,9 @@
17484
17749
  `);
17485
17750
  }
17486
17751
  applyToAgentModelRequirements(requirements, _content) {
17487
- const updatedMetadata = {
17488
- ...requirements.metadata,
17489
- isClosed: true,
17490
- };
17491
17752
  return {
17492
17753
  ...requirements,
17493
- metadata: updatedMetadata,
17754
+ isClosed: true,
17494
17755
  };
17495
17756
  }
17496
17757
  }
@@ -17768,12 +18029,12 @@
17768
18029
  return requirements;
17769
18030
  }
17770
18031
  // Get existing dictionary entries from metadata
17771
- const existingDictionary = ((_a = requirements.metadata) === null || _a === void 0 ? void 0 : _a.DICTIONARY) || '';
18032
+ const existingDictionary = ((_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.DICTIONARY) || '';
17772
18033
  // Merge the new dictionary entry with existing entries
17773
18034
  const mergedDictionary = existingDictionary ? `${existingDictionary}\n${trimmedContent}` : trimmedContent;
17774
18035
  // Store the merged dictionary in metadata for debugging and inspection
17775
18036
  const updatedMetadata = {
17776
- ...requirements.metadata,
18037
+ ...requirements._metadata,
17777
18038
  DICTIONARY: mergedDictionary,
17778
18039
  };
17779
18040
  // Create the dictionary section for the system message
@@ -17781,7 +18042,7 @@
17781
18042
  const dictionarySection = `# DICTIONARY\n${mergedDictionary}`;
17782
18043
  return {
17783
18044
  ...this.appendToSystemMessage(requirements, dictionarySection),
17784
- metadata: updatedMetadata,
18045
+ _metadata: updatedMetadata,
17785
18046
  };
17786
18047
  }
17787
18048
  }
@@ -17921,10 +18182,7 @@
17921
18182
  applyToAgentModelRequirements(requirements, content) {
17922
18183
  const trimmedContent = content.trim();
17923
18184
  if (!trimmedContent) {
17924
- return {
17925
- ...requirements,
17926
- parentAgentUrl: undefined,
17927
- };
18185
+ return requirements;
17928
18186
  }
17929
18187
  if (trimmedContent.toUpperCase() === 'VOID' ||
17930
18188
  trimmedContent.toUpperCase() === 'NULL' ||
@@ -18236,9 +18494,13 @@
18236
18494
  return this.appendToSystemMessage(updatedRequirements, knowledgeInfo, '\n\n');
18237
18495
  }
18238
18496
  else {
18239
- // Direct text knowledge - add to system message
18240
- const knowledgeSection = `Knowledge: ${trimmedContent}`;
18241
- return this.appendToSystemMessage(requirements, knowledgeSection, '\n\n');
18497
+ const inlineSource = createInlineKnowledgeSourceFile(trimmedContent);
18498
+ const updatedRequirements = {
18499
+ ...requirements,
18500
+ knowledgeSources: [...(requirements.knowledgeSources || []), inlineSource.url],
18501
+ };
18502
+ const knowledgeInfo = `Knowledge Source Inline: ${inlineSource.filename} (derived from inline content and processed for retrieval during chat)`;
18503
+ return this.appendToSystemMessage(updatedRequirements, knowledgeInfo, '\n\n');
18242
18504
  }
18243
18505
  }
18244
18506
  }
@@ -18485,16 +18747,16 @@
18485
18747
  // and typically doesn't need to be added to the system prompt or model requirements directly.
18486
18748
  // It is extracted separately for the chat interface.
18487
18749
  var _a;
18488
- const pendingUserMessage = (_a = requirements.metadata) === null || _a === void 0 ? void 0 : _a.pendingUserMessage;
18750
+ const pendingUserMessage = (_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.pendingUserMessage;
18489
18751
  if (pendingUserMessage) {
18490
18752
  const newSample = { question: pendingUserMessage, answer: content };
18491
18753
  const newSamples = [...(requirements.samples || []), newSample];
18492
- const newMetadata = { ...requirements.metadata };
18754
+ const newMetadata = { ...requirements._metadata };
18493
18755
  delete newMetadata.pendingUserMessage;
18494
18756
  return {
18495
18757
  ...requirements,
18496
18758
  samples: newSamples,
18497
- metadata: newMetadata,
18759
+ _metadata: newMetadata,
18498
18760
  };
18499
18761
  }
18500
18762
  return requirements;
@@ -18742,8 +19004,8 @@
18742
19004
  applyToAgentModelRequirements(requirements, content) {
18743
19005
  return {
18744
19006
  ...requirements,
18745
- metadata: {
18746
- ...requirements.metadata,
19007
+ _metadata: {
19008
+ ...requirements._metadata,
18747
19009
  pendingUserMessage: content,
18748
19010
  },
18749
19011
  };
@@ -19601,11 +19863,7 @@
19601
19863
  if (trimmedContent === '') {
19602
19864
  return requirements;
19603
19865
  }
19604
- // Return requirements with updated notes but no changes to system message
19605
- return {
19606
- ...requirements,
19607
- notes: [...(requirements.notes || []), trimmedContent],
19608
- };
19866
+ return requirements;
19609
19867
  }
19610
19868
  }
19611
19869
  /**
@@ -19667,12 +19925,12 @@
19667
19925
  // Since OPEN is default, we can just ensure isClosed is false
19668
19926
  // But to be explicit we can set it
19669
19927
  const updatedMetadata = {
19670
- ...requirements.metadata,
19928
+ ...requirements._metadata,
19671
19929
  isClosed: false,
19672
19930
  };
19673
19931
  return {
19674
19932
  ...requirements,
19675
- metadata: updatedMetadata,
19933
+ _metadata: updatedMetadata,
19676
19934
  };
19677
19935
  }
19678
19936
  }
@@ -19753,7 +20011,7 @@
19753
20011
  return requirements;
19754
20012
  }
19755
20013
  // Get existing persona content from metadata
19756
- const existingPersonaContent = ((_a = requirements.metadata) === null || _a === void 0 ? void 0 : _a.PERSONA) || '';
20014
+ const existingPersonaContent = ((_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.PERSONA) || '';
19757
20015
  // Merge the new content with existing persona content
19758
20016
  // When multiple PERSONA commitments exist, they are merged into one
19759
20017
  const mergedPersonaContent = existingPersonaContent
@@ -19761,12 +20019,12 @@
19761
20019
  : trimmedContent;
19762
20020
  // Store the merged persona content in metadata for debugging and inspection
19763
20021
  const updatedMetadata = {
19764
- ...requirements.metadata,
20022
+ ...requirements._metadata,
19765
20023
  PERSONA: mergedPersonaContent,
19766
20024
  };
19767
20025
  // Get the agent name from metadata (which should contain the first line of agent source)
19768
20026
  // If not available, extract from current system message as fallback
19769
- let agentName = (_b = requirements.metadata) === null || _b === void 0 ? void 0 : _b.agentName;
20027
+ let agentName = (_b = requirements._metadata) === null || _b === void 0 ? void 0 : _b.agentName;
19770
20028
  if (!agentName) {
19771
20029
  // Fallback: extract from current system message
19772
20030
  const currentMessage = requirements.systemMessage.trim();
@@ -19813,7 +20071,7 @@
19813
20071
  return {
19814
20072
  ...requirements,
19815
20073
  systemMessage: newSystemMessage,
19816
- metadata: updatedMetadata,
20074
+ _metadata: updatedMetadata,
19817
20075
  };
19818
20076
  }
19819
20077
  }
@@ -19896,7 +20154,16 @@
19896
20154
  }
19897
20155
  // Add rule to the system message
19898
20156
  const ruleSection = `Rule: ${trimmedContent}`;
19899
- return this.appendToSystemMessage(requirements, ruleSection, '\n\n');
20157
+ const requirementsWithRule = this.appendToSystemMessage(requirements, ruleSection, '\n\n');
20158
+ const ruleLines = trimmedContent
20159
+ .split(/\r?\n/)
20160
+ .map((line) => line.trim())
20161
+ .filter(Boolean)
20162
+ .map((line) => `- ${line}`);
20163
+ if (ruleLines.length === 0) {
20164
+ return requirementsWithRule;
20165
+ }
20166
+ return this.appendToPromptSuffix(requirementsWithRule, ruleLines.join('\n'));
19900
20167
  }
19901
20168
  }
19902
20169
  /**
@@ -20398,11 +20665,12 @@
20398
20665
  if (!trimmedContent) {
20399
20666
  return requirements;
20400
20667
  }
20401
- const teammates = parseTeamCommitmentContent(trimmedContent, { strict: true });
20668
+ // Keep TEAM resilient: unresolved/malformed teammate entries are skipped, valid ones are still registered.
20669
+ const teammates = parseTeamCommitmentContent(trimmedContent, { strict: false });
20402
20670
  if (teammates.length === 0) {
20403
20671
  return requirements;
20404
20672
  }
20405
- const agentName = ((_a = requirements.metadata) === null || _a === void 0 ? void 0 : _a.agentName) || 'Agent';
20673
+ const agentName = ((_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.agentName) || 'Agent';
20406
20674
  const teamEntries = teammates.map((teammate) => ({
20407
20675
  toolName: createTeamToolName(teammate.url),
20408
20676
  teammate,
@@ -20442,7 +20710,7 @@
20442
20710
  },
20443
20711
  });
20444
20712
  }
20445
- const existingTeammates = ((_b = requirements.metadata) === null || _b === void 0 ? void 0 : _b.teammates) || [];
20713
+ const existingTeammates = ((_b = requirements._metadata) === null || _b === void 0 ? void 0 : _b.teammates) || [];
20446
20714
  const updatedTeammates = [...existingTeammates];
20447
20715
  for (const entry of teamEntries) {
20448
20716
  if (updatedTeammates.some((existing) => existing.url === entry.teammate.url)) {
@@ -20471,8 +20739,8 @@
20471
20739
  return this.appendToSystemMessage({
20472
20740
  ...requirements,
20473
20741
  tools: updatedTools,
20474
- metadata: {
20475
- ...requirements.metadata,
20742
+ _metadata: {
20743
+ ...requirements._metadata,
20476
20744
  teammates: updatedTeammates,
20477
20745
  },
20478
20746
  }, teamSystemMessage);
@@ -20704,7 +20972,7 @@
20704
20972
  if (!trimmedContent) {
20705
20973
  // Store template mode flag in metadata
20706
20974
  const updatedMetadata = {
20707
- ...requirements.metadata,
20975
+ ...requirements._metadata,
20708
20976
  templateMode: true,
20709
20977
  };
20710
20978
  // Add a general instruction about using structured templates
@@ -20714,21 +20982,21 @@
20714
20982
  `);
20715
20983
  return {
20716
20984
  ...this.appendToSystemMessage(requirements, templateModeInstruction, '\n\n'),
20717
- metadata: updatedMetadata,
20985
+ _metadata: updatedMetadata,
20718
20986
  };
20719
20987
  }
20720
20988
  // If content is provided, add the specific template instructions
20721
20989
  const templateSection = `Response Template: ${trimmedContent}`;
20722
20990
  // Store the template in metadata for potential programmatic access
20723
- const existingTemplates = ((_a = requirements.metadata) === null || _a === void 0 ? void 0 : _a.templates) || [];
20991
+ const existingTemplates = ((_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.templates) || [];
20724
20992
  const updatedMetadata = {
20725
- ...requirements.metadata,
20993
+ ...requirements._metadata,
20726
20994
  templates: [...existingTemplates, trimmedContent],
20727
20995
  templateMode: true,
20728
20996
  };
20729
20997
  return {
20730
20998
  ...this.appendToSystemMessage(requirements, templateSection, '\n\n'),
20731
- metadata: updatedMetadata,
20999
+ _metadata: updatedMetadata,
20732
21000
  };
20733
21001
  }
20734
21002
  }
@@ -21065,8 +21333,8 @@
21065
21333
  return this.appendToSystemMessage({
21066
21334
  ...requirements,
21067
21335
  tools: updatedTools,
21068
- metadata: {
21069
- ...requirements.metadata,
21336
+ _metadata: {
21337
+ ...requirements._metadata,
21070
21338
  useBrowser: true,
21071
21339
  },
21072
21340
  }, spaceTrim$1.spaceTrim(`
@@ -21295,8 +21563,8 @@
21295
21563
  return this.appendToSystemMessage({
21296
21564
  ...requirements,
21297
21565
  tools: updatedTools,
21298
- metadata: {
21299
- ...requirements.metadata,
21566
+ _metadata: {
21567
+ ...requirements._metadata,
21300
21568
  useEmail: content || true,
21301
21569
  },
21302
21570
  }, spaceTrim$1.spaceTrim((block) => `
@@ -21431,8 +21699,8 @@
21431
21699
  return this.appendToSystemMessage({
21432
21700
  ...requirements,
21433
21701
  tools: updatedTools,
21434
- metadata: {
21435
- ...requirements.metadata,
21702
+ _metadata: {
21703
+ ...requirements._metadata,
21436
21704
  useImageGenerator: content || true,
21437
21705
  },
21438
21706
  }, spaceTrim$1.spaceTrim(`
@@ -21723,8 +21991,8 @@
21723
21991
  return this.appendToSystemMessage({
21724
21992
  ...requirements,
21725
21993
  tools: updatedTools,
21726
- metadata: {
21727
- ...requirements.metadata,
21994
+ _metadata: {
21995
+ ...requirements._metadata,
21728
21996
  useSearchEngine: content || true,
21729
21997
  },
21730
21998
  }, spaceTrim$1.spaceTrim((block) => `
@@ -21872,8 +22140,8 @@
21872
22140
  return this.appendToSystemMessage({
21873
22141
  ...requirements,
21874
22142
  tools: updatedTools,
21875
- metadata: {
21876
- ...requirements.metadata,
22143
+ _metadata: {
22144
+ ...requirements._metadata,
21877
22145
  },
21878
22146
  }, spaceTrim$1.spaceTrim((block) => `
21879
22147
  Time and date context:
@@ -22457,14 +22725,42 @@
22457
22725
  }
22458
22726
 
22459
22727
  /**
22460
- * Creates agent model requirements using the new commitment system
22728
+ * Creates agent model requirements using the new commitment system.
22729
+ *
22461
22730
  * This function uses a reduce-like pattern where each commitment applies its changes
22462
- * to build the final requirements starting from a basic empty model
22731
+ * to build the final requirements starting from a basic empty model.
22463
22732
  *
22464
- * @public exported from `@promptbook/core`
22733
+ * @param agentSource - Agent source book to parse.
22734
+ * @param modelName - Optional override for the agent model name.
22735
+ * @param options - Additional options such as the agent reference resolver.
22736
+ *
22737
+ * @private @@@
22465
22738
  */
22466
- async function createAgentModelRequirementsWithCommitments(agentSource, modelName) {
22739
+ const COMMITMENTS_WITH_AGENT_REFERENCES = new Set(['FROM', 'IMPORT', 'IMPORTS', 'TEAM']);
22740
/**
 * Returns a safe fallback content when a resolver fails to transform a reference commitment.
 *
 * @param commitmentType - Commitment being resolved.
 * @param originalContent - Original unresolved commitment content.
 * @returns Fallback content that keeps requirement creation resilient.
 */
function getSafeReferenceCommitmentFallback(commitmentType, originalContent) {
    switch (commitmentType) {
        case 'FROM':
            return 'VOID';
        case 'IMPORT':
        case 'IMPORTS':
        case 'TEAM':
            return '';
        default:
            return originalContent;
    }
}
22756
+ /**
22757
+ * @@@
22758
+ *
22759
+ * @private @@@
22760
+ */
22761
+ async function createAgentModelRequirementsWithCommitments(agentSource, modelName, options) {
22467
22762
  var _a;
22763
+ const agentReferenceResolver = options === null || options === void 0 ? void 0 : options.agentReferenceResolver;
22468
22764
  // Parse the agent source to extract commitments
22469
22765
  const parseResult = parseAgentSourceWithCommitments(agentSource);
22470
22766
  // Apply DELETE filtering: remove prior commitments tagged by parameters targeted by DELETE/CANCEL/DISCARD/REMOVE
@@ -22501,8 +22797,8 @@
22501
22797
  // Store the agent name in metadata so commitments can access it
22502
22798
  requirements = {
22503
22799
  ...requirements,
22504
- metadata: {
22505
- ...requirements.metadata,
22800
+ _metadata: {
22801
+ ...requirements._metadata,
22506
22802
  agentName: parseResult.agentName,
22507
22803
  },
22508
22804
  };
@@ -22516,6 +22812,17 @@
22516
22812
  // Apply each commitment in order using reduce-like pattern
22517
22813
  for (let i = 0; i < filteredCommitments.length; i++) {
22518
22814
  const commitment = filteredCommitments[i];
22815
+ const isReferenceCommitment = Boolean(agentReferenceResolver && COMMITMENTS_WITH_AGENT_REFERENCES.has(commitment.type));
22816
+ let commitmentContent = commitment.content;
22817
+ if (isReferenceCommitment && agentReferenceResolver) {
22818
+ try {
22819
+ commitmentContent = await agentReferenceResolver.resolveCommitmentContent(commitment.type, commitment.content);
22820
+ }
22821
+ catch (error) {
22822
+ console.warn(`Failed to resolve commitment references for ${commitment.type}, falling back to safe defaults:`, error);
22823
+ commitmentContent = getSafeReferenceCommitmentFallback(commitment.type, commitment.content);
22824
+ }
22825
+ }
22519
22826
  // CLOSED commitment should work only if its the last commitment in the book
22520
22827
  if (commitment.type === 'CLOSED' && i !== filteredCommitments.length - 1) {
22521
22828
  continue;
@@ -22523,7 +22830,7 @@
22523
22830
  const definition = getCommitmentDefinition(commitment.type);
22524
22831
  if (definition) {
22525
22832
  try {
22526
- requirements = definition.applyToAgentModelRequirements(requirements, commitment.content);
22833
+ requirements = definition.applyToAgentModelRequirements(requirements, commitmentContent);
22527
22834
  }
22528
22835
  catch (error) {
22529
22836
  console.warn(`Failed to apply commitment ${commitment.type}:`, error);
@@ -22982,23 +23289,28 @@
22982
23289
  */
22983
23290
 
22984
23291
  /**
22985
- * Creates model requirements for an agent based on its source
23292
+ * Creates model requirements for an agent based on its source.
22986
23293
  *
22987
23294
  * There are 2 similar functions:
22988
23295
  * - `parseAgentSource` which is a lightweight parser for agent source, it parses basic information and its purpose is to be quick and synchronous. The commitments there are hardcoded.
22989
23296
  * - `createAgentModelRequirements` which is an asynchronous function that creates model requirements it applies each commitment one by one and works asynchronous.
22990
23297
  *
23298
+ * @param agentSource - Book describing the agent.
23299
+ * @param modelName - Optional override for the agent's model.
23300
+ * @param availableModels - Models that could fulfill the agent.
23301
+ * @param llmTools - Execution tools used when selecting a best model.
23302
+ * @param options - Optional hooks such as the agent reference resolver.
22991
23303
  * @public exported from `@promptbook/core`
22992
23304
  */
22993
- async function createAgentModelRequirements(agentSource, modelName, availableModels, llmTools) {
23305
+ async function createAgentModelRequirements(agentSource, modelName, availableModels, llmTools, options) {
22994
23306
  // If availableModels are provided and no specific modelName is given,
22995
23307
  // use preparePersona to select the best model
22996
23308
  if (availableModels && !modelName && llmTools) {
22997
23309
  const selectedModelName = await selectBestModelUsingPersona(agentSource, llmTools);
22998
- return createAgentModelRequirementsWithCommitments(agentSource, selectedModelName);
23310
+ return createAgentModelRequirementsWithCommitments(agentSource, selectedModelName, options);
22999
23311
  }
23000
23312
  // Use the new commitment-based system with provided or default model
23001
- return createAgentModelRequirementsWithCommitments(agentSource, modelName);
23313
+ return createAgentModelRequirementsWithCommitments(agentSource, modelName, options);
23002
23314
  }
23003
23315
  /**
23004
23316
  * Selects the best model using the preparePersona function
@@ -29551,6 +29863,64 @@
29551
29863
  */
29552
29864
 
29553
29865
  const DEFAULT_AGENT_KIT_MODEL_NAME = 'gpt-5.2';
29866
+ const DEFAULT_JSON_SCHEMA_NAME = 'StructuredOutput';
29867
+ /*
29868
+ TODO: Use or remove
29869
+ const EMPTY_JSON_SCHEMA: JsonSchemaDefinition['schema'] = {
29870
+ type: 'object',
29871
+ properties: {},
29872
+ required: [],
29873
+ additionalProperties: true,
29874
+ };
29875
+ */
29876
+ function buildJsonSchemaDefinition(jsonSchema) {
29877
+ var _a, _b, _c;
29878
+ const schema = (_a = jsonSchema === null || jsonSchema === void 0 ? void 0 : jsonSchema.schema) !== null && _a !== void 0 ? _a : {};
29879
+ return {
29880
+ type: 'json_schema',
29881
+ name: (_b = jsonSchema === null || jsonSchema === void 0 ? void 0 : jsonSchema.name) !== null && _b !== void 0 ? _b : DEFAULT_JSON_SCHEMA_NAME,
29882
+ strict: Boolean(jsonSchema === null || jsonSchema === void 0 ? void 0 : jsonSchema.strict),
29883
+ schema: {
29884
+ type: 'object',
29885
+ properties: ((_c = schema.properties) !== null && _c !== void 0 ? _c : {}),
29886
+ required: Array.isArray(schema.required) ? schema.required : [],
29887
+ additionalProperties: schema.additionalProperties === undefined ? true : Boolean(schema.additionalProperties),
29888
+ description: schema.description,
29889
+ },
29890
+ };
29891
+ }
29892
+ /**
29893
+ * Maps OpenAI `response_format` payloads to AgentKit output types so the runner can forward
29894
+ * structured-output preferences to OpenAI while still reusing the same AgentKit agent instance.
29895
+ *
29896
+ * @param responseFormat - The OpenAI `response_format` payload from the user request.
29897
+ * @returns An Agent output type compatible with the requested schema or `undefined` when no impact is required.
29898
+ * @private utility of Open AI
29899
+ */
29900
+ function mapResponseFormatToAgentOutputType(responseFormat) {
29901
+ if (!responseFormat) {
29902
+ return undefined;
29903
+ }
29904
+ if (typeof responseFormat === 'string') {
29905
+ if (responseFormat === 'text') {
29906
+ return 'text';
29907
+ }
29908
+ if (responseFormat === 'json_schema' || responseFormat === 'json_object') {
29909
+ return buildJsonSchemaDefinition();
29910
+ }
29911
+ return 'text';
29912
+ }
29913
+ switch (responseFormat.type) {
29914
+ case 'text':
29915
+ return 'text';
29916
+ case 'json_schema':
29917
+ return buildJsonSchemaDefinition(responseFormat.json_schema);
29918
+ case 'json_object':
29919
+ return buildJsonSchemaDefinition();
29920
+ default:
29921
+ return undefined;
29922
+ }
29923
+ }
29554
29924
  /**
29555
29925
  * Execution tools for OpenAI AgentKit (Agents SDK).
29556
29926
  *
@@ -29598,6 +29968,7 @@
29598
29968
  ...parameters,
29599
29969
  modelName: this.agentKitModelName,
29600
29970
  });
29971
+ const responseFormatOutputType = mapResponseFormatToAgentOutputType(modelRequirements.responseFormat);
29601
29972
  const preparedAgentKitAgent = await this.prepareAgentKitAgent({
29602
29973
  name: (prompt.title || 'Agent'),
29603
29974
  instructions: modelRequirements.systemMessage || '',
@@ -29609,6 +29980,7 @@
29609
29980
  prompt,
29610
29981
  rawPromptContent,
29611
29982
  onProgress,
29983
+ responseFormatOutputType,
29612
29984
  });
29613
29985
  }
29614
29986
  /**
@@ -29790,16 +30162,21 @@
29790
30162
  ...prompt.parameters,
29791
30163
  modelName: this.agentKitModelName,
29792
30164
  });
30165
+ const agentForRun = options.responseFormatOutputType !== undefined
30166
+ ? openAiAgentKitAgent.clone({
30167
+ outputType: options.responseFormatOutputType,
30168
+ })
30169
+ : openAiAgentKitAgent;
29793
30170
  const start = $getCurrentDate();
29794
30171
  let latestContent = '';
29795
30172
  const toolCalls = [];
29796
30173
  const toolCallIndexById = new Map();
29797
30174
  const inputItems = await this.buildAgentKitInputItems(prompt, rawPromptContent);
29798
30175
  const rawRequest = {
29799
- agentName: openAiAgentKitAgent.name,
30176
+ agentName: agentForRun.name,
29800
30177
  input: inputItems,
29801
30178
  };
29802
- const streamResult = await agents.run(openAiAgentKitAgent, inputItems, {
30179
+ const streamResult = await agents.run(agentForRun, inputItems, {
29803
30180
  stream: true,
29804
30181
  context: { parameters: prompt.parameters },
29805
30182
  });
@@ -30147,22 +30524,28 @@
30147
30524
  throw new Error('AgentLlmExecutionTools only supports chat prompts');
30148
30525
  }
30149
30526
  const modelRequirements = await this.getModelRequirements();
30527
+ const { _metadata, promptSuffix, ...sanitizedRequirements } = modelRequirements;
30150
30528
  const chatPrompt = prompt;
30151
30529
  let underlyingLlmResult;
30152
- // Create modified chat prompt with agent system message
30530
+ const chatPromptContentWithSuffix = promptSuffix
30531
+ ? `${chatPrompt.content}\n\n${promptSuffix}`
30532
+ : chatPrompt.content;
30153
30533
  const promptWithAgentModelRequirements = {
30154
30534
  ...chatPrompt,
30535
+ content: chatPromptContentWithSuffix,
30155
30536
  modelRequirements: {
30156
30537
  ...chatPrompt.modelRequirements,
30157
- ...modelRequirements,
30538
+ ...sanitizedRequirements,
30158
30539
  // Spread tools to convert readonly array to mutable
30159
- tools: modelRequirements.tools ? [...modelRequirements.tools] : chatPrompt.modelRequirements.tools,
30540
+ tools: sanitizedRequirements.tools
30541
+ ? [...sanitizedRequirements.tools]
30542
+ : chatPrompt.modelRequirements.tools,
30160
30543
  // Spread knowledgeSources to convert readonly array to mutable
30161
- knowledgeSources: modelRequirements.knowledgeSources
30162
- ? [...modelRequirements.knowledgeSources]
30544
+ knowledgeSources: sanitizedRequirements.knowledgeSources
30545
+ ? [...sanitizedRequirements.knowledgeSources]
30163
30546
  : undefined,
30164
30547
  // Prepend agent system message to existing system message
30165
- systemMessage: modelRequirements.systemMessage +
30548
+ systemMessage: sanitizedRequirements.systemMessage +
30166
30549
  (chatPrompt.modelRequirements.systemMessage
30167
30550
  ? `\n\n${chatPrompt.modelRequirements.systemMessage}`
30168
30551
  : ''),
@@ -30170,8 +30553,8 @@
30170
30553
  };
30171
30554
  console.log('!!!! promptWithAgentModelRequirements:', promptWithAgentModelRequirements);
30172
30555
  if (OpenAiAgentKitExecutionTools.isOpenAiAgentKitExecutionTools(this.options.llmTools)) {
30173
- const requirementsHash = cryptoJs.SHA256(JSON.stringify(modelRequirements)).toString();
30174
- const vectorStoreHash = cryptoJs.SHA256(JSON.stringify((_a = modelRequirements.knowledgeSources) !== null && _a !== void 0 ? _a : [])).toString();
30556
+ const requirementsHash = cryptoJs.SHA256(JSON.stringify(sanitizedRequirements)).toString();
30557
+ const vectorStoreHash = cryptoJs.SHA256(JSON.stringify((_a = sanitizedRequirements.knowledgeSources) !== null && _a !== void 0 ? _a : [])).toString();
30175
30558
  const cachedVectorStore = AgentLlmExecutionTools.vectorStoreCache.get(this.title);
30176
30559
  const cachedAgentKit = AgentLlmExecutionTools.agentKitAgentCache.get(this.title);
30177
30560
  let preparedAgentKit = this.options.assistantPreparationMode === 'external'
@@ -30198,7 +30581,7 @@
30198
30581
  agent: this.title,
30199
30582
  });
30200
30583
  }
30201
- if (!vectorStoreId && ((_b = modelRequirements.knowledgeSources) === null || _b === void 0 ? void 0 : _b.length)) {
30584
+ if (!vectorStoreId && ((_b = sanitizedRequirements.knowledgeSources) === null || _b === void 0 ? void 0 : _b.length)) {
30202
30585
  emitAssistantPreparationProgress({
30203
30586
  onProgress,
30204
30587
  prompt,
@@ -30214,9 +30597,9 @@
30214
30597
  });
30215
30598
  preparedAgentKit = await this.options.llmTools.prepareAgentKitAgent({
30216
30599
  name: this.title,
30217
- instructions: modelRequirements.systemMessage || '',
30218
- knowledgeSources: modelRequirements.knowledgeSources,
30219
- tools: modelRequirements.tools ? [...modelRequirements.tools] : undefined,
30600
+ instructions: sanitizedRequirements.systemMessage || '',
30601
+ knowledgeSources: sanitizedRequirements.knowledgeSources,
30602
+ tools: sanitizedRequirements.tools ? [...sanitizedRequirements.tools] : undefined,
30220
30603
  vectorStoreId,
30221
30604
  });
30222
30605
  }
@@ -30231,15 +30614,17 @@
30231
30614
  requirementsHash,
30232
30615
  vectorStoreId: preparedAgentKit.vectorStoreId,
30233
30616
  });
30617
+ const responseFormatOutputType = mapResponseFormatToAgentOutputType(promptWithAgentModelRequirements.modelRequirements.responseFormat);
30234
30618
  underlyingLlmResult = await this.options.llmTools.callChatModelStreamWithPreparedAgent({
30235
30619
  openAiAgentKitAgent: preparedAgentKit.agent,
30236
30620
  prompt: promptWithAgentModelRequirements,
30237
30621
  onProgress,
30622
+ responseFormatOutputType,
30238
30623
  });
30239
30624
  }
30240
30625
  else if (OpenAiAssistantExecutionTools.isOpenAiAssistantExecutionTools(this.options.llmTools)) {
30241
30626
  // ... deprecated path ...
30242
- const requirementsHash = cryptoJs.SHA256(JSON.stringify(modelRequirements)).toString();
30627
+ const requirementsHash = cryptoJs.SHA256(JSON.stringify(sanitizedRequirements)).toString();
30243
30628
  const cached = AgentLlmExecutionTools.assistantCache.get(this.title);
30244
30629
  let assistant;
30245
30630
  if (this.options.assistantPreparationMode === 'external') {
@@ -30281,9 +30666,9 @@
30281
30666
  assistant = await this.options.llmTools.updateAssistant({
30282
30667
  assistantId: cached.assistantId,
30283
30668
  name: this.title,
30284
- instructions: modelRequirements.systemMessage,
30285
- knowledgeSources: modelRequirements.knowledgeSources,
30286
- tools: modelRequirements.tools ? [...modelRequirements.tools] : undefined,
30669
+ instructions: sanitizedRequirements.systemMessage,
30670
+ knowledgeSources: sanitizedRequirements.knowledgeSources,
30671
+ tools: sanitizedRequirements.tools ? [...sanitizedRequirements.tools] : undefined,
30287
30672
  });
30288
30673
  AgentLlmExecutionTools.assistantCache.set(this.title, {
30289
30674
  assistantId: assistant.assistantId,
@@ -30306,9 +30691,9 @@
30306
30691
  });
30307
30692
  assistant = await this.options.llmTools.createNewAssistant({
30308
30693
  name: this.title,
30309
- instructions: modelRequirements.systemMessage,
30310
- knowledgeSources: modelRequirements.knowledgeSources,
30311
- tools: modelRequirements.tools ? [...modelRequirements.tools] : undefined,
30694
+ instructions: sanitizedRequirements.systemMessage,
30695
+ knowledgeSources: sanitizedRequirements.knowledgeSources,
30696
+ tools: sanitizedRequirements.tools ? [...sanitizedRequirements.tools] : undefined,
30312
30697
  /*
30313
30698
  !!!
30314
30699
  metadata: {
@@ -30350,13 +30735,19 @@
30350
30735
  }
30351
30736
  }
30352
30737
  let content = underlyingLlmResult.content;
30353
- // Note: Cleanup the AI artifacts from the content
30354
- content = humanizeAiText(content);
30355
- // Note: Make sure the content is Promptbook-like
30356
- content = promptbookifyAiText(content);
30738
+ if (typeof content === 'string') {
30739
+ // Note: Cleanup the AI artifacts from the content
30740
+ content = humanizeAiText(content);
30741
+ // Note: Make sure the content is Promptbook-like
30742
+ content = promptbookifyAiText(content);
30743
+ }
30744
+ else {
30745
+ // TODO: Maybe deep `humanizeAiText` + `promptbookifyAiText` inside of the object
30746
+ content = JSON.stringify(content);
30747
+ }
30357
30748
  const agentResult = {
30358
30749
  ...underlyingLlmResult,
30359
- content,
30750
+ content: content,
30360
30751
  modelName: this.modelName,
30361
30752
  };
30362
30753
  return agentResult;
@@ -30545,7 +30936,6 @@
30545
30936
  * Note: This method also implements the learning mechanism
30546
30937
  */
30547
30938
  async callChatModelStream(prompt, onProgress) {
30548
- var _a;
30549
30939
  // [1] Check if the user is asking the same thing as in the samples
30550
30940
  const modelRequirements = await this.getModelRequirements();
30551
30941
  if (modelRequirements.samples) {
@@ -30593,7 +30983,7 @@
30593
30983
  if (result.rawResponse && 'sample' in result.rawResponse) {
30594
30984
  return result;
30595
30985
  }
30596
- if ((_a = modelRequirements.metadata) === null || _a === void 0 ? void 0 : _a.isClosed) {
30986
+ if (modelRequirements.isClosed) {
30597
30987
  return result;
30598
30988
  }
30599
30989
  // Note: [0] Notify start of self-learning