@promptbook/wizard 0.110.0-8 → 0.110.0-9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (24)
  1. package/esm/index.es.js +432 -87
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/components.index.d.ts +2 -0
  4. package/esm/typings/src/_packages/types.index.d.ts +4 -0
  5. package/esm/typings/src/book-2.0/agent-source/AgentModelRequirements.d.ts +22 -21
  6. package/esm/typings/src/book-components/Chat/Chat/ChatInputArea.d.ts +1 -0
  7. package/esm/typings/src/book-components/Chat/Chat/ChatMessageItem.d.ts +4 -0
  8. package/esm/typings/src/book-components/Chat/Chat/ChatMessageList.d.ts +1 -0
  9. package/esm/typings/src/book-components/Chat/Chat/ChatProps.d.ts +15 -0
  10. package/esm/typings/src/book-components/Chat/Chat/ChatSoundToggle.d.ts +19 -0
  11. package/esm/typings/src/book-components/Chat/LlmChat/LlmChatProps.d.ts +6 -0
  12. package/esm/typings/src/commitments/_base/BaseCommitmentDefinition.d.ts +9 -0
  13. package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.test.d.ts +1 -0
  14. package/esm/typings/src/llm-providers/openai/OpenAiAgentKitExecutionTools.d.ts +39 -0
  15. package/esm/typings/src/types/LlmToolDefinition.d.ts +1 -0
  16. package/esm/typings/src/types/ModelRequirements.d.ts +9 -0
  17. package/esm/typings/src/utils/DEFAULT_THINKING_MESSAGES.d.ts +8 -0
  18. package/esm/typings/src/utils/knowledge/inlineKnowledgeSource.d.ts +38 -0
  19. package/esm/typings/src/utils/knowledge/inlineKnowledgeSource.test.d.ts +1 -0
  20. package/esm/typings/src/utils/language/getBrowserPreferredSpeechRecognitionLanguage.d.ts +35 -0
  21. package/esm/typings/src/version.d.ts +1 -1
  22. package/package.json +2 -2
  23. package/umd/index.umd.js +432 -87
  24. package/umd/index.umd.js.map +1 -1
package/esm/index.es.js CHANGED
@@ -38,7 +38,7 @@ const BOOK_LANGUAGE_VERSION = '2.0.0';
38
38
  * @generated
39
39
  * @see https://github.com/webgptorg/promptbook
40
40
  */
41
- const PROMPTBOOK_ENGINE_VERSION = '0.110.0-8';
41
+ const PROMPTBOOK_ENGINE_VERSION = '0.110.0-9';
42
42
  /**
43
43
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
44
44
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -3776,6 +3776,66 @@ const OPENAI_MODELS = exportJson({
3776
3776
  },
3777
3777
  /**/
3778
3778
  /**/
3779
+ {
3780
+ modelVariant: 'CHAT',
3781
+ modelTitle: 'gpt-5.2-codex',
3782
+ modelName: 'gpt-5.2-codex',
3783
+ modelDescription: 'High-capability Codex variant tuned for agentic code generation with large contexts and reasoning effort controls. Ideal for long-horizon coding workflows and multi-step reasoning.',
3784
+ pricing: {
3785
+ prompt: pricing(`$1.75 / 1M tokens`),
3786
+ output: pricing(`$14.00 / 1M tokens`),
3787
+ },
3788
+ },
3789
+ /**/
3790
+ /**/
3791
+ {
3792
+ modelVariant: 'CHAT',
3793
+ modelTitle: 'gpt-5.1-codex-max',
3794
+ modelName: 'gpt-5.1-codex-max',
3795
+ modelDescription: 'Premium GPT-5.1 Codex flavor that mirrors gpt-5.1 in capability and pricing while adding Codex tooling optimizations.',
3796
+ pricing: {
3797
+ prompt: pricing(`$1.25 / 1M tokens`),
3798
+ output: pricing(`$10.00 / 1M tokens`),
3799
+ },
3800
+ },
3801
+ /**/
3802
+ /**/
3803
+ {
3804
+ modelVariant: 'CHAT',
3805
+ modelTitle: 'gpt-5.1-codex',
3806
+ modelName: 'gpt-5.1-codex',
3807
+ modelDescription: 'Core GPT-5.1 Codex model focused on agentic coding tasks with a balanced trade-off between reasoning and cost.',
3808
+ pricing: {
3809
+ prompt: pricing(`$1.25 / 1M tokens`),
3810
+ output: pricing(`$10.00 / 1M tokens`),
3811
+ },
3812
+ },
3813
+ /**/
3814
+ /**/
3815
+ {
3816
+ modelVariant: 'CHAT',
3817
+ modelTitle: 'gpt-5.1-codex-mini',
3818
+ modelName: 'gpt-5.1-codex-mini',
3819
+ modelDescription: 'Compact, cost-effective GPT-5.1 Codex variant with a smaller context window ideal for cheap assistant iterations that still require coding awareness.',
3820
+ pricing: {
3821
+ prompt: pricing(`$0.25 / 1M tokens`),
3822
+ output: pricing(`$2.00 / 1M tokens`),
3823
+ },
3824
+ },
3825
+ /**/
3826
+ /**/
3827
+ {
3828
+ modelVariant: 'CHAT',
3829
+ modelTitle: 'gpt-5-codex',
3830
+ modelName: 'gpt-5-codex',
3831
+ modelDescription: 'Legacy GPT-5 Codex model built for agentic coding workloads with the same pricing as GPT-5 and a focus on stability.',
3832
+ pricing: {
3833
+ prompt: pricing(`$1.25 / 1M tokens`),
3834
+ output: pricing(`$10.00 / 1M tokens`),
3835
+ },
3836
+ },
3837
+ /**/
3838
+ /**/
3779
3839
  {
3780
3840
  modelVariant: 'CHAT',
3781
3841
  modelTitle: 'gpt-5-mini',
@@ -8671,6 +8731,32 @@ function isUnsupportedParameterError(error) {
8671
8731
  errorMessage.includes('does not support'));
8672
8732
  }
8673
8733
 
8734
+ /**
8735
+ * Provides access to the structured clone implementation when available.
8736
+ */
8737
+ function getStructuredCloneFunction() {
8738
+ return globalThis.structuredClone;
8739
+ }
8740
+ /**
8741
+ * Checks whether the prompt is a chat prompt that carries file attachments.
8742
+ */
8743
+ function hasChatPromptFiles(prompt) {
8744
+ return 'files' in prompt && Array.isArray(prompt.files);
8745
+ }
8746
+ /**
8747
+ * Creates a deep copy of the prompt while keeping attached files intact when structured clone is not available.
8748
+ */
8749
+ function clonePromptPreservingFiles(prompt) {
8750
+ const structuredCloneFn = getStructuredCloneFunction();
8751
+ if (typeof structuredCloneFn === 'function') {
8752
+ return structuredCloneFn(prompt);
8753
+ }
8754
+ const clonedPrompt = JSON.parse(JSON.stringify(prompt));
8755
+ if (hasChatPromptFiles(prompt)) {
8756
+ clonedPrompt.files = prompt.files;
8757
+ }
8758
+ return clonedPrompt;
8759
+ }
8674
8760
  /**
8675
8761
  * Execution Tools for calling OpenAI API or other OpenAI compatible provider
8676
8762
  *
@@ -8755,7 +8841,7 @@ class OpenAiCompatibleExecutionTools {
8755
8841
  */
8756
8842
  async callChatModelStream(prompt, onProgress) {
8757
8843
  // Deep clone prompt and modelRequirements to avoid mutation across calls
8758
- const clonedPrompt = JSON.parse(JSON.stringify(prompt));
8844
+ const clonedPrompt = clonePromptPreservingFiles(prompt);
8759
8845
  // Use local Set for retried parameters to ensure independence and thread safety
8760
8846
  const retriedUnsupportedParameters = new Set();
8761
8847
  return this.callChatModelWithRetry(clonedPrompt, clonedPrompt.modelRequirements, [], retriedUnsupportedParameters, onProgress);
@@ -8782,7 +8868,10 @@ class OpenAiCompatibleExecutionTools {
8782
8868
  // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
8783
8869
  // <- Note: [🧆]
8784
8870
  }; // <- TODO: [💩] Guard here types better
8785
- if (format === 'JSON') {
8871
+ if (currentModelRequirements.responseFormat !== undefined) {
8872
+ modelSettings.response_format = currentModelRequirements.responseFormat;
8873
+ }
8874
+ else if (format === 'JSON') {
8786
8875
  modelSettings.response_format = {
8787
8876
  type: 'json_object',
8788
8877
  };
@@ -10124,6 +10213,136 @@ class OpenAiExecutionTools extends OpenAiCompatibleExecutionTools {
10124
10213
  }
10125
10214
  }
10126
10215
 
10216
+ /**
10217
+ * @@@
10218
+ *
10219
+ * @private thing of inline knowledge
10220
+ */
10221
+ const INLINE_KNOWLEDGE_BASE_NAME = 'inline-knowledge';
10222
+ /**
10223
+ * @@@
10224
+ *
10225
+ * @private thing of inline knowledge
10226
+ */
10227
+ const INLINE_KNOWLEDGE_EXTENSION = '.txt';
10228
+ /**
10229
+ * @@@
10230
+ *
10231
+ * @private thing of inline knowledge
10232
+ */
10233
+ const DATA_URL_PREFIX = 'data:';
10234
+ /**
10235
+ * @@@
10236
+ *
10237
+ * @private thing of inline knowledge
10238
+ */
10239
+ function getFirstNonEmptyLine(content) {
10240
+ const lines = content.split(/\r?\n/);
10241
+ for (const line of lines) {
10242
+ const trimmed = line.trim();
10243
+ if (trimmed) {
10244
+ return trimmed;
10245
+ }
10246
+ }
10247
+ return null;
10248
+ }
10249
+ /**
10250
+ * @@@
10251
+ *
10252
+ * @private thing of inline knowledge
10253
+ */
10254
+ function deriveBaseFilename(content) {
10255
+ const firstLine = getFirstNonEmptyLine(content);
10256
+ if (!firstLine) {
10257
+ return INLINE_KNOWLEDGE_BASE_NAME;
10258
+ }
10259
+ const normalized = normalizeToKebabCase(firstLine);
10260
+ return normalized || INLINE_KNOWLEDGE_BASE_NAME;
10261
+ }
10262
+ /**
10263
+ * Creates a data URL that represents the inline knowledge content as a text file.
10264
+ *
10265
+ * @private thing of inline knowledge
10266
+ */
10267
+ function createInlineKnowledgeSourceFile(content) {
10268
+ const trimmedContent = content.trim();
10269
+ const baseName = deriveBaseFilename(trimmedContent);
10270
+ const filename = `${baseName}${INLINE_KNOWLEDGE_EXTENSION}`;
10271
+ const mimeType = 'text/plain';
10272
+ const base64 = Buffer.from(trimmedContent, 'utf-8').toString('base64');
10273
+ const encodedFilename = encodeURIComponent(filename);
10274
+ const url = `${DATA_URL_PREFIX}${mimeType};name=${encodedFilename};charset=utf-8;base64,${base64}`;
10275
+ return {
10276
+ filename,
10277
+ mimeType,
10278
+ url,
10279
+ };
10280
+ }
10281
+ /**
10282
+ * Checks whether the provided source string is a data URL that can be decoded.
10283
+ *
10284
+ * @private thing of inline knowledge
10285
+ */
10286
+ function isDataUrlKnowledgeSource(source) {
10287
+ return typeof source === 'string' && source.startsWith(DATA_URL_PREFIX);
10288
+ }
10289
+ /**
10290
+ * Parses a data URL-based knowledge source into its raw buffer, filename, and MIME type.
10291
+ *
10292
+ * @private thing of inline knowledge
10293
+ */
10294
+ function parseDataUrlKnowledgeSource(source) {
10295
+ if (!isDataUrlKnowledgeSource(source)) {
10296
+ return null;
10297
+ }
10298
+ const commaIndex = source.indexOf(',');
10299
+ if (commaIndex === -1) {
10300
+ return null;
10301
+ }
10302
+ const header = source.slice(DATA_URL_PREFIX.length, commaIndex);
10303
+ const payload = source.slice(commaIndex + 1);
10304
+ const tokens = header.split(';');
10305
+ const mediaType = tokens[0] || 'text/plain';
10306
+ let filename = `${INLINE_KNOWLEDGE_BASE_NAME}${INLINE_KNOWLEDGE_EXTENSION}`;
10307
+ let isBase64 = false;
10308
+ for (let i = 1; i < tokens.length; i++) {
10309
+ const token = tokens[i];
10310
+ if (!token) {
10311
+ continue;
10312
+ }
10313
+ if (token.toLowerCase() === 'base64') {
10314
+ isBase64 = true;
10315
+ continue;
10316
+ }
10317
+ const [key, value] = token.split('=');
10318
+ if (key === 'name' && value !== undefined) {
10319
+ try {
10320
+ filename = decodeURIComponent(value);
10321
+ }
10322
+ catch (_a) {
10323
+ filename = value;
10324
+ }
10325
+ }
10326
+ }
10327
+ if (!isBase64) {
10328
+ return null;
10329
+ }
10330
+ try {
10331
+ const buffer = Buffer.from(payload, 'base64');
10332
+ return {
10333
+ buffer,
10334
+ filename,
10335
+ mimeType: mediaType,
10336
+ };
10337
+ }
10338
+ catch (_b) {
10339
+ return null;
10340
+ }
10341
+ }
10342
+ /**
10343
+ * Note: [💞] Ignore a discrepancy between file name and entity name
10344
+ */
10345
+
10127
10346
  const DEFAULT_KNOWLEDGE_SOURCE_DOWNLOAD_TIMEOUT_MS = 30000;
10128
10347
  const DEFAULT_KNOWLEDGE_SOURCE_UPLOAD_TIMEOUT_MS = 900000;
10129
10348
  const VECTOR_STORE_PROGRESS_LOG_INTERVAL_MIN_MS = 15000;
@@ -10760,7 +10979,9 @@ class OpenAiVectorStoreHandler extends OpenAiExecutionTools {
10760
10979
  const processingStartedAtMs = Date.now();
10761
10980
  for (const [index, source] of knowledgeSources.entries()) {
10762
10981
  try {
10763
- const sourceType = source.startsWith('http') || source.startsWith('https') ? 'url' : 'file';
10982
+ const isDataUrl = isDataUrlKnowledgeSource(source);
10983
+ const isHttp = source.startsWith('http://') || source.startsWith('https://');
10984
+ const sourceType = isDataUrl ? 'data_url' : isHttp ? 'url' : 'file';
10764
10985
  if (this.options.isVerbose) {
10765
10986
  console.info('[🤰]', 'Processing knowledge source', {
10766
10987
  index: index + 1,
@@ -10770,8 +10991,27 @@ class OpenAiVectorStoreHandler extends OpenAiExecutionTools {
10770
10991
  logLabel,
10771
10992
  });
10772
10993
  }
10773
- // Check if it's a URL
10774
- if (source.startsWith('http://') || source.startsWith('https://')) {
10994
+ if (isDataUrl) {
10995
+ const parsed = parseDataUrlKnowledgeSource(source);
10996
+ if (!parsed) {
10997
+ skippedSources.push({ source, reason: 'invalid_data_url' });
10998
+ if (this.options.isVerbose) {
10999
+ console.info('[🤰]', 'Skipping knowledge source (invalid data URL)', {
11000
+ source,
11001
+ sourceType,
11002
+ logLabel,
11003
+ });
11004
+ }
11005
+ continue;
11006
+ }
11007
+ const dataUrlFile = new File([parsed.buffer], parsed.filename, {
11008
+ type: parsed.mimeType,
11009
+ });
11010
+ fileStreams.push(dataUrlFile);
11011
+ totalBytes += parsed.buffer.length;
11012
+ continue;
11013
+ }
11014
+ if (isHttp) {
10775
11015
  const downloadResult = await this.downloadKnowledgeSourceFile({
10776
11016
  source,
10777
11017
  timeoutMs: downloadTimeoutMs,
@@ -17175,11 +17415,14 @@ const _FormattedBookInMarkdownTranspilerRegistration = $bookTranspilersRegister.
17175
17415
  function createEmptyAgentModelRequirements() {
17176
17416
  return {
17177
17417
  systemMessage: '',
17418
+ promptSuffix: '',
17178
17419
  // modelName: 'gpt-5',
17179
17420
  modelName: 'gemini-2.5-flash-lite',
17180
17421
  temperature: 0.7,
17181
17422
  topP: 0.9,
17182
17423
  topK: 50,
17424
+ parentAgentUrl: null,
17425
+ isClosed: false,
17183
17426
  };
17184
17427
  }
17185
17428
  /**
@@ -17296,6 +17539,28 @@ class BaseCommitmentDefinition {
17296
17539
  return currentMessage + separator + content;
17297
17540
  });
17298
17541
  }
17542
+ /**
17543
+ * Helper method to create a new requirements object with updated prompt suffix
17544
+ */
17545
+ updatePromptSuffix(requirements, contentUpdate) {
17546
+ const newSuffix = typeof contentUpdate === 'string' ? contentUpdate : contentUpdate(requirements.promptSuffix);
17547
+ return {
17548
+ ...requirements,
17549
+ promptSuffix: newSuffix,
17550
+ };
17551
+ }
17552
+ /**
17553
+ * Helper method to append content to the prompt suffix
17554
+ * Default separator is a single newline for bullet lists.
17555
+ */
17556
+ appendToPromptSuffix(requirements, content, separator = '\n') {
17557
+ return this.updatePromptSuffix(requirements, (currentSuffix) => {
17558
+ if (!currentSuffix.trim()) {
17559
+ return content;
17560
+ }
17561
+ return `${currentSuffix}${separator}${content}`;
17562
+ });
17563
+ }
17299
17564
  /**
17300
17565
  * Helper method to add a comment section to the system message
17301
17566
  * Comments are lines starting with # that will be removed from the final system message
@@ -17473,13 +17738,9 @@ class ClosedCommitmentDefinition extends BaseCommitmentDefinition {
17473
17738
  `);
17474
17739
  }
17475
17740
  applyToAgentModelRequirements(requirements, _content) {
17476
- const updatedMetadata = {
17477
- ...requirements.metadata,
17478
- isClosed: true,
17479
- };
17480
17741
  return {
17481
17742
  ...requirements,
17482
- metadata: updatedMetadata,
17743
+ isClosed: true,
17483
17744
  };
17484
17745
  }
17485
17746
  }
@@ -17757,12 +18018,12 @@ class DictionaryCommitmentDefinition extends BaseCommitmentDefinition {
17757
18018
  return requirements;
17758
18019
  }
17759
18020
  // Get existing dictionary entries from metadata
17760
- const existingDictionary = ((_a = requirements.metadata) === null || _a === void 0 ? void 0 : _a.DICTIONARY) || '';
18021
+ const existingDictionary = ((_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.DICTIONARY) || '';
17761
18022
  // Merge the new dictionary entry with existing entries
17762
18023
  const mergedDictionary = existingDictionary ? `${existingDictionary}\n${trimmedContent}` : trimmedContent;
17763
18024
  // Store the merged dictionary in metadata for debugging and inspection
17764
18025
  const updatedMetadata = {
17765
- ...requirements.metadata,
18026
+ ...requirements._metadata,
17766
18027
  DICTIONARY: mergedDictionary,
17767
18028
  };
17768
18029
  // Create the dictionary section for the system message
@@ -17770,7 +18031,7 @@ class DictionaryCommitmentDefinition extends BaseCommitmentDefinition {
17770
18031
  const dictionarySection = `# DICTIONARY\n${mergedDictionary}`;
17771
18032
  return {
17772
18033
  ...this.appendToSystemMessage(requirements, dictionarySection),
17773
- metadata: updatedMetadata,
18034
+ _metadata: updatedMetadata,
17774
18035
  };
17775
18036
  }
17776
18037
  }
@@ -17910,10 +18171,7 @@ class FromCommitmentDefinition extends BaseCommitmentDefinition {
17910
18171
  applyToAgentModelRequirements(requirements, content) {
17911
18172
  const trimmedContent = content.trim();
17912
18173
  if (!trimmedContent) {
17913
- return {
17914
- ...requirements,
17915
- parentAgentUrl: undefined,
17916
- };
18174
+ return requirements;
17917
18175
  }
17918
18176
  if (trimmedContent.toUpperCase() === 'VOID' ||
17919
18177
  trimmedContent.toUpperCase() === 'NULL' ||
@@ -18225,9 +18483,13 @@ class KnowledgeCommitmentDefinition extends BaseCommitmentDefinition {
18225
18483
  return this.appendToSystemMessage(updatedRequirements, knowledgeInfo, '\n\n');
18226
18484
  }
18227
18485
  else {
18228
- // Direct text knowledge - add to system message
18229
- const knowledgeSection = `Knowledge: ${trimmedContent}`;
18230
- return this.appendToSystemMessage(requirements, knowledgeSection, '\n\n');
18486
+ const inlineSource = createInlineKnowledgeSourceFile(trimmedContent);
18487
+ const updatedRequirements = {
18488
+ ...requirements,
18489
+ knowledgeSources: [...(requirements.knowledgeSources || []), inlineSource.url],
18490
+ };
18491
+ const knowledgeInfo = `Knowledge Source Inline: ${inlineSource.filename} (derived from inline content and processed for retrieval during chat)`;
18492
+ return this.appendToSystemMessage(updatedRequirements, knowledgeInfo, '\n\n');
18231
18493
  }
18232
18494
  }
18233
18495
  }
@@ -18474,16 +18736,16 @@ class AgentMessageCommitmentDefinition extends BaseCommitmentDefinition {
18474
18736
  // and typically doesn't need to be added to the system prompt or model requirements directly.
18475
18737
  // It is extracted separately for the chat interface.
18476
18738
  var _a;
18477
- const pendingUserMessage = (_a = requirements.metadata) === null || _a === void 0 ? void 0 : _a.pendingUserMessage;
18739
+ const pendingUserMessage = (_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.pendingUserMessage;
18478
18740
  if (pendingUserMessage) {
18479
18741
  const newSample = { question: pendingUserMessage, answer: content };
18480
18742
  const newSamples = [...(requirements.samples || []), newSample];
18481
- const newMetadata = { ...requirements.metadata };
18743
+ const newMetadata = { ...requirements._metadata };
18482
18744
  delete newMetadata.pendingUserMessage;
18483
18745
  return {
18484
18746
  ...requirements,
18485
18747
  samples: newSamples,
18486
- metadata: newMetadata,
18748
+ _metadata: newMetadata,
18487
18749
  };
18488
18750
  }
18489
18751
  return requirements;
@@ -18731,8 +18993,8 @@ class UserMessageCommitmentDefinition extends BaseCommitmentDefinition {
18731
18993
  applyToAgentModelRequirements(requirements, content) {
18732
18994
  return {
18733
18995
  ...requirements,
18734
- metadata: {
18735
- ...requirements.metadata,
18996
+ _metadata: {
18997
+ ...requirements._metadata,
18736
18998
  pendingUserMessage: content,
18737
18999
  },
18738
19000
  };
@@ -19590,11 +19852,7 @@ class NoteCommitmentDefinition extends BaseCommitmentDefinition {
19590
19852
  if (trimmedContent === '') {
19591
19853
  return requirements;
19592
19854
  }
19593
- // Return requirements with updated notes but no changes to system message
19594
- return {
19595
- ...requirements,
19596
- notes: [...(requirements.notes || []), trimmedContent],
19597
- };
19855
+ return requirements;
19598
19856
  }
19599
19857
  }
19600
19858
  /**
@@ -19656,12 +19914,12 @@ class OpenCommitmentDefinition extends BaseCommitmentDefinition {
19656
19914
  // Since OPEN is default, we can just ensure isClosed is false
19657
19915
  // But to be explicit we can set it
19658
19916
  const updatedMetadata = {
19659
- ...requirements.metadata,
19917
+ ...requirements._metadata,
19660
19918
  isClosed: false,
19661
19919
  };
19662
19920
  return {
19663
19921
  ...requirements,
19664
- metadata: updatedMetadata,
19922
+ _metadata: updatedMetadata,
19665
19923
  };
19666
19924
  }
19667
19925
  }
@@ -19742,7 +20000,7 @@ class PersonaCommitmentDefinition extends BaseCommitmentDefinition {
19742
20000
  return requirements;
19743
20001
  }
19744
20002
  // Get existing persona content from metadata
19745
- const existingPersonaContent = ((_a = requirements.metadata) === null || _a === void 0 ? void 0 : _a.PERSONA) || '';
20003
+ const existingPersonaContent = ((_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.PERSONA) || '';
19746
20004
  // Merge the new content with existing persona content
19747
20005
  // When multiple PERSONA commitments exist, they are merged into one
19748
20006
  const mergedPersonaContent = existingPersonaContent
@@ -19750,12 +20008,12 @@ class PersonaCommitmentDefinition extends BaseCommitmentDefinition {
19750
20008
  : trimmedContent;
19751
20009
  // Store the merged persona content in metadata for debugging and inspection
19752
20010
  const updatedMetadata = {
19753
- ...requirements.metadata,
20011
+ ...requirements._metadata,
19754
20012
  PERSONA: mergedPersonaContent,
19755
20013
  };
19756
20014
  // Get the agent name from metadata (which should contain the first line of agent source)
19757
20015
  // If not available, extract from current system message as fallback
19758
- let agentName = (_b = requirements.metadata) === null || _b === void 0 ? void 0 : _b.agentName;
20016
+ let agentName = (_b = requirements._metadata) === null || _b === void 0 ? void 0 : _b.agentName;
19759
20017
  if (!agentName) {
19760
20018
  // Fallback: extract from current system message
19761
20019
  const currentMessage = requirements.systemMessage.trim();
@@ -19802,7 +20060,7 @@ class PersonaCommitmentDefinition extends BaseCommitmentDefinition {
19802
20060
  return {
19803
20061
  ...requirements,
19804
20062
  systemMessage: newSystemMessage,
19805
- metadata: updatedMetadata,
20063
+ _metadata: updatedMetadata,
19806
20064
  };
19807
20065
  }
19808
20066
  }
@@ -19885,7 +20143,16 @@ class RuleCommitmentDefinition extends BaseCommitmentDefinition {
19885
20143
  }
19886
20144
  // Add rule to the system message
19887
20145
  const ruleSection = `Rule: ${trimmedContent}`;
19888
- return this.appendToSystemMessage(requirements, ruleSection, '\n\n');
20146
+ const requirementsWithRule = this.appendToSystemMessage(requirements, ruleSection, '\n\n');
20147
+ const ruleLines = trimmedContent
20148
+ .split(/\r?\n/)
20149
+ .map((line) => line.trim())
20150
+ .filter(Boolean)
20151
+ .map((line) => `- ${line}`);
20152
+ if (ruleLines.length === 0) {
20153
+ return requirementsWithRule;
20154
+ }
20155
+ return this.appendToPromptSuffix(requirementsWithRule, ruleLines.join('\n'));
19889
20156
  }
19890
20157
  }
19891
20158
  /**
@@ -20391,7 +20658,7 @@ class TeamCommitmentDefinition extends BaseCommitmentDefinition {
20391
20658
  if (teammates.length === 0) {
20392
20659
  return requirements;
20393
20660
  }
20394
- const agentName = ((_a = requirements.metadata) === null || _a === void 0 ? void 0 : _a.agentName) || 'Agent';
20661
+ const agentName = ((_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.agentName) || 'Agent';
20395
20662
  const teamEntries = teammates.map((teammate) => ({
20396
20663
  toolName: createTeamToolName(teammate.url),
20397
20664
  teammate,
@@ -20431,7 +20698,7 @@ class TeamCommitmentDefinition extends BaseCommitmentDefinition {
20431
20698
  },
20432
20699
  });
20433
20700
  }
20434
- const existingTeammates = ((_b = requirements.metadata) === null || _b === void 0 ? void 0 : _b.teammates) || [];
20701
+ const existingTeammates = ((_b = requirements._metadata) === null || _b === void 0 ? void 0 : _b.teammates) || [];
20435
20702
  const updatedTeammates = [...existingTeammates];
20436
20703
  for (const entry of teamEntries) {
20437
20704
  if (updatedTeammates.some((existing) => existing.url === entry.teammate.url)) {
@@ -20460,8 +20727,8 @@ class TeamCommitmentDefinition extends BaseCommitmentDefinition {
20460
20727
  return this.appendToSystemMessage({
20461
20728
  ...requirements,
20462
20729
  tools: updatedTools,
20463
- metadata: {
20464
- ...requirements.metadata,
20730
+ _metadata: {
20731
+ ...requirements._metadata,
20465
20732
  teammates: updatedTeammates,
20466
20733
  },
20467
20734
  }, teamSystemMessage);
@@ -20693,7 +20960,7 @@ class TemplateCommitmentDefinition extends BaseCommitmentDefinition {
20693
20960
  if (!trimmedContent) {
20694
20961
  // Store template mode flag in metadata
20695
20962
  const updatedMetadata = {
20696
- ...requirements.metadata,
20963
+ ...requirements._metadata,
20697
20964
  templateMode: true,
20698
20965
  };
20699
20966
  // Add a general instruction about using structured templates
@@ -20703,21 +20970,21 @@ class TemplateCommitmentDefinition extends BaseCommitmentDefinition {
20703
20970
  `);
20704
20971
  return {
20705
20972
  ...this.appendToSystemMessage(requirements, templateModeInstruction, '\n\n'),
20706
- metadata: updatedMetadata,
20973
+ _metadata: updatedMetadata,
20707
20974
  };
20708
20975
  }
20709
20976
  // If content is provided, add the specific template instructions
20710
20977
  const templateSection = `Response Template: ${trimmedContent}`;
20711
20978
  // Store the template in metadata for potential programmatic access
20712
- const existingTemplates = ((_a = requirements.metadata) === null || _a === void 0 ? void 0 : _a.templates) || [];
20979
+ const existingTemplates = ((_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.templates) || [];
20713
20980
  const updatedMetadata = {
20714
- ...requirements.metadata,
20981
+ ...requirements._metadata,
20715
20982
  templates: [...existingTemplates, trimmedContent],
20716
20983
  templateMode: true,
20717
20984
  };
20718
20985
  return {
20719
20986
  ...this.appendToSystemMessage(requirements, templateSection, '\n\n'),
20720
- metadata: updatedMetadata,
20987
+ _metadata: updatedMetadata,
20721
20988
  };
20722
20989
  }
20723
20990
  }
@@ -21054,8 +21321,8 @@ class UseBrowserCommitmentDefinition extends BaseCommitmentDefinition {
21054
21321
  return this.appendToSystemMessage({
21055
21322
  ...requirements,
21056
21323
  tools: updatedTools,
21057
- metadata: {
21058
- ...requirements.metadata,
21324
+ _metadata: {
21325
+ ...requirements._metadata,
21059
21326
  useBrowser: true,
21060
21327
  },
21061
21328
  }, spaceTrim$1(`
@@ -21284,8 +21551,8 @@ class UseEmailCommitmentDefinition extends BaseCommitmentDefinition {
21284
21551
  return this.appendToSystemMessage({
21285
21552
  ...requirements,
21286
21553
  tools: updatedTools,
21287
- metadata: {
21288
- ...requirements.metadata,
21554
+ _metadata: {
21555
+ ...requirements._metadata,
21289
21556
  useEmail: content || true,
21290
21557
  },
21291
21558
  }, spaceTrim$1((block) => `
@@ -21420,8 +21687,8 @@ class UseImageGeneratorCommitmentDefinition extends BaseCommitmentDefinition {
21420
21687
  return this.appendToSystemMessage({
21421
21688
  ...requirements,
21422
21689
  tools: updatedTools,
21423
- metadata: {
21424
- ...requirements.metadata,
21690
+ _metadata: {
21691
+ ...requirements._metadata,
21425
21692
  useImageGenerator: content || true,
21426
21693
  },
21427
21694
  }, spaceTrim$1(`
@@ -21712,8 +21979,8 @@ class UseSearchEngineCommitmentDefinition extends BaseCommitmentDefinition {
21712
21979
  return this.appendToSystemMessage({
21713
21980
  ...requirements,
21714
21981
  tools: updatedTools,
21715
- metadata: {
21716
- ...requirements.metadata,
21982
+ _metadata: {
21983
+ ...requirements._metadata,
21717
21984
  useSearchEngine: content || true,
21718
21985
  },
21719
21986
  }, spaceTrim$1((block) => `
@@ -21861,8 +22128,8 @@ class UseTimeCommitmentDefinition extends BaseCommitmentDefinition {
21861
22128
  return this.appendToSystemMessage({
21862
22129
  ...requirements,
21863
22130
  tools: updatedTools,
21864
- metadata: {
21865
- ...requirements.metadata,
22131
+ _metadata: {
22132
+ ...requirements._metadata,
21866
22133
  },
21867
22134
  }, spaceTrim$1((block) => `
21868
22135
  Time and date context:
@@ -22490,8 +22757,8 @@ async function createAgentModelRequirementsWithCommitments(agentSource, modelNam
22490
22757
  // Store the agent name in metadata so commitments can access it
22491
22758
  requirements = {
22492
22759
  ...requirements,
22493
- metadata: {
22494
- ...requirements.metadata,
22760
+ _metadata: {
22761
+ ...requirements._metadata,
22495
22762
  agentName: parseResult.agentName,
22496
22763
  },
22497
22764
  };
@@ -29540,6 +29807,64 @@ function promptbookifyAiText(text) {
29540
29807
  */
29541
29808
 
29542
29809
  const DEFAULT_AGENT_KIT_MODEL_NAME = 'gpt-5.2';
29810
+ const DEFAULT_JSON_SCHEMA_NAME = 'StructuredOutput';
29811
+ /*
29812
+ TODO: Use or remove
29813
+ const EMPTY_JSON_SCHEMA: JsonSchemaDefinition['schema'] = {
29814
+ type: 'object',
29815
+ properties: {},
29816
+ required: [],
29817
+ additionalProperties: true,
29818
+ };
29819
+ */
29820
+ function buildJsonSchemaDefinition(jsonSchema) {
29821
+ var _a, _b, _c;
29822
+ const schema = (_a = jsonSchema === null || jsonSchema === void 0 ? void 0 : jsonSchema.schema) !== null && _a !== void 0 ? _a : {};
29823
+ return {
29824
+ type: 'json_schema',
29825
+ name: (_b = jsonSchema === null || jsonSchema === void 0 ? void 0 : jsonSchema.name) !== null && _b !== void 0 ? _b : DEFAULT_JSON_SCHEMA_NAME,
29826
+ strict: Boolean(jsonSchema === null || jsonSchema === void 0 ? void 0 : jsonSchema.strict),
29827
+ schema: {
29828
+ type: 'object',
29829
+ properties: ((_c = schema.properties) !== null && _c !== void 0 ? _c : {}),
29830
+ required: Array.isArray(schema.required) ? schema.required : [],
29831
+ additionalProperties: schema.additionalProperties === undefined ? true : Boolean(schema.additionalProperties),
29832
+ description: schema.description,
29833
+ },
29834
+ };
29835
+ }
29836
+ /**
29837
+ * Maps OpenAI `response_format` payloads to AgentKit output types so the runner can forward
29838
+ * structured-output preferences to OpenAI while still reusing the same AgentKit agent instance.
29839
+ *
29840
+ * @param responseFormat - The OpenAI `response_format` payload from the user request.
29841
+ * @returns An Agent output type compatible with the requested schema or `undefined` when no impact is required.
29842
+ * @private utility of Open AI
29843
+ */
29844
+ function mapResponseFormatToAgentOutputType(responseFormat) {
29845
+ if (!responseFormat) {
29846
+ return undefined;
29847
+ }
29848
+ if (typeof responseFormat === 'string') {
29849
+ if (responseFormat === 'text') {
29850
+ return 'text';
29851
+ }
29852
+ if (responseFormat === 'json_schema' || responseFormat === 'json_object') {
29853
+ return buildJsonSchemaDefinition();
29854
+ }
29855
+ return 'text';
29856
+ }
29857
+ switch (responseFormat.type) {
29858
+ case 'text':
29859
+ return 'text';
29860
+ case 'json_schema':
29861
+ return buildJsonSchemaDefinition(responseFormat.json_schema);
29862
+ case 'json_object':
29863
+ return buildJsonSchemaDefinition();
29864
+ default:
29865
+ return undefined;
29866
+ }
29867
+ }
29543
29868
  /**
29544
29869
  * Execution tools for OpenAI AgentKit (Agents SDK).
29545
29870
  *
@@ -29587,6 +29912,7 @@ class OpenAiAgentKitExecutionTools extends OpenAiVectorStoreHandler {
29587
29912
  ...parameters,
29588
29913
  modelName: this.agentKitModelName,
29589
29914
  });
29915
+ const responseFormatOutputType = mapResponseFormatToAgentOutputType(modelRequirements.responseFormat);
29590
29916
  const preparedAgentKitAgent = await this.prepareAgentKitAgent({
29591
29917
  name: (prompt.title || 'Agent'),
29592
29918
  instructions: modelRequirements.systemMessage || '',
@@ -29598,6 +29924,7 @@ class OpenAiAgentKitExecutionTools extends OpenAiVectorStoreHandler {
29598
29924
  prompt,
29599
29925
  rawPromptContent,
29600
29926
  onProgress,
29927
+ responseFormatOutputType,
29601
29928
  });
29602
29929
  }
29603
29930
  /**
@@ -29779,16 +30106,21 @@ class OpenAiAgentKitExecutionTools extends OpenAiVectorStoreHandler {
29779
30106
  ...prompt.parameters,
29780
30107
  modelName: this.agentKitModelName,
29781
30108
  });
30109
+ const agentForRun = options.responseFormatOutputType !== undefined
30110
+ ? openAiAgentKitAgent.clone({
30111
+ outputType: options.responseFormatOutputType,
30112
+ })
30113
+ : openAiAgentKitAgent;
29782
30114
  const start = $getCurrentDate();
29783
30115
  let latestContent = '';
29784
30116
  const toolCalls = [];
29785
30117
  const toolCallIndexById = new Map();
29786
30118
  const inputItems = await this.buildAgentKitInputItems(prompt, rawPromptContent);
29787
30119
  const rawRequest = {
29788
- agentName: openAiAgentKitAgent.name,
30120
+ agentName: agentForRun.name,
29789
30121
  input: inputItems,
29790
30122
  };
29791
- const streamResult = await run(openAiAgentKitAgent, inputItems, {
30123
+ const streamResult = await run(agentForRun, inputItems, {
29792
30124
  stream: true,
29793
30125
  context: { parameters: prompt.parameters },
29794
30126
  });
@@ -30136,22 +30468,28 @@ class AgentLlmExecutionTools {
30136
30468
  throw new Error('AgentLlmExecutionTools only supports chat prompts');
30137
30469
  }
30138
30470
  const modelRequirements = await this.getModelRequirements();
30471
+ const { _metadata, promptSuffix, ...sanitizedRequirements } = modelRequirements;
30139
30472
  const chatPrompt = prompt;
30140
30473
  let underlyingLlmResult;
30141
- // Create modified chat prompt with agent system message
30474
+ const chatPromptContentWithSuffix = promptSuffix
30475
+ ? `${chatPrompt.content}\n\n${promptSuffix}`
30476
+ : chatPrompt.content;
30142
30477
  const promptWithAgentModelRequirements = {
30143
30478
  ...chatPrompt,
30479
+ content: chatPromptContentWithSuffix,
30144
30480
  modelRequirements: {
30145
30481
  ...chatPrompt.modelRequirements,
30146
- ...modelRequirements,
30482
+ ...sanitizedRequirements,
30147
30483
  // Spread tools to convert readonly array to mutable
30148
- tools: modelRequirements.tools ? [...modelRequirements.tools] : chatPrompt.modelRequirements.tools,
30484
+ tools: sanitizedRequirements.tools
30485
+ ? [...sanitizedRequirements.tools]
30486
+ : chatPrompt.modelRequirements.tools,
30149
30487
  // Spread knowledgeSources to convert readonly array to mutable
30150
- knowledgeSources: modelRequirements.knowledgeSources
30151
- ? [...modelRequirements.knowledgeSources]
30488
+ knowledgeSources: sanitizedRequirements.knowledgeSources
30489
+ ? [...sanitizedRequirements.knowledgeSources]
30152
30490
  : undefined,
30153
30491
  // Prepend agent system message to existing system message
30154
- systemMessage: modelRequirements.systemMessage +
30492
+ systemMessage: sanitizedRequirements.systemMessage +
30155
30493
  (chatPrompt.modelRequirements.systemMessage
30156
30494
  ? `\n\n${chatPrompt.modelRequirements.systemMessage}`
30157
30495
  : ''),
@@ -30159,8 +30497,8 @@ class AgentLlmExecutionTools {
30159
30497
  };
30160
30498
  console.log('!!!! promptWithAgentModelRequirements:', promptWithAgentModelRequirements);
30161
30499
  if (OpenAiAgentKitExecutionTools.isOpenAiAgentKitExecutionTools(this.options.llmTools)) {
30162
- const requirementsHash = SHA256(JSON.stringify(modelRequirements)).toString();
30163
- const vectorStoreHash = SHA256(JSON.stringify((_a = modelRequirements.knowledgeSources) !== null && _a !== void 0 ? _a : [])).toString();
30500
+ const requirementsHash = SHA256(JSON.stringify(sanitizedRequirements)).toString();
30501
+ const vectorStoreHash = SHA256(JSON.stringify((_a = sanitizedRequirements.knowledgeSources) !== null && _a !== void 0 ? _a : [])).toString();
30164
30502
  const cachedVectorStore = AgentLlmExecutionTools.vectorStoreCache.get(this.title);
30165
30503
  const cachedAgentKit = AgentLlmExecutionTools.agentKitAgentCache.get(this.title);
30166
30504
  let preparedAgentKit = this.options.assistantPreparationMode === 'external'
@@ -30187,7 +30525,7 @@ class AgentLlmExecutionTools {
30187
30525
  agent: this.title,
30188
30526
  });
30189
30527
  }
30190
- if (!vectorStoreId && ((_b = modelRequirements.knowledgeSources) === null || _b === void 0 ? void 0 : _b.length)) {
30528
+ if (!vectorStoreId && ((_b = sanitizedRequirements.knowledgeSources) === null || _b === void 0 ? void 0 : _b.length)) {
30191
30529
  emitAssistantPreparationProgress({
30192
30530
  onProgress,
30193
30531
  prompt,
@@ -30203,9 +30541,9 @@ class AgentLlmExecutionTools {
30203
30541
  });
30204
30542
  preparedAgentKit = await this.options.llmTools.prepareAgentKitAgent({
30205
30543
  name: this.title,
30206
- instructions: modelRequirements.systemMessage || '',
30207
- knowledgeSources: modelRequirements.knowledgeSources,
30208
- tools: modelRequirements.tools ? [...modelRequirements.tools] : undefined,
30544
+ instructions: sanitizedRequirements.systemMessage || '',
30545
+ knowledgeSources: sanitizedRequirements.knowledgeSources,
30546
+ tools: sanitizedRequirements.tools ? [...sanitizedRequirements.tools] : undefined,
30209
30547
  vectorStoreId,
30210
30548
  });
30211
30549
  }
@@ -30220,15 +30558,17 @@ class AgentLlmExecutionTools {
30220
30558
  requirementsHash,
30221
30559
  vectorStoreId: preparedAgentKit.vectorStoreId,
30222
30560
  });
30561
+ const responseFormatOutputType = mapResponseFormatToAgentOutputType(promptWithAgentModelRequirements.modelRequirements.responseFormat);
30223
30562
  underlyingLlmResult = await this.options.llmTools.callChatModelStreamWithPreparedAgent({
30224
30563
  openAiAgentKitAgent: preparedAgentKit.agent,
30225
30564
  prompt: promptWithAgentModelRequirements,
30226
30565
  onProgress,
30566
+ responseFormatOutputType,
30227
30567
  });
30228
30568
  }
30229
30569
  else if (OpenAiAssistantExecutionTools.isOpenAiAssistantExecutionTools(this.options.llmTools)) {
30230
30570
  // ... deprecated path ...
30231
- const requirementsHash = SHA256(JSON.stringify(modelRequirements)).toString();
30571
+ const requirementsHash = SHA256(JSON.stringify(sanitizedRequirements)).toString();
30232
30572
  const cached = AgentLlmExecutionTools.assistantCache.get(this.title);
30233
30573
  let assistant;
30234
30574
  if (this.options.assistantPreparationMode === 'external') {
@@ -30270,9 +30610,9 @@ class AgentLlmExecutionTools {
30270
30610
  assistant = await this.options.llmTools.updateAssistant({
30271
30611
  assistantId: cached.assistantId,
30272
30612
  name: this.title,
30273
- instructions: modelRequirements.systemMessage,
30274
- knowledgeSources: modelRequirements.knowledgeSources,
30275
- tools: modelRequirements.tools ? [...modelRequirements.tools] : undefined,
30613
+ instructions: sanitizedRequirements.systemMessage,
30614
+ knowledgeSources: sanitizedRequirements.knowledgeSources,
30615
+ tools: sanitizedRequirements.tools ? [...sanitizedRequirements.tools] : undefined,
30276
30616
  });
30277
30617
  AgentLlmExecutionTools.assistantCache.set(this.title, {
30278
30618
  assistantId: assistant.assistantId,
@@ -30295,9 +30635,9 @@ class AgentLlmExecutionTools {
30295
30635
  });
30296
30636
  assistant = await this.options.llmTools.createNewAssistant({
30297
30637
  name: this.title,
30298
- instructions: modelRequirements.systemMessage,
30299
- knowledgeSources: modelRequirements.knowledgeSources,
30300
- tools: modelRequirements.tools ? [...modelRequirements.tools] : undefined,
30638
+ instructions: sanitizedRequirements.systemMessage,
30639
+ knowledgeSources: sanitizedRequirements.knowledgeSources,
30640
+ tools: sanitizedRequirements.tools ? [...sanitizedRequirements.tools] : undefined,
30301
30641
  /*
30302
30642
  !!!
30303
30643
  metadata: {
@@ -30339,13 +30679,19 @@ class AgentLlmExecutionTools {
30339
30679
  }
30340
30680
  }
30341
30681
  let content = underlyingLlmResult.content;
30342
- // Note: Cleanup the AI artifacts from the content
30343
- content = humanizeAiText(content);
30344
- // Note: Make sure the content is Promptbook-like
30345
- content = promptbookifyAiText(content);
30682
+ if (typeof content === 'string') {
30683
+ // Note: Cleanup the AI artifacts from the content
30684
+ content = humanizeAiText(content);
30685
+ // Note: Make sure the content is Promptbook-like
30686
+ content = promptbookifyAiText(content);
30687
+ }
30688
+ else {
30689
+ // TODO: Maybe deep `humanizeAiText` + `promptbookifyAiText` inside of the object
30690
+ content = JSON.stringify(content);
30691
+ }
30346
30692
  const agentResult = {
30347
30693
  ...underlyingLlmResult,
30348
- content,
30694
+ content: content,
30349
30695
  modelName: this.modelName,
30350
30696
  };
30351
30697
  return agentResult;
@@ -30534,7 +30880,6 @@ class Agent extends AgentLlmExecutionTools {
30534
30880
  * Note: This method also implements the learning mechanism
30535
30881
  */
30536
30882
  async callChatModelStream(prompt, onProgress) {
30537
- var _a;
30538
30883
  // [1] Check if the user is asking the same thing as in the samples
30539
30884
  const modelRequirements = await this.getModelRequirements();
30540
30885
  if (modelRequirements.samples) {
@@ -30582,7 +30927,7 @@ class Agent extends AgentLlmExecutionTools {
30582
30927
  if (result.rawResponse && 'sample' in result.rawResponse) {
30583
30928
  return result;
30584
30929
  }
30585
- if ((_a = modelRequirements.metadata) === null || _a === void 0 ? void 0 : _a.isClosed) {
30930
+ if (modelRequirements.isClosed) {
30586
30931
  return result;
30587
30932
  }
30588
30933
  // Note: [0] Notify start of self-learning