@promptbook/browser 0.110.0-0 → 0.110.0-10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54) hide show
  1. package/esm/index.es.js +1785 -510
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/components.index.d.ts +4 -0
  4. package/esm/typings/src/_packages/core.index.d.ts +2 -2
  5. package/esm/typings/src/_packages/openai.index.d.ts +8 -4
  6. package/esm/typings/src/_packages/types.index.d.ts +12 -4
  7. package/esm/typings/src/book-2.0/agent-source/AgentModelRequirements.d.ts +22 -21
  8. package/esm/typings/src/book-2.0/agent-source/AgentReferenceResolver.d.ts +18 -0
  9. package/esm/typings/src/book-2.0/agent-source/CreateAgentModelRequirementsOptions.d.ts +12 -0
  10. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirements.d.ts +8 -2
  11. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.agentReferenceResolver.test.d.ts +1 -0
  12. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.d.ts +4 -5
  13. package/esm/typings/src/book-components/Chat/AgentChip/AgentChip.d.ts +5 -1
  14. package/esm/typings/src/book-components/Chat/Chat/ChatActionsBar.d.ts +4 -2
  15. package/esm/typings/src/book-components/Chat/Chat/ChatInputArea.d.ts +1 -0
  16. package/esm/typings/src/book-components/Chat/Chat/ChatMessageItem.d.ts +4 -0
  17. package/esm/typings/src/book-components/Chat/Chat/ChatMessageList.d.ts +1 -0
  18. package/esm/typings/src/book-components/Chat/Chat/ChatProps.d.ts +15 -0
  19. package/esm/typings/src/book-components/Chat/Chat/ChatSoundToggle.d.ts +31 -0
  20. package/esm/typings/src/book-components/Chat/LlmChat/LlmChatProps.d.ts +10 -1
  21. package/esm/typings/src/book-components/Chat/SourceChip/SourceChip.d.ts +5 -1
  22. package/esm/typings/src/book-components/Chat/utils/collectTeamToolCallSummary.d.ts +69 -0
  23. package/esm/typings/src/book-components/Chat/utils/getToolCallChipletInfo.d.ts +13 -13
  24. package/esm/typings/src/book-components/Chat/utils/parseCitationsFromContent.d.ts +9 -0
  25. package/esm/typings/src/book-components/Chat/utils/toolCallParsing.d.ts +4 -0
  26. package/esm/typings/src/collection/agent-collection/constructors/agent-collection-in-supabase/AgentsDatabaseSchema.d.ts +0 -3
  27. package/esm/typings/src/commitments/_base/BaseCommitmentDefinition.d.ts +9 -0
  28. package/esm/typings/src/execution/LlmExecutionTools.d.ts +2 -1
  29. package/esm/typings/src/llm-providers/agent/Agent.d.ts +1 -1
  30. package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.d.ts +5 -1
  31. package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.test.d.ts +1 -0
  32. package/esm/typings/src/llm-providers/agent/AgentOptions.d.ts +10 -0
  33. package/esm/typings/src/llm-providers/agent/CreateAgentLlmExecutionToolsOptions.d.ts +13 -2
  34. package/esm/typings/src/llm-providers/agent/RemoteAgent.d.ts +2 -1
  35. package/esm/typings/src/llm-providers/openai/OpenAiAgentKitExecutionTools.d.ts +150 -0
  36. package/esm/typings/src/llm-providers/openai/OpenAiAgentKitExecutionToolsOptions.d.ts +15 -0
  37. package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionTools.d.ts +3 -3
  38. package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionToolsOptions.d.ts +3 -4
  39. package/esm/typings/src/llm-providers/openai/OpenAiVectorStoreHandler.d.ts +135 -0
  40. package/esm/typings/src/llm-providers/openai/utils/mapToolsToOpenAi.d.ts +1 -1
  41. package/esm/typings/src/types/LlmToolDefinition.d.ts +1 -0
  42. package/esm/typings/src/types/ModelRequirements.d.ts +9 -0
  43. package/esm/typings/src/utils/DEFAULT_THINKING_MESSAGES.d.ts +8 -0
  44. package/esm/typings/src/utils/agents/resolveAgentAvatarImageUrl.d.ts +29 -0
  45. package/esm/typings/src/utils/knowledge/inlineKnowledgeSource.d.ts +38 -0
  46. package/esm/typings/src/utils/knowledge/inlineKnowledgeSource.test.d.ts +1 -0
  47. package/esm/typings/src/utils/language/getBrowserPreferredSpeechRecognitionLanguage.d.ts +35 -0
  48. package/esm/typings/src/utils/toolCalls/getToolCallIdentity.d.ts +10 -0
  49. package/esm/typings/src/version.d.ts +1 -1
  50. package/package.json +7 -3
  51. package/umd/index.umd.js +1788 -514
  52. package/umd/index.umd.js.map +1 -1
  53. package/esm/typings/src/llm-providers/openai/OpenAiAgentExecutionTools.d.ts +0 -43
  54. package/esm/typings/src/llm-providers/openai/createOpenAiAgentExecutionTools.d.ts +0 -11
package/esm/index.es.js CHANGED
@@ -11,6 +11,7 @@ import moment from 'moment';
11
11
  import sha256 from 'crypto-js/sha256';
12
12
  import { lookup, extension } from 'mime-types';
13
13
  import { parse, unparse } from 'papaparse';
14
+ import { Agent as Agent$1, setDefaultOpenAIClient, setDefaultOpenAIKey, fileSearchTool, tool, run } from '@openai/agents';
14
15
  import Bottleneck from 'bottleneck';
15
16
  import OpenAI from 'openai';
16
17
 
@@ -28,7 +29,7 @@ const BOOK_LANGUAGE_VERSION = '2.0.0';
28
29
  * @generated
29
30
  * @see https://github.com/webgptorg/promptbook
30
31
  */
31
- const PROMPTBOOK_ENGINE_VERSION = '0.110.0-0';
32
+ const PROMPTBOOK_ENGINE_VERSION = '0.110.0-10';
32
33
  /**
33
34
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
34
35
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -1329,6 +1330,28 @@ class BaseCommitmentDefinition {
1329
1330
  return currentMessage + separator + content;
1330
1331
  });
1331
1332
  }
1333
+ /**
1334
+ * Helper method to create a new requirements object with updated prompt suffix
1335
+ */
1336
+ updatePromptSuffix(requirements, contentUpdate) {
1337
+ const newSuffix = typeof contentUpdate === 'string' ? contentUpdate : contentUpdate(requirements.promptSuffix);
1338
+ return {
1339
+ ...requirements,
1340
+ promptSuffix: newSuffix,
1341
+ };
1342
+ }
1343
+ /**
1344
+ * Helper method to append content to the prompt suffix
1345
+ * Default separator is a single newline for bullet lists.
1346
+ */
1347
+ appendToPromptSuffix(requirements, content, separator = '\n') {
1348
+ return this.updatePromptSuffix(requirements, (currentSuffix) => {
1349
+ if (!currentSuffix.trim()) {
1350
+ return content;
1351
+ }
1352
+ return `${currentSuffix}${separator}${content}`;
1353
+ });
1354
+ }
1332
1355
  /**
1333
1356
  * Helper method to add a comment section to the system message
1334
1357
  * Comments are lines starting with # that will be removed from the final system message
@@ -1523,13 +1546,9 @@ class ClosedCommitmentDefinition extends BaseCommitmentDefinition {
1523
1546
  `);
1524
1547
  }
1525
1548
  applyToAgentModelRequirements(requirements, _content) {
1526
- const updatedMetadata = {
1527
- ...requirements.metadata,
1528
- isClosed: true,
1529
- };
1530
1549
  return {
1531
1550
  ...requirements,
1532
- metadata: updatedMetadata,
1551
+ isClosed: true,
1533
1552
  };
1534
1553
  }
1535
1554
  }
@@ -1807,12 +1826,12 @@ class DictionaryCommitmentDefinition extends BaseCommitmentDefinition {
1807
1826
  return requirements;
1808
1827
  }
1809
1828
  // Get existing dictionary entries from metadata
1810
- const existingDictionary = ((_a = requirements.metadata) === null || _a === void 0 ? void 0 : _a.DICTIONARY) || '';
1829
+ const existingDictionary = ((_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.DICTIONARY) || '';
1811
1830
  // Merge the new dictionary entry with existing entries
1812
1831
  const mergedDictionary = existingDictionary ? `${existingDictionary}\n${trimmedContent}` : trimmedContent;
1813
1832
  // Store the merged dictionary in metadata for debugging and inspection
1814
1833
  const updatedMetadata = {
1815
- ...requirements.metadata,
1834
+ ...requirements._metadata,
1816
1835
  DICTIONARY: mergedDictionary,
1817
1836
  };
1818
1837
  // Create the dictionary section for the system message
@@ -1820,7 +1839,7 @@ class DictionaryCommitmentDefinition extends BaseCommitmentDefinition {
1820
1839
  const dictionarySection = `# DICTIONARY\n${mergedDictionary}`;
1821
1840
  return {
1822
1841
  ...this.appendToSystemMessage(requirements, dictionarySection),
1823
- metadata: updatedMetadata,
1842
+ _metadata: updatedMetadata,
1824
1843
  };
1825
1844
  }
1826
1845
  }
@@ -5965,10 +5984,7 @@ class FromCommitmentDefinition extends BaseCommitmentDefinition {
5965
5984
  applyToAgentModelRequirements(requirements, content) {
5966
5985
  const trimmedContent = content.trim();
5967
5986
  if (!trimmedContent) {
5968
- return {
5969
- ...requirements,
5970
- parentAgentUrl: undefined,
5971
- };
5987
+ return requirements;
5972
5988
  }
5973
5989
  if (trimmedContent.toUpperCase() === 'VOID' ||
5974
5990
  trimmedContent.toUpperCase() === 'NULL' ||
@@ -6182,6 +6198,136 @@ class ImportCommitmentDefinition extends BaseCommitmentDefinition {
6182
6198
  * Note: [💞] Ignore a discrepancy between file name and entity name
6183
6199
  */
6184
6200
 
6201
+ /**
6202
+ * @@@
6203
+ *
6204
+ * @private thing of inline knowledge
6205
+ */
6206
+ const INLINE_KNOWLEDGE_BASE_NAME = 'inline-knowledge';
6207
+ /**
6208
+ * @@@
6209
+ *
6210
+ * @private thing of inline knowledge
6211
+ */
6212
+ const INLINE_KNOWLEDGE_EXTENSION = '.txt';
6213
+ /**
6214
+ * @@@
6215
+ *
6216
+ * @private thing of inline knowledge
6217
+ */
6218
+ const DATA_URL_PREFIX = 'data:';
6219
+ /**
6220
+ * @@@
6221
+ *
6222
+ * @private thing of inline knowledge
6223
+ */
6224
+ function getFirstNonEmptyLine(content) {
6225
+ const lines = content.split(/\r?\n/);
6226
+ for (const line of lines) {
6227
+ const trimmed = line.trim();
6228
+ if (trimmed) {
6229
+ return trimmed;
6230
+ }
6231
+ }
6232
+ return null;
6233
+ }
6234
+ /**
6235
+ * @@@
6236
+ *
6237
+ * @private thing of inline knowledge
6238
+ */
6239
+ function deriveBaseFilename(content) {
6240
+ const firstLine = getFirstNonEmptyLine(content);
6241
+ if (!firstLine) {
6242
+ return INLINE_KNOWLEDGE_BASE_NAME;
6243
+ }
6244
+ const normalized = normalizeToKebabCase(firstLine);
6245
+ return normalized || INLINE_KNOWLEDGE_BASE_NAME;
6246
+ }
6247
+ /**
6248
+ * Creates a data URL that represents the inline knowledge content as a text file.
6249
+ *
6250
+ * @private thing of inline knowledge
6251
+ */
6252
+ function createInlineKnowledgeSourceFile(content) {
6253
+ const trimmedContent = content.trim();
6254
+ const baseName = deriveBaseFilename(trimmedContent);
6255
+ const filename = `${baseName}${INLINE_KNOWLEDGE_EXTENSION}`;
6256
+ const mimeType = 'text/plain';
6257
+ const base64 = Buffer.from(trimmedContent, 'utf-8').toString('base64');
6258
+ const encodedFilename = encodeURIComponent(filename);
6259
+ const url = `${DATA_URL_PREFIX}${mimeType};name=${encodedFilename};charset=utf-8;base64,${base64}`;
6260
+ return {
6261
+ filename,
6262
+ mimeType,
6263
+ url,
6264
+ };
6265
+ }
6266
+ /**
6267
+ * Checks whether the provided source string is a data URL that can be decoded.
6268
+ *
6269
+ * @private thing of inline knowledge
6270
+ */
6271
+ function isDataUrlKnowledgeSource(source) {
6272
+ return typeof source === 'string' && source.startsWith(DATA_URL_PREFIX);
6273
+ }
6274
+ /**
6275
+ * Parses a data URL-based knowledge source into its raw buffer, filename, and MIME type.
6276
+ *
6277
+ * @private thing of inline knowledge
6278
+ */
6279
+ function parseDataUrlKnowledgeSource(source) {
6280
+ if (!isDataUrlKnowledgeSource(source)) {
6281
+ return null;
6282
+ }
6283
+ const commaIndex = source.indexOf(',');
6284
+ if (commaIndex === -1) {
6285
+ return null;
6286
+ }
6287
+ const header = source.slice(DATA_URL_PREFIX.length, commaIndex);
6288
+ const payload = source.slice(commaIndex + 1);
6289
+ const tokens = header.split(';');
6290
+ const mediaType = tokens[0] || 'text/plain';
6291
+ let filename = `${INLINE_KNOWLEDGE_BASE_NAME}${INLINE_KNOWLEDGE_EXTENSION}`;
6292
+ let isBase64 = false;
6293
+ for (let i = 1; i < tokens.length; i++) {
6294
+ const token = tokens[i];
6295
+ if (!token) {
6296
+ continue;
6297
+ }
6298
+ if (token.toLowerCase() === 'base64') {
6299
+ isBase64 = true;
6300
+ continue;
6301
+ }
6302
+ const [key, value] = token.split('=');
6303
+ if (key === 'name' && value !== undefined) {
6304
+ try {
6305
+ filename = decodeURIComponent(value);
6306
+ }
6307
+ catch (_a) {
6308
+ filename = value;
6309
+ }
6310
+ }
6311
+ }
6312
+ if (!isBase64) {
6313
+ return null;
6314
+ }
6315
+ try {
6316
+ const buffer = Buffer.from(payload, 'base64');
6317
+ return {
6318
+ buffer,
6319
+ filename,
6320
+ mimeType: mediaType,
6321
+ };
6322
+ }
6323
+ catch (_b) {
6324
+ return null;
6325
+ }
6326
+ }
6327
+ /**
6328
+ * Note: [💞] Ignore a discrepancy between file name and entity name
6329
+ */
6330
+
6185
6331
  /**
6186
6332
  * KNOWLEDGE commitment definition
6187
6333
  *
@@ -6280,9 +6426,13 @@ class KnowledgeCommitmentDefinition extends BaseCommitmentDefinition {
6280
6426
  return this.appendToSystemMessage(updatedRequirements, knowledgeInfo, '\n\n');
6281
6427
  }
6282
6428
  else {
6283
- // Direct text knowledge - add to system message
6284
- const knowledgeSection = `Knowledge: ${trimmedContent}`;
6285
- return this.appendToSystemMessage(requirements, knowledgeSection, '\n\n');
6429
+ const inlineSource = createInlineKnowledgeSourceFile(trimmedContent);
6430
+ const updatedRequirements = {
6431
+ ...requirements,
6432
+ knowledgeSources: [...(requirements.knowledgeSources || []), inlineSource.url],
6433
+ };
6434
+ const knowledgeInfo = `Knowledge Source Inline: ${inlineSource.filename} (derived from inline content and processed for retrieval during chat)`;
6435
+ return this.appendToSystemMessage(updatedRequirements, knowledgeInfo, '\n\n');
6286
6436
  }
6287
6437
  }
6288
6438
  }
@@ -6529,16 +6679,16 @@ class AgentMessageCommitmentDefinition extends BaseCommitmentDefinition {
6529
6679
  // and typically doesn't need to be added to the system prompt or model requirements directly.
6530
6680
  // It is extracted separately for the chat interface.
6531
6681
  var _a;
6532
- const pendingUserMessage = (_a = requirements.metadata) === null || _a === void 0 ? void 0 : _a.pendingUserMessage;
6682
+ const pendingUserMessage = (_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.pendingUserMessage;
6533
6683
  if (pendingUserMessage) {
6534
6684
  const newSample = { question: pendingUserMessage, answer: content };
6535
6685
  const newSamples = [...(requirements.samples || []), newSample];
6536
- const newMetadata = { ...requirements.metadata };
6686
+ const newMetadata = { ...requirements._metadata };
6537
6687
  delete newMetadata.pendingUserMessage;
6538
6688
  return {
6539
6689
  ...requirements,
6540
6690
  samples: newSamples,
6541
- metadata: newMetadata,
6691
+ _metadata: newMetadata,
6542
6692
  };
6543
6693
  }
6544
6694
  return requirements;
@@ -6786,8 +6936,8 @@ class UserMessageCommitmentDefinition extends BaseCommitmentDefinition {
6786
6936
  applyToAgentModelRequirements(requirements, content) {
6787
6937
  return {
6788
6938
  ...requirements,
6789
- metadata: {
6790
- ...requirements.metadata,
6939
+ _metadata: {
6940
+ ...requirements._metadata,
6791
6941
  pendingUserMessage: content,
6792
6942
  },
6793
6943
  };
@@ -7645,11 +7795,7 @@ class NoteCommitmentDefinition extends BaseCommitmentDefinition {
7645
7795
  if (trimmedContent === '') {
7646
7796
  return requirements;
7647
7797
  }
7648
- // Return requirements with updated notes but no changes to system message
7649
- return {
7650
- ...requirements,
7651
- notes: [...(requirements.notes || []), trimmedContent],
7652
- };
7798
+ return requirements;
7653
7799
  }
7654
7800
  }
7655
7801
  /**
@@ -7711,12 +7857,12 @@ class OpenCommitmentDefinition extends BaseCommitmentDefinition {
7711
7857
  // Since OPEN is default, we can just ensure isClosed is false
7712
7858
  // But to be explicit we can set it
7713
7859
  const updatedMetadata = {
7714
- ...requirements.metadata,
7860
+ ...requirements._metadata,
7715
7861
  isClosed: false,
7716
7862
  };
7717
7863
  return {
7718
7864
  ...requirements,
7719
- metadata: updatedMetadata,
7865
+ _metadata: updatedMetadata,
7720
7866
  };
7721
7867
  }
7722
7868
  }
@@ -7797,7 +7943,7 @@ class PersonaCommitmentDefinition extends BaseCommitmentDefinition {
7797
7943
  return requirements;
7798
7944
  }
7799
7945
  // Get existing persona content from metadata
7800
- const existingPersonaContent = ((_a = requirements.metadata) === null || _a === void 0 ? void 0 : _a.PERSONA) || '';
7946
+ const existingPersonaContent = ((_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.PERSONA) || '';
7801
7947
  // Merge the new content with existing persona content
7802
7948
  // When multiple PERSONA commitments exist, they are merged into one
7803
7949
  const mergedPersonaContent = existingPersonaContent
@@ -7805,12 +7951,12 @@ class PersonaCommitmentDefinition extends BaseCommitmentDefinition {
7805
7951
  : trimmedContent;
7806
7952
  // Store the merged persona content in metadata for debugging and inspection
7807
7953
  const updatedMetadata = {
7808
- ...requirements.metadata,
7954
+ ...requirements._metadata,
7809
7955
  PERSONA: mergedPersonaContent,
7810
7956
  };
7811
7957
  // Get the agent name from metadata (which should contain the first line of agent source)
7812
7958
  // If not available, extract from current system message as fallback
7813
- let agentName = (_b = requirements.metadata) === null || _b === void 0 ? void 0 : _b.agentName;
7959
+ let agentName = (_b = requirements._metadata) === null || _b === void 0 ? void 0 : _b.agentName;
7814
7960
  if (!agentName) {
7815
7961
  // Fallback: extract from current system message
7816
7962
  const currentMessage = requirements.systemMessage.trim();
@@ -7857,7 +8003,7 @@ class PersonaCommitmentDefinition extends BaseCommitmentDefinition {
7857
8003
  return {
7858
8004
  ...requirements,
7859
8005
  systemMessage: newSystemMessage,
7860
- metadata: updatedMetadata,
8006
+ _metadata: updatedMetadata,
7861
8007
  };
7862
8008
  }
7863
8009
  }
@@ -7940,7 +8086,16 @@ class RuleCommitmentDefinition extends BaseCommitmentDefinition {
7940
8086
  }
7941
8087
  // Add rule to the system message
7942
8088
  const ruleSection = `Rule: ${trimmedContent}`;
7943
- return this.appendToSystemMessage(requirements, ruleSection, '\n\n');
8089
+ const requirementsWithRule = this.appendToSystemMessage(requirements, ruleSection, '\n\n');
8090
+ const ruleLines = trimmedContent
8091
+ .split(/\r?\n/)
8092
+ .map((line) => line.trim())
8093
+ .filter(Boolean)
8094
+ .map((line) => `- ${line}`);
8095
+ if (ruleLines.length === 0) {
8096
+ return requirementsWithRule;
8097
+ }
8098
+ return this.appendToPromptSuffix(requirementsWithRule, ruleLines.join('\n'));
7944
8099
  }
7945
8100
  }
7946
8101
  /**
@@ -8446,7 +8601,7 @@ class TeamCommitmentDefinition extends BaseCommitmentDefinition {
8446
8601
  if (teammates.length === 0) {
8447
8602
  return requirements;
8448
8603
  }
8449
- const agentName = ((_a = requirements.metadata) === null || _a === void 0 ? void 0 : _a.agentName) || 'Agent';
8604
+ const agentName = ((_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.agentName) || 'Agent';
8450
8605
  const teamEntries = teammates.map((teammate) => ({
8451
8606
  toolName: createTeamToolName(teammate.url),
8452
8607
  teammate,
@@ -8486,7 +8641,7 @@ class TeamCommitmentDefinition extends BaseCommitmentDefinition {
8486
8641
  },
8487
8642
  });
8488
8643
  }
8489
- const existingTeammates = ((_b = requirements.metadata) === null || _b === void 0 ? void 0 : _b.teammates) || [];
8644
+ const existingTeammates = ((_b = requirements._metadata) === null || _b === void 0 ? void 0 : _b.teammates) || [];
8490
8645
  const updatedTeammates = [...existingTeammates];
8491
8646
  for (const entry of teamEntries) {
8492
8647
  if (updatedTeammates.some((existing) => existing.url === entry.teammate.url)) {
@@ -8515,8 +8670,8 @@ class TeamCommitmentDefinition extends BaseCommitmentDefinition {
8515
8670
  return this.appendToSystemMessage({
8516
8671
  ...requirements,
8517
8672
  tools: updatedTools,
8518
- metadata: {
8519
- ...requirements.metadata,
8673
+ _metadata: {
8674
+ ...requirements._metadata,
8520
8675
  teammates: updatedTeammates,
8521
8676
  },
8522
8677
  }, teamSystemMessage);
@@ -8616,11 +8771,16 @@ function createTeamToolFunction(entry) {
8616
8771
  const request = buildTeammateRequest(message, args.context);
8617
8772
  let response = '';
8618
8773
  let error = null;
8774
+ let toolCalls;
8619
8775
  try {
8620
8776
  const remoteAgent = await getRemoteTeammateAgent(entry.teammate.url);
8621
8777
  const prompt = buildTeammatePrompt(request);
8622
8778
  const teammateResult = await remoteAgent.callChatModel(prompt);
8623
8779
  response = teammateResult.content || '';
8780
+ toolCalls =
8781
+ 'toolCalls' in teammateResult && Array.isArray(teammateResult.toolCalls)
8782
+ ? teammateResult.toolCalls
8783
+ : undefined;
8624
8784
  }
8625
8785
  catch (err) {
8626
8786
  error = err instanceof Error ? err.message : String(err);
@@ -8630,6 +8790,7 @@ function createTeamToolFunction(entry) {
8630
8790
  teammate: teammateMetadata,
8631
8791
  request,
8632
8792
  response: teammateReply,
8793
+ toolCalls: toolCalls && toolCalls.length > 0 ? toolCalls : undefined,
8633
8794
  error,
8634
8795
  conversation: [
8635
8796
  {
@@ -8742,7 +8903,7 @@ class TemplateCommitmentDefinition extends BaseCommitmentDefinition {
8742
8903
  if (!trimmedContent) {
8743
8904
  // Store template mode flag in metadata
8744
8905
  const updatedMetadata = {
8745
- ...requirements.metadata,
8906
+ ...requirements._metadata,
8746
8907
  templateMode: true,
8747
8908
  };
8748
8909
  // Add a general instruction about using structured templates
@@ -8752,21 +8913,21 @@ class TemplateCommitmentDefinition extends BaseCommitmentDefinition {
8752
8913
  `);
8753
8914
  return {
8754
8915
  ...this.appendToSystemMessage(requirements, templateModeInstruction, '\n\n'),
8755
- metadata: updatedMetadata,
8916
+ _metadata: updatedMetadata,
8756
8917
  };
8757
8918
  }
8758
8919
  // If content is provided, add the specific template instructions
8759
8920
  const templateSection = `Response Template: ${trimmedContent}`;
8760
8921
  // Store the template in metadata for potential programmatic access
8761
- const existingTemplates = ((_a = requirements.metadata) === null || _a === void 0 ? void 0 : _a.templates) || [];
8922
+ const existingTemplates = ((_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.templates) || [];
8762
8923
  const updatedMetadata = {
8763
- ...requirements.metadata,
8924
+ ...requirements._metadata,
8764
8925
  templates: [...existingTemplates, trimmedContent],
8765
8926
  templateMode: true,
8766
8927
  };
8767
8928
  return {
8768
8929
  ...this.appendToSystemMessage(requirements, templateSection, '\n\n'),
8769
- metadata: updatedMetadata,
8930
+ _metadata: updatedMetadata,
8770
8931
  };
8771
8932
  }
8772
8933
  }
@@ -9103,8 +9264,8 @@ class UseBrowserCommitmentDefinition extends BaseCommitmentDefinition {
9103
9264
  return this.appendToSystemMessage({
9104
9265
  ...requirements,
9105
9266
  tools: updatedTools,
9106
- metadata: {
9107
- ...requirements.metadata,
9267
+ _metadata: {
9268
+ ...requirements._metadata,
9108
9269
  useBrowser: true,
9109
9270
  },
9110
9271
  }, spaceTrim$1(`
@@ -9333,8 +9494,8 @@ class UseEmailCommitmentDefinition extends BaseCommitmentDefinition {
9333
9494
  return this.appendToSystemMessage({
9334
9495
  ...requirements,
9335
9496
  tools: updatedTools,
9336
- metadata: {
9337
- ...requirements.metadata,
9497
+ _metadata: {
9498
+ ...requirements._metadata,
9338
9499
  useEmail: content || true,
9339
9500
  },
9340
9501
  }, spaceTrim$1((block) => `
@@ -9469,8 +9630,8 @@ class UseImageGeneratorCommitmentDefinition extends BaseCommitmentDefinition {
9469
9630
  return this.appendToSystemMessage({
9470
9631
  ...requirements,
9471
9632
  tools: updatedTools,
9472
- metadata: {
9473
- ...requirements.metadata,
9633
+ _metadata: {
9634
+ ...requirements._metadata,
9474
9635
  useImageGenerator: content || true,
9475
9636
  },
9476
9637
  }, spaceTrim$1(`
@@ -9761,8 +9922,8 @@ class UseSearchEngineCommitmentDefinition extends BaseCommitmentDefinition {
9761
9922
  return this.appendToSystemMessage({
9762
9923
  ...requirements,
9763
9924
  tools: updatedTools,
9764
- metadata: {
9765
- ...requirements.metadata,
9925
+ _metadata: {
9926
+ ...requirements._metadata,
9766
9927
  useSearchEngine: content || true,
9767
9928
  },
9768
9929
  }, spaceTrim$1((block) => `
@@ -9910,8 +10071,8 @@ class UseTimeCommitmentDefinition extends BaseCommitmentDefinition {
9910
10071
  return this.appendToSystemMessage({
9911
10072
  ...requirements,
9912
10073
  tools: updatedTools,
9913
- metadata: {
9914
- ...requirements.metadata,
10074
+ _metadata: {
10075
+ ...requirements._metadata,
9915
10076
  },
9916
10077
  }, spaceTrim$1((block) => `
9917
10078
  Time and date context:
@@ -11509,6 +11670,40 @@ function isAssistantPreparationToolCall(toolCall) {
11509
11670
  return toolCall.name === ASSISTANT_PREPARATION_TOOL_CALL_NAME;
11510
11671
  }
11511
11672
 
11673
+ /**
11674
+ * Builds a stable identity string for tool calls across partial updates.
11675
+ *
11676
+ * @param toolCall - Tool call entry to identify.
11677
+ * @returns Stable identity string for deduplication.
11678
+ *
11679
+ * @private function of <Chat/>
11680
+ */
11681
+ function getToolCallIdentity(toolCall) {
11682
+ const rawToolCall = toolCall.rawToolCall;
11683
+ const rawId = (rawToolCall === null || rawToolCall === void 0 ? void 0 : rawToolCall.id) || (rawToolCall === null || rawToolCall === void 0 ? void 0 : rawToolCall.callId) || (rawToolCall === null || rawToolCall === void 0 ? void 0 : rawToolCall.call_id);
11684
+ if (rawId) {
11685
+ return `id:${rawId}`;
11686
+ }
11687
+ if (toolCall.createdAt) {
11688
+ return `time:${toolCall.createdAt}:${toolCall.name}`;
11689
+ }
11690
+ const argsKey = (() => {
11691
+ if (typeof toolCall.arguments === 'string') {
11692
+ return toolCall.arguments;
11693
+ }
11694
+ if (!toolCall.arguments) {
11695
+ return '';
11696
+ }
11697
+ try {
11698
+ return JSON.stringify(toolCall.arguments);
11699
+ }
11700
+ catch (_a) {
11701
+ return '';
11702
+ }
11703
+ })();
11704
+ return `fallback:${toolCall.name}:${argsKey}`;
11705
+ }
11706
+
11512
11707
  /*! *****************************************************************************
11513
11708
  Copyright (c) Microsoft Corporation.
11514
11709
 
@@ -15607,11 +15802,14 @@ async function preparePersona(personaDescription, tools, options) {
15607
15802
  function createEmptyAgentModelRequirements() {
15608
15803
  return {
15609
15804
  systemMessage: '',
15805
+ promptSuffix: '',
15610
15806
  // modelName: 'gpt-5',
15611
15807
  modelName: 'gemini-2.5-flash-lite',
15612
15808
  temperature: 0.7,
15613
15809
  topP: 0.9,
15614
15810
  topK: 50,
15811
+ parentAgentUrl: null,
15812
+ isClosed: false,
15615
15813
  };
15616
15814
  }
15617
15815
  /**
@@ -15757,14 +15955,26 @@ function removeCommentsFromSystemMessage(systemMessage) {
15757
15955
  }
15758
15956
 
15759
15957
  /**
15760
- * Creates agent model requirements using the new commitment system
15958
+ * Creates agent model requirements using the new commitment system.
15959
+ *
15761
15960
  * This function uses a reduce-like pattern where each commitment applies its changes
15762
- * to build the final requirements starting from a basic empty model
15961
+ * to build the final requirements starting from a basic empty model.
15763
15962
  *
15764
- * @public exported from `@promptbook/core`
15963
+ * @param agentSource - Agent source book to parse.
15964
+ * @param modelName - Optional override for the agent model name.
15965
+ * @param options - Additional options such as the agent reference resolver.
15966
+ *
15967
+ * @private @@@
15968
+ */
15969
+ const COMMITMENTS_WITH_AGENT_REFERENCES = new Set(['FROM', 'IMPORT', 'IMPORTS', 'TEAM']);
15970
+ /**
15971
+ * @@@
15972
+ *
15973
+ * @private @@@
15765
15974
  */
15766
- async function createAgentModelRequirementsWithCommitments(agentSource, modelName) {
15975
+ async function createAgentModelRequirementsWithCommitments(agentSource, modelName, options) {
15767
15976
  var _a;
15977
+ const agentReferenceResolver = options === null || options === void 0 ? void 0 : options.agentReferenceResolver;
15768
15978
  // Parse the agent source to extract commitments
15769
15979
  const parseResult = parseAgentSourceWithCommitments(agentSource);
15770
15980
  // Apply DELETE filtering: remove prior commitments tagged by parameters targeted by DELETE/CANCEL/DISCARD/REMOVE
@@ -15801,8 +16011,8 @@ async function createAgentModelRequirementsWithCommitments(agentSource, modelNam
15801
16011
  // Store the agent name in metadata so commitments can access it
15802
16012
  requirements = {
15803
16013
  ...requirements,
15804
- metadata: {
15805
- ...requirements.metadata,
16014
+ _metadata: {
16015
+ ...requirements._metadata,
15806
16016
  agentName: parseResult.agentName,
15807
16017
  },
15808
16018
  };
@@ -15816,6 +16026,11 @@ async function createAgentModelRequirementsWithCommitments(agentSource, modelNam
15816
16026
  // Apply each commitment in order using reduce-like pattern
15817
16027
  for (let i = 0; i < filteredCommitments.length; i++) {
15818
16028
  const commitment = filteredCommitments[i];
16029
+ const isReferenceCommitment = Boolean(agentReferenceResolver && COMMITMENTS_WITH_AGENT_REFERENCES.has(commitment.type));
16030
+ let commitmentContent = commitment.content;
16031
+ if (isReferenceCommitment && agentReferenceResolver) {
16032
+ commitmentContent = await agentReferenceResolver.resolveCommitmentContent(commitment.type, commitment.content);
16033
+ }
15819
16034
  // CLOSED commitment should work only if its the last commitment in the book
15820
16035
  if (commitment.type === 'CLOSED' && i !== filteredCommitments.length - 1) {
15821
16036
  continue;
@@ -15823,7 +16038,7 @@ async function createAgentModelRequirementsWithCommitments(agentSource, modelNam
15823
16038
  const definition = getCommitmentDefinition(commitment.type);
15824
16039
  if (definition) {
15825
16040
  try {
15826
- requirements = definition.applyToAgentModelRequirements(requirements, commitment.content);
16041
+ requirements = definition.applyToAgentModelRequirements(requirements, commitmentContent);
15827
16042
  }
15828
16043
  catch (error) {
15829
16044
  console.warn(`Failed to apply commitment ${commitment.type}:`, error);
@@ -15971,23 +16186,28 @@ function isBinaryMimeType(mimeType) {
15971
16186
  }
15972
16187
 
15973
16188
  /**
15974
- * Creates model requirements for an agent based on its source
16189
+ * Creates model requirements for an agent based on its source.
15975
16190
  *
15976
16191
  * There are 2 similar functions:
15977
16192
  * - `parseAgentSource` which is a lightweight parser for agent source, it parses basic information and its purpose is to be quick and synchronous. The commitments there are hardcoded.
15978
16193
  * - `createAgentModelRequirements` which is an asynchronous function that creates model requirements it applies each commitment one by one and works asynchronous.
15979
16194
  *
16195
+ * @param agentSource - Book describing the agent.
16196
+ * @param modelName - Optional override for the agent's model.
16197
+ * @param availableModels - Models that could fulfill the agent.
16198
+ * @param llmTools - Execution tools used when selecting a best model.
16199
+ * @param options - Optional hooks such as the agent reference resolver.
15980
16200
  * @public exported from `@promptbook/core`
15981
16201
  */
15982
- async function createAgentModelRequirements(agentSource, modelName, availableModels, llmTools) {
16202
+ async function createAgentModelRequirements(agentSource, modelName, availableModels, llmTools, options) {
15983
16203
  // If availableModels are provided and no specific modelName is given,
15984
16204
  // use preparePersona to select the best model
15985
16205
  if (availableModels && !modelName && llmTools) {
15986
16206
  const selectedModelName = await selectBestModelUsingPersona(agentSource, llmTools);
15987
- return createAgentModelRequirementsWithCommitments(agentSource, selectedModelName);
16207
+ return createAgentModelRequirementsWithCommitments(agentSource, selectedModelName, options);
15988
16208
  }
15989
16209
  // Use the new commitment-based system with provided or default model
15990
- return createAgentModelRequirementsWithCommitments(agentSource, modelName);
16210
+ return createAgentModelRequirementsWithCommitments(agentSource, modelName, options);
15991
16211
  }
15992
16212
  /**
15993
16213
  * Selects the best model using the preparePersona function
@@ -16285,6 +16505,66 @@ const OPENAI_MODELS = exportJson({
16285
16505
  },
16286
16506
  /**/
16287
16507
  /**/
16508
+ {
16509
+ modelVariant: 'CHAT',
16510
+ modelTitle: 'gpt-5.2-codex',
16511
+ modelName: 'gpt-5.2-codex',
16512
+ modelDescription: 'High-capability Codex variant tuned for agentic code generation with large contexts and reasoning effort controls. Ideal for long-horizon coding workflows and multi-step reasoning.',
16513
+ pricing: {
16514
+ prompt: pricing(`$1.75 / 1M tokens`),
16515
+ output: pricing(`$14.00 / 1M tokens`),
16516
+ },
16517
+ },
16518
+ /**/
16519
+ /**/
16520
+ {
16521
+ modelVariant: 'CHAT',
16522
+ modelTitle: 'gpt-5.1-codex-max',
16523
+ modelName: 'gpt-5.1-codex-max',
16524
+ modelDescription: 'Premium GPT-5.1 Codex flavor that mirrors gpt-5.1 in capability and pricing while adding Codex tooling optimizations.',
16525
+ pricing: {
16526
+ prompt: pricing(`$1.25 / 1M tokens`),
16527
+ output: pricing(`$10.00 / 1M tokens`),
16528
+ },
16529
+ },
16530
+ /**/
16531
+ /**/
16532
+ {
16533
+ modelVariant: 'CHAT',
16534
+ modelTitle: 'gpt-5.1-codex',
16535
+ modelName: 'gpt-5.1-codex',
16536
+ modelDescription: 'Core GPT-5.1 Codex model focused on agentic coding tasks with a balanced trade-off between reasoning and cost.',
16537
+ pricing: {
16538
+ prompt: pricing(`$1.25 / 1M tokens`),
16539
+ output: pricing(`$10.00 / 1M tokens`),
16540
+ },
16541
+ },
16542
+ /**/
16543
+ /**/
16544
+ {
16545
+ modelVariant: 'CHAT',
16546
+ modelTitle: 'gpt-5.1-codex-mini',
16547
+ modelName: 'gpt-5.1-codex-mini',
16548
+ modelDescription: 'Compact, cost-effective GPT-5.1 Codex variant with a smaller context window ideal for cheap assistant iterations that still require coding awareness.',
16549
+ pricing: {
16550
+ prompt: pricing(`$0.25 / 1M tokens`),
16551
+ output: pricing(`$2.00 / 1M tokens`),
16552
+ },
16553
+ },
16554
+ /**/
16555
+ /**/
16556
+ {
16557
+ modelVariant: 'CHAT',
16558
+ modelTitle: 'gpt-5-codex',
16559
+ modelName: 'gpt-5-codex',
16560
+ modelDescription: 'Legacy GPT-5 Codex model built for agentic coding workloads with the same pricing as GPT-5 and a focus on stability.',
16561
+ pricing: {
16562
+ prompt: pricing(`$1.25 / 1M tokens`),
16563
+ output: pricing(`$10.00 / 1M tokens`),
16564
+ },
16565
+ },
16566
+ /**/
16567
+ /**/
16288
16568
  {
16289
16569
  modelVariant: 'CHAT',
16290
16570
  modelTitle: 'gpt-5-mini',
@@ -16989,6 +17269,32 @@ function isUnsupportedParameterError(error) {
16989
17269
  errorMessage.includes('does not support'));
16990
17270
  }
16991
17271
 
17272
+ /**
17273
+ * Provides access to the structured clone implementation when available.
17274
+ */
17275
+ function getStructuredCloneFunction() {
17276
+ return globalThis.structuredClone;
17277
+ }
17278
+ /**
17279
+ * Checks whether the prompt is a chat prompt that carries file attachments.
17280
+ */
17281
+ function hasChatPromptFiles(prompt) {
17282
+ return 'files' in prompt && Array.isArray(prompt.files);
17283
+ }
17284
+ /**
17285
+ * Creates a deep copy of the prompt while keeping attached files intact when structured clone is not available.
17286
+ */
17287
+ function clonePromptPreservingFiles(prompt) {
17288
+ const structuredCloneFn = getStructuredCloneFunction();
17289
+ if (typeof structuredCloneFn === 'function') {
17290
+ return structuredCloneFn(prompt);
17291
+ }
17292
+ const clonedPrompt = JSON.parse(JSON.stringify(prompt));
17293
+ if (hasChatPromptFiles(prompt)) {
17294
+ clonedPrompt.files = prompt.files;
17295
+ }
17296
+ return clonedPrompt;
17297
+ }
16992
17298
  /**
16993
17299
  * Execution Tools for calling OpenAI API or other OpenAI compatible provider
16994
17300
  *
@@ -17018,16 +17324,11 @@ class OpenAiCompatibleExecutionTools {
17018
17324
  const openAiOptions = { ...this.options };
17019
17325
  delete openAiOptions.isVerbose;
17020
17326
  delete openAiOptions.userId;
17021
- // Enhanced configuration for better ECONNRESET handling
17327
+ // Enhanced configuration with retries and timeouts.
17022
17328
  const enhancedOptions = {
17023
17329
  ...openAiOptions,
17024
17330
  timeout: API_REQUEST_TIMEOUT,
17025
17331
  maxRetries: CONNECTION_RETRIES_LIMIT,
17026
- defaultHeaders: {
17027
- Connection: 'keep-alive',
17028
- 'Keep-Alive': 'timeout=30, max=100',
17029
- ...openAiOptions.defaultHeaders,
17030
- },
17031
17332
  };
17032
17333
  this.client = new OpenAI(enhancedOptions);
17033
17334
  }
@@ -17078,7 +17379,7 @@ class OpenAiCompatibleExecutionTools {
17078
17379
  */
17079
17380
  async callChatModelStream(prompt, onProgress) {
17080
17381
  // Deep clone prompt and modelRequirements to avoid mutation across calls
17081
- const clonedPrompt = JSON.parse(JSON.stringify(prompt));
17382
+ const clonedPrompt = clonePromptPreservingFiles(prompt);
17082
17383
  // Use local Set for retried parameters to ensure independence and thread safety
17083
17384
  const retriedUnsupportedParameters = new Set();
17084
17385
  return this.callChatModelWithRetry(clonedPrompt, clonedPrompt.modelRequirements, [], retriedUnsupportedParameters, onProgress);
@@ -17105,7 +17406,10 @@ class OpenAiCompatibleExecutionTools {
17105
17406
  // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
17106
17407
  // <- Note: [🧆]
17107
17408
  }; // <- TODO: [💩] Guard here types better
17108
- if (format === 'JSON') {
17409
+ if (currentModelRequirements.responseFormat !== undefined) {
17410
+ modelSettings.response_format = currentModelRequirements.responseFormat;
17411
+ }
17412
+ else if (format === 'JSON') {
17109
17413
  modelSettings.response_format = {
17110
17414
  type: 'json_object',
17111
17415
  };
@@ -17916,18 +18220,6 @@ class OpenAiExecutionTools extends OpenAiCompatibleExecutionTools {
17916
18220
  get profile() {
17917
18221
  return OPENAI_PROVIDER_PROFILE;
17918
18222
  }
17919
- /*
17920
- Note: Commenting this out to avoid circular dependency
17921
- /**
17922
- * Create (sub)tools for calling OpenAI API Assistants
17923
- *
17924
- * @param assistantId Which assistant to use
17925
- * @returns Tools for calling OpenAI API Assistants with same token
17926
- * /
17927
- public createAssistantSubtools(assistantId: string_token): OpenAiAssistantExecutionTools {
17928
- return new OpenAiAssistantExecutionTools({ ...this.options, assistantId });
17929
- }
17930
- */
17931
18223
  /**
17932
18224
  * List all available models (non dynamically)
17933
18225
  *
@@ -17962,206 +18254,1259 @@ class OpenAiExecutionTools extends OpenAiCompatibleExecutionTools {
17962
18254
  }
17963
18255
  }
17964
18256
 
18257
+ const DEFAULT_KNOWLEDGE_SOURCE_DOWNLOAD_TIMEOUT_MS = 30000;
18258
+ const DEFAULT_KNOWLEDGE_SOURCE_UPLOAD_TIMEOUT_MS = 900000;
18259
+ const VECTOR_STORE_PROGRESS_LOG_INTERVAL_MIN_MS = 15000;
18260
+ const VECTOR_STORE_STALL_LOG_THRESHOLD_MS = 30000;
17965
18261
  /**
17966
- * Execution Tools for calling OpenAI API using the Responses API (Agents)
18262
+ * Base class for OpenAI execution tools that need hosted vector stores.
17967
18263
  *
17968
18264
  * @public exported from `@promptbook/openai`
17969
18265
  */
17970
- class OpenAiAgentExecutionTools extends OpenAiExecutionTools {
17971
- constructor(options) {
17972
- super(options);
17973
- this.vectorStoreId = options.vectorStoreId;
18266
+ class OpenAiVectorStoreHandler extends OpenAiExecutionTools {
18267
+ /**
18268
+ * Returns the per-knowledge-source download timeout in milliseconds.
18269
+ */
18270
+ getKnowledgeSourceDownloadTimeoutMs() {
18271
+ var _a;
18272
+ return (_a = this.vectorStoreOptions.knowledgeSourceDownloadTimeoutMs) !== null && _a !== void 0 ? _a : DEFAULT_KNOWLEDGE_SOURCE_DOWNLOAD_TIMEOUT_MS;
17974
18273
  }
17975
- get title() {
17976
- return 'OpenAI Agent';
18274
+ /**
18275
+ * Returns the max concurrency for knowledge source uploads.
18276
+ */
18277
+ getKnowledgeSourceUploadMaxConcurrency() {
18278
+ var _a;
18279
+ return (_a = this.vectorStoreOptions.knowledgeSourceUploadMaxConcurrency) !== null && _a !== void 0 ? _a : 5;
17977
18280
  }
17978
- get description() {
17979
- return 'Use OpenAI Responses API (Agentic)';
18281
+ /**
18282
+ * Returns the polling interval in milliseconds for vector store uploads.
18283
+ */
18284
+ getKnowledgeSourceUploadPollIntervalMs() {
18285
+ var _a;
18286
+ return (_a = this.vectorStoreOptions.knowledgeSourceUploadPollIntervalMs) !== null && _a !== void 0 ? _a : 5000;
17980
18287
  }
17981
18288
  /**
17982
- * Calls OpenAI API to use a chat model with streaming.
18289
+ * Returns the overall upload timeout in milliseconds for vector store uploads.
17983
18290
  */
17984
- async callChatModelStream(prompt, onProgress) {
18291
+ getKnowledgeSourceUploadTimeoutMs() {
18292
+ var _a;
18293
+ return (_a = this.vectorStoreOptions.knowledgeSourceUploadTimeoutMs) !== null && _a !== void 0 ? _a : DEFAULT_KNOWLEDGE_SOURCE_UPLOAD_TIMEOUT_MS;
18294
+ }
18295
+ /**
18296
+ * Returns true if we should continue even if vector store ingestion stalls.
18297
+ */
18298
+ shouldContinueOnVectorStoreStall() {
18299
+ var _a;
18300
+ return (_a = this.vectorStoreOptions.shouldContinueOnVectorStoreStall) !== null && _a !== void 0 ? _a : true;
18301
+ }
18302
+ /**
18303
+ * Returns vector-store-specific options with extended settings.
18304
+ */
18305
+ get vectorStoreOptions() {
18306
+ return this.options;
18307
+ }
18308
+ /**
18309
+ * Returns the OpenAI vector stores API surface, supporting stable and beta SDKs.
18310
+ */
18311
+ getVectorStoresApi(client) {
18312
+ var _a, _b;
18313
+ const vectorStores = (_a = client.vectorStores) !== null && _a !== void 0 ? _a : (_b = client.beta) === null || _b === void 0 ? void 0 : _b.vectorStores;
18314
+ if (!vectorStores) {
18315
+ throw new Error('OpenAI client does not support vector stores. Please ensure you are using a compatible version of the OpenAI SDK with vector store support.');
18316
+ }
18317
+ return vectorStores;
18318
+ }
18319
+ /**
18320
+ * Downloads a knowledge source URL into a File for vector store upload.
18321
+ */
18322
+ async downloadKnowledgeSourceFile(options) {
18323
+ var _a;
18324
+ const { source, timeoutMs, logLabel } = options;
18325
+ const startedAtMs = Date.now();
18326
+ const controller = new AbortController();
18327
+ const timeoutId = setTimeout(() => controller.abort(), timeoutMs);
17985
18328
  if (this.options.isVerbose) {
17986
- console.info('💬 OpenAI Agent callChatModel call', { prompt });
18329
+ console.info('[🤰]', 'Downloading knowledge source', {
18330
+ source,
18331
+ timeoutMs,
18332
+ logLabel,
18333
+ });
17987
18334
  }
17988
- const { content, parameters, modelRequirements } = prompt;
17989
- const client = await this.getClient();
17990
- if (modelRequirements.modelVariant !== 'CHAT') {
17991
- throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
18335
+ try {
18336
+ const response = await fetch(source, { signal: controller.signal });
18337
+ const contentType = (_a = response.headers.get('content-type')) !== null && _a !== void 0 ? _a : undefined;
18338
+ if (!response.ok) {
18339
+ console.error('[🤰]', 'Failed to download knowledge source', {
18340
+ source,
18341
+ status: response.status,
18342
+ statusText: response.statusText,
18343
+ contentType,
18344
+ elapsedMs: Date.now() - startedAtMs,
18345
+ logLabel,
18346
+ });
18347
+ return null;
18348
+ }
18349
+ const buffer = await response.arrayBuffer();
18350
+ let filename = source.split('/').pop() || 'downloaded-file';
18351
+ try {
18352
+ const url = new URL(source);
18353
+ filename = url.pathname.split('/').pop() || filename;
18354
+ }
18355
+ catch (error) {
18356
+ // Keep default filename
18357
+ }
18358
+ const file = new File([buffer], filename, contentType ? { type: contentType } : undefined);
18359
+ const elapsedMs = Date.now() - startedAtMs;
18360
+ const sizeBytes = buffer.byteLength;
18361
+ if (this.options.isVerbose) {
18362
+ console.info('[🤰]', 'Downloaded knowledge source', {
18363
+ source,
18364
+ filename,
18365
+ sizeBytes,
18366
+ contentType,
18367
+ elapsedMs,
18368
+ logLabel,
18369
+ });
18370
+ }
18371
+ return { file, sizeBytes, filename, elapsedMs };
17992
18372
  }
17993
- const rawPromptContent = templateParameters(content, {
17994
- ...parameters,
17995
- modelName: 'agent',
17996
- });
17997
- // Build input items
17998
- const input = []; // TODO: Type properly when OpenAI types are updated
17999
- // Add previous messages from thread (if any)
18000
- if ('thread' in prompt && Array.isArray(prompt.thread)) {
18001
- const previousMessages = prompt.thread.map((msg) => ({
18002
- role: msg.sender === 'assistant' ? 'assistant' : 'user',
18003
- content: msg.content,
18004
- }));
18005
- input.push(...previousMessages);
18373
+ catch (error) {
18374
+ assertsError(error);
18375
+ console.error('[🤰]', 'Error downloading knowledge source', {
18376
+ source,
18377
+ elapsedMs: Date.now() - startedAtMs,
18378
+ logLabel,
18379
+ error: serializeError(error),
18380
+ });
18381
+ return null;
18006
18382
  }
18007
- // Add current user message
18008
- input.push({
18009
- role: 'user',
18010
- content: rawPromptContent,
18011
- });
18012
- // Prepare tools
18013
- const tools = modelRequirements.tools ? mapToolsToOpenAi(modelRequirements.tools) : undefined;
18014
- // Add file_search if vector store is present
18015
- const agentTools = tools ? [...tools] : [];
18016
- let toolResources = undefined;
18017
- if (this.vectorStoreId) {
18018
- agentTools.push({ type: 'file_search' });
18019
- toolResources = {
18020
- file_search: {
18021
- vector_store_ids: [this.vectorStoreId],
18022
- },
18383
+ finally {
18384
+ clearTimeout(timeoutId);
18385
+ }
18386
+ }
18387
+ /**
18388
+ * Logs vector store file batch diagnostics to help trace ingestion stalls or failures.
18389
+ */
18390
+ async logVectorStoreFileBatchDiagnostics(options) {
18391
+ var _a, _b, _c, _d, _e;
18392
+ const { client, vectorStoreId, batchId, uploadedFiles, logLabel, reason } = options;
18393
+ if (reason === 'stalled' && !this.options.isVerbose) {
18394
+ return;
18395
+ }
18396
+ if (!batchId.startsWith('vsfb_')) {
18397
+ console.error('[🤰]', 'Vector store file batch diagnostics skipped (invalid batch id)', {
18398
+ vectorStoreId,
18399
+ batchId,
18400
+ reason,
18401
+ logLabel,
18402
+ });
18403
+ return;
18404
+ }
18405
+ const fileIdToMetadata = new Map();
18406
+ for (const file of uploadedFiles) {
18407
+ fileIdToMetadata.set(file.fileId, file);
18408
+ }
18409
+ try {
18410
+ const vectorStores = this.getVectorStoresApi(client);
18411
+ const limit = Math.min(100, Math.max(10, uploadedFiles.length));
18412
+ const batchFilesPage = await vectorStores.fileBatches.listFiles(batchId, {
18413
+ vector_store_id: vectorStoreId,
18414
+ limit,
18415
+ });
18416
+ const batchFiles = (_a = batchFilesPage.data) !== null && _a !== void 0 ? _a : [];
18417
+ const statusCounts = {
18418
+ in_progress: 0,
18419
+ completed: 0,
18420
+ failed: 0,
18421
+ cancelled: 0,
18422
+ };
18423
+ const errorSamples = [];
18424
+ const inProgressSamples = [];
18425
+ const batchFileIds = new Set();
18426
+ for (const file of batchFiles) {
18427
+ const status = (_b = file.status) !== null && _b !== void 0 ? _b : 'unknown';
18428
+ statusCounts[status] = ((_c = statusCounts[status]) !== null && _c !== void 0 ? _c : 0) + 1;
18429
+ const vectorStoreFileId = file.id;
18430
+ const uploadedFileId = (_d = file.file_id) !== null && _d !== void 0 ? _d : file.fileId;
18431
+ const fileId = uploadedFileId !== null && uploadedFileId !== void 0 ? uploadedFileId : vectorStoreFileId;
18432
+ batchFileIds.add(fileId);
18433
+ const metadata = fileIdToMetadata.get(fileId);
18434
+ if (status === 'failed') {
18435
+ errorSamples.push({
18436
+ fileId,
18437
+ status,
18438
+ error: (_e = file.last_error) === null || _e === void 0 ? void 0 : _e.message,
18439
+ filename: metadata === null || metadata === void 0 ? void 0 : metadata.filename,
18440
+ vectorStoreFileId: uploadedFileId ? vectorStoreFileId : undefined,
18441
+ });
18442
+ }
18443
+ if (status === 'in_progress') {
18444
+ inProgressSamples.push({
18445
+ fileId,
18446
+ filename: metadata === null || metadata === void 0 ? void 0 : metadata.filename,
18447
+ vectorStoreFileId: uploadedFileId ? vectorStoreFileId : undefined,
18448
+ });
18449
+ }
18450
+ }
18451
+ const missingSamples = uploadedFiles
18452
+ .filter((file) => !batchFileIds.has(file.fileId))
18453
+ .slice(0, 5)
18454
+ .map((file) => ({
18455
+ fileId: file.fileId,
18456
+ filename: file.filename,
18457
+ sizeBytes: file.sizeBytes,
18458
+ }));
18459
+ const vectorStore = await vectorStores.retrieve(vectorStoreId);
18460
+ const logPayload = {
18461
+ vectorStoreId,
18462
+ batchId,
18463
+ reason,
18464
+ vectorStoreStatus: vectorStore.status,
18465
+ vectorStoreFileCounts: vectorStore.file_counts,
18466
+ vectorStoreUsageBytes: vectorStore.usage_bytes,
18467
+ batchFileCount: batchFiles.length,
18468
+ statusCounts,
18469
+ errorSamples: errorSamples.slice(0, 5),
18470
+ inProgressSamples,
18471
+ missingFileCount: uploadedFiles.length - batchFileIds.size,
18472
+ missingSamples,
18473
+ logLabel,
18023
18474
  };
18475
+ const logFunction = reason === 'stalled' ? console.info : console.error;
18476
+ logFunction('[🤰]', 'Vector store file batch diagnostics', logPayload);
18477
+ }
18478
+ catch (error) {
18479
+ assertsError(error);
18480
+ console.error('[🤰]', 'Vector store file batch diagnostics failed', {
18481
+ vectorStoreId,
18482
+ batchId,
18483
+ reason,
18484
+ logLabel,
18485
+ error: serializeError(error),
18486
+ });
18487
+ }
18488
+ }
18489
+ /**
18490
+ * Uploads knowledge source files to the vector store and polls until processing completes.
18491
+ */
18492
+ async uploadKnowledgeSourceFilesToVectorStore(options) {
18493
+ var _a, _b, _c, _d, _e, _f;
18494
+ const { client, vectorStoreId, files, totalBytes, logLabel } = options;
18495
+ const vectorStores = this.getVectorStoresApi(client);
18496
+ const uploadStartedAtMs = Date.now();
18497
+ const maxConcurrency = Math.max(1, this.getKnowledgeSourceUploadMaxConcurrency());
18498
+ const pollIntervalMs = Math.max(1000, this.getKnowledgeSourceUploadPollIntervalMs());
18499
+ const uploadTimeoutMs = Math.max(1000, this.getKnowledgeSourceUploadTimeoutMs());
18500
+ if (this.options.isVerbose) {
18501
+ console.info('[🤰]', 'Uploading knowledge source files to OpenAI', {
18502
+ vectorStoreId,
18503
+ fileCount: files.length,
18504
+ totalBytes,
18505
+ maxConcurrency,
18506
+ pollIntervalMs,
18507
+ uploadTimeoutMs,
18508
+ logLabel,
18509
+ });
18510
+ }
18511
+ const fileTypeSummary = {};
18512
+ for (const file of files) {
18513
+ const filename = (_a = file.name) !== null && _a !== void 0 ? _a : '';
18514
+ const extension = filename.includes('.')
18515
+ ? (_c = (_b = filename.split('.').pop()) === null || _b === void 0 ? void 0 : _b.toLowerCase()) !== null && _c !== void 0 ? _c : 'unknown'
18516
+ : 'unknown';
18517
+ const sizeBytes = typeof file.size === 'number' ? file.size : 0;
18518
+ const summary = (_d = fileTypeSummary[extension]) !== null && _d !== void 0 ? _d : { count: 0, totalBytes: 0 };
18519
+ summary.count += 1;
18520
+ summary.totalBytes += sizeBytes;
18521
+ fileTypeSummary[extension] = summary;
18522
+ }
18523
+ if (this.options.isVerbose) {
18524
+ console.info('[🤰]', 'Knowledge source file summary', {
18525
+ vectorStoreId,
18526
+ fileCount: files.length,
18527
+ totalBytes,
18528
+ fileTypeSummary,
18529
+ logLabel,
18530
+ });
18531
+ }
18532
+ const fileEntries = files.map((file, index) => ({ file, index }));
18533
+ const fileIterator = fileEntries.values();
18534
+ const fileIds = [];
18535
+ const uploadedFiles = [];
18536
+ const failedUploads = [];
18537
+ let uploadedCount = 0;
18538
+ const processFiles = async (iterator) => {
18539
+ var _a, _b;
18540
+ for (const { file, index } of iterator) {
18541
+ const uploadIndex = index + 1;
18542
+ const filename = file.name || `knowledge-source-${uploadIndex}`;
18543
+ const extension = filename.includes('.')
18544
+ ? (_b = (_a = filename.split('.').pop()) === null || _a === void 0 ? void 0 : _a.toLowerCase()) !== null && _b !== void 0 ? _b : 'unknown'
18545
+ : 'unknown';
18546
+ const sizeBytes = typeof file.size === 'number' ? file.size : undefined;
18547
+ const fileUploadStartedAtMs = Date.now();
18548
+ if (this.options.isVerbose) {
18549
+ console.info('[🤰]', 'Uploading knowledge source file', {
18550
+ index: uploadIndex,
18551
+ total: files.length,
18552
+ filename,
18553
+ extension,
18554
+ sizeBytes,
18555
+ logLabel,
18556
+ });
18557
+ }
18558
+ try {
18559
+ const uploaded = await client.files.create({ file, purpose: 'assistants' });
18560
+ fileIds.push(uploaded.id);
18561
+ uploadedFiles.push({ fileId: uploaded.id, filename, sizeBytes });
18562
+ uploadedCount += 1;
18563
+ if (this.options.isVerbose) {
18564
+ console.info('[🤰]', 'Uploaded knowledge source file', {
18565
+ index: uploadIndex,
18566
+ total: files.length,
18567
+ filename,
18568
+ sizeBytes,
18569
+ fileId: uploaded.id,
18570
+ elapsedMs: Date.now() - fileUploadStartedAtMs,
18571
+ logLabel,
18572
+ });
18573
+ }
18574
+ }
18575
+ catch (error) {
18576
+ assertsError(error);
18577
+ const serializedError = serializeError(error);
18578
+ failedUploads.push({ index: uploadIndex, filename, error: serializedError });
18579
+ console.error('[🤰]', 'Failed to upload knowledge source file', {
18580
+ index: uploadIndex,
18581
+ total: files.length,
18582
+ filename,
18583
+ sizeBytes,
18584
+ elapsedMs: Date.now() - fileUploadStartedAtMs,
18585
+ logLabel,
18586
+ error: serializedError,
18587
+ });
18588
+ }
18589
+ }
18590
+ };
18591
+ const workerCount = Math.min(maxConcurrency, files.length);
18592
+ const workers = Array.from({ length: workerCount }, () => processFiles(fileIterator));
18593
+ await Promise.all(workers);
18594
+ if (this.options.isVerbose) {
18595
+ console.info('[🤰]', 'Finished uploading knowledge source files', {
18596
+ vectorStoreId,
18597
+ fileCount: files.length,
18598
+ uploadedCount,
18599
+ failedCount: failedUploads.length,
18600
+ elapsedMs: Date.now() - uploadStartedAtMs,
18601
+ failedSamples: failedUploads.slice(0, 3),
18602
+ logLabel,
18603
+ });
18604
+ }
18605
+ if (fileIds.length === 0) {
18606
+ console.error('[🤰]', 'No knowledge source files were uploaded', {
18607
+ vectorStoreId,
18608
+ fileCount: files.length,
18609
+ failedCount: failedUploads.length,
18610
+ logLabel,
18611
+ });
18612
+ return null;
18613
+ }
18614
+ const batch = await vectorStores.fileBatches.create(vectorStoreId, {
18615
+ file_ids: fileIds,
18616
+ });
18617
+ const expectedBatchId = batch.id;
18618
+ const expectedBatchIdValid = expectedBatchId.startsWith('vsfb_');
18619
+ if (!expectedBatchIdValid) {
18620
+ console.error('[🤰]', 'Vector store file batch id looks invalid', {
18621
+ vectorStoreId,
18622
+ batchId: expectedBatchId,
18623
+ batchVectorStoreId: batch.vector_store_id,
18624
+ logLabel,
18625
+ });
18626
+ }
18627
+ else if (batch.vector_store_id !== vectorStoreId) {
18628
+ console.error('[🤰]', 'Vector store file batch vector store id mismatch', {
18629
+ vectorStoreId,
18630
+ batchId: expectedBatchId,
18631
+ batchVectorStoreId: batch.vector_store_id,
18632
+ logLabel,
18633
+ });
18634
+ }
18635
+ if (this.options.isVerbose) {
18636
+ console.info('[🤰]', 'Created vector store file batch', {
18637
+ vectorStoreId,
18638
+ batchId: expectedBatchId,
18639
+ fileCount: fileIds.length,
18640
+ logLabel,
18641
+ });
18642
+ }
18643
+ const pollStartedAtMs = Date.now();
18644
+ const progressLogIntervalMs = Math.max(VECTOR_STORE_PROGRESS_LOG_INTERVAL_MIN_MS, pollIntervalMs);
18645
+ const diagnosticsIntervalMs = Math.max(60000, pollIntervalMs * 5);
18646
+ // let lastStatus: string | undefined;
18647
+ let lastCountsKey = '';
18648
+ let lastProgressKey = '';
18649
+ let lastLogAtMs = 0;
18650
+ let lastProgressAtMs = pollStartedAtMs;
18651
+ let lastDiagnosticsAtMs = pollStartedAtMs;
18652
+ let latestBatch = batch;
18653
+ let loggedBatchIdMismatch = false;
18654
+ let loggedBatchIdFallback = false;
18655
+ let loggedBatchIdInvalid = false;
18656
+ let shouldPoll = true;
18657
+ while (shouldPoll) {
18658
+ const nowMs = Date.now();
18659
+ // [🤰] Note: Sometimes OpenAI returns Vector Store object instead of Batch object, or IDs get swapped.
18660
+ const rawBatchId = typeof latestBatch.id === 'string' ? latestBatch.id : '';
18661
+ const rawVectorStoreId = latestBatch.vector_store_id;
18662
+ let returnedBatchId = rawBatchId;
18663
+ let returnedBatchIdValid = typeof returnedBatchId === 'string' && returnedBatchId.startsWith('vsfb_');
18664
+ if (!returnedBatchIdValid && expectedBatchIdValid) {
18665
+ if (!loggedBatchIdFallback) {
18666
+ console.error('[🤰]', 'Vector store file batch id missing from response; falling back to expected', {
18667
+ vectorStoreId,
18668
+ expectedBatchId,
18669
+ returnedBatchId,
18670
+ rawVectorStoreId,
18671
+ logLabel,
18672
+ });
18673
+ loggedBatchIdFallback = true;
18674
+ }
18675
+ returnedBatchId = expectedBatchId;
18676
+ returnedBatchIdValid = true;
18677
+ }
18678
+ if (!returnedBatchIdValid && !loggedBatchIdInvalid) {
18679
+ console.error('[🤰]', 'Vector store file batch id is invalid; stopping polling', {
18680
+ vectorStoreId,
18681
+ expectedBatchId,
18682
+ returnedBatchId,
18683
+ rawVectorStoreId,
18684
+ logLabel,
18685
+ });
18686
+ loggedBatchIdInvalid = true;
18687
+ }
18688
+ const batchIdMismatch = expectedBatchIdValid && returnedBatchIdValid && returnedBatchId !== expectedBatchId;
18689
+ if (batchIdMismatch && !loggedBatchIdMismatch) {
18690
+ console.error('[🤰]', 'Vector store file batch id mismatch', {
18691
+ vectorStoreId,
18692
+ expectedBatchId,
18693
+ returnedBatchId,
18694
+ logLabel,
18695
+ });
18696
+ loggedBatchIdMismatch = true;
18697
+ }
18698
+ if (returnedBatchIdValid) {
18699
+ latestBatch = await vectorStores.fileBatches.retrieve(returnedBatchId, {
18700
+ vector_store_id: vectorStoreId,
18701
+ });
18702
+ }
18703
+ else {
18704
+ shouldPoll = false;
18705
+ continue;
18706
+ }
18707
+ const status = (_e = latestBatch.status) !== null && _e !== void 0 ? _e : 'unknown';
18708
+ const fileCounts = (_f = latestBatch.file_counts) !== null && _f !== void 0 ? _f : {};
18709
+ const progressKey = JSON.stringify(fileCounts);
18710
+ const statusCountsKey = `${status}-${progressKey}`;
18711
+ const isProgressing = progressKey !== lastProgressKey;
18712
+ if (isProgressing) {
18713
+ lastProgressAtMs = nowMs;
18714
+ lastProgressKey = progressKey;
18715
+ }
18716
+ if (this.options.isVerbose &&
18717
+ (statusCountsKey !== lastCountsKey || nowMs - lastLogAtMs >= progressLogIntervalMs)) {
18718
+ console.info('[🤰]', 'Vector store file batch status', {
18719
+ vectorStoreId,
18720
+ batchId: returnedBatchId,
18721
+ status,
18722
+ fileCounts,
18723
+ elapsedMs: nowMs - pollStartedAtMs,
18724
+ logLabel,
18725
+ });
18726
+ lastCountsKey = statusCountsKey;
18727
+ lastLogAtMs = nowMs;
18728
+ }
18729
+ if (status === 'in_progress' &&
18730
+ nowMs - lastProgressAtMs >= VECTOR_STORE_STALL_LOG_THRESHOLD_MS &&
18731
+ nowMs - lastDiagnosticsAtMs >= diagnosticsIntervalMs) {
18732
+ lastDiagnosticsAtMs = nowMs;
18733
+ await this.logVectorStoreFileBatchDiagnostics({
18734
+ client,
18735
+ vectorStoreId,
18736
+ batchId: returnedBatchId,
18737
+ uploadedFiles,
18738
+ logLabel,
18739
+ reason: 'stalled',
18740
+ });
18741
+ }
18742
+ if (status === 'completed') {
18743
+ if (this.options.isVerbose) {
18744
+ console.info('[🤰]', 'Vector store file batch completed', {
18745
+ vectorStoreId,
18746
+ batchId: returnedBatchId,
18747
+ fileCounts,
18748
+ elapsedMs: nowMs - pollStartedAtMs,
18749
+ logLabel,
18750
+ });
18751
+ }
18752
+ shouldPoll = false;
18753
+ continue;
18754
+ }
18755
+ if (status === 'failed') {
18756
+ console.error('[🤰]', 'Vector store file batch completed with failures', {
18757
+ vectorStoreId,
18758
+ batchId: returnedBatchId,
18759
+ fileCounts,
18760
+ elapsedMs: nowMs - pollStartedAtMs,
18761
+ logLabel,
18762
+ });
18763
+ await this.logVectorStoreFileBatchDiagnostics({
18764
+ client,
18765
+ vectorStoreId,
18766
+ batchId: returnedBatchId,
18767
+ uploadedFiles,
18768
+ logLabel,
18769
+ reason: 'failed',
18770
+ });
18771
+ shouldPoll = false;
18772
+ continue;
18773
+ }
18774
+ if (status === 'cancelled') {
18775
+ console.error('[🤰]', 'Vector store file batch did not complete', {
18776
+ vectorStoreId,
18777
+ batchId: returnedBatchId,
18778
+ status,
18779
+ fileCounts,
18780
+ elapsedMs: nowMs - pollStartedAtMs,
18781
+ logLabel,
18782
+ });
18783
+ await this.logVectorStoreFileBatchDiagnostics({
18784
+ client,
18785
+ vectorStoreId,
18786
+ batchId: returnedBatchId,
18787
+ uploadedFiles,
18788
+ logLabel,
18789
+ reason: 'failed',
18790
+ });
18791
+ shouldPoll = false;
18792
+ continue;
18793
+ }
18794
+ if (nowMs - pollStartedAtMs >= uploadTimeoutMs) {
18795
+ console.error('[🤰]', 'Timed out waiting for vector store file batch', {
18796
+ vectorStoreId,
18797
+ batchId: returnedBatchId,
18798
+ fileCounts,
18799
+ elapsedMs: nowMs - pollStartedAtMs,
18800
+ uploadTimeoutMs,
18801
+ logLabel,
18802
+ });
18803
+ await this.logVectorStoreFileBatchDiagnostics({
18804
+ client,
18805
+ vectorStoreId,
18806
+ batchId: returnedBatchId,
18807
+ uploadedFiles,
18808
+ logLabel,
18809
+ reason: 'timeout',
18810
+ });
18811
+ if (this.shouldContinueOnVectorStoreStall()) {
18812
+ console.warn('[🤰]', 'Continuing despite vector store timeout as requested', {
18813
+ vectorStoreId,
18814
+ logLabel,
18815
+ });
18816
+ shouldPoll = false;
18817
+ continue;
18818
+ }
18819
+ try {
18820
+ const cancelBatchId = batchIdMismatch && returnedBatchId.startsWith('vsfb_') ? returnedBatchId : expectedBatchId;
18821
+ if (!cancelBatchId.startsWith('vsfb_')) {
18822
+ console.error('[🤰]', 'Skipping vector store file batch cancel (invalid batch id)', {
18823
+ vectorStoreId,
18824
+ batchId: cancelBatchId,
18825
+ logLabel,
18826
+ });
18827
+ }
18828
+ else {
18829
+ await vectorStores.fileBatches.cancel(cancelBatchId, {
18830
+ vector_store_id: vectorStoreId,
18831
+ });
18832
+ }
18833
+ if (this.options.isVerbose) {
18834
+ console.info('[🤰]', 'Cancelled vector store file batch after timeout', {
18835
+ vectorStoreId,
18836
+ batchId: batchIdMismatch && returnedBatchId.startsWith('vsfb_')
18837
+ ? returnedBatchId
18838
+ : expectedBatchId,
18839
+ ...(batchIdMismatch ? { returnedBatchId } : {}),
18840
+ logLabel,
18841
+ });
18842
+ }
18843
+ }
18844
+ catch (error) {
18845
+ assertsError(error);
18846
+ console.error('[🤰]', 'Failed to cancel vector store file batch after timeout', {
18847
+ vectorStoreId,
18848
+ batchId: expectedBatchId,
18849
+ ...(batchIdMismatch ? { returnedBatchId } : {}),
18850
+ logLabel,
18851
+ error: serializeError(error),
18852
+ });
18853
+ }
18854
+ shouldPoll = false;
18855
+ continue;
18856
+ }
18857
+ await new Promise((resolve) => setTimeout(resolve, pollIntervalMs));
18858
+ }
18859
+ return latestBatch;
18860
+ }
18861
+ /**
18862
+ * Creates a vector store and uploads knowledge sources, returning its ID.
18863
+ */
18864
+ async createVectorStoreWithKnowledgeSources(options) {
18865
+ const { client, name, knowledgeSources, logLabel } = options;
18866
+ const vectorStores = this.getVectorStoresApi(client);
18867
+ const knowledgeSourcesCount = knowledgeSources.length;
18868
+ const downloadTimeoutMs = this.getKnowledgeSourceDownloadTimeoutMs();
18869
+ if (this.options.isVerbose) {
18870
+ console.info('[🤰]', 'Creating vector store with knowledge sources', {
18871
+ name,
18872
+ knowledgeSourcesCount,
18873
+ downloadTimeoutMs,
18874
+ logLabel,
18875
+ });
18876
+ }
18877
+ const vectorStore = await vectorStores.create({
18878
+ name: `${name} Knowledge Base`,
18879
+ });
18880
+ const vectorStoreId = vectorStore.id;
18881
+ if (this.options.isVerbose) {
18882
+ console.info('[🤰]', 'Vector store created', {
18883
+ vectorStoreId,
18884
+ logLabel,
18885
+ });
18886
+ }
18887
+ const fileStreams = [];
18888
+ const skippedSources = [];
18889
+ let totalBytes = 0;
18890
+ const processingStartedAtMs = Date.now();
18891
+ for (const [index, source] of knowledgeSources.entries()) {
18892
+ try {
18893
+ const isDataUrl = isDataUrlKnowledgeSource(source);
18894
+ const isHttp = source.startsWith('http://') || source.startsWith('https://');
18895
+ const sourceType = isDataUrl ? 'data_url' : isHttp ? 'url' : 'file';
18896
+ if (this.options.isVerbose) {
18897
+ console.info('[🤰]', 'Processing knowledge source', {
18898
+ index: index + 1,
18899
+ total: knowledgeSourcesCount,
18900
+ source,
18901
+ sourceType,
18902
+ logLabel,
18903
+ });
18904
+ }
18905
+ if (isDataUrl) {
18906
+ const parsed = parseDataUrlKnowledgeSource(source);
18907
+ if (!parsed) {
18908
+ skippedSources.push({ source, reason: 'invalid_data_url' });
18909
+ if (this.options.isVerbose) {
18910
+ console.info('[🤰]', 'Skipping knowledge source (invalid data URL)', {
18911
+ source,
18912
+ sourceType,
18913
+ logLabel,
18914
+ });
18915
+ }
18916
+ continue;
18917
+ }
18918
+ const dataUrlFile = new File([parsed.buffer], parsed.filename, {
18919
+ type: parsed.mimeType,
18920
+ });
18921
+ fileStreams.push(dataUrlFile);
18922
+ totalBytes += parsed.buffer.length;
18923
+ continue;
18924
+ }
18925
+ if (isHttp) {
18926
+ const downloadResult = await this.downloadKnowledgeSourceFile({
18927
+ source,
18928
+ timeoutMs: downloadTimeoutMs,
18929
+ logLabel,
18930
+ });
18931
+ if (downloadResult) {
18932
+ fileStreams.push(downloadResult.file);
18933
+ totalBytes += downloadResult.sizeBytes;
18934
+ }
18935
+ else {
18936
+ skippedSources.push({ source, reason: 'download_failed' });
18937
+ }
18938
+ }
18939
+ else {
18940
+ skippedSources.push({ source, reason: 'unsupported_source_type' });
18941
+ if (this.options.isVerbose) {
18942
+ console.info('[🤰]', 'Skipping knowledge source (unsupported type)', {
18943
+ source,
18944
+ sourceType,
18945
+ logLabel,
18946
+ });
18947
+ }
18948
+ /*
18949
+ TODO: [🤰] Resolve problem with browser environment
18950
+ // Assume it's a local file path
18951
+ // Note: This will work in Node.js environment
18952
+ // For browser environments, this would need different handling
18953
+ const fs = await import('fs');
18954
+ const fileStream = fs.createReadStream(source);
18955
+ fileStreams.push(fileStream);
18956
+ */
18957
+ }
18958
+ }
18959
+ catch (error) {
18960
+ assertsError(error);
18961
+ skippedSources.push({ source, reason: 'processing_error' });
18962
+ console.error('[🤰]', 'Error processing knowledge source', {
18963
+ source,
18964
+ logLabel,
18965
+ error: serializeError(error),
18966
+ });
18967
+ }
18968
+ }
18969
+ if (this.options.isVerbose) {
18970
+ console.info('[🤰]', 'Finished processing knowledge sources', {
18971
+ total: knowledgeSourcesCount,
18972
+ downloadedCount: fileStreams.length,
18973
+ skippedCount: skippedSources.length,
18974
+ totalBytes,
18975
+ elapsedMs: Date.now() - processingStartedAtMs,
18976
+ skippedSamples: skippedSources.slice(0, 3),
18977
+ logLabel,
18978
+ });
18979
+ }
18980
+ if (fileStreams.length > 0) {
18981
+ if (this.options.isVerbose) {
18982
+ console.info('[🤰]', 'Uploading files to vector store', {
18983
+ vectorStoreId,
18984
+ fileCount: fileStreams.length,
18985
+ totalBytes,
18986
+ maxConcurrency: this.getKnowledgeSourceUploadMaxConcurrency(),
18987
+ pollIntervalMs: this.getKnowledgeSourceUploadPollIntervalMs(),
18988
+ uploadTimeoutMs: this.getKnowledgeSourceUploadTimeoutMs(),
18989
+ logLabel,
18990
+ });
18991
+ }
18992
+ try {
18993
+ await this.uploadKnowledgeSourceFilesToVectorStore({
18994
+ client,
18995
+ vectorStoreId,
18996
+ files: fileStreams,
18997
+ totalBytes,
18998
+ logLabel,
18999
+ });
19000
+ }
19001
+ catch (error) {
19002
+ assertsError(error);
19003
+ console.error('[🤰]', 'Error uploading files to vector store', {
19004
+ vectorStoreId,
19005
+ logLabel,
19006
+ error: serializeError(error),
19007
+ });
19008
+ }
19009
+ }
19010
+ else if (this.options.isVerbose) {
19011
+ console.info('[🤰]', 'No knowledge source files to upload', {
19012
+ vectorStoreId,
19013
+ skippedCount: skippedSources.length,
19014
+ logLabel,
19015
+ });
19016
+ }
19017
+ return {
19018
+ vectorStoreId,
19019
+ uploadedFileCount: fileStreams.length,
19020
+ skippedCount: skippedSources.length,
19021
+ totalBytes,
19022
+ };
19023
+ }
19024
+ }
19025
+
19026
+ const DEFAULT_AGENT_KIT_MODEL_NAME = 'gpt-5.2';
19027
+ const DEFAULT_JSON_SCHEMA_NAME = 'StructuredOutput';
19028
+ /*
19029
+ TODO: Use or remove
19030
+ const EMPTY_JSON_SCHEMA: JsonSchemaDefinition['schema'] = {
19031
+ type: 'object',
19032
+ properties: {},
19033
+ required: [],
19034
+ additionalProperties: true,
19035
+ };
19036
+ */
19037
+ function buildJsonSchemaDefinition(jsonSchema) {
19038
+ var _a, _b, _c;
19039
+ const schema = (_a = jsonSchema === null || jsonSchema === void 0 ? void 0 : jsonSchema.schema) !== null && _a !== void 0 ? _a : {};
19040
+ return {
19041
+ type: 'json_schema',
19042
+ name: (_b = jsonSchema === null || jsonSchema === void 0 ? void 0 : jsonSchema.name) !== null && _b !== void 0 ? _b : DEFAULT_JSON_SCHEMA_NAME,
19043
+ strict: Boolean(jsonSchema === null || jsonSchema === void 0 ? void 0 : jsonSchema.strict),
19044
+ schema: {
19045
+ type: 'object',
19046
+ properties: ((_c = schema.properties) !== null && _c !== void 0 ? _c : {}),
19047
+ required: Array.isArray(schema.required) ? schema.required : [],
19048
+ additionalProperties: schema.additionalProperties === undefined ? true : Boolean(schema.additionalProperties),
19049
+ description: schema.description,
19050
+ },
19051
+ };
19052
+ }
19053
+ /**
19054
+ * Maps OpenAI `response_format` payloads to AgentKit output types so the runner can forward
19055
+ * structured-output preferences to OpenAI while still reusing the same AgentKit agent instance.
19056
+ *
19057
+ * @param responseFormat - The OpenAI `response_format` payload from the user request.
19058
+ * @returns An Agent output type compatible with the requested schema or `undefined` when no impact is required.
19059
+ * @private utility of Open AI
19060
+ */
19061
+ function mapResponseFormatToAgentOutputType(responseFormat) {
19062
+ if (!responseFormat) {
19063
+ return undefined;
19064
+ }
19065
+ if (typeof responseFormat === 'string') {
19066
+ if (responseFormat === 'text') {
19067
+ return 'text';
19068
+ }
19069
+ if (responseFormat === 'json_schema' || responseFormat === 'json_object') {
19070
+ return buildJsonSchemaDefinition();
19071
+ }
19072
+ return 'text';
19073
+ }
19074
+ switch (responseFormat.type) {
19075
+ case 'text':
19076
+ return 'text';
19077
+ case 'json_schema':
19078
+ return buildJsonSchemaDefinition(responseFormat.json_schema);
19079
+ case 'json_object':
19080
+ return buildJsonSchemaDefinition();
19081
+ default:
19082
+ return undefined;
19083
+ }
19084
+ }
19085
+ /**
19086
+ * Execution tools for OpenAI AgentKit (Agents SDK).
19087
+ *
19088
+ * @public exported from `@promptbook/openai`
19089
+ */
19090
+ class OpenAiAgentKitExecutionTools extends OpenAiVectorStoreHandler {
19091
+ /**
19092
+ * Creates OpenAI AgentKit execution tools.
19093
+ */
19094
+ constructor(options) {
19095
+ var _a;
19096
+ if (options.isProxied) {
19097
+ throw new NotYetImplementedError(`Proxy mode is not yet implemented for OpenAI AgentKit`);
19098
+ }
19099
+ super(options);
19100
+ this.preparedAgentKitAgent = null;
19101
+ this.agentKitModelName = (_a = options.agentKitModelName) !== null && _a !== void 0 ? _a : DEFAULT_AGENT_KIT_MODEL_NAME;
19102
+ }
19103
+ get title() {
19104
+ return 'OpenAI AgentKit';
19105
+ }
19106
+ get description() {
19107
+ return 'Use OpenAI AgentKit for agent-style chat with tools and knowledge';
19108
+ }
19109
+ /**
19110
+ * Calls OpenAI AgentKit with a chat prompt (non-streaming).
19111
+ */
19112
+ async callChatModel(prompt) {
19113
+ return this.callChatModelStream(prompt, () => { });
19114
+ }
19115
+ /**
19116
+ * Calls OpenAI AgentKit with a chat prompt (streaming).
19117
+ */
19118
+ async callChatModelStream(prompt, onProgress) {
19119
+ const { content, parameters, modelRequirements } = prompt;
19120
+ if (modelRequirements.modelVariant !== 'CHAT') {
19121
+ throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
19122
+ }
19123
+ for (const key of ['maxTokens', 'modelName', 'seed', 'temperature']) {
19124
+ if (modelRequirements[key] !== undefined) {
19125
+ throw new NotYetImplementedError(`In \`OpenAiAgentKitExecutionTools\` you cannot specify \`${key}\``);
19126
+ }
19127
+ }
19128
+ const rawPromptContent = templateParameters(content, {
19129
+ ...parameters,
19130
+ modelName: this.agentKitModelName,
19131
+ });
19132
+ const responseFormatOutputType = mapResponseFormatToAgentOutputType(modelRequirements.responseFormat);
19133
+ const preparedAgentKitAgent = await this.prepareAgentKitAgent({
19134
+ name: (prompt.title || 'Agent'),
19135
+ instructions: modelRequirements.systemMessage || '',
19136
+ knowledgeSources: modelRequirements.knowledgeSources,
19137
+ tools: 'tools' in prompt && Array.isArray(prompt.tools) ? prompt.tools : modelRequirements.tools,
19138
+ });
19139
+ return this.callChatModelStreamWithPreparedAgent({
19140
+ openAiAgentKitAgent: preparedAgentKitAgent.agent,
19141
+ prompt,
19142
+ rawPromptContent,
19143
+ onProgress,
19144
+ responseFormatOutputType,
19145
+ });
19146
+ }
19147
+ /**
19148
+ * Returns a prepared AgentKit agent when the server wants to manage caching externally.
19149
+ */
19150
+ getPreparedAgentKitAgent() {
19151
+ return this.preparedAgentKitAgent;
19152
+ }
19153
+ /**
19154
+ * Stores a prepared AgentKit agent for later reuse by external cache managers.
19155
+ */
19156
+ setPreparedAgentKitAgent(preparedAgent) {
19157
+ this.preparedAgentKitAgent = preparedAgent;
19158
+ }
19159
+ /**
19160
+ * Creates a new tools instance bound to a prepared AgentKit agent.
19161
+ */
19162
+ getPreparedAgentTools(preparedAgent) {
19163
+ const tools = new OpenAiAgentKitExecutionTools(this.agentKitOptions);
19164
+ tools.setPreparedAgentKitAgent(preparedAgent);
19165
+ return tools;
19166
+ }
19167
+ /**
19168
+ * Prepares an AgentKit agent with optional knowledge sources and tool definitions.
19169
+ */
19170
+ async prepareAgentKitAgent(options) {
19171
+ var _a, _b;
19172
+ const { name, instructions, knowledgeSources, tools, vectorStoreId: cachedVectorStoreId, storeAsPrepared, } = options;
19173
+ await this.ensureAgentKitDefaults();
19174
+ if (this.options.isVerbose) {
19175
+ console.info('[🤰]', 'Preparing OpenAI AgentKit agent', {
19176
+ name,
19177
+ instructionsLength: instructions.length,
19178
+ knowledgeSourcesCount: (_a = knowledgeSources === null || knowledgeSources === void 0 ? void 0 : knowledgeSources.length) !== null && _a !== void 0 ? _a : 0,
19179
+ toolsCount: (_b = tools === null || tools === void 0 ? void 0 : tools.length) !== null && _b !== void 0 ? _b : 0,
19180
+ });
19181
+ }
19182
+ let vectorStoreId = cachedVectorStoreId;
19183
+ if (!vectorStoreId && knowledgeSources && knowledgeSources.length > 0) {
19184
+ const vectorStoreResult = await this.createVectorStoreWithKnowledgeSources({
19185
+ client: await this.getClient(),
19186
+ name,
19187
+ knowledgeSources,
19188
+ logLabel: 'agentkit preparation',
19189
+ });
19190
+ vectorStoreId = vectorStoreResult.vectorStoreId;
19191
+ }
19192
+ else if (vectorStoreId && this.options.isVerbose) {
19193
+ console.info('[🤰]', 'Using cached vector store for AgentKit agent', {
19194
+ name,
19195
+ vectorStoreId,
19196
+ });
19197
+ }
19198
+ const agentKitTools = this.buildAgentKitTools({ tools, vectorStoreId });
19199
+ const openAiAgentKitAgent = new Agent$1({
19200
+ name,
19201
+ model: this.agentKitModelName,
19202
+ instructions: instructions || 'You are a helpful assistant.',
19203
+ tools: agentKitTools,
19204
+ });
19205
+ const preparedAgent = {
19206
+ agent: openAiAgentKitAgent,
19207
+ vectorStoreId,
19208
+ };
19209
+ if (storeAsPrepared) {
19210
+ this.setPreparedAgentKitAgent(preparedAgent);
19211
+ }
19212
+ if (this.options.isVerbose) {
19213
+ console.info('[🤰]', 'OpenAI AgentKit agent ready', {
19214
+ name,
19215
+ model: this.agentKitModelName,
19216
+ toolCount: agentKitTools.length,
19217
+ hasVectorStore: Boolean(vectorStoreId),
19218
+ });
19219
+ }
19220
+ return preparedAgent;
19221
+ }
19222
+ /**
19223
+ * Ensures the AgentKit SDK is wired to the OpenAI client and API key.
19224
+ */
19225
+ async ensureAgentKitDefaults() {
19226
+ const client = await this.getClient();
19227
+ setDefaultOpenAIClient(client);
19228
+ const apiKey = this.agentKitOptions.apiKey;
19229
+ if (apiKey && typeof apiKey === 'string') {
19230
+ setDefaultOpenAIKey(apiKey);
19231
+ }
19232
+ }
19233
+ /**
19234
+ * Builds the tool list for AgentKit, including hosted file search when applicable.
19235
+ */
19236
+ buildAgentKitTools(options) {
19237
+ var _a;
19238
+ const { tools, vectorStoreId } = options;
19239
+ const agentKitTools = [];
19240
+ if (vectorStoreId) {
19241
+ agentKitTools.push(fileSearchTool(vectorStoreId));
19242
+ }
19243
+ if (tools && tools.length > 0) {
19244
+ const scriptTools = this.resolveScriptTools();
19245
+ for (const toolDefinition of tools) {
19246
+ agentKitTools.push(tool({
19247
+ name: toolDefinition.name,
19248
+ description: toolDefinition.description,
19249
+ parameters: toolDefinition.parameters
19250
+ ? {
19251
+ ...toolDefinition.parameters,
19252
+ additionalProperties: false,
19253
+ required: (_a = toolDefinition.parameters.required) !== null && _a !== void 0 ? _a : [],
19254
+ }
19255
+ : undefined,
19256
+ strict: false,
19257
+ execute: async (input, runContext, details) => {
19258
+ var _a, _b, _c;
19259
+ const scriptTool = scriptTools[0];
19260
+ const functionName = toolDefinition.name;
19261
+ const calledAt = $getCurrentDate();
19262
+ const callId = (_a = details === null || details === void 0 ? void 0 : details.toolCall) === null || _a === void 0 ? void 0 : _a.callId;
19263
+ const functionArgs = input !== null && input !== void 0 ? input : {};
19264
+ if (this.options.isVerbose) {
19265
+ console.info('[🤰]', 'Executing AgentKit tool', {
19266
+ functionName,
19267
+ callId,
19268
+ calledAt,
19269
+ });
19270
+ }
19271
+ try {
19272
+ return await scriptTool.execute({
19273
+ scriptLanguage: 'javascript',
19274
+ script: `
19275
+ const args = ${JSON.stringify(functionArgs)};
19276
+ return await ${functionName}(args);
19277
+ `,
19278
+ parameters: (_c = (_b = runContext === null || runContext === void 0 ? void 0 : runContext.context) === null || _b === void 0 ? void 0 : _b.parameters) !== null && _c !== void 0 ? _c : {},
19279
+ });
19280
+ }
19281
+ catch (error) {
19282
+ assertsError(error);
19283
+ const serializedError = serializeError(error);
19284
+ const errorMessage = spaceTrim$2((block) => `
19285
+
19286
+ The invoked tool \`${functionName}\` failed with error:
19287
+
19288
+ \`\`\`json
19289
+ ${block(JSON.stringify(serializedError, null, 4))}
19290
+ \`\`\`
19291
+
19292
+ `);
19293
+ console.error('[🤰]', 'AgentKit tool execution failed', {
19294
+ functionName,
19295
+ callId,
19296
+ error: serializedError,
19297
+ });
19298
+ return errorMessage;
19299
+ }
19300
+ },
19301
+ }));
19302
+ }
18024
19303
  }
18025
- // Add file_search also if knowledgeSources are present in the prompt (passed via AgentLlmExecutionTools)
18026
- if (modelRequirements.knowledgeSources &&
18027
- modelRequirements.knowledgeSources.length > 0 &&
18028
- !this.vectorStoreId) {
18029
- // Note: Vector store should have been created by AgentLlmExecutionTools and passed via options.
18030
- // If we are here, it means we have knowledge sources but no vector store ID.
18031
- // We can't easily create one here without persisting it.
18032
- console.warn('Knowledge sources provided but no vector store ID. Creating temporary vector store is not implemented in callChatModelStream.');
19304
+ return agentKitTools;
19305
+ }
19306
+ /**
19307
+ * Resolves the configured script tools for tool execution.
19308
+ */
19309
+ resolveScriptTools() {
19310
+ const executionTools = this.options.executionTools;
19311
+ if (!executionTools || !executionTools.script) {
19312
+ throw new PipelineExecutionError(`Model requested tools but no executionTools.script were provided in OpenAiAgentKitExecutionTools options`);
18033
19313
  }
19314
+ return Array.isArray(executionTools.script) ? executionTools.script : [executionTools.script];
19315
+ }
19316
+ /**
19317
+ * Runs a prepared AgentKit agent and streams results back to the caller.
19318
+ */
19319
+ async callChatModelStreamWithPreparedAgent(options) {
19320
+ var _a, _b, _c, _d;
19321
+ const { openAiAgentKitAgent, prompt, onProgress } = options;
19322
+ const rawPromptContent = (_a = options.rawPromptContent) !== null && _a !== void 0 ? _a : templateParameters(prompt.content, {
19323
+ ...prompt.parameters,
19324
+ modelName: this.agentKitModelName,
19325
+ });
19326
+ const agentForRun = options.responseFormatOutputType !== undefined
19327
+ ? openAiAgentKitAgent.clone({
19328
+ outputType: options.responseFormatOutputType,
19329
+ })
19330
+ : openAiAgentKitAgent;
18034
19331
  const start = $getCurrentDate();
18035
- // Construct the request
19332
+ let latestContent = '';
19333
+ const toolCalls = [];
19334
+ const toolCallIndexById = new Map();
19335
+ const inputItems = await this.buildAgentKitInputItems(prompt, rawPromptContent);
18036
19336
  const rawRequest = {
18037
- // TODO: Type properly as OpenAI.Responses.CreateResponseParams
18038
- model: modelRequirements.modelName || 'gpt-4o',
18039
- input,
18040
- instructions: modelRequirements.systemMessage,
18041
- tools: agentTools.length > 0 ? agentTools : undefined,
18042
- tool_resources: toolResources,
18043
- store: false, // Stateless by default as we pass full history
19337
+ agentName: agentForRun.name,
19338
+ input: inputItems,
18044
19339
  };
18045
- if (this.options.isVerbose) {
18046
- console.info(colors.bgWhite('rawRequest (Responses API)'), JSON.stringify(rawRequest, null, 4));
18047
- }
18048
- // Call Responses API
18049
- // Note: Using any cast because types might not be updated yet
18050
- const response = await client.responses.create(rawRequest);
18051
- if (this.options.isVerbose) {
18052
- console.info(colors.bgWhite('rawResponse'), JSON.stringify(response, null, 4));
18053
- }
18054
- const complete = $getCurrentDate();
18055
- let resultContent = '';
18056
- const toolCalls = [];
18057
- // Parse output items
18058
- if (response.output) {
18059
- for (const item of response.output) {
18060
- if (item.type === 'message' && item.role === 'assistant') {
18061
- for (const contentPart of item.content) {
18062
- if (contentPart.type === 'output_text') {
18063
- // "output_text" based on migration guide, or "text"? Guide says "output_text" in example.
18064
- resultContent += contentPart.text;
18065
- }
18066
- else if (contentPart.type === 'text') {
18067
- resultContent += contentPart.text.value || contentPart.text;
18068
- }
19340
+ const streamResult = await run(agentForRun, inputItems, {
19341
+ stream: true,
19342
+ context: { parameters: prompt.parameters },
19343
+ });
19344
+ for await (const event of streamResult) {
19345
+ if (event.type === 'raw_model_stream_event' && ((_b = event.data) === null || _b === void 0 ? void 0 : _b.type) === 'output_text_delta') {
19346
+ latestContent += event.data.delta;
19347
+ onProgress({
19348
+ content: latestContent,
19349
+ modelName: this.agentKitModelName,
19350
+ timing: { start, complete: $getCurrentDate() },
19351
+ usage: UNCERTAIN_USAGE,
19352
+ rawPromptContent: rawPromptContent,
19353
+ rawRequest: null,
19354
+ rawResponse: {},
19355
+ });
19356
+ continue;
19357
+ }
19358
+ if (event.type === 'run_item_stream_event') {
19359
+ const rawItem = (_c = event.item) === null || _c === void 0 ? void 0 : _c.rawItem;
19360
+ if (event.name === 'tool_called' && (rawItem === null || rawItem === void 0 ? void 0 : rawItem.type) === 'function_call') {
19361
+ const toolCall = {
19362
+ name: rawItem.name,
19363
+ arguments: rawItem.arguments,
19364
+ rawToolCall: rawItem,
19365
+ createdAt: $getCurrentDate(),
19366
+ };
19367
+ toolCallIndexById.set(rawItem.callId, toolCalls.length);
19368
+ toolCalls.push(toolCall);
19369
+ onProgress({
19370
+ content: latestContent,
19371
+ modelName: this.agentKitModelName,
19372
+ timing: { start, complete: $getCurrentDate() },
19373
+ usage: UNCERTAIN_USAGE,
19374
+ rawPromptContent: rawPromptContent,
19375
+ rawRequest: null,
19376
+ rawResponse: {},
19377
+ toolCalls: [toolCall],
19378
+ });
19379
+ }
19380
+ if (event.name === 'tool_output' && (rawItem === null || rawItem === void 0 ? void 0 : rawItem.type) === 'function_call_result') {
19381
+ const index = toolCallIndexById.get(rawItem.callId);
19382
+ const result = this.formatAgentKitToolOutput(rawItem.output);
19383
+ if (index !== undefined) {
19384
+ const existingToolCall = toolCalls[index];
19385
+ const completedToolCall = {
19386
+ ...existingToolCall,
19387
+ result,
19388
+ rawToolCall: rawItem,
19389
+ };
19390
+ toolCalls[index] = completedToolCall;
19391
+ onProgress({
19392
+ content: latestContent,
19393
+ modelName: this.agentKitModelName,
19394
+ timing: { start, complete: $getCurrentDate() },
19395
+ usage: UNCERTAIN_USAGE,
19396
+ rawPromptContent: rawPromptContent,
19397
+ rawRequest: null,
19398
+ rawResponse: {},
19399
+ toolCalls: [completedToolCall],
19400
+ });
18069
19401
  }
18070
19402
  }
18071
- else if (item.type === 'function_call') ;
18072
19403
  }
18073
19404
  }
18074
- // Use output_text helper if available (mentioned in guide)
18075
- if (response.output_text) {
18076
- resultContent = response.output_text;
18077
- }
18078
- // TODO: Handle tool calls properly (Requires clearer docs or experimentation)
18079
- onProgress({
18080
- content: resultContent,
18081
- modelName: response.model || 'agent',
19405
+ await streamResult.completed;
19406
+ const complete = $getCurrentDate();
19407
+ const finalContent = ((_d = streamResult.finalOutput) !== null && _d !== void 0 ? _d : latestContent);
19408
+ const finalResult = {
19409
+ content: finalContent,
19410
+ modelName: this.agentKitModelName,
18082
19411
  timing: { start, complete },
18083
19412
  usage: UNCERTAIN_USAGE,
18084
- rawPromptContent,
19413
+ rawPromptContent: rawPromptContent,
18085
19414
  rawRequest,
18086
- rawResponse: response,
18087
- });
18088
- return exportJson({
18089
- name: 'promptResult',
18090
- message: `Result of \`OpenAiAgentExecutionTools.callChatModelStream\``,
18091
- order: [],
18092
- value: {
18093
- content: resultContent,
18094
- modelName: response.model || 'agent',
18095
- timing: { start, complete },
18096
- usage: UNCERTAIN_USAGE,
18097
- rawPromptContent,
18098
- rawRequest,
18099
- rawResponse: response,
18100
- toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
18101
- },
18102
- });
19415
+ rawResponse: { runResult: streamResult },
19416
+ toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
19417
+ };
19418
+ onProgress(finalResult);
19419
+ return finalResult;
18103
19420
  }
18104
19421
  /**
18105
- * Creates a vector store from knowledge sources
19422
+ * Builds AgentKit input items from the prompt and optional thread.
18106
19423
  */
18107
- static async createVectorStore(client, name, knowledgeSources) {
18108
- // Create a vector store
18109
- const vectorStore = await client.beta.vectorStores.create({
18110
- name: `${name} Knowledge Base`,
18111
- });
18112
- const vectorStoreId = vectorStore.id;
18113
- // Upload files from knowledge sources to the vector store
18114
- const fileStreams = [];
18115
- for (const source of knowledgeSources) {
18116
- try {
18117
- // Check if it's a URL
18118
- if (source.startsWith('http://') || source.startsWith('https://')) {
18119
- // Download the file
18120
- const response = await fetch(source);
18121
- if (!response.ok) {
18122
- console.error(`Failed to download ${source}: ${response.statusText}`);
18123
- continue;
18124
- }
18125
- const buffer = await response.arrayBuffer();
18126
- const filename = source.split('/').pop() || 'downloaded-file';
18127
- const blob = new Blob([buffer]);
18128
- const file = new File([blob], filename);
18129
- fileStreams.push(file);
19424
+ async buildAgentKitInputItems(prompt, rawPromptContent) {
19425
+ var _a;
19426
+ const inputItems = [];
19427
+ if ('thread' in prompt && Array.isArray(prompt.thread)) {
19428
+ for (const message of prompt.thread) {
19429
+ const sender = message.sender;
19430
+ const content = (_a = message.content) !== null && _a !== void 0 ? _a : '';
19431
+ if (sender === 'assistant' || sender === 'agent') {
19432
+ inputItems.push({
19433
+ role: 'assistant',
19434
+ status: 'completed',
19435
+ content: [{ type: 'output_text', text: content }],
19436
+ });
18130
19437
  }
18131
19438
  else {
18132
- // Local files not supported in browser env easily, same as before
19439
+ inputItems.push({
19440
+ role: 'user',
19441
+ content,
19442
+ });
18133
19443
  }
18134
19444
  }
18135
- catch (error) {
18136
- console.error(`Error processing knowledge source ${source}:`, error);
18137
- }
18138
19445
  }
18139
- // Batch upload files to the vector store
18140
- if (fileStreams.length > 0) {
18141
- try {
18142
- await client.beta.vectorStores.fileBatches.uploadAndPoll(vectorStoreId, {
18143
- files: fileStreams,
18144
- });
18145
- }
18146
- catch (error) {
18147
- console.error('Error uploading files to vector store:', error);
19446
+ const userContent = await this.buildAgentKitUserContent(prompt, rawPromptContent);
19447
+ inputItems.push({
19448
+ role: 'user',
19449
+ content: userContent,
19450
+ });
19451
+ return inputItems;
19452
+ }
19453
+ /**
19454
+ * Builds the user message content for AgentKit runs, including file inputs when provided.
19455
+ */
19456
+ async buildAgentKitUserContent(prompt, rawPromptContent) {
19457
+ if ('files' in prompt && Array.isArray(prompt.files) && prompt.files.length > 0) {
19458
+ const fileItems = await Promise.all(prompt.files.map(async (file) => {
19459
+ const arrayBuffer = await file.arrayBuffer();
19460
+ const base64 = Buffer.from(arrayBuffer).toString('base64');
19461
+ return {
19462
+ type: 'input_image',
19463
+ image: `data:${file.type};base64,${base64}`,
19464
+ };
19465
+ }));
19466
+ return [{ type: 'input_text', text: rawPromptContent }, ...fileItems];
19467
+ }
19468
+ return rawPromptContent;
19469
+ }
19470
+ /**
19471
+ * Normalizes AgentKit tool outputs into a string for Promptbook tool call results.
19472
+ */
19473
+ formatAgentKitToolOutput(output) {
19474
+ if (typeof output === 'string') {
19475
+ return output;
19476
+ }
19477
+ if (output && typeof output === 'object') {
19478
+ const textOutput = output;
19479
+ if (textOutput.type === 'text' && typeof textOutput.text === 'string') {
19480
+ return textOutput.text;
18148
19481
  }
18149
19482
  }
18150
- return vectorStoreId;
19483
+ return JSON.stringify(output !== null && output !== void 0 ? output : null);
18151
19484
  }
18152
19485
  /**
18153
- * Discriminant for type guards
19486
+ * Returns AgentKit-specific options.
19487
+ */
19488
+ get agentKitOptions() {
19489
+ return this.options;
19490
+ }
19491
+ /**
19492
+ * Discriminant for type guards.
18154
19493
  */
18155
19494
  get discriminant() {
18156
- return 'OPEN_AI_AGENT';
19495
+ return DISCRIMINANT$1;
18157
19496
  }
18158
19497
  /**
18159
- * Type guard to check if given `LlmExecutionTools` are instanceof `OpenAiAgentExecutionTools`
19498
+ * Type guard to check if given `LlmExecutionTools` are instanceof `OpenAiAgentKitExecutionTools`.
18160
19499
  */
18161
- static isOpenAiAgentExecutionTools(llmExecutionTools) {
18162
- return llmExecutionTools.discriminant === 'OPEN_AI_AGENT';
19500
+ static isOpenAiAgentKitExecutionTools(llmExecutionTools) {
19501
+ return llmExecutionTools.discriminant === DISCRIMINANT$1;
18163
19502
  }
18164
19503
  }
19504
+ /**
19505
+ * Discriminant for type guards.
19506
+ *
19507
+ * @private const of `OpenAiAgentKitExecutionTools`
19508
+ */
19509
+ const DISCRIMINANT$1 = 'OPEN_AI_AGENT_KIT_V1';
18165
19510
 
18166
19511
  /**
18167
19512
  * Uploads files to OpenAI and returns their IDs
@@ -18196,10 +19541,10 @@ async function uploadFilesToOpenAi(client, files) {
18196
19541
  * - `OpenAiAssistantExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities, recommended for usage in `Agent` or `AgentLlmExecutionTools`
18197
19542
  * - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
18198
19543
  *
19544
+ * @deprecated Use `OpenAiAgentKitExecutionTools` instead.
18199
19545
  * @public exported from `@promptbook/openai`
18200
- * @deprecated Use `OpenAiAgentExecutionTools` instead which uses the new OpenAI Responses API
18201
19546
  */
18202
- class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
19547
+ class OpenAiAssistantExecutionTools extends OpenAiVectorStoreHandler {
18203
19548
  /**
18204
19549
  * Creates OpenAI Execution Tools.
18205
19550
  *
@@ -18328,8 +19673,7 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
18328
19673
  console.info(colors.bgWhite('rawRequest (non-streaming with tools)'), JSON.stringify(rawRequest, null, 4));
18329
19674
  }
18330
19675
  // Create thread and run
18331
- const threadAndRun = await client.beta.threads.createAndRun(rawRequest);
18332
- let run = threadAndRun;
19676
+ let run = (await client.beta.threads.createAndRun(rawRequest));
18333
19677
  const completedToolCalls = [];
18334
19678
  const toolCallStartedAt = new Map();
18335
19679
  // Poll until run completes or requires action
@@ -18424,14 +19768,14 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
18424
19768
  }
18425
19769
  }
18426
19770
  // Submit tool outputs
18427
- run = await client.beta.threads.runs.submitToolOutputs(run.thread_id, run.id, {
19771
+ run = (await client.beta.threads.runs.submitToolOutputs(run.thread_id, run.id, {
18428
19772
  tool_outputs: toolOutputs,
18429
- });
19773
+ }));
18430
19774
  }
18431
19775
  else {
18432
19776
  // Wait a bit before polling again
18433
19777
  await new Promise((resolve) => setTimeout(resolve, 500));
18434
- run = await client.beta.threads.runs.retrieve(run.thread_id, run.id);
19778
+ run = (await client.beta.threads.runs.retrieve(run.thread_id, run.id));
18435
19779
  }
18436
19780
  }
18437
19781
  if (run.status !== 'completed') {
@@ -18630,6 +19974,7 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
18630
19974
  getAssistant(assistantId) {
18631
19975
  return new OpenAiAssistantExecutionTools({
18632
19976
  ...this.options,
19977
+ isCreatingNewAssistantsAllowed: this.isCreatingNewAssistantsAllowed,
18633
19978
  assistantId,
18634
19979
  });
18635
19980
  }
@@ -18655,88 +20000,13 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
18655
20000
  let vectorStoreId;
18656
20001
  // If knowledge sources are provided, create a vector store with them
18657
20002
  if (knowledgeSources && knowledgeSources.length > 0) {
18658
- if (this.options.isVerbose) {
18659
- console.info('[🤰]', 'Creating vector store with knowledge sources', {
18660
- name,
18661
- knowledgeSourcesCount,
18662
- });
18663
- }
18664
- // Create a vector store
18665
- const vectorStore = await client.beta.vectorStores.create({
18666
- name: `${name} Knowledge Base`,
20003
+ const vectorStoreResult = await this.createVectorStoreWithKnowledgeSources({
20004
+ client,
20005
+ name,
20006
+ knowledgeSources,
20007
+ logLabel: 'assistant creation',
18667
20008
  });
18668
- vectorStoreId = vectorStore.id;
18669
- if (this.options.isVerbose) {
18670
- console.info('[🤰]', 'Vector store created', {
18671
- vectorStoreId,
18672
- });
18673
- }
18674
- // Upload files from knowledge sources to the vector store
18675
- const fileStreams = [];
18676
- for (const [index, source] of knowledgeSources.entries()) {
18677
- try {
18678
- if (this.options.isVerbose) {
18679
- console.info('[🤰]', 'Processing knowledge source', {
18680
- index: index + 1,
18681
- total: knowledgeSources.length,
18682
- source,
18683
- sourceType: source.startsWith('http') || source.startsWith('https') ? 'url' : 'file',
18684
- });
18685
- }
18686
- // Check if it's a URL
18687
- if (source.startsWith('http://') || source.startsWith('https://')) {
18688
- // Download the file
18689
- const response = await fetch(source);
18690
- if (!response.ok) {
18691
- console.error(`Failed to download ${source}: ${response.statusText}`);
18692
- continue;
18693
- }
18694
- const buffer = await response.arrayBuffer();
18695
- let filename = source.split('/').pop() || 'downloaded-file';
18696
- try {
18697
- const url = new URL(source);
18698
- filename = url.pathname.split('/').pop() || filename;
18699
- }
18700
- catch (error) {
18701
- // Keep default filename
18702
- }
18703
- const blob = new Blob([buffer]);
18704
- const file = new File([blob], filename);
18705
- fileStreams.push(file);
18706
- }
18707
- else {
18708
- /*
18709
- TODO: [🐱‍🚀] Resolve problem with browser environment
18710
- // Assume it's a local file path
18711
- // Note: This will work in Node.js environment
18712
- // For browser environments, this would need different handling
18713
- const fs = await import('fs');
18714
- const fileStream = fs.createReadStream(source);
18715
- fileStreams.push(fileStream);
18716
- */
18717
- }
18718
- }
18719
- catch (error) {
18720
- console.error(`Error processing knowledge source ${source}:`, error);
18721
- }
18722
- }
18723
- // Batch upload files to the vector store
18724
- if (fileStreams.length > 0) {
18725
- try {
18726
- await client.beta.vectorStores.fileBatches.uploadAndPoll(vectorStoreId, {
18727
- files: fileStreams,
18728
- });
18729
- if (this.options.isVerbose) {
18730
- console.info('[🤰]', 'Uploaded files to vector store', {
18731
- vectorStoreId,
18732
- fileCount: fileStreams.length,
18733
- });
18734
- }
18735
- }
18736
- catch (error) {
18737
- console.error('Error uploading files to vector store:', error);
18738
- }
18739
- }
20009
+ vectorStoreId = vectorStoreResult.vectorStoreId;
18740
20010
  }
18741
20011
  // Create assistant with vector store attached
18742
20012
  const assistantConfig = {
@@ -18803,91 +20073,14 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
18803
20073
  const client = await this.getClient();
18804
20074
  let vectorStoreId;
18805
20075
  // If knowledge sources are provided, create a vector store with them
18806
- // TODO: [🧠] Reuse vector store creation logic from createNewAssistant
18807
20076
  if (knowledgeSources && knowledgeSources.length > 0) {
18808
- if (this.options.isVerbose) {
18809
- console.info('[🤰]', 'Creating vector store for assistant update', {
18810
- assistantId,
18811
- name,
18812
- knowledgeSourcesCount,
18813
- });
18814
- }
18815
- // Create a vector store
18816
- const vectorStore = await client.beta.vectorStores.create({
18817
- name: `${name} Knowledge Base`,
20077
+ const vectorStoreResult = await this.createVectorStoreWithKnowledgeSources({
20078
+ client,
20079
+ name: name !== null && name !== void 0 ? name : assistantId,
20080
+ knowledgeSources,
20081
+ logLabel: 'assistant update',
18818
20082
  });
18819
- vectorStoreId = vectorStore.id;
18820
- if (this.options.isVerbose) {
18821
- console.info('[🤰]', 'Vector store created for assistant update', {
18822
- vectorStoreId,
18823
- });
18824
- }
18825
- // Upload files from knowledge sources to the vector store
18826
- const fileStreams = [];
18827
- for (const [index, source] of knowledgeSources.entries()) {
18828
- try {
18829
- if (this.options.isVerbose) {
18830
- console.info('[🤰]', 'Processing knowledge source for update', {
18831
- index: index + 1,
18832
- total: knowledgeSources.length,
18833
- source,
18834
- sourceType: source.startsWith('http') || source.startsWith('https') ? 'url' : 'file',
18835
- });
18836
- }
18837
- // Check if it's a URL
18838
- if (source.startsWith('http://') || source.startsWith('https://')) {
18839
- // Download the file
18840
- const response = await fetch(source);
18841
- if (!response.ok) {
18842
- console.error(`Failed to download ${source}: ${response.statusText}`);
18843
- continue;
18844
- }
18845
- const buffer = await response.arrayBuffer();
18846
- let filename = source.split('/').pop() || 'downloaded-file';
18847
- try {
18848
- const url = new URL(source);
18849
- filename = url.pathname.split('/').pop() || filename;
18850
- }
18851
- catch (error) {
18852
- // Keep default filename
18853
- }
18854
- const blob = new Blob([buffer]);
18855
- const file = new File([blob], filename);
18856
- fileStreams.push(file);
18857
- }
18858
- else {
18859
- /*
18860
- TODO: [🐱‍🚀] Resolve problem with browser environment
18861
- // Assume it's a local file path
18862
- // Note: This will work in Node.js environment
18863
- // For browser environments, this would need different handling
18864
- const fs = await import('fs');
18865
- const fileStream = fs.createReadStream(source);
18866
- fileStreams.push(fileStream);
18867
- */
18868
- }
18869
- }
18870
- catch (error) {
18871
- console.error(`Error processing knowledge source ${source}:`, error);
18872
- }
18873
- }
18874
- // Batch upload files to the vector store
18875
- if (fileStreams.length > 0) {
18876
- try {
18877
- await client.beta.vectorStores.fileBatches.uploadAndPoll(vectorStoreId, {
18878
- files: fileStreams,
18879
- });
18880
- if (this.options.isVerbose) {
18881
- console.info('[🤰]', 'Uploaded files to vector store for update', {
18882
- vectorStoreId,
18883
- fileCount: fileStreams.length,
18884
- });
18885
- }
18886
- }
18887
- catch (error) {
18888
- console.error('Error uploading files to vector store:', error);
18889
- }
18890
- }
20083
+ vectorStoreId = vectorStoreResult.vectorStoreId;
18891
20084
  }
18892
20085
  const assistantUpdate = {
18893
20086
  name,
@@ -18991,8 +20184,8 @@ function emitAssistantPreparationProgress(options) {
18991
20184
  * - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
18992
20185
  * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
18993
20186
  * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
18994
- * - `OpenAiAgentExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with agent capabilities (using Responses API), recommended for usage in `Agent` or `AgentLlmExecutionTools`
18995
20187
  * - `OpenAiAssistantExecutionTools` - (Deprecated) which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities
20188
+ * - `OpenAiAgentKitExecutionTools` - which is a specific implementation of `LlmExecutionTools` backed by OpenAI AgentKit
18996
20189
  * - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
18997
20190
  *
18998
20191
  * @public exported from `@promptbook/core`
@@ -19127,97 +20320,129 @@ class AgentLlmExecutionTools {
19127
20320
  * Calls the chat model with agent-specific system prompt and requirements with streaming
19128
20321
  */
19129
20322
  async callChatModelStream(prompt, onProgress) {
20323
+ var _a, _b;
19130
20324
  // Ensure we're working with a chat prompt
19131
20325
  if (prompt.modelRequirements.modelVariant !== 'CHAT') {
19132
20326
  throw new Error('AgentLlmExecutionTools only supports chat prompts');
19133
20327
  }
19134
20328
  const modelRequirements = await this.getModelRequirements();
20329
+ const { _metadata, promptSuffix, ...sanitizedRequirements } = modelRequirements;
19135
20330
  const chatPrompt = prompt;
19136
20331
  let underlyingLlmResult;
19137
- // Create modified chat prompt with agent system message
20332
+ const chatPromptContentWithSuffix = promptSuffix
20333
+ ? `${chatPrompt.content}\n\n${promptSuffix}`
20334
+ : chatPrompt.content;
19138
20335
  const promptWithAgentModelRequirements = {
19139
20336
  ...chatPrompt,
20337
+ content: chatPromptContentWithSuffix,
19140
20338
  modelRequirements: {
19141
20339
  ...chatPrompt.modelRequirements,
19142
- ...modelRequirements,
20340
+ ...sanitizedRequirements,
19143
20341
  // Spread tools to convert readonly array to mutable
19144
- tools: modelRequirements.tools ? [...modelRequirements.tools] : chatPrompt.modelRequirements.tools,
20342
+ tools: sanitizedRequirements.tools
20343
+ ? [...sanitizedRequirements.tools]
20344
+ : chatPrompt.modelRequirements.tools,
19145
20345
  // Spread knowledgeSources to convert readonly array to mutable
19146
- knowledgeSources: modelRequirements.knowledgeSources
19147
- ? [...modelRequirements.knowledgeSources]
20346
+ knowledgeSources: sanitizedRequirements.knowledgeSources
20347
+ ? [...sanitizedRequirements.knowledgeSources]
19148
20348
  : undefined,
19149
20349
  // Prepend agent system message to existing system message
19150
- systemMessage: modelRequirements.systemMessage +
20350
+ systemMessage: sanitizedRequirements.systemMessage +
19151
20351
  (chatPrompt.modelRequirements.systemMessage
19152
20352
  ? `\n\n${chatPrompt.modelRequirements.systemMessage}`
19153
20353
  : ''),
19154
20354
  }, // Cast to avoid readonly mismatch from spread
19155
20355
  };
19156
20356
  console.log('!!!! promptWithAgentModelRequirements:', promptWithAgentModelRequirements);
19157
- if (OpenAiAgentExecutionTools.isOpenAiAgentExecutionTools(this.options.llmTools)) {
19158
- const requirementsHash = SHA256(JSON.stringify(modelRequirements)).toString();
19159
- const cached = AgentLlmExecutionTools.vectorStoreCache.get(this.title);
19160
- let agentTools;
19161
- if (cached && cached.requirementsHash === requirementsHash) {
20357
+ if (OpenAiAgentKitExecutionTools.isOpenAiAgentKitExecutionTools(this.options.llmTools)) {
20358
+ const requirementsHash = SHA256(JSON.stringify(sanitizedRequirements)).toString();
20359
+ const vectorStoreHash = SHA256(JSON.stringify((_a = sanitizedRequirements.knowledgeSources) !== null && _a !== void 0 ? _a : [])).toString();
20360
+ const cachedVectorStore = AgentLlmExecutionTools.vectorStoreCache.get(this.title);
20361
+ const cachedAgentKit = AgentLlmExecutionTools.agentKitAgentCache.get(this.title);
20362
+ let preparedAgentKit = this.options.assistantPreparationMode === 'external'
20363
+ ? this.options.llmTools.getPreparedAgentKitAgent()
20364
+ : null;
20365
+ const vectorStoreId = (preparedAgentKit === null || preparedAgentKit === void 0 ? void 0 : preparedAgentKit.vectorStoreId) ||
20366
+ (cachedVectorStore && cachedVectorStore.requirementsHash === vectorStoreHash
20367
+ ? cachedVectorStore.vectorStoreId
20368
+ : undefined);
20369
+ if (!preparedAgentKit && cachedAgentKit && cachedAgentKit.requirementsHash === requirementsHash) {
19162
20370
  if (this.options.isVerbose) {
19163
- console.log(`1️⃣ Using cached OpenAI Agent Vector Store for agent ${this.title}...`);
20371
+ console.info('[🤰]', 'Using cached OpenAI AgentKit agent', {
20372
+ agent: this.title,
20373
+ });
19164
20374
  }
19165
- // Create new instance with cached vectorStoreId
19166
- // We need to access options from the original tool.
19167
- // We assume isOpenAiAgentExecutionTools implies it has options we can clone.
19168
- // But protected options are not accessible.
19169
- // We can cast to access options if they were public, or use a method to clone.
19170
- // OpenAiAgentExecutionTools doesn't have a clone method.
19171
- // However, we can just assume the passed tool *might* not have the vector store yet, or we are replacing it.
19172
- // Actually, if the passed tool IS OpenAiAgentExecutionTools, we should use it as a base.
19173
- // TODO: [🧠] This is a bit hacky, accessing protected options or recreating tools.
19174
- // Ideally OpenAiAgentExecutionTools should have a method `withVectorStoreId`.
19175
- agentTools = new OpenAiAgentExecutionTools({
19176
- ...this.options.llmTools.options,
19177
- vectorStoreId: cached.vectorStoreId,
19178
- });
20375
+ preparedAgentKit = {
20376
+ agent: cachedAgentKit.agent,
20377
+ vectorStoreId: cachedAgentKit.vectorStoreId,
20378
+ };
19179
20379
  }
19180
- else {
20380
+ if (!preparedAgentKit) {
19181
20381
  if (this.options.isVerbose) {
19182
- console.log(`1️⃣ Creating/Updating OpenAI Agent Vector Store for agent ${this.title}...`);
19183
- }
19184
- let vectorStoreId;
19185
- if (modelRequirements.knowledgeSources && modelRequirements.knowledgeSources.length > 0) {
19186
- const client = await this.options.llmTools.getClient();
19187
- vectorStoreId = await OpenAiAgentExecutionTools.createVectorStore(client, this.title, modelRequirements.knowledgeSources);
20382
+ console.info('[🤰]', 'Preparing OpenAI AgentKit agent', {
20383
+ agent: this.title,
20384
+ });
19188
20385
  }
19189
- if (vectorStoreId) {
19190
- AgentLlmExecutionTools.vectorStoreCache.set(this.title, {
19191
- vectorStoreId,
19192
- requirementsHash,
20386
+ if (!vectorStoreId && ((_b = sanitizedRequirements.knowledgeSources) === null || _b === void 0 ? void 0 : _b.length)) {
20387
+ emitAssistantPreparationProgress({
20388
+ onProgress,
20389
+ prompt,
20390
+ modelName: this.modelName,
20391
+ phase: 'Creating knowledge base',
19193
20392
  });
19194
20393
  }
19195
- agentTools = new OpenAiAgentExecutionTools({
19196
- ...this.options.llmTools.options,
20394
+ emitAssistantPreparationProgress({
20395
+ onProgress,
20396
+ prompt,
20397
+ modelName: this.modelName,
20398
+ phase: 'Preparing AgentKit agent',
20399
+ });
20400
+ preparedAgentKit = await this.options.llmTools.prepareAgentKitAgent({
20401
+ name: this.title,
20402
+ instructions: sanitizedRequirements.systemMessage || '',
20403
+ knowledgeSources: sanitizedRequirements.knowledgeSources,
20404
+ tools: sanitizedRequirements.tools ? [...sanitizedRequirements.tools] : undefined,
19197
20405
  vectorStoreId,
19198
20406
  });
19199
20407
  }
19200
- // Create modified chat prompt with agent system message specific to OpenAI Agent
19201
- // Note: Unlike Assistants API, Responses API expects instructions (system message) to be passed in the call.
19202
- // So we use promptWithAgentModelRequirements which has the system message prepended.
19203
- // But we need to make sure we pass knowledgeSources in modelRequirements so OpenAiAgentExecutionTools can fallback to warning if vectorStoreId is missing (though we just handled it).
19204
- const promptForAgent = {
19205
- ...promptWithAgentModelRequirements,
19206
- modelRequirements: {
19207
- ...promptWithAgentModelRequirements.modelRequirements,
19208
- knowledgeSources: modelRequirements.knowledgeSources
19209
- ? [...modelRequirements.knowledgeSources]
19210
- : undefined, // Pass knowledge sources explicitly
19211
- },
19212
- };
19213
- underlyingLlmResult = await agentTools.callChatModelStream(promptForAgent, onProgress);
20408
+ if (preparedAgentKit.vectorStoreId) {
20409
+ AgentLlmExecutionTools.vectorStoreCache.set(this.title, {
20410
+ vectorStoreId: preparedAgentKit.vectorStoreId,
20411
+ requirementsHash: vectorStoreHash,
20412
+ });
20413
+ }
20414
+ AgentLlmExecutionTools.agentKitAgentCache.set(this.title, {
20415
+ agent: preparedAgentKit.agent,
20416
+ requirementsHash,
20417
+ vectorStoreId: preparedAgentKit.vectorStoreId,
20418
+ });
20419
+ const responseFormatOutputType = mapResponseFormatToAgentOutputType(promptWithAgentModelRequirements.modelRequirements.responseFormat);
20420
+ underlyingLlmResult = await this.options.llmTools.callChatModelStreamWithPreparedAgent({
20421
+ openAiAgentKitAgent: preparedAgentKit.agent,
20422
+ prompt: promptWithAgentModelRequirements,
20423
+ onProgress,
20424
+ responseFormatOutputType,
20425
+ });
19214
20426
  }
19215
20427
  else if (OpenAiAssistantExecutionTools.isOpenAiAssistantExecutionTools(this.options.llmTools)) {
19216
20428
  // ... deprecated path ...
19217
- const requirementsHash = SHA256(JSON.stringify(modelRequirements)).toString();
20429
+ const requirementsHash = SHA256(JSON.stringify(sanitizedRequirements)).toString();
19218
20430
  const cached = AgentLlmExecutionTools.assistantCache.get(this.title);
19219
20431
  let assistant;
19220
- if (cached) {
20432
+ if (this.options.assistantPreparationMode === 'external') {
20433
+ assistant = this.options.llmTools;
20434
+ if (this.options.isVerbose) {
20435
+ console.info('[🤰]', 'Using externally managed OpenAI Assistant', {
20436
+ agent: this.title,
20437
+ assistantId: assistant.assistantId,
20438
+ });
20439
+ }
20440
+ AgentLlmExecutionTools.assistantCache.set(this.title, {
20441
+ assistantId: assistant.assistantId,
20442
+ requirementsHash,
20443
+ });
20444
+ }
20445
+ else if (cached) {
19221
20446
  if (cached.requirementsHash === requirementsHash) {
19222
20447
  if (this.options.isVerbose) {
19223
20448
  console.info('[🤰]', 'Using cached OpenAI Assistant', {
@@ -19243,9 +20468,9 @@ class AgentLlmExecutionTools {
19243
20468
  assistant = await this.options.llmTools.updateAssistant({
19244
20469
  assistantId: cached.assistantId,
19245
20470
  name: this.title,
19246
- instructions: modelRequirements.systemMessage,
19247
- knowledgeSources: modelRequirements.knowledgeSources,
19248
- tools: modelRequirements.tools ? [...modelRequirements.tools] : undefined,
20471
+ instructions: sanitizedRequirements.systemMessage,
20472
+ knowledgeSources: sanitizedRequirements.knowledgeSources,
20473
+ tools: sanitizedRequirements.tools ? [...sanitizedRequirements.tools] : undefined,
19249
20474
  });
19250
20475
  AgentLlmExecutionTools.assistantCache.set(this.title, {
19251
20476
  assistantId: assistant.assistantId,
@@ -19268,9 +20493,9 @@ class AgentLlmExecutionTools {
19268
20493
  });
19269
20494
  assistant = await this.options.llmTools.createNewAssistant({
19270
20495
  name: this.title,
19271
- instructions: modelRequirements.systemMessage,
19272
- knowledgeSources: modelRequirements.knowledgeSources,
19273
- tools: modelRequirements.tools ? [...modelRequirements.tools] : undefined,
20496
+ instructions: sanitizedRequirements.systemMessage,
20497
+ knowledgeSources: sanitizedRequirements.knowledgeSources,
20498
+ tools: sanitizedRequirements.tools ? [...sanitizedRequirements.tools] : undefined,
19274
20499
  /*
19275
20500
  !!!
19276
20501
  metadata: {
@@ -19312,18 +20537,28 @@ class AgentLlmExecutionTools {
19312
20537
  }
19313
20538
  }
19314
20539
  let content = underlyingLlmResult.content;
19315
- // Note: Cleanup the AI artifacts from the content
19316
- content = humanizeAiText(content);
19317
- // Note: Make sure the content is Promptbook-like
19318
- content = promptbookifyAiText(content);
20540
+ if (typeof content === 'string') {
20541
+ // Note: Cleanup the AI artifacts from the content
20542
+ content = humanizeAiText(content);
20543
+ // Note: Make sure the content is Promptbook-like
20544
+ content = promptbookifyAiText(content);
20545
+ }
20546
+ else {
20547
+ // TODO: Maybe deep `humanizeAiText` + `promptbookifyAiText` inside of the object
20548
+ content = JSON.stringify(content);
20549
+ }
19319
20550
  const agentResult = {
19320
20551
  ...underlyingLlmResult,
19321
- content,
20552
+ content: content,
19322
20553
  modelName: this.modelName,
19323
20554
  };
19324
20555
  return agentResult;
19325
20556
  }
19326
20557
  }
20558
+ /**
20559
+ * Cached AgentKit agents to avoid rebuilding identical instances.
20560
+ */
20561
+ AgentLlmExecutionTools.agentKitAgentCache = new Map();
19327
20562
  /**
19328
20563
  * Cache of OpenAI assistants to avoid creating duplicates
19329
20564
  */
@@ -19404,8 +20639,8 @@ function buildTeacherSummary(commitments, used) {
19404
20639
  * - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
19405
20640
  * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
19406
20641
  * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
19407
- * - `OpenAiAgentExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with agent capabilities (using Responses API), recommended for usage in `Agent` or `AgentLlmExecutionTools`
19408
20642
  * - `OpenAiAssistantExecutionTools` - (Deprecated) which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities
20643
+ * - `OpenAiAgentKitExecutionTools` - which is a specific implementation of `LlmExecutionTools` backed by OpenAI AgentKit
19409
20644
  * - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
19410
20645
  *
19411
20646
  * @public exported from `@promptbook/core`
@@ -19436,6 +20671,7 @@ class Agent extends AgentLlmExecutionTools {
19436
20671
  super({
19437
20672
  isVerbose: options.isVerbose,
19438
20673
  llmTools: getSingleLlmExecutionTools(options.executionTools.llm),
20674
+ assistantPreparationMode: options.assistantPreparationMode,
19439
20675
  agentSource: agentSource.value, // <- TODO: [🐱‍🚀] Allow to pass BehaviorSubject<string_book> OR refresh llmExecutionTools.callChat on agentSource change
19440
20676
  });
19441
20677
  _Agent_instances.add(this);
@@ -19502,7 +20738,6 @@ class Agent extends AgentLlmExecutionTools {
19502
20738
  * Note: This method also implements the learning mechanism
19503
20739
  */
19504
20740
  async callChatModelStream(prompt, onProgress) {
19505
- var _a;
19506
20741
  // [1] Check if the user is asking the same thing as in the samples
19507
20742
  const modelRequirements = await this.getModelRequirements();
19508
20743
  if (modelRequirements.samples) {
@@ -19550,7 +20785,7 @@ class Agent extends AgentLlmExecutionTools {
19550
20785
  if (result.rawResponse && 'sample' in result.rawResponse) {
19551
20786
  return result;
19552
20787
  }
19553
- if ((_a = modelRequirements.metadata) === null || _a === void 0 ? void 0 : _a.isClosed) {
20788
+ if (modelRequirements.isClosed) {
19554
20789
  return result;
19555
20790
  }
19556
20791
  // Note: [0] Notify start of self-learning
@@ -19711,6 +20946,63 @@ async function _Agent_selfLearnTeacher(prompt, result) {
19711
20946
  * TODO: [🧠][😰]Agent is not working with the parameters, should it be?
19712
20947
  */
19713
20948
 
20949
+ /**
20950
+ * Resolve a remote META IMAGE value into an absolute URL when possible.
20951
+ */
20952
+ function resolveRemoteImageUrl(imageUrl, agentUrl) {
20953
+ if (!imageUrl) {
20954
+ return undefined;
20955
+ }
20956
+ if (imageUrl.startsWith('http://') ||
20957
+ imageUrl.startsWith('https://') ||
20958
+ imageUrl.startsWith('data:') ||
20959
+ imageUrl.startsWith('blob:')) {
20960
+ return imageUrl;
20961
+ }
20962
+ try {
20963
+ return new URL(imageUrl, agentUrl).href;
20964
+ }
20965
+ catch (_a) {
20966
+ return imageUrl;
20967
+ }
20968
+ }
20969
+ /**
20970
+ * Format a META commitment line when the value is provided.
20971
+ */
20972
+ function formatMetaLine(label, value) {
20973
+ if (!value) {
20974
+ return null;
20975
+ }
20976
+ return `META ${label} ${value}`;
20977
+ }
20978
+ /**
20979
+ * Build a minimal agent source snapshot for remote agents.
20980
+ */
20981
+ function buildRemoteAgentSource(profile, meta) {
20982
+ const metaLines = [
20983
+ formatMetaLine('FULLNAME', meta === null || meta === void 0 ? void 0 : meta.fullname),
20984
+ formatMetaLine('IMAGE', meta === null || meta === void 0 ? void 0 : meta.image),
20985
+ formatMetaLine('DESCRIPTION', meta === null || meta === void 0 ? void 0 : meta.description),
20986
+ formatMetaLine('COLOR', meta === null || meta === void 0 ? void 0 : meta.color),
20987
+ formatMetaLine('FONT', meta === null || meta === void 0 ? void 0 : meta.font),
20988
+ formatMetaLine('LINK', meta === null || meta === void 0 ? void 0 : meta.link),
20989
+ ]
20990
+ .filter((line) => Boolean(line))
20991
+ .join('\n');
20992
+ const personaBlock = profile.personaDescription
20993
+ ? spaceTrim$2((block) => `
20994
+ PERSONA
20995
+ ${block(profile.personaDescription || '')}
20996
+ `)
20997
+ : '';
20998
+ return book `
20999
+ ${profile.agentName}
21000
+
21001
+ ${metaLines}
21002
+
21003
+ ${personaBlock}
21004
+ `;
21005
+ }
19714
21006
  /**
19715
21007
  * Represents one AI Agent
19716
21008
  *
@@ -19718,13 +21010,15 @@ async function _Agent_selfLearnTeacher(prompt, result) {
19718
21010
  * - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
19719
21011
  * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
19720
21012
  * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
19721
- * - `OpenAiAssistantExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities, recommended for usage in `Agent` or `AgentLlmExecutionTools`
21013
+ * - `OpenAiAssistantExecutionTools` - (Deprecated) which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities
21014
+ * - `OpenAiAgentKitExecutionTools` - which is a specific implementation of `LlmExecutionTools` backed by OpenAI AgentKit
19722
21015
  * - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
19723
21016
  *
19724
21017
  * @public exported from `@promptbook/core`
19725
21018
  */
19726
21019
  class RemoteAgent extends Agent {
19727
21020
  static async connect(options) {
21021
+ var _a, _b, _c;
19728
21022
  const agentProfileUrl = `${options.agentUrl}/api/profile`;
19729
21023
  const profileResponse = await fetch(agentProfileUrl);
19730
21024
  // <- TODO: [🐱‍🚀] What about closed-source agents?
@@ -19744,14 +21038,14 @@ class RemoteAgent extends Agent {
19744
21038
 
19745
21039
  `));
19746
21040
  }
19747
- const profile = await profileResponse.json();
21041
+ const profile = (await profileResponse.json());
21042
+ const resolvedMeta = {
21043
+ ...(profile.meta || {}),
21044
+ image: resolveRemoteImageUrl((_a = profile.meta) === null || _a === void 0 ? void 0 : _a.image, options.agentUrl),
21045
+ };
19748
21046
  // Note: We are creating dummy agent source because we don't have the source from the remote agent
19749
21047
  // But we populate the metadata from the profile
19750
- const agentSource = new BehaviorSubject(book `
19751
- ${profile.agentName}
19752
-
19753
- ${profile.personaDescription}
19754
- `);
21048
+ const agentSource = new BehaviorSubject(buildRemoteAgentSource(profile, resolvedMeta));
19755
21049
  // <- TODO: [🐱‍🚀] createBookFromProfile
19756
21050
  // <- TODO: [🐱‍🚀] Support updating and self-updating
19757
21051
  const remoteAgent = new RemoteAgent({
@@ -19774,10 +21068,10 @@ class RemoteAgent extends Agent {
19774
21068
  });
19775
21069
  remoteAgent._remoteAgentName = profile.agentName;
19776
21070
  remoteAgent._remoteAgentHash = profile.agentHash;
19777
- remoteAgent.personaDescription = profile.personaDescription;
19778
- remoteAgent.initialMessage = profile.initialMessage;
19779
- remoteAgent.links = profile.links;
19780
- remoteAgent.meta = profile.meta;
21071
+ remoteAgent.personaDescription = (_b = profile.personaDescription) !== null && _b !== void 0 ? _b : null;
21072
+ remoteAgent.initialMessage = (_c = profile.initialMessage) !== null && _c !== void 0 ? _c : null;
21073
+ remoteAgent.links = profile.links || [];
21074
+ remoteAgent.meta = resolvedMeta;
19781
21075
  remoteAgent.capabilities = profile.capabilities || [];
19782
21076
  remoteAgent.samples = profile.samples || [];
19783
21077
  remoteAgent.toolTitles = profile.toolTitles || {};
@@ -19881,26 +21175,7 @@ class RemoteAgent extends Agent {
19881
21175
  };
19882
21176
  };
19883
21177
  const getToolCallKey = (toolCall) => {
19884
- var _a;
19885
- const rawId = (_a = toolCall.rawToolCall) === null || _a === void 0 ? void 0 : _a.id;
19886
- if (rawId) {
19887
- return `id:${rawId}`;
19888
- }
19889
- const argsKey = (() => {
19890
- if (typeof toolCall.arguments === 'string') {
19891
- return toolCall.arguments;
19892
- }
19893
- if (!toolCall.arguments) {
19894
- return '';
19895
- }
19896
- try {
19897
- return JSON.stringify(toolCall.arguments);
19898
- }
19899
- catch (_a) {
19900
- return '';
19901
- }
19902
- })();
19903
- return `${toolCall.name}:${toolCall.createdAt || ''}:${argsKey}`;
21178
+ return getToolCallIdentity(toolCall);
19904
21179
  };
19905
21180
  const mergeToolCall = (existing, incoming) => {
19906
21181
  const incomingResult = incoming.result;