@promptbook/cli 0.110.0-0 → 0.110.0-10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54) hide show
  1. package/esm/index.es.js +1778 -502
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/components.index.d.ts +4 -0
  4. package/esm/typings/src/_packages/core.index.d.ts +2 -2
  5. package/esm/typings/src/_packages/openai.index.d.ts +8 -4
  6. package/esm/typings/src/_packages/types.index.d.ts +12 -4
  7. package/esm/typings/src/book-2.0/agent-source/AgentModelRequirements.d.ts +22 -21
  8. package/esm/typings/src/book-2.0/agent-source/AgentReferenceResolver.d.ts +18 -0
  9. package/esm/typings/src/book-2.0/agent-source/CreateAgentModelRequirementsOptions.d.ts +12 -0
  10. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirements.d.ts +8 -2
  11. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.agentReferenceResolver.test.d.ts +1 -0
  12. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.d.ts +4 -5
  13. package/esm/typings/src/book-components/Chat/AgentChip/AgentChip.d.ts +5 -1
  14. package/esm/typings/src/book-components/Chat/Chat/ChatActionsBar.d.ts +4 -2
  15. package/esm/typings/src/book-components/Chat/Chat/ChatInputArea.d.ts +1 -0
  16. package/esm/typings/src/book-components/Chat/Chat/ChatMessageItem.d.ts +4 -0
  17. package/esm/typings/src/book-components/Chat/Chat/ChatMessageList.d.ts +1 -0
  18. package/esm/typings/src/book-components/Chat/Chat/ChatProps.d.ts +15 -0
  19. package/esm/typings/src/book-components/Chat/Chat/ChatSoundToggle.d.ts +31 -0
  20. package/esm/typings/src/book-components/Chat/LlmChat/LlmChatProps.d.ts +10 -1
  21. package/esm/typings/src/book-components/Chat/SourceChip/SourceChip.d.ts +5 -1
  22. package/esm/typings/src/book-components/Chat/utils/collectTeamToolCallSummary.d.ts +69 -0
  23. package/esm/typings/src/book-components/Chat/utils/getToolCallChipletInfo.d.ts +13 -13
  24. package/esm/typings/src/book-components/Chat/utils/parseCitationsFromContent.d.ts +9 -0
  25. package/esm/typings/src/book-components/Chat/utils/toolCallParsing.d.ts +4 -0
  26. package/esm/typings/src/collection/agent-collection/constructors/agent-collection-in-supabase/AgentsDatabaseSchema.d.ts +0 -3
  27. package/esm/typings/src/commitments/_base/BaseCommitmentDefinition.d.ts +9 -0
  28. package/esm/typings/src/execution/LlmExecutionTools.d.ts +2 -1
  29. package/esm/typings/src/llm-providers/agent/Agent.d.ts +1 -1
  30. package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.d.ts +5 -1
  31. package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.test.d.ts +1 -0
  32. package/esm/typings/src/llm-providers/agent/AgentOptions.d.ts +10 -0
  33. package/esm/typings/src/llm-providers/agent/CreateAgentLlmExecutionToolsOptions.d.ts +13 -2
  34. package/esm/typings/src/llm-providers/agent/RemoteAgent.d.ts +2 -1
  35. package/esm/typings/src/llm-providers/openai/OpenAiAgentKitExecutionTools.d.ts +150 -0
  36. package/esm/typings/src/llm-providers/openai/OpenAiAgentKitExecutionToolsOptions.d.ts +15 -0
  37. package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionTools.d.ts +3 -3
  38. package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionToolsOptions.d.ts +3 -4
  39. package/esm/typings/src/llm-providers/openai/OpenAiVectorStoreHandler.d.ts +135 -0
  40. package/esm/typings/src/llm-providers/openai/utils/mapToolsToOpenAi.d.ts +1 -1
  41. package/esm/typings/src/types/LlmToolDefinition.d.ts +1 -0
  42. package/esm/typings/src/types/ModelRequirements.d.ts +9 -0
  43. package/esm/typings/src/utils/DEFAULT_THINKING_MESSAGES.d.ts +8 -0
  44. package/esm/typings/src/utils/agents/resolveAgentAvatarImageUrl.d.ts +29 -0
  45. package/esm/typings/src/utils/knowledge/inlineKnowledgeSource.d.ts +38 -0
  46. package/esm/typings/src/utils/knowledge/inlineKnowledgeSource.test.d.ts +1 -0
  47. package/esm/typings/src/utils/language/getBrowserPreferredSpeechRecognitionLanguage.d.ts +35 -0
  48. package/esm/typings/src/utils/toolCalls/getToolCallIdentity.d.ts +10 -0
  49. package/esm/typings/src/version.d.ts +1 -1
  50. package/package.json +6 -2
  51. package/umd/index.umd.js +1781 -506
  52. package/umd/index.umd.js.map +1 -1
  53. package/esm/typings/src/llm-providers/openai/OpenAiAgentExecutionTools.d.ts +0 -43
  54. package/esm/typings/src/llm-providers/openai/createOpenAiAgentExecutionTools.d.ts +0 -11
package/umd/index.umd.js CHANGED
@@ -1,8 +1,8 @@
1
1
  (function (global, factory) {
2
- typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('colors'), require('commander'), require('spacetrim'), require('waitasecond'), require('prompts'), require('path'), require('fs/promises'), require('dotenv'), require('crypto-js/enc-hex'), require('crypto-js/sha256'), require('socket.io-client'), require('crypto-js'), require('child_process'), require('jszip'), require('crypto'), require('@mozilla/readability'), require('jsdom'), require('showdown'), require('glob-promise'), require('moment'), require('express'), require('express-openapi-validator'), require('http'), require('socket.io'), require('swagger-ui-express'), require('react'), require('react-dom/server'), require('@anthropic-ai/sdk'), require('bottleneck'), require('@azure/openai'), require('rxjs'), require('mime-types'), require('papaparse'), require('openai')) :
3
- typeof define === 'function' && define.amd ? define(['exports', 'colors', 'commander', 'spacetrim', 'waitasecond', 'prompts', 'path', 'fs/promises', 'dotenv', 'crypto-js/enc-hex', 'crypto-js/sha256', 'socket.io-client', 'crypto-js', 'child_process', 'jszip', 'crypto', '@mozilla/readability', 'jsdom', 'showdown', 'glob-promise', 'moment', 'express', 'express-openapi-validator', 'http', 'socket.io', 'swagger-ui-express', 'react', 'react-dom/server', '@anthropic-ai/sdk', 'bottleneck', '@azure/openai', 'rxjs', 'mime-types', 'papaparse', 'openai'], factory) :
4
- (global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global["promptbook-cli"] = {}, global.colors, global.commander, global.spaceTrim$1, global.waitasecond, global.prompts, global.path, global.promises, global.dotenv, global.hexEncoder, global.sha256, global.socket_ioClient, global.cryptoJs, global.child_process, global.JSZip, global.crypto, global.readability, global.jsdom, global.showdown, global.glob, global.moment, global.express, global.OpenApiValidator, global.http, global.socket_io, global.swaggerUi, global.react, global.server, global.Anthropic, global.Bottleneck, global.openai, global.rxjs, global.mimeTypes, global.papaparse, global.OpenAI));
5
- })(this, (function (exports, colors, commander, spaceTrim$1, waitasecond, prompts, path, promises, dotenv, hexEncoder, sha256, socket_ioClient, cryptoJs, child_process, JSZip, crypto, readability, jsdom, showdown, glob, moment, express, OpenApiValidator, http, socket_io, swaggerUi, react, server, Anthropic, Bottleneck, openai, rxjs, mimeTypes, papaparse, OpenAI) { 'use strict';
2
+ typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('colors'), require('commander'), require('spacetrim'), require('waitasecond'), require('prompts'), require('path'), require('fs/promises'), require('dotenv'), require('crypto-js/enc-hex'), require('crypto-js/sha256'), require('socket.io-client'), require('crypto-js'), require('child_process'), require('jszip'), require('crypto'), require('@mozilla/readability'), require('jsdom'), require('showdown'), require('glob-promise'), require('moment'), require('express'), require('express-openapi-validator'), require('http'), require('socket.io'), require('swagger-ui-express'), require('react'), require('react-dom/server'), require('@anthropic-ai/sdk'), require('bottleneck'), require('@azure/openai'), require('rxjs'), require('mime-types'), require('papaparse'), require('@openai/agents'), require('openai')) :
3
+ typeof define === 'function' && define.amd ? define(['exports', 'colors', 'commander', 'spacetrim', 'waitasecond', 'prompts', 'path', 'fs/promises', 'dotenv', 'crypto-js/enc-hex', 'crypto-js/sha256', 'socket.io-client', 'crypto-js', 'child_process', 'jszip', 'crypto', '@mozilla/readability', 'jsdom', 'showdown', 'glob-promise', 'moment', 'express', 'express-openapi-validator', 'http', 'socket.io', 'swagger-ui-express', 'react', 'react-dom/server', '@anthropic-ai/sdk', 'bottleneck', '@azure/openai', 'rxjs', 'mime-types', 'papaparse', '@openai/agents', 'openai'], factory) :
4
+ (global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global["promptbook-cli"] = {}, global.colors, global.commander, global.spaceTrim$1, global.waitasecond, global.prompts, global.path, global.promises, global.dotenv, global.hexEncoder, global.sha256, global.socket_ioClient, global.cryptoJs, global.child_process, global.JSZip, global.crypto, global.readability, global.jsdom, global.showdown, global.glob, global.moment, global.express, global.OpenApiValidator, global.http, global.socket_io, global.swaggerUi, global.react, global.server, global.Anthropic, global.Bottleneck, global.openai, global.rxjs, global.mimeTypes, global.papaparse, global.agents, global.OpenAI));
5
+ })(this, (function (exports, colors, commander, spaceTrim$1, waitasecond, prompts, path, promises, dotenv, hexEncoder, sha256, socket_ioClient, cryptoJs, child_process, JSZip, crypto, readability, jsdom, showdown, glob, moment, express, OpenApiValidator, http, socket_io, swaggerUi, react, server, Anthropic, Bottleneck, openai, rxjs, mimeTypes, papaparse, agents, OpenAI) { 'use strict';
6
6
 
7
7
  function _interopDefaultLegacy (e) { return e && typeof e === 'object' && 'default' in e ? e : { 'default': e }; }
8
8
 
@@ -56,7 +56,7 @@
56
56
  * @generated
57
57
  * @see https://github.com/webgptorg/promptbook
58
58
  */
59
- const PROMPTBOOK_ENGINE_VERSION = '0.110.0-0';
59
+ const PROMPTBOOK_ENGINE_VERSION = '0.110.0-10';
60
60
  /**
61
61
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
62
62
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -15836,6 +15836,28 @@
15836
15836
  return currentMessage + separator + content;
15837
15837
  });
15838
15838
  }
15839
+ /**
15840
+ * Helper method to create a new requirements object with updated prompt suffix
15841
+ */
15842
+ updatePromptSuffix(requirements, contentUpdate) {
15843
+ const newSuffix = typeof contentUpdate === 'string' ? contentUpdate : contentUpdate(requirements.promptSuffix);
15844
+ return {
15845
+ ...requirements,
15846
+ promptSuffix: newSuffix,
15847
+ };
15848
+ }
15849
+ /**
15850
+ * Helper method to append content to the prompt suffix
15851
+ * Default separator is a single newline for bullet lists.
15852
+ */
15853
+ appendToPromptSuffix(requirements, content, separator = '\n') {
15854
+ return this.updatePromptSuffix(requirements, (currentSuffix) => {
15855
+ if (!currentSuffix.trim()) {
15856
+ return content;
15857
+ }
15858
+ return `${currentSuffix}${separator}${content}`;
15859
+ });
15860
+ }
15839
15861
  /**
15840
15862
  * Helper method to add a comment section to the system message
15841
15863
  * Comments are lines starting with # that will be removed from the final system message
@@ -16013,13 +16035,9 @@
16013
16035
  `);
16014
16036
  }
16015
16037
  applyToAgentModelRequirements(requirements, _content) {
16016
- const updatedMetadata = {
16017
- ...requirements.metadata,
16018
- isClosed: true,
16019
- };
16020
16038
  return {
16021
16039
  ...requirements,
16022
- metadata: updatedMetadata,
16040
+ isClosed: true,
16023
16041
  };
16024
16042
  }
16025
16043
  }
@@ -16297,12 +16315,12 @@
16297
16315
  return requirements;
16298
16316
  }
16299
16317
  // Get existing dictionary entries from metadata
16300
- const existingDictionary = ((_a = requirements.metadata) === null || _a === void 0 ? void 0 : _a.DICTIONARY) || '';
16318
+ const existingDictionary = ((_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.DICTIONARY) || '';
16301
16319
  // Merge the new dictionary entry with existing entries
16302
16320
  const mergedDictionary = existingDictionary ? `${existingDictionary}\n${trimmedContent}` : trimmedContent;
16303
16321
  // Store the merged dictionary in metadata for debugging and inspection
16304
16322
  const updatedMetadata = {
16305
- ...requirements.metadata,
16323
+ ...requirements._metadata,
16306
16324
  DICTIONARY: mergedDictionary,
16307
16325
  };
16308
16326
  // Create the dictionary section for the system message
@@ -16310,7 +16328,7 @@
16310
16328
  const dictionarySection = `# DICTIONARY\n${mergedDictionary}`;
16311
16329
  return {
16312
16330
  ...this.appendToSystemMessage(requirements, dictionarySection),
16313
- metadata: updatedMetadata,
16331
+ _metadata: updatedMetadata,
16314
16332
  };
16315
16333
  }
16316
16334
  }
@@ -16450,10 +16468,7 @@
16450
16468
  applyToAgentModelRequirements(requirements, content) {
16451
16469
  const trimmedContent = content.trim();
16452
16470
  if (!trimmedContent) {
16453
- return {
16454
- ...requirements,
16455
- parentAgentUrl: undefined,
16456
- };
16471
+ return requirements;
16457
16472
  }
16458
16473
  if (trimmedContent.toUpperCase() === 'VOID' ||
16459
16474
  trimmedContent.toUpperCase() === 'NULL' ||
@@ -16667,6 +16682,136 @@
16667
16682
  * Note: [💞] Ignore a discrepancy between file name and entity name
16668
16683
  */
16669
16684
 
16685
+ /**
16686
+ * @@@
16687
+ *
16688
+ * @private thing of inline knowledge
16689
+ */
16690
+ const INLINE_KNOWLEDGE_BASE_NAME = 'inline-knowledge';
16691
+ /**
16692
+ * @@@
16693
+ *
16694
+ * @private thing of inline knowledge
16695
+ */
16696
+ const INLINE_KNOWLEDGE_EXTENSION = '.txt';
16697
+ /**
16698
+ * @@@
16699
+ *
16700
+ * @private thing of inline knowledge
16701
+ */
16702
+ const DATA_URL_PREFIX = 'data:';
16703
+ /**
16704
+ * @@@
16705
+ *
16706
+ * @private thing of inline knowledge
16707
+ */
16708
+ function getFirstNonEmptyLine(content) {
16709
+ const lines = content.split(/\r?\n/);
16710
+ for (const line of lines) {
16711
+ const trimmed = line.trim();
16712
+ if (trimmed) {
16713
+ return trimmed;
16714
+ }
16715
+ }
16716
+ return null;
16717
+ }
16718
+ /**
16719
+ * @@@
16720
+ *
16721
+ * @private thing of inline knowledge
16722
+ */
16723
+ function deriveBaseFilename(content) {
16724
+ const firstLine = getFirstNonEmptyLine(content);
16725
+ if (!firstLine) {
16726
+ return INLINE_KNOWLEDGE_BASE_NAME;
16727
+ }
16728
+ const normalized = normalizeToKebabCase(firstLine);
16729
+ return normalized || INLINE_KNOWLEDGE_BASE_NAME;
16730
+ }
16731
+ /**
16732
+ * Creates a data URL that represents the inline knowledge content as a text file.
16733
+ *
16734
+ * @private thing of inline knowledge
16735
+ */
16736
+ function createInlineKnowledgeSourceFile(content) {
16737
+ const trimmedContent = content.trim();
16738
+ const baseName = deriveBaseFilename(trimmedContent);
16739
+ const filename = `${baseName}${INLINE_KNOWLEDGE_EXTENSION}`;
16740
+ const mimeType = 'text/plain';
16741
+ const base64 = Buffer.from(trimmedContent, 'utf-8').toString('base64');
16742
+ const encodedFilename = encodeURIComponent(filename);
16743
+ const url = `${DATA_URL_PREFIX}${mimeType};name=${encodedFilename};charset=utf-8;base64,${base64}`;
16744
+ return {
16745
+ filename,
16746
+ mimeType,
16747
+ url,
16748
+ };
16749
+ }
16750
+ /**
16751
+ * Checks whether the provided source string is a data URL that can be decoded.
16752
+ *
16753
+ * @private thing of inline knowledge
16754
+ */
16755
+ function isDataUrlKnowledgeSource(source) {
16756
+ return typeof source === 'string' && source.startsWith(DATA_URL_PREFIX);
16757
+ }
16758
+ /**
16759
+ * Parses a data URL-based knowledge source into its raw buffer, filename, and MIME type.
16760
+ *
16761
+ * @private thing of inline knowledge
16762
+ */
16763
+ function parseDataUrlKnowledgeSource(source) {
16764
+ if (!isDataUrlKnowledgeSource(source)) {
16765
+ return null;
16766
+ }
16767
+ const commaIndex = source.indexOf(',');
16768
+ if (commaIndex === -1) {
16769
+ return null;
16770
+ }
16771
+ const header = source.slice(DATA_URL_PREFIX.length, commaIndex);
16772
+ const payload = source.slice(commaIndex + 1);
16773
+ const tokens = header.split(';');
16774
+ const mediaType = tokens[0] || 'text/plain';
16775
+ let filename = `${INLINE_KNOWLEDGE_BASE_NAME}${INLINE_KNOWLEDGE_EXTENSION}`;
16776
+ let isBase64 = false;
16777
+ for (let i = 1; i < tokens.length; i++) {
16778
+ const token = tokens[i];
16779
+ if (!token) {
16780
+ continue;
16781
+ }
16782
+ if (token.toLowerCase() === 'base64') {
16783
+ isBase64 = true;
16784
+ continue;
16785
+ }
16786
+ const [key, value] = token.split('=');
16787
+ if (key === 'name' && value !== undefined) {
16788
+ try {
16789
+ filename = decodeURIComponent(value);
16790
+ }
16791
+ catch (_a) {
16792
+ filename = value;
16793
+ }
16794
+ }
16795
+ }
16796
+ if (!isBase64) {
16797
+ return null;
16798
+ }
16799
+ try {
16800
+ const buffer = Buffer.from(payload, 'base64');
16801
+ return {
16802
+ buffer,
16803
+ filename,
16804
+ mimeType: mediaType,
16805
+ };
16806
+ }
16807
+ catch (_b) {
16808
+ return null;
16809
+ }
16810
+ }
16811
+ /**
16812
+ * Note: [💞] Ignore a discrepancy between file name and entity name
16813
+ */
16814
+
16670
16815
  /**
16671
16816
  * KNOWLEDGE commitment definition
16672
16817
  *
@@ -16765,9 +16910,13 @@
16765
16910
  return this.appendToSystemMessage(updatedRequirements, knowledgeInfo, '\n\n');
16766
16911
  }
16767
16912
  else {
16768
- // Direct text knowledge - add to system message
16769
- const knowledgeSection = `Knowledge: ${trimmedContent}`;
16770
- return this.appendToSystemMessage(requirements, knowledgeSection, '\n\n');
16913
+ const inlineSource = createInlineKnowledgeSourceFile(trimmedContent);
16914
+ const updatedRequirements = {
16915
+ ...requirements,
16916
+ knowledgeSources: [...(requirements.knowledgeSources || []), inlineSource.url],
16917
+ };
16918
+ const knowledgeInfo = `Knowledge Source Inline: ${inlineSource.filename} (derived from inline content and processed for retrieval during chat)`;
16919
+ return this.appendToSystemMessage(updatedRequirements, knowledgeInfo, '\n\n');
16771
16920
  }
16772
16921
  }
16773
16922
  }
@@ -17014,16 +17163,16 @@
17014
17163
  // and typically doesn't need to be added to the system prompt or model requirements directly.
17015
17164
  // It is extracted separately for the chat interface.
17016
17165
  var _a;
17017
- const pendingUserMessage = (_a = requirements.metadata) === null || _a === void 0 ? void 0 : _a.pendingUserMessage;
17166
+ const pendingUserMessage = (_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.pendingUserMessage;
17018
17167
  if (pendingUserMessage) {
17019
17168
  const newSample = { question: pendingUserMessage, answer: content };
17020
17169
  const newSamples = [...(requirements.samples || []), newSample];
17021
- const newMetadata = { ...requirements.metadata };
17170
+ const newMetadata = { ...requirements._metadata };
17022
17171
  delete newMetadata.pendingUserMessage;
17023
17172
  return {
17024
17173
  ...requirements,
17025
17174
  samples: newSamples,
17026
- metadata: newMetadata,
17175
+ _metadata: newMetadata,
17027
17176
  };
17028
17177
  }
17029
17178
  return requirements;
@@ -17271,8 +17420,8 @@
17271
17420
  applyToAgentModelRequirements(requirements, content) {
17272
17421
  return {
17273
17422
  ...requirements,
17274
- metadata: {
17275
- ...requirements.metadata,
17423
+ _metadata: {
17424
+ ...requirements._metadata,
17276
17425
  pendingUserMessage: content,
17277
17426
  },
17278
17427
  };
@@ -18130,11 +18279,7 @@
18130
18279
  if (trimmedContent === '') {
18131
18280
  return requirements;
18132
18281
  }
18133
- // Return requirements with updated notes but no changes to system message
18134
- return {
18135
- ...requirements,
18136
- notes: [...(requirements.notes || []), trimmedContent],
18137
- };
18282
+ return requirements;
18138
18283
  }
18139
18284
  }
18140
18285
  /**
@@ -18196,12 +18341,12 @@
18196
18341
  // Since OPEN is default, we can just ensure isClosed is false
18197
18342
  // But to be explicit we can set it
18198
18343
  const updatedMetadata = {
18199
- ...requirements.metadata,
18344
+ ...requirements._metadata,
18200
18345
  isClosed: false,
18201
18346
  };
18202
18347
  return {
18203
18348
  ...requirements,
18204
- metadata: updatedMetadata,
18349
+ _metadata: updatedMetadata,
18205
18350
  };
18206
18351
  }
18207
18352
  }
@@ -18282,7 +18427,7 @@
18282
18427
  return requirements;
18283
18428
  }
18284
18429
  // Get existing persona content from metadata
18285
- const existingPersonaContent = ((_a = requirements.metadata) === null || _a === void 0 ? void 0 : _a.PERSONA) || '';
18430
+ const existingPersonaContent = ((_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.PERSONA) || '';
18286
18431
  // Merge the new content with existing persona content
18287
18432
  // When multiple PERSONA commitments exist, they are merged into one
18288
18433
  const mergedPersonaContent = existingPersonaContent
@@ -18290,12 +18435,12 @@
18290
18435
  : trimmedContent;
18291
18436
  // Store the merged persona content in metadata for debugging and inspection
18292
18437
  const updatedMetadata = {
18293
- ...requirements.metadata,
18438
+ ...requirements._metadata,
18294
18439
  PERSONA: mergedPersonaContent,
18295
18440
  };
18296
18441
  // Get the agent name from metadata (which should contain the first line of agent source)
18297
18442
  // If not available, extract from current system message as fallback
18298
- let agentName = (_b = requirements.metadata) === null || _b === void 0 ? void 0 : _b.agentName;
18443
+ let agentName = (_b = requirements._metadata) === null || _b === void 0 ? void 0 : _b.agentName;
18299
18444
  if (!agentName) {
18300
18445
  // Fallback: extract from current system message
18301
18446
  const currentMessage = requirements.systemMessage.trim();
@@ -18342,7 +18487,7 @@
18342
18487
  return {
18343
18488
  ...requirements,
18344
18489
  systemMessage: newSystemMessage,
18345
- metadata: updatedMetadata,
18490
+ _metadata: updatedMetadata,
18346
18491
  };
18347
18492
  }
18348
18493
  }
@@ -18425,7 +18570,16 @@
18425
18570
  }
18426
18571
  // Add rule to the system message
18427
18572
  const ruleSection = `Rule: ${trimmedContent}`;
18428
- return this.appendToSystemMessage(requirements, ruleSection, '\n\n');
18573
+ const requirementsWithRule = this.appendToSystemMessage(requirements, ruleSection, '\n\n');
18574
+ const ruleLines = trimmedContent
18575
+ .split(/\r?\n/)
18576
+ .map((line) => line.trim())
18577
+ .filter(Boolean)
18578
+ .map((line) => `- ${line}`);
18579
+ if (ruleLines.length === 0) {
18580
+ return requirementsWithRule;
18581
+ }
18582
+ return this.appendToPromptSuffix(requirementsWithRule, ruleLines.join('\n'));
18429
18583
  }
18430
18584
  }
18431
18585
  /**
@@ -18931,7 +19085,7 @@
18931
19085
  if (teammates.length === 0) {
18932
19086
  return requirements;
18933
19087
  }
18934
- const agentName = ((_a = requirements.metadata) === null || _a === void 0 ? void 0 : _a.agentName) || 'Agent';
19088
+ const agentName = ((_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.agentName) || 'Agent';
18935
19089
  const teamEntries = teammates.map((teammate) => ({
18936
19090
  toolName: createTeamToolName(teammate.url),
18937
19091
  teammate,
@@ -18971,7 +19125,7 @@
18971
19125
  },
18972
19126
  });
18973
19127
  }
18974
- const existingTeammates = ((_b = requirements.metadata) === null || _b === void 0 ? void 0 : _b.teammates) || [];
19128
+ const existingTeammates = ((_b = requirements._metadata) === null || _b === void 0 ? void 0 : _b.teammates) || [];
18975
19129
  const updatedTeammates = [...existingTeammates];
18976
19130
  for (const entry of teamEntries) {
18977
19131
  if (updatedTeammates.some((existing) => existing.url === entry.teammate.url)) {
@@ -19000,8 +19154,8 @@
19000
19154
  return this.appendToSystemMessage({
19001
19155
  ...requirements,
19002
19156
  tools: updatedTools,
19003
- metadata: {
19004
- ...requirements.metadata,
19157
+ _metadata: {
19158
+ ...requirements._metadata,
19005
19159
  teammates: updatedTeammates,
19006
19160
  },
19007
19161
  }, teamSystemMessage);
@@ -19101,11 +19255,16 @@
19101
19255
  const request = buildTeammateRequest(message, args.context);
19102
19256
  let response = '';
19103
19257
  let error = null;
19258
+ let toolCalls;
19104
19259
  try {
19105
19260
  const remoteAgent = await getRemoteTeammateAgent(entry.teammate.url);
19106
19261
  const prompt = buildTeammatePrompt(request);
19107
19262
  const teammateResult = await remoteAgent.callChatModel(prompt);
19108
19263
  response = teammateResult.content || '';
19264
+ toolCalls =
19265
+ 'toolCalls' in teammateResult && Array.isArray(teammateResult.toolCalls)
19266
+ ? teammateResult.toolCalls
19267
+ : undefined;
19109
19268
  }
19110
19269
  catch (err) {
19111
19270
  error = err instanceof Error ? err.message : String(err);
@@ -19115,6 +19274,7 @@
19115
19274
  teammate: teammateMetadata,
19116
19275
  request,
19117
19276
  response: teammateReply,
19277
+ toolCalls: toolCalls && toolCalls.length > 0 ? toolCalls : undefined,
19118
19278
  error,
19119
19279
  conversation: [
19120
19280
  {
@@ -19227,7 +19387,7 @@
19227
19387
  if (!trimmedContent) {
19228
19388
  // Store template mode flag in metadata
19229
19389
  const updatedMetadata = {
19230
- ...requirements.metadata,
19390
+ ...requirements._metadata,
19231
19391
  templateMode: true,
19232
19392
  };
19233
19393
  // Add a general instruction about using structured templates
@@ -19237,21 +19397,21 @@
19237
19397
  `);
19238
19398
  return {
19239
19399
  ...this.appendToSystemMessage(requirements, templateModeInstruction, '\n\n'),
19240
- metadata: updatedMetadata,
19400
+ _metadata: updatedMetadata,
19241
19401
  };
19242
19402
  }
19243
19403
  // If content is provided, add the specific template instructions
19244
19404
  const templateSection = `Response Template: ${trimmedContent}`;
19245
19405
  // Store the template in metadata for potential programmatic access
19246
- const existingTemplates = ((_a = requirements.metadata) === null || _a === void 0 ? void 0 : _a.templates) || [];
19406
+ const existingTemplates = ((_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.templates) || [];
19247
19407
  const updatedMetadata = {
19248
- ...requirements.metadata,
19408
+ ...requirements._metadata,
19249
19409
  templates: [...existingTemplates, trimmedContent],
19250
19410
  templateMode: true,
19251
19411
  };
19252
19412
  return {
19253
19413
  ...this.appendToSystemMessage(requirements, templateSection, '\n\n'),
19254
- metadata: updatedMetadata,
19414
+ _metadata: updatedMetadata,
19255
19415
  };
19256
19416
  }
19257
19417
  }
@@ -19588,8 +19748,8 @@
19588
19748
  return this.appendToSystemMessage({
19589
19749
  ...requirements,
19590
19750
  tools: updatedTools,
19591
- metadata: {
19592
- ...requirements.metadata,
19751
+ _metadata: {
19752
+ ...requirements._metadata,
19593
19753
  useBrowser: true,
19594
19754
  },
19595
19755
  }, spaceTrim$1.spaceTrim(`
@@ -19818,8 +19978,8 @@
19818
19978
  return this.appendToSystemMessage({
19819
19979
  ...requirements,
19820
19980
  tools: updatedTools,
19821
- metadata: {
19822
- ...requirements.metadata,
19981
+ _metadata: {
19982
+ ...requirements._metadata,
19823
19983
  useEmail: content || true,
19824
19984
  },
19825
19985
  }, spaceTrim$1.spaceTrim((block) => `
@@ -19954,8 +20114,8 @@
19954
20114
  return this.appendToSystemMessage({
19955
20115
  ...requirements,
19956
20116
  tools: updatedTools,
19957
- metadata: {
19958
- ...requirements.metadata,
20117
+ _metadata: {
20118
+ ...requirements._metadata,
19959
20119
  useImageGenerator: content || true,
19960
20120
  },
19961
20121
  }, spaceTrim$1.spaceTrim(`
@@ -20246,8 +20406,8 @@
20246
20406
  return this.appendToSystemMessage({
20247
20407
  ...requirements,
20248
20408
  tools: updatedTools,
20249
- metadata: {
20250
- ...requirements.metadata,
20409
+ _metadata: {
20410
+ ...requirements._metadata,
20251
20411
  useSearchEngine: content || true,
20252
20412
  },
20253
20413
  }, spaceTrim$1.spaceTrim((block) => `
@@ -20395,8 +20555,8 @@
20395
20555
  return this.appendToSystemMessage({
20396
20556
  ...requirements,
20397
20557
  tools: updatedTools,
20398
- metadata: {
20399
- ...requirements.metadata,
20558
+ _metadata: {
20559
+ ...requirements._metadata,
20400
20560
  },
20401
20561
  }, spaceTrim$1.spaceTrim((block) => `
20402
20562
  Time and date context:
@@ -24974,6 +25134,66 @@
24974
25134
  },
24975
25135
  /**/
24976
25136
  /**/
25137
+ {
25138
+ modelVariant: 'CHAT',
25139
+ modelTitle: 'gpt-5.2-codex',
25140
+ modelName: 'gpt-5.2-codex',
25141
+ modelDescription: 'High-capability Codex variant tuned for agentic code generation with large contexts and reasoning effort controls. Ideal for long-horizon coding workflows and multi-step reasoning.',
25142
+ pricing: {
25143
+ prompt: pricing(`$1.75 / 1M tokens`),
25144
+ output: pricing(`$14.00 / 1M tokens`),
25145
+ },
25146
+ },
25147
+ /**/
25148
+ /**/
25149
+ {
25150
+ modelVariant: 'CHAT',
25151
+ modelTitle: 'gpt-5.1-codex-max',
25152
+ modelName: 'gpt-5.1-codex-max',
25153
+ modelDescription: 'Premium GPT-5.1 Codex flavor that mirrors gpt-5.1 in capability and pricing while adding Codex tooling optimizations.',
25154
+ pricing: {
25155
+ prompt: pricing(`$1.25 / 1M tokens`),
25156
+ output: pricing(`$10.00 / 1M tokens`),
25157
+ },
25158
+ },
25159
+ /**/
25160
+ /**/
25161
+ {
25162
+ modelVariant: 'CHAT',
25163
+ modelTitle: 'gpt-5.1-codex',
25164
+ modelName: 'gpt-5.1-codex',
25165
+ modelDescription: 'Core GPT-5.1 Codex model focused on agentic coding tasks with a balanced trade-off between reasoning and cost.',
25166
+ pricing: {
25167
+ prompt: pricing(`$1.25 / 1M tokens`),
25168
+ output: pricing(`$10.00 / 1M tokens`),
25169
+ },
25170
+ },
25171
+ /**/
25172
+ /**/
25173
+ {
25174
+ modelVariant: 'CHAT',
25175
+ modelTitle: 'gpt-5.1-codex-mini',
25176
+ modelName: 'gpt-5.1-codex-mini',
25177
+ modelDescription: 'Compact, cost-effective GPT-5.1 Codex variant with a smaller context window ideal for cheap assistant iterations that still require coding awareness.',
25178
+ pricing: {
25179
+ prompt: pricing(`$0.25 / 1M tokens`),
25180
+ output: pricing(`$2.00 / 1M tokens`),
25181
+ },
25182
+ },
25183
+ /**/
25184
+ /**/
25185
+ {
25186
+ modelVariant: 'CHAT',
25187
+ modelTitle: 'gpt-5-codex',
25188
+ modelName: 'gpt-5-codex',
25189
+ modelDescription: 'Legacy GPT-5 Codex model built for agentic coding workloads with the same pricing as GPT-5 and a focus on stability.',
25190
+ pricing: {
25191
+ prompt: pricing(`$1.25 / 1M tokens`),
25192
+ output: pricing(`$10.00 / 1M tokens`),
25193
+ },
25194
+ },
25195
+ /**/
25196
+ /**/
24977
25197
  {
24978
25198
  modelVariant: 'CHAT',
24979
25199
  modelTitle: 'gpt-5-mini',
@@ -27002,6 +27222,32 @@
27002
27222
  errorMessage.includes('does not support'));
27003
27223
  }
27004
27224
 
27225
+ /**
27226
+ * Provides access to the structured clone implementation when available.
27227
+ */
27228
+ function getStructuredCloneFunction() {
27229
+ return globalThis.structuredClone;
27230
+ }
27231
+ /**
27232
+ * Checks whether the prompt is a chat prompt that carries file attachments.
27233
+ */
27234
+ function hasChatPromptFiles(prompt) {
27235
+ return 'files' in prompt && Array.isArray(prompt.files);
27236
+ }
27237
+ /**
27238
+ * Creates a deep copy of the prompt while keeping attached files intact when structured clone is not available.
27239
+ */
27240
+ function clonePromptPreservingFiles(prompt) {
27241
+ const structuredCloneFn = getStructuredCloneFunction();
27242
+ if (typeof structuredCloneFn === 'function') {
27243
+ return structuredCloneFn(prompt);
27244
+ }
27245
+ const clonedPrompt = JSON.parse(JSON.stringify(prompt));
27246
+ if (hasChatPromptFiles(prompt)) {
27247
+ clonedPrompt.files = prompt.files;
27248
+ }
27249
+ return clonedPrompt;
27250
+ }
27005
27251
  /**
27006
27252
  * Execution Tools for calling OpenAI API or other OpenAI compatible provider
27007
27253
  *
@@ -27031,16 +27277,11 @@
27031
27277
  const openAiOptions = { ...this.options };
27032
27278
  delete openAiOptions.isVerbose;
27033
27279
  delete openAiOptions.userId;
27034
- // Enhanced configuration for better ECONNRESET handling
27280
+ // Enhanced configuration with retries and timeouts.
27035
27281
  const enhancedOptions = {
27036
27282
  ...openAiOptions,
27037
27283
  timeout: API_REQUEST_TIMEOUT,
27038
27284
  maxRetries: CONNECTION_RETRIES_LIMIT,
27039
- defaultHeaders: {
27040
- Connection: 'keep-alive',
27041
- 'Keep-Alive': 'timeout=30, max=100',
27042
- ...openAiOptions.defaultHeaders,
27043
- },
27044
27285
  };
27045
27286
  this.client = new OpenAI__default["default"](enhancedOptions);
27046
27287
  }
@@ -27091,7 +27332,7 @@
27091
27332
  */
27092
27333
  async callChatModelStream(prompt, onProgress) {
27093
27334
  // Deep clone prompt and modelRequirements to avoid mutation across calls
27094
- const clonedPrompt = JSON.parse(JSON.stringify(prompt));
27335
+ const clonedPrompt = clonePromptPreservingFiles(prompt);
27095
27336
  // Use local Set for retried parameters to ensure independence and thread safety
27096
27337
  const retriedUnsupportedParameters = new Set();
27097
27338
  return this.callChatModelWithRetry(clonedPrompt, clonedPrompt.modelRequirements, [], retriedUnsupportedParameters, onProgress);
@@ -27118,7 +27359,10 @@
27118
27359
  // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
27119
27360
  // <- Note: [🧆]
27120
27361
  }; // <- TODO: [💩] Guard here types better
27121
- if (format === 'JSON') {
27362
+ if (currentModelRequirements.responseFormat !== undefined) {
27363
+ modelSettings.response_format = currentModelRequirements.responseFormat;
27364
+ }
27365
+ else if (format === 'JSON') {
27122
27366
  modelSettings.response_format = {
27123
27367
  type: 'json_object',
27124
27368
  };
@@ -28331,6 +28575,7 @@
28331
28575
  apiKey: 'sk-',
28332
28576
  assistantId: 'asst_',
28333
28577
  maxRequestsPerMinute: DEFAULT_MAX_REQUESTS_PER_MINUTE,
28578
+ isCreatingNewAssistantsAllowed: false,
28334
28579
  },
28335
28580
  };
28336
28581
  },
@@ -28425,18 +28670,6 @@
28425
28670
  get profile() {
28426
28671
  return OPENAI_PROVIDER_PROFILE;
28427
28672
  }
28428
- /*
28429
- Note: Commenting this out to avoid circular dependency
28430
- /**
28431
- * Create (sub)tools for calling OpenAI API Assistants
28432
- *
28433
- * @param assistantId Which assistant to use
28434
- * @returns Tools for calling OpenAI API Assistants with same token
28435
- * /
28436
- public createAssistantSubtools(assistantId: string_token): OpenAiAssistantExecutionTools {
28437
- return new OpenAiAssistantExecutionTools({ ...this.options, assistantId });
28438
- }
28439
- */
28440
28673
  /**
28441
28674
  * List all available models (non dynamically)
28442
28675
  *
@@ -28471,6 +28704,775 @@
28471
28704
  }
28472
28705
  }
28473
28706
 
28707
+ const DEFAULT_KNOWLEDGE_SOURCE_DOWNLOAD_TIMEOUT_MS = 30000;
28708
+ const DEFAULT_KNOWLEDGE_SOURCE_UPLOAD_TIMEOUT_MS = 900000;
28709
+ const VECTOR_STORE_PROGRESS_LOG_INTERVAL_MIN_MS = 15000;
28710
+ const VECTOR_STORE_STALL_LOG_THRESHOLD_MS = 30000;
28711
+ /**
28712
+ * Base class for OpenAI execution tools that need hosted vector stores.
28713
+ *
28714
+ * @public exported from `@promptbook/openai`
28715
+ */
28716
+ class OpenAiVectorStoreHandler extends OpenAiExecutionTools {
28717
+ /**
28718
+ * Returns the per-knowledge-source download timeout in milliseconds.
28719
+ */
28720
+ getKnowledgeSourceDownloadTimeoutMs() {
28721
+ var _a;
28722
+ return (_a = this.vectorStoreOptions.knowledgeSourceDownloadTimeoutMs) !== null && _a !== void 0 ? _a : DEFAULT_KNOWLEDGE_SOURCE_DOWNLOAD_TIMEOUT_MS;
28723
+ }
28724
+ /**
28725
+ * Returns the max concurrency for knowledge source uploads.
28726
+ */
28727
+ getKnowledgeSourceUploadMaxConcurrency() {
28728
+ var _a;
28729
+ return (_a = this.vectorStoreOptions.knowledgeSourceUploadMaxConcurrency) !== null && _a !== void 0 ? _a : 5;
28730
+ }
28731
+ /**
28732
+ * Returns the polling interval in milliseconds for vector store uploads.
28733
+ */
28734
+ getKnowledgeSourceUploadPollIntervalMs() {
28735
+ var _a;
28736
+ return (_a = this.vectorStoreOptions.knowledgeSourceUploadPollIntervalMs) !== null && _a !== void 0 ? _a : 5000;
28737
+ }
28738
+ /**
28739
+ * Returns the overall upload timeout in milliseconds for vector store uploads.
28740
+ */
28741
+ getKnowledgeSourceUploadTimeoutMs() {
28742
+ var _a;
28743
+ return (_a = this.vectorStoreOptions.knowledgeSourceUploadTimeoutMs) !== null && _a !== void 0 ? _a : DEFAULT_KNOWLEDGE_SOURCE_UPLOAD_TIMEOUT_MS;
28744
+ }
28745
+ /**
28746
+ * Returns true if we should continue even if vector store ingestion stalls.
28747
+ */
28748
+ shouldContinueOnVectorStoreStall() {
28749
+ var _a;
28750
+ return (_a = this.vectorStoreOptions.shouldContinueOnVectorStoreStall) !== null && _a !== void 0 ? _a : true;
28751
+ }
28752
+ /**
28753
+ * Returns vector-store-specific options with extended settings.
28754
+ */
28755
+ get vectorStoreOptions() {
28756
+ return this.options;
28757
+ }
28758
+ /**
28759
+ * Returns the OpenAI vector stores API surface, supporting stable and beta SDKs.
28760
+ */
28761
+ getVectorStoresApi(client) {
28762
+ var _a, _b;
28763
+ const vectorStores = (_a = client.vectorStores) !== null && _a !== void 0 ? _a : (_b = client.beta) === null || _b === void 0 ? void 0 : _b.vectorStores;
28764
+ if (!vectorStores) {
28765
+ throw new Error('OpenAI client does not support vector stores. Please ensure you are using a compatible version of the OpenAI SDK with vector store support.');
28766
+ }
28767
+ return vectorStores;
28768
+ }
28769
+ /**
28770
+ * Downloads a knowledge source URL into a File for vector store upload.
28771
+ */
28772
+ async downloadKnowledgeSourceFile(options) {
28773
+ var _a;
28774
+ const { source, timeoutMs, logLabel } = options;
28775
+ const startedAtMs = Date.now();
28776
+ const controller = new AbortController();
28777
+ const timeoutId = setTimeout(() => controller.abort(), timeoutMs);
28778
+ if (this.options.isVerbose) {
28779
+ console.info('[🤰]', 'Downloading knowledge source', {
28780
+ source,
28781
+ timeoutMs,
28782
+ logLabel,
28783
+ });
28784
+ }
28785
+ try {
28786
+ const response = await fetch(source, { signal: controller.signal });
28787
+ const contentType = (_a = response.headers.get('content-type')) !== null && _a !== void 0 ? _a : undefined;
28788
+ if (!response.ok) {
28789
+ console.error('[🤰]', 'Failed to download knowledge source', {
28790
+ source,
28791
+ status: response.status,
28792
+ statusText: response.statusText,
28793
+ contentType,
28794
+ elapsedMs: Date.now() - startedAtMs,
28795
+ logLabel,
28796
+ });
28797
+ return null;
28798
+ }
28799
+ const buffer = await response.arrayBuffer();
28800
+ let filename = source.split('/').pop() || 'downloaded-file';
28801
+ try {
28802
+ const url = new URL(source);
28803
+ filename = url.pathname.split('/').pop() || filename;
28804
+ }
28805
+ catch (error) {
28806
+ // Keep default filename
28807
+ }
28808
+ const file = new File([buffer], filename, contentType ? { type: contentType } : undefined);
28809
+ const elapsedMs = Date.now() - startedAtMs;
28810
+ const sizeBytes = buffer.byteLength;
28811
+ if (this.options.isVerbose) {
28812
+ console.info('[🤰]', 'Downloaded knowledge source', {
28813
+ source,
28814
+ filename,
28815
+ sizeBytes,
28816
+ contentType,
28817
+ elapsedMs,
28818
+ logLabel,
28819
+ });
28820
+ }
28821
+ return { file, sizeBytes, filename, elapsedMs };
28822
+ }
28823
+ catch (error) {
28824
+ assertsError(error);
28825
+ console.error('[🤰]', 'Error downloading knowledge source', {
28826
+ source,
28827
+ elapsedMs: Date.now() - startedAtMs,
28828
+ logLabel,
28829
+ error: serializeError(error),
28830
+ });
28831
+ return null;
28832
+ }
28833
+ finally {
28834
+ clearTimeout(timeoutId);
28835
+ }
28836
+ }
28837
+ /**
28838
+ * Logs vector store file batch diagnostics to help trace ingestion stalls or failures.
28839
+ */
28840
+ async logVectorStoreFileBatchDiagnostics(options) {
28841
+ var _a, _b, _c, _d, _e;
28842
+ const { client, vectorStoreId, batchId, uploadedFiles, logLabel, reason } = options;
28843
+ if (reason === 'stalled' && !this.options.isVerbose) {
28844
+ return;
28845
+ }
28846
+ if (!batchId.startsWith('vsfb_')) {
28847
+ console.error('[🤰]', 'Vector store file batch diagnostics skipped (invalid batch id)', {
28848
+ vectorStoreId,
28849
+ batchId,
28850
+ reason,
28851
+ logLabel,
28852
+ });
28853
+ return;
28854
+ }
28855
+ const fileIdToMetadata = new Map();
28856
+ for (const file of uploadedFiles) {
28857
+ fileIdToMetadata.set(file.fileId, file);
28858
+ }
28859
+ try {
28860
+ const vectorStores = this.getVectorStoresApi(client);
28861
+ const limit = Math.min(100, Math.max(10, uploadedFiles.length));
28862
+ const batchFilesPage = await vectorStores.fileBatches.listFiles(batchId, {
28863
+ vector_store_id: vectorStoreId,
28864
+ limit,
28865
+ });
28866
+ const batchFiles = (_a = batchFilesPage.data) !== null && _a !== void 0 ? _a : [];
28867
+ const statusCounts = {
28868
+ in_progress: 0,
28869
+ completed: 0,
28870
+ failed: 0,
28871
+ cancelled: 0,
28872
+ };
28873
+ const errorSamples = [];
28874
+ const inProgressSamples = [];
28875
+ const batchFileIds = new Set();
28876
+ for (const file of batchFiles) {
28877
+ const status = (_b = file.status) !== null && _b !== void 0 ? _b : 'unknown';
28878
+ statusCounts[status] = ((_c = statusCounts[status]) !== null && _c !== void 0 ? _c : 0) + 1;
28879
+ const vectorStoreFileId = file.id;
28880
+ const uploadedFileId = (_d = file.file_id) !== null && _d !== void 0 ? _d : file.fileId;
28881
+ const fileId = uploadedFileId !== null && uploadedFileId !== void 0 ? uploadedFileId : vectorStoreFileId;
28882
+ batchFileIds.add(fileId);
28883
+ const metadata = fileIdToMetadata.get(fileId);
28884
+ if (status === 'failed') {
28885
+ errorSamples.push({
28886
+ fileId,
28887
+ status,
28888
+ error: (_e = file.last_error) === null || _e === void 0 ? void 0 : _e.message,
28889
+ filename: metadata === null || metadata === void 0 ? void 0 : metadata.filename,
28890
+ vectorStoreFileId: uploadedFileId ? vectorStoreFileId : undefined,
28891
+ });
28892
+ }
28893
+ if (status === 'in_progress') {
28894
+ inProgressSamples.push({
28895
+ fileId,
28896
+ filename: metadata === null || metadata === void 0 ? void 0 : metadata.filename,
28897
+ vectorStoreFileId: uploadedFileId ? vectorStoreFileId : undefined,
28898
+ });
28899
+ }
28900
+ }
28901
+ const missingSamples = uploadedFiles
28902
+ .filter((file) => !batchFileIds.has(file.fileId))
28903
+ .slice(0, 5)
28904
+ .map((file) => ({
28905
+ fileId: file.fileId,
28906
+ filename: file.filename,
28907
+ sizeBytes: file.sizeBytes,
28908
+ }));
28909
+ const vectorStore = await vectorStores.retrieve(vectorStoreId);
28910
+ const logPayload = {
28911
+ vectorStoreId,
28912
+ batchId,
28913
+ reason,
28914
+ vectorStoreStatus: vectorStore.status,
28915
+ vectorStoreFileCounts: vectorStore.file_counts,
28916
+ vectorStoreUsageBytes: vectorStore.usage_bytes,
28917
+ batchFileCount: batchFiles.length,
28918
+ statusCounts,
28919
+ errorSamples: errorSamples.slice(0, 5),
28920
+ inProgressSamples,
28921
+ missingFileCount: uploadedFiles.length - batchFileIds.size,
28922
+ missingSamples,
28923
+ logLabel,
28924
+ };
28925
+ const logFunction = reason === 'stalled' ? console.info : console.error;
28926
+ logFunction('[🤰]', 'Vector store file batch diagnostics', logPayload);
28927
+ }
28928
+ catch (error) {
28929
+ assertsError(error);
28930
+ console.error('[🤰]', 'Vector store file batch diagnostics failed', {
28931
+ vectorStoreId,
28932
+ batchId,
28933
+ reason,
28934
+ logLabel,
28935
+ error: serializeError(error),
28936
+ });
28937
+ }
28938
+ }
28939
+ /**
28940
+ * Uploads knowledge source files to the vector store and polls until processing completes.
28941
+ */
28942
+ async uploadKnowledgeSourceFilesToVectorStore(options) {
28943
+ var _a, _b, _c, _d, _e, _f;
28944
+ const { client, vectorStoreId, files, totalBytes, logLabel } = options;
28945
+ const vectorStores = this.getVectorStoresApi(client);
28946
+ const uploadStartedAtMs = Date.now();
28947
+ const maxConcurrency = Math.max(1, this.getKnowledgeSourceUploadMaxConcurrency());
28948
+ const pollIntervalMs = Math.max(1000, this.getKnowledgeSourceUploadPollIntervalMs());
28949
+ const uploadTimeoutMs = Math.max(1000, this.getKnowledgeSourceUploadTimeoutMs());
28950
+ if (this.options.isVerbose) {
28951
+ console.info('[🤰]', 'Uploading knowledge source files to OpenAI', {
28952
+ vectorStoreId,
28953
+ fileCount: files.length,
28954
+ totalBytes,
28955
+ maxConcurrency,
28956
+ pollIntervalMs,
28957
+ uploadTimeoutMs,
28958
+ logLabel,
28959
+ });
28960
+ }
28961
+ const fileTypeSummary = {};
28962
+ for (const file of files) {
28963
+ const filename = (_a = file.name) !== null && _a !== void 0 ? _a : '';
28964
+ const extension = filename.includes('.')
28965
+ ? (_c = (_b = filename.split('.').pop()) === null || _b === void 0 ? void 0 : _b.toLowerCase()) !== null && _c !== void 0 ? _c : 'unknown'
28966
+ : 'unknown';
28967
+ const sizeBytes = typeof file.size === 'number' ? file.size : 0;
28968
+ const summary = (_d = fileTypeSummary[extension]) !== null && _d !== void 0 ? _d : { count: 0, totalBytes: 0 };
28969
+ summary.count += 1;
28970
+ summary.totalBytes += sizeBytes;
28971
+ fileTypeSummary[extension] = summary;
28972
+ }
28973
+ if (this.options.isVerbose) {
28974
+ console.info('[🤰]', 'Knowledge source file summary', {
28975
+ vectorStoreId,
28976
+ fileCount: files.length,
28977
+ totalBytes,
28978
+ fileTypeSummary,
28979
+ logLabel,
28980
+ });
28981
+ }
28982
+ const fileEntries = files.map((file, index) => ({ file, index }));
28983
+ const fileIterator = fileEntries.values();
28984
+ const fileIds = [];
28985
+ const uploadedFiles = [];
28986
+ const failedUploads = [];
28987
+ let uploadedCount = 0;
28988
+ const processFiles = async (iterator) => {
28989
+ var _a, _b;
28990
+ for (const { file, index } of iterator) {
28991
+ const uploadIndex = index + 1;
28992
+ const filename = file.name || `knowledge-source-${uploadIndex}`;
28993
+ const extension = filename.includes('.')
28994
+ ? (_b = (_a = filename.split('.').pop()) === null || _a === void 0 ? void 0 : _a.toLowerCase()) !== null && _b !== void 0 ? _b : 'unknown'
28995
+ : 'unknown';
28996
+ const sizeBytes = typeof file.size === 'number' ? file.size : undefined;
28997
+ const fileUploadStartedAtMs = Date.now();
28998
+ if (this.options.isVerbose) {
28999
+ console.info('[🤰]', 'Uploading knowledge source file', {
29000
+ index: uploadIndex,
29001
+ total: files.length,
29002
+ filename,
29003
+ extension,
29004
+ sizeBytes,
29005
+ logLabel,
29006
+ });
29007
+ }
29008
+ try {
29009
+ const uploaded = await client.files.create({ file, purpose: 'assistants' });
29010
+ fileIds.push(uploaded.id);
29011
+ uploadedFiles.push({ fileId: uploaded.id, filename, sizeBytes });
29012
+ uploadedCount += 1;
29013
+ if (this.options.isVerbose) {
29014
+ console.info('[🤰]', 'Uploaded knowledge source file', {
29015
+ index: uploadIndex,
29016
+ total: files.length,
29017
+ filename,
29018
+ sizeBytes,
29019
+ fileId: uploaded.id,
29020
+ elapsedMs: Date.now() - fileUploadStartedAtMs,
29021
+ logLabel,
29022
+ });
29023
+ }
29024
+ }
29025
+ catch (error) {
29026
+ assertsError(error);
29027
+ const serializedError = serializeError(error);
29028
+ failedUploads.push({ index: uploadIndex, filename, error: serializedError });
29029
+ console.error('[🤰]', 'Failed to upload knowledge source file', {
29030
+ index: uploadIndex,
29031
+ total: files.length,
29032
+ filename,
29033
+ sizeBytes,
29034
+ elapsedMs: Date.now() - fileUploadStartedAtMs,
29035
+ logLabel,
29036
+ error: serializedError,
29037
+ });
29038
+ }
29039
+ }
29040
+ };
29041
+ const workerCount = Math.min(maxConcurrency, files.length);
29042
+ const workers = Array.from({ length: workerCount }, () => processFiles(fileIterator));
29043
+ await Promise.all(workers);
29044
+ if (this.options.isVerbose) {
29045
+ console.info('[🤰]', 'Finished uploading knowledge source files', {
29046
+ vectorStoreId,
29047
+ fileCount: files.length,
29048
+ uploadedCount,
29049
+ failedCount: failedUploads.length,
29050
+ elapsedMs: Date.now() - uploadStartedAtMs,
29051
+ failedSamples: failedUploads.slice(0, 3),
29052
+ logLabel,
29053
+ });
29054
+ }
29055
+ if (fileIds.length === 0) {
29056
+ console.error('[🤰]', 'No knowledge source files were uploaded', {
29057
+ vectorStoreId,
29058
+ fileCount: files.length,
29059
+ failedCount: failedUploads.length,
29060
+ logLabel,
29061
+ });
29062
+ return null;
29063
+ }
29064
+ const batch = await vectorStores.fileBatches.create(vectorStoreId, {
29065
+ file_ids: fileIds,
29066
+ });
29067
+ const expectedBatchId = batch.id;
29068
+ const expectedBatchIdValid = expectedBatchId.startsWith('vsfb_');
29069
+ if (!expectedBatchIdValid) {
29070
+ console.error('[🤰]', 'Vector store file batch id looks invalid', {
29071
+ vectorStoreId,
29072
+ batchId: expectedBatchId,
29073
+ batchVectorStoreId: batch.vector_store_id,
29074
+ logLabel,
29075
+ });
29076
+ }
29077
+ else if (batch.vector_store_id !== vectorStoreId) {
29078
+ console.error('[🤰]', 'Vector store file batch vector store id mismatch', {
29079
+ vectorStoreId,
29080
+ batchId: expectedBatchId,
29081
+ batchVectorStoreId: batch.vector_store_id,
29082
+ logLabel,
29083
+ });
29084
+ }
29085
+ if (this.options.isVerbose) {
29086
+ console.info('[🤰]', 'Created vector store file batch', {
29087
+ vectorStoreId,
29088
+ batchId: expectedBatchId,
29089
+ fileCount: fileIds.length,
29090
+ logLabel,
29091
+ });
29092
+ }
29093
+ const pollStartedAtMs = Date.now();
29094
+ const progressLogIntervalMs = Math.max(VECTOR_STORE_PROGRESS_LOG_INTERVAL_MIN_MS, pollIntervalMs);
29095
+ const diagnosticsIntervalMs = Math.max(60000, pollIntervalMs * 5);
29096
+ // let lastStatus: string | undefined;
29097
+ let lastCountsKey = '';
29098
+ let lastProgressKey = '';
29099
+ let lastLogAtMs = 0;
29100
+ let lastProgressAtMs = pollStartedAtMs;
29101
+ let lastDiagnosticsAtMs = pollStartedAtMs;
29102
+ let latestBatch = batch;
29103
+ let loggedBatchIdMismatch = false;
29104
+ let loggedBatchIdFallback = false;
29105
+ let loggedBatchIdInvalid = false;
29106
+ let shouldPoll = true;
29107
+ while (shouldPoll) {
29108
+ const nowMs = Date.now();
29109
+ // [🤰] Note: Sometimes OpenAI returns Vector Store object instead of Batch object, or IDs get swapped.
29110
+ const rawBatchId = typeof latestBatch.id === 'string' ? latestBatch.id : '';
29111
+ const rawVectorStoreId = latestBatch.vector_store_id;
29112
+ let returnedBatchId = rawBatchId;
29113
+ let returnedBatchIdValid = typeof returnedBatchId === 'string' && returnedBatchId.startsWith('vsfb_');
29114
+ if (!returnedBatchIdValid && expectedBatchIdValid) {
29115
+ if (!loggedBatchIdFallback) {
29116
+ console.error('[🤰]', 'Vector store file batch id missing from response; falling back to expected', {
29117
+ vectorStoreId,
29118
+ expectedBatchId,
29119
+ returnedBatchId,
29120
+ rawVectorStoreId,
29121
+ logLabel,
29122
+ });
29123
+ loggedBatchIdFallback = true;
29124
+ }
29125
+ returnedBatchId = expectedBatchId;
29126
+ returnedBatchIdValid = true;
29127
+ }
29128
+ if (!returnedBatchIdValid && !loggedBatchIdInvalid) {
29129
+ console.error('[🤰]', 'Vector store file batch id is invalid; stopping polling', {
29130
+ vectorStoreId,
29131
+ expectedBatchId,
29132
+ returnedBatchId,
29133
+ rawVectorStoreId,
29134
+ logLabel,
29135
+ });
29136
+ loggedBatchIdInvalid = true;
29137
+ }
29138
+ const batchIdMismatch = expectedBatchIdValid && returnedBatchIdValid && returnedBatchId !== expectedBatchId;
29139
+ if (batchIdMismatch && !loggedBatchIdMismatch) {
29140
+ console.error('[🤰]', 'Vector store file batch id mismatch', {
29141
+ vectorStoreId,
29142
+ expectedBatchId,
29143
+ returnedBatchId,
29144
+ logLabel,
29145
+ });
29146
+ loggedBatchIdMismatch = true;
29147
+ }
29148
+ if (returnedBatchIdValid) {
29149
+ latestBatch = await vectorStores.fileBatches.retrieve(returnedBatchId, {
29150
+ vector_store_id: vectorStoreId,
29151
+ });
29152
+ }
29153
+ else {
29154
+ shouldPoll = false;
29155
+ continue;
29156
+ }
29157
+ const status = (_e = latestBatch.status) !== null && _e !== void 0 ? _e : 'unknown';
29158
+ const fileCounts = (_f = latestBatch.file_counts) !== null && _f !== void 0 ? _f : {};
29159
+ const progressKey = JSON.stringify(fileCounts);
29160
+ const statusCountsKey = `${status}-${progressKey}`;
29161
+ const isProgressing = progressKey !== lastProgressKey;
29162
+ if (isProgressing) {
29163
+ lastProgressAtMs = nowMs;
29164
+ lastProgressKey = progressKey;
29165
+ }
29166
+ if (this.options.isVerbose &&
29167
+ (statusCountsKey !== lastCountsKey || nowMs - lastLogAtMs >= progressLogIntervalMs)) {
29168
+ console.info('[🤰]', 'Vector store file batch status', {
29169
+ vectorStoreId,
29170
+ batchId: returnedBatchId,
29171
+ status,
29172
+ fileCounts,
29173
+ elapsedMs: nowMs - pollStartedAtMs,
29174
+ logLabel,
29175
+ });
29176
+ lastCountsKey = statusCountsKey;
29177
+ lastLogAtMs = nowMs;
29178
+ }
29179
+ if (status === 'in_progress' &&
29180
+ nowMs - lastProgressAtMs >= VECTOR_STORE_STALL_LOG_THRESHOLD_MS &&
29181
+ nowMs - lastDiagnosticsAtMs >= diagnosticsIntervalMs) {
29182
+ lastDiagnosticsAtMs = nowMs;
29183
+ await this.logVectorStoreFileBatchDiagnostics({
29184
+ client,
29185
+ vectorStoreId,
29186
+ batchId: returnedBatchId,
29187
+ uploadedFiles,
29188
+ logLabel,
29189
+ reason: 'stalled',
29190
+ });
29191
+ }
29192
+ if (status === 'completed') {
29193
+ if (this.options.isVerbose) {
29194
+ console.info('[🤰]', 'Vector store file batch completed', {
29195
+ vectorStoreId,
29196
+ batchId: returnedBatchId,
29197
+ fileCounts,
29198
+ elapsedMs: nowMs - pollStartedAtMs,
29199
+ logLabel,
29200
+ });
29201
+ }
29202
+ shouldPoll = false;
29203
+ continue;
29204
+ }
29205
+ if (status === 'failed') {
29206
+ console.error('[🤰]', 'Vector store file batch completed with failures', {
29207
+ vectorStoreId,
29208
+ batchId: returnedBatchId,
29209
+ fileCounts,
29210
+ elapsedMs: nowMs - pollStartedAtMs,
29211
+ logLabel,
29212
+ });
29213
+ await this.logVectorStoreFileBatchDiagnostics({
29214
+ client,
29215
+ vectorStoreId,
29216
+ batchId: returnedBatchId,
29217
+ uploadedFiles,
29218
+ logLabel,
29219
+ reason: 'failed',
29220
+ });
29221
+ shouldPoll = false;
29222
+ continue;
29223
+ }
29224
+ if (status === 'cancelled') {
29225
+ console.error('[🤰]', 'Vector store file batch did not complete', {
29226
+ vectorStoreId,
29227
+ batchId: returnedBatchId,
29228
+ status,
29229
+ fileCounts,
29230
+ elapsedMs: nowMs - pollStartedAtMs,
29231
+ logLabel,
29232
+ });
29233
+ await this.logVectorStoreFileBatchDiagnostics({
29234
+ client,
29235
+ vectorStoreId,
29236
+ batchId: returnedBatchId,
29237
+ uploadedFiles,
29238
+ logLabel,
29239
+ reason: 'failed',
29240
+ });
29241
+ shouldPoll = false;
29242
+ continue;
29243
+ }
29244
+ if (nowMs - pollStartedAtMs >= uploadTimeoutMs) {
29245
+ console.error('[🤰]', 'Timed out waiting for vector store file batch', {
29246
+ vectorStoreId,
29247
+ batchId: returnedBatchId,
29248
+ fileCounts,
29249
+ elapsedMs: nowMs - pollStartedAtMs,
29250
+ uploadTimeoutMs,
29251
+ logLabel,
29252
+ });
29253
+ await this.logVectorStoreFileBatchDiagnostics({
29254
+ client,
29255
+ vectorStoreId,
29256
+ batchId: returnedBatchId,
29257
+ uploadedFiles,
29258
+ logLabel,
29259
+ reason: 'timeout',
29260
+ });
29261
+ if (this.shouldContinueOnVectorStoreStall()) {
29262
+ console.warn('[🤰]', 'Continuing despite vector store timeout as requested', {
29263
+ vectorStoreId,
29264
+ logLabel,
29265
+ });
29266
+ shouldPoll = false;
29267
+ continue;
29268
+ }
29269
+ try {
29270
+ const cancelBatchId = batchIdMismatch && returnedBatchId.startsWith('vsfb_') ? returnedBatchId : expectedBatchId;
29271
+ if (!cancelBatchId.startsWith('vsfb_')) {
29272
+ console.error('[🤰]', 'Skipping vector store file batch cancel (invalid batch id)', {
29273
+ vectorStoreId,
29274
+ batchId: cancelBatchId,
29275
+ logLabel,
29276
+ });
29277
+ }
29278
+ else {
29279
+ await vectorStores.fileBatches.cancel(cancelBatchId, {
29280
+ vector_store_id: vectorStoreId,
29281
+ });
29282
+ }
29283
+ if (this.options.isVerbose) {
29284
+ console.info('[🤰]', 'Cancelled vector store file batch after timeout', {
29285
+ vectorStoreId,
29286
+ batchId: batchIdMismatch && returnedBatchId.startsWith('vsfb_')
29287
+ ? returnedBatchId
29288
+ : expectedBatchId,
29289
+ ...(batchIdMismatch ? { returnedBatchId } : {}),
29290
+ logLabel,
29291
+ });
29292
+ }
29293
+ }
29294
+ catch (error) {
29295
+ assertsError(error);
29296
+ console.error('[🤰]', 'Failed to cancel vector store file batch after timeout', {
29297
+ vectorStoreId,
29298
+ batchId: expectedBatchId,
29299
+ ...(batchIdMismatch ? { returnedBatchId } : {}),
29300
+ logLabel,
29301
+ error: serializeError(error),
29302
+ });
29303
+ }
29304
+ shouldPoll = false;
29305
+ continue;
29306
+ }
29307
+ await new Promise((resolve) => setTimeout(resolve, pollIntervalMs));
29308
+ }
29309
+ return latestBatch;
29310
+ }
29311
+ /**
29312
+ * Creates a vector store and uploads knowledge sources, returning its ID.
29313
+ */
29314
+ async createVectorStoreWithKnowledgeSources(options) {
29315
+ const { client, name, knowledgeSources, logLabel } = options;
29316
+ const vectorStores = this.getVectorStoresApi(client);
29317
+ const knowledgeSourcesCount = knowledgeSources.length;
29318
+ const downloadTimeoutMs = this.getKnowledgeSourceDownloadTimeoutMs();
29319
+ if (this.options.isVerbose) {
29320
+ console.info('[🤰]', 'Creating vector store with knowledge sources', {
29321
+ name,
29322
+ knowledgeSourcesCount,
29323
+ downloadTimeoutMs,
29324
+ logLabel,
29325
+ });
29326
+ }
29327
+ const vectorStore = await vectorStores.create({
29328
+ name: `${name} Knowledge Base`,
29329
+ });
29330
+ const vectorStoreId = vectorStore.id;
29331
+ if (this.options.isVerbose) {
29332
+ console.info('[🤰]', 'Vector store created', {
29333
+ vectorStoreId,
29334
+ logLabel,
29335
+ });
29336
+ }
29337
+ const fileStreams = [];
29338
+ const skippedSources = [];
29339
+ let totalBytes = 0;
29340
+ const processingStartedAtMs = Date.now();
29341
+ for (const [index, source] of knowledgeSources.entries()) {
29342
+ try {
29343
+ const isDataUrl = isDataUrlKnowledgeSource(source);
29344
+ const isHttp = source.startsWith('http://') || source.startsWith('https://');
29345
+ const sourceType = isDataUrl ? 'data_url' : isHttp ? 'url' : 'file';
29346
+ if (this.options.isVerbose) {
29347
+ console.info('[🤰]', 'Processing knowledge source', {
29348
+ index: index + 1,
29349
+ total: knowledgeSourcesCount,
29350
+ source,
29351
+ sourceType,
29352
+ logLabel,
29353
+ });
29354
+ }
29355
+ if (isDataUrl) {
29356
+ const parsed = parseDataUrlKnowledgeSource(source);
29357
+ if (!parsed) {
29358
+ skippedSources.push({ source, reason: 'invalid_data_url' });
29359
+ if (this.options.isVerbose) {
29360
+ console.info('[🤰]', 'Skipping knowledge source (invalid data URL)', {
29361
+ source,
29362
+ sourceType,
29363
+ logLabel,
29364
+ });
29365
+ }
29366
+ continue;
29367
+ }
29368
+ const dataUrlFile = new File([parsed.buffer], parsed.filename, {
29369
+ type: parsed.mimeType,
29370
+ });
29371
+ fileStreams.push(dataUrlFile);
29372
+ totalBytes += parsed.buffer.length;
29373
+ continue;
29374
+ }
29375
+ if (isHttp) {
29376
+ const downloadResult = await this.downloadKnowledgeSourceFile({
29377
+ source,
29378
+ timeoutMs: downloadTimeoutMs,
29379
+ logLabel,
29380
+ });
29381
+ if (downloadResult) {
29382
+ fileStreams.push(downloadResult.file);
29383
+ totalBytes += downloadResult.sizeBytes;
29384
+ }
29385
+ else {
29386
+ skippedSources.push({ source, reason: 'download_failed' });
29387
+ }
29388
+ }
29389
+ else {
29390
+ skippedSources.push({ source, reason: 'unsupported_source_type' });
29391
+ if (this.options.isVerbose) {
29392
+ console.info('[🤰]', 'Skipping knowledge source (unsupported type)', {
29393
+ source,
29394
+ sourceType,
29395
+ logLabel,
29396
+ });
29397
+ }
29398
+ /*
29399
+ TODO: [🤰] Resolve problem with browser environment
29400
+ // Assume it's a local file path
29401
+ // Note: This will work in Node.js environment
29402
+ // For browser environments, this would need different handling
29403
+ const fs = await import('fs');
29404
+ const fileStream = fs.createReadStream(source);
29405
+ fileStreams.push(fileStream);
29406
+ */
29407
+ }
29408
+ }
29409
+ catch (error) {
29410
+ assertsError(error);
29411
+ skippedSources.push({ source, reason: 'processing_error' });
29412
+ console.error('[🤰]', 'Error processing knowledge source', {
29413
+ source,
29414
+ logLabel,
29415
+ error: serializeError(error),
29416
+ });
29417
+ }
29418
+ }
29419
+ if (this.options.isVerbose) {
29420
+ console.info('[🤰]', 'Finished processing knowledge sources', {
29421
+ total: knowledgeSourcesCount,
29422
+ downloadedCount: fileStreams.length,
29423
+ skippedCount: skippedSources.length,
29424
+ totalBytes,
29425
+ elapsedMs: Date.now() - processingStartedAtMs,
29426
+ skippedSamples: skippedSources.slice(0, 3),
29427
+ logLabel,
29428
+ });
29429
+ }
29430
+ if (fileStreams.length > 0) {
29431
+ if (this.options.isVerbose) {
29432
+ console.info('[🤰]', 'Uploading files to vector store', {
29433
+ vectorStoreId,
29434
+ fileCount: fileStreams.length,
29435
+ totalBytes,
29436
+ maxConcurrency: this.getKnowledgeSourceUploadMaxConcurrency(),
29437
+ pollIntervalMs: this.getKnowledgeSourceUploadPollIntervalMs(),
29438
+ uploadTimeoutMs: this.getKnowledgeSourceUploadTimeoutMs(),
29439
+ logLabel,
29440
+ });
29441
+ }
29442
+ try {
29443
+ await this.uploadKnowledgeSourceFilesToVectorStore({
29444
+ client,
29445
+ vectorStoreId,
29446
+ files: fileStreams,
29447
+ totalBytes,
29448
+ logLabel,
29449
+ });
29450
+ }
29451
+ catch (error) {
29452
+ assertsError(error);
29453
+ console.error('[🤰]', 'Error uploading files to vector store', {
29454
+ vectorStoreId,
29455
+ logLabel,
29456
+ error: serializeError(error),
29457
+ });
29458
+ }
29459
+ }
29460
+ else if (this.options.isVerbose) {
29461
+ console.info('[🤰]', 'No knowledge source files to upload', {
29462
+ vectorStoreId,
29463
+ skippedCount: skippedSources.length,
29464
+ logLabel,
29465
+ });
29466
+ }
29467
+ return {
29468
+ vectorStoreId,
29469
+ uploadedFileCount: fileStreams.length,
29470
+ skippedCount: skippedSources.length,
29471
+ totalBytes,
29472
+ };
29473
+ }
29474
+ }
29475
+
28474
29476
  /**
28475
29477
  * Uploads files to OpenAI and returns their IDs
28476
29478
  *
@@ -28504,10 +29506,10 @@
28504
29506
  * - `OpenAiAssistantExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities, recommended for usage in `Agent` or `AgentLlmExecutionTools`
28505
29507
  * - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
28506
29508
  *
29509
+ * @deprecated Use `OpenAiAgentKitExecutionTools` instead.
28507
29510
  * @public exported from `@promptbook/openai`
28508
- * @deprecated Use `OpenAiAgentExecutionTools` instead which uses the new OpenAI Responses API
28509
29511
  */
28510
- class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
29512
+ class OpenAiAssistantExecutionTools extends OpenAiVectorStoreHandler {
28511
29513
  /**
28512
29514
  * Creates OpenAI Execution Tools.
28513
29515
  *
@@ -28636,8 +29638,7 @@
28636
29638
  console.info(colors__default["default"].bgWhite('rawRequest (non-streaming with tools)'), JSON.stringify(rawRequest, null, 4));
28637
29639
  }
28638
29640
  // Create thread and run
28639
- const threadAndRun = await client.beta.threads.createAndRun(rawRequest);
28640
- let run = threadAndRun;
29641
+ let run = (await client.beta.threads.createAndRun(rawRequest));
28641
29642
  const completedToolCalls = [];
28642
29643
  const toolCallStartedAt = new Map();
28643
29644
  // Poll until run completes or requires action
@@ -28732,14 +29733,14 @@
28732
29733
  }
28733
29734
  }
28734
29735
  // Submit tool outputs
28735
- run = await client.beta.threads.runs.submitToolOutputs(run.thread_id, run.id, {
29736
+ run = (await client.beta.threads.runs.submitToolOutputs(run.thread_id, run.id, {
28736
29737
  tool_outputs: toolOutputs,
28737
- });
29738
+ }));
28738
29739
  }
28739
29740
  else {
28740
29741
  // Wait a bit before polling again
28741
29742
  await new Promise((resolve) => setTimeout(resolve, 500));
28742
- run = await client.beta.threads.runs.retrieve(run.thread_id, run.id);
29743
+ run = (await client.beta.threads.runs.retrieve(run.thread_id, run.id));
28743
29744
  }
28744
29745
  }
28745
29746
  if (run.status !== 'completed') {
@@ -28938,6 +29939,7 @@
28938
29939
  getAssistant(assistantId) {
28939
29940
  return new OpenAiAssistantExecutionTools({
28940
29941
  ...this.options,
29942
+ isCreatingNewAssistantsAllowed: this.isCreatingNewAssistantsAllowed,
28941
29943
  assistantId,
28942
29944
  });
28943
29945
  }
@@ -28963,88 +29965,13 @@
28963
29965
  let vectorStoreId;
28964
29966
  // If knowledge sources are provided, create a vector store with them
28965
29967
  if (knowledgeSources && knowledgeSources.length > 0) {
28966
- if (this.options.isVerbose) {
28967
- console.info('[🤰]', 'Creating vector store with knowledge sources', {
28968
- name,
28969
- knowledgeSourcesCount,
28970
- });
28971
- }
28972
- // Create a vector store
28973
- const vectorStore = await client.beta.vectorStores.create({
28974
- name: `${name} Knowledge Base`,
29968
+ const vectorStoreResult = await this.createVectorStoreWithKnowledgeSources({
29969
+ client,
29970
+ name,
29971
+ knowledgeSources,
29972
+ logLabel: 'assistant creation',
28975
29973
  });
28976
- vectorStoreId = vectorStore.id;
28977
- if (this.options.isVerbose) {
28978
- console.info('[🤰]', 'Vector store created', {
28979
- vectorStoreId,
28980
- });
28981
- }
28982
- // Upload files from knowledge sources to the vector store
28983
- const fileStreams = [];
28984
- for (const [index, source] of knowledgeSources.entries()) {
28985
- try {
28986
- if (this.options.isVerbose) {
28987
- console.info('[🤰]', 'Processing knowledge source', {
28988
- index: index + 1,
28989
- total: knowledgeSources.length,
28990
- source,
28991
- sourceType: source.startsWith('http') || source.startsWith('https') ? 'url' : 'file',
28992
- });
28993
- }
28994
- // Check if it's a URL
28995
- if (source.startsWith('http://') || source.startsWith('https://')) {
28996
- // Download the file
28997
- const response = await fetch(source);
28998
- if (!response.ok) {
28999
- console.error(`Failed to download ${source}: ${response.statusText}`);
29000
- continue;
29001
- }
29002
- const buffer = await response.arrayBuffer();
29003
- let filename = source.split('/').pop() || 'downloaded-file';
29004
- try {
29005
- const url = new URL(source);
29006
- filename = url.pathname.split('/').pop() || filename;
29007
- }
29008
- catch (error) {
29009
- // Keep default filename
29010
- }
29011
- const blob = new Blob([buffer]);
29012
- const file = new File([blob], filename);
29013
- fileStreams.push(file);
29014
- }
29015
- else {
29016
- /*
29017
- TODO: [🐱‍🚀] Resolve problem with browser environment
29018
- // Assume it's a local file path
29019
- // Note: This will work in Node.js environment
29020
- // For browser environments, this would need different handling
29021
- const fs = await import('fs');
29022
- const fileStream = fs.createReadStream(source);
29023
- fileStreams.push(fileStream);
29024
- */
29025
- }
29026
- }
29027
- catch (error) {
29028
- console.error(`Error processing knowledge source ${source}:`, error);
29029
- }
29030
- }
29031
- // Batch upload files to the vector store
29032
- if (fileStreams.length > 0) {
29033
- try {
29034
- await client.beta.vectorStores.fileBatches.uploadAndPoll(vectorStoreId, {
29035
- files: fileStreams,
29036
- });
29037
- if (this.options.isVerbose) {
29038
- console.info('[🤰]', 'Uploaded files to vector store', {
29039
- vectorStoreId,
29040
- fileCount: fileStreams.length,
29041
- });
29042
- }
29043
- }
29044
- catch (error) {
29045
- console.error('Error uploading files to vector store:', error);
29046
- }
29047
- }
29974
+ vectorStoreId = vectorStoreResult.vectorStoreId;
29048
29975
  }
29049
29976
  // Create assistant with vector store attached
29050
29977
  const assistantConfig = {
@@ -29111,91 +30038,14 @@
29111
30038
  const client = await this.getClient();
29112
30039
  let vectorStoreId;
29113
30040
  // If knowledge sources are provided, create a vector store with them
29114
- // TODO: [🧠] Reuse vector store creation logic from createNewAssistant
29115
30041
  if (knowledgeSources && knowledgeSources.length > 0) {
29116
- if (this.options.isVerbose) {
29117
- console.info('[🤰]', 'Creating vector store for assistant update', {
29118
- assistantId,
29119
- name,
29120
- knowledgeSourcesCount,
29121
- });
29122
- }
29123
- // Create a vector store
29124
- const vectorStore = await client.beta.vectorStores.create({
29125
- name: `${name} Knowledge Base`,
30042
+ const vectorStoreResult = await this.createVectorStoreWithKnowledgeSources({
30043
+ client,
30044
+ name: name !== null && name !== void 0 ? name : assistantId,
30045
+ knowledgeSources,
30046
+ logLabel: 'assistant update',
29126
30047
  });
29127
- vectorStoreId = vectorStore.id;
29128
- if (this.options.isVerbose) {
29129
- console.info('[🤰]', 'Vector store created for assistant update', {
29130
- vectorStoreId,
29131
- });
29132
- }
29133
- // Upload files from knowledge sources to the vector store
29134
- const fileStreams = [];
29135
- for (const [index, source] of knowledgeSources.entries()) {
29136
- try {
29137
- if (this.options.isVerbose) {
29138
- console.info('[🤰]', 'Processing knowledge source for update', {
29139
- index: index + 1,
29140
- total: knowledgeSources.length,
29141
- source,
29142
- sourceType: source.startsWith('http') || source.startsWith('https') ? 'url' : 'file',
29143
- });
29144
- }
29145
- // Check if it's a URL
29146
- if (source.startsWith('http://') || source.startsWith('https://')) {
29147
- // Download the file
29148
- const response = await fetch(source);
29149
- if (!response.ok) {
29150
- console.error(`Failed to download ${source}: ${response.statusText}`);
29151
- continue;
29152
- }
29153
- const buffer = await response.arrayBuffer();
29154
- let filename = source.split('/').pop() || 'downloaded-file';
29155
- try {
29156
- const url = new URL(source);
29157
- filename = url.pathname.split('/').pop() || filename;
29158
- }
29159
- catch (error) {
29160
- // Keep default filename
29161
- }
29162
- const blob = new Blob([buffer]);
29163
- const file = new File([blob], filename);
29164
- fileStreams.push(file);
29165
- }
29166
- else {
29167
- /*
29168
- TODO: [🐱‍🚀] Resolve problem with browser environment
29169
- // Assume it's a local file path
29170
- // Note: This will work in Node.js environment
29171
- // For browser environments, this would need different handling
29172
- const fs = await import('fs');
29173
- const fileStream = fs.createReadStream(source);
29174
- fileStreams.push(fileStream);
29175
- */
29176
- }
29177
- }
29178
- catch (error) {
29179
- console.error(`Error processing knowledge source ${source}:`, error);
29180
- }
29181
- }
29182
- // Batch upload files to the vector store
29183
- if (fileStreams.length > 0) {
29184
- try {
29185
- await client.beta.vectorStores.fileBatches.uploadAndPoll(vectorStoreId, {
29186
- files: fileStreams,
29187
- });
29188
- if (this.options.isVerbose) {
29189
- console.info('[🤰]', 'Uploaded files to vector store for update', {
29190
- vectorStoreId,
29191
- fileCount: fileStreams.length,
29192
- });
29193
- }
29194
- }
29195
- catch (error) {
29196
- console.error('Error uploading files to vector store:', error);
29197
- }
29198
- }
30048
+ vectorStoreId = vectorStoreResult.vectorStoreId;
29199
30049
  }
29200
30050
  const assistantUpdate = {
29201
30051
  name,
@@ -29238,7 +30088,7 @@
29238
30088
  * Discriminant for type guards
29239
30089
  */
29240
30090
  get discriminant() {
29241
- return DISCRIMINANT;
30091
+ return DISCRIMINANT$1;
29242
30092
  }
29243
30093
  /**
29244
30094
  * Type guard to check if given `LlmExecutionTools` are instanceof `OpenAiAssistantExecutionTools`
@@ -29246,7 +30096,7 @@
29246
30096
  * Note: This is useful when you can possibly have multiple versions of `@promptbook/openai` installed
29247
30097
  */
29248
30098
  static isOpenAiAssistantExecutionTools(llmExecutionTools) {
29249
- return llmExecutionTools.discriminant === DISCRIMINANT;
30099
+ return llmExecutionTools.discriminant === DISCRIMINANT$1;
29250
30100
  }
29251
30101
  }
29252
30102
  /**
@@ -29254,7 +30104,7 @@
29254
30104
  *
29255
30105
  * @private const of `OpenAiAssistantExecutionTools`
29256
30106
  */
29257
- const DISCRIMINANT = 'OPEN_AI_ASSISTANT_V1';
30107
+ const DISCRIMINANT$1 = 'OPEN_AI_ASSISTANT_V1';
29258
30108
  /**
29259
30109
  * TODO: !!!!! [✨🥚] Knowledge should work both with and without scrapers
29260
30110
  * TODO: [🙎] In `OpenAiAssistantExecutionTools` Allow to create abstract assistants with `isCreatingNewAssistantsAllowed`
@@ -30407,11 +31257,14 @@
30407
31257
/**
 * Builds the baseline `AgentModelRequirements` object that commitments are later applied onto.
 *
 * @returns A fresh requirements object with neutral defaults and an empty system message.
 */
function createEmptyAgentModelRequirements() {
    const baselineRequirements = {
        systemMessage: '',
        promptSuffix: '',
        // modelName: 'gpt-5', <- kept as a note of the previously considered default model
        modelName: 'gemini-2.5-flash-lite',
        temperature: 0.7,
        topP: 0.9,
        topK: 50,
        parentAgentUrl: null,
        isClosed: false,
    };
    return baselineRequirements;
}
30417
31270
  /**
@@ -30801,14 +31654,26 @@
30801
31654
  }
30802
31655
 
30803
31656
  /**
30804
- * Creates agent model requirements using the new commitment system
31657
+ * Creates agent model requirements using the new commitment system.
31658
+ *
30805
31659
  * This function uses a reduce-like pattern where each commitment applies its changes
30806
- * to build the final requirements starting from a basic empty model
31660
+ * to build the final requirements starting from a basic empty model.
30807
31661
  *
30808
- * @public exported from `@promptbook/core`
31662
+ * @param agentSource - Agent source book to parse.
31663
+ * @param modelName - Optional override for the agent model name.
31664
+ * @param options - Additional options such as the agent reference resolver.
31665
+ *
31666
+ * @private @@@
31667
+ */
31668
+ const COMMITMENTS_WITH_AGENT_REFERENCES = new Set(['FROM', 'IMPORT', 'IMPORTS', 'TEAM']);
31669
+ /**
31670
+ * @@@
31671
+ *
31672
+ * @private @@@
30809
31673
  */
30810
- async function createAgentModelRequirementsWithCommitments(agentSource, modelName) {
31674
+ async function createAgentModelRequirementsWithCommitments(agentSource, modelName, options) {
30811
31675
  var _a;
31676
+ const agentReferenceResolver = options === null || options === void 0 ? void 0 : options.agentReferenceResolver;
30812
31677
  // Parse the agent source to extract commitments
30813
31678
  const parseResult = parseAgentSourceWithCommitments(agentSource);
30814
31679
  // Apply DELETE filtering: remove prior commitments tagged by parameters targeted by DELETE/CANCEL/DISCARD/REMOVE
@@ -30845,8 +31710,8 @@
30845
31710
  // Store the agent name in metadata so commitments can access it
30846
31711
  requirements = {
30847
31712
  ...requirements,
30848
- metadata: {
30849
- ...requirements.metadata,
31713
+ _metadata: {
31714
+ ...requirements._metadata,
30850
31715
  agentName: parseResult.agentName,
30851
31716
  },
30852
31717
  };
@@ -30860,6 +31725,11 @@
30860
31725
  // Apply each commitment in order using reduce-like pattern
30861
31726
  for (let i = 0; i < filteredCommitments.length; i++) {
30862
31727
  const commitment = filteredCommitments[i];
31728
+ const isReferenceCommitment = Boolean(agentReferenceResolver && COMMITMENTS_WITH_AGENT_REFERENCES.has(commitment.type));
31729
+ let commitmentContent = commitment.content;
31730
+ if (isReferenceCommitment && agentReferenceResolver) {
31731
+ commitmentContent = await agentReferenceResolver.resolveCommitmentContent(commitment.type, commitment.content);
31732
+ }
30863
31733
  // CLOSED commitment should work only if its the last commitment in the book
30864
31734
  if (commitment.type === 'CLOSED' && i !== filteredCommitments.length - 1) {
30865
31735
  continue;
@@ -30867,7 +31737,7 @@
30867
31737
  const definition = getCommitmentDefinition(commitment.type);
30868
31738
  if (definition) {
30869
31739
  try {
30870
- requirements = definition.applyToAgentModelRequirements(requirements, commitment.content);
31740
+ requirements = definition.applyToAgentModelRequirements(requirements, commitmentContent);
30871
31741
  }
30872
31742
  catch (error) {
30873
31743
  console.warn(`Failed to apply commitment ${commitment.type}:`, error);
@@ -31326,23 +32196,28 @@
31326
32196
  */
31327
32197
 
31328
32198
/**
 * Creates model requirements for an agent based on its source.
 *
 * There are 2 similar functions:
 * - `parseAgentSource` which is a lightweight parser for agent source, it parses basic information and its purpose is to be quick and synchronous. The commitments there are hardcoded.
 * - `createAgentModelRequirements` which is an asynchronous function that creates model requirements it applies each commitment one by one and works asynchronous.
 *
 * @param agentSource - Book describing the agent.
 * @param modelName - Optional override for the agent's model.
 * @param availableModels - Models that could fulfill the agent.
 * @param llmTools - Execution tools used when selecting a best model.
 * @param options - Optional hooks such as the agent reference resolver.
 * @public exported from `@promptbook/core`
 */
async function createAgentModelRequirements(agentSource, modelName, availableModels, llmTools, options) {
    // When candidate models exist but no explicit model was requested,
    // delegate model selection to the persona-based heuristic first.
    if (availableModels && !modelName && llmTools) {
        const autoSelectedModelName = await selectBestModelUsingPersona(agentSource, llmTools);
        return createAgentModelRequirementsWithCommitments(agentSource, autoSelectedModelName, options);
    }
    // Otherwise run the commitment-based system with the provided (or default) model
    return createAgentModelRequirementsWithCommitments(agentSource, modelName, options);
}
31347
32222
  /**
31348
32223
  * Selects the best model using the preparePersona function
@@ -31840,6 +32715,40 @@
31840
32715
  return toolCall.name === ASSISTANT_PREPARATION_TOOL_CALL_NAME;
31841
32716
  }
31842
32717
 
32718
/**
 * Builds a stable identity string for tool calls across partial updates.
 *
 * Preference order:
 * 1. provider-assigned id (`id` / `callId` / `call_id`) on the raw tool call,
 * 2. creation timestamp combined with the tool name,
 * 3. tool name combined with a serialized form of its arguments.
 *
 * @param toolCall - Tool call entry to identify.
 * @returns Stable identity string for deduplication.
 *
 * @private function of <Chat/>
 */
function getToolCallIdentity(toolCall) {
    const raw = toolCall.rawToolCall;
    const providerId = raw ? raw.id || raw.callId || raw.call_id : undefined;
    if (providerId) {
        return `id:${providerId}`;
    }
    if (toolCall.createdAt) {
        return `time:${toolCall.createdAt}:${toolCall.name}`;
    }
    // Last resort: serialize the arguments so repeated identical calls collapse together
    let argumentsKey = '';
    if (typeof toolCall.arguments === 'string') {
        argumentsKey = toolCall.arguments;
    } else if (toolCall.arguments) {
        try {
            argumentsKey = JSON.stringify(toolCall.arguments);
        } catch (error) {
            // Cyclic or otherwise unserializable arguments fall back to an empty key
            argumentsKey = '';
        }
    }
    return `fallback:${toolCall.name}:${argumentsKey}`;
}
32751
+
31843
32752
  /*! *****************************************************************************
31844
32753
  Copyright (c) Microsoft Corporation.
31845
32754
 
@@ -32061,206 +32970,490 @@
32061
32970
  * TODO: [🧠][✌️] Make some Promptbook-native token system
32062
32971
  */
32063
32972
 
32973
// Default model used for AgentKit agents when none is configured
const DEFAULT_AGENT_KIT_MODEL_NAME = 'gpt-5.2';
// Default schema name when the caller does not provide one
const DEFAULT_JSON_SCHEMA_NAME = 'StructuredOutput';
/*
TODO: Use or remove
const EMPTY_JSON_SCHEMA: JsonSchemaDefinition['schema'] = {
    type: 'object',
    properties: {},
    required: [],
    additionalProperties: true,
};
*/
/**
 * Normalizes an (optional) OpenAI JSON-schema payload into the AgentKit
 * `json_schema` output-type shape, supplying safe defaults for every field.
 *
 * @param jsonSchema - Optional schema payload carrying `name`, `strict` and `schema`.
 * @returns A complete `json_schema` output-type definition.
 */
function buildJsonSchemaDefinition(jsonSchema) {
    // `== null` intentionally matches both `null` and `undefined` (nullish semantics)
    const innerSchema = jsonSchema == null || jsonSchema.schema == null ? {} : jsonSchema.schema;
    const requestedName = jsonSchema == null ? undefined : jsonSchema.name;
    const schemaName = requestedName == null ? DEFAULT_JSON_SCHEMA_NAME : requestedName;
    return {
        type: 'json_schema',
        name: schemaName,
        strict: Boolean(jsonSchema == null ? undefined : jsonSchema.strict),
        schema: {
            type: 'object',
            properties: innerSchema.properties == null ? {} : innerSchema.properties,
            required: Array.isArray(innerSchema.required) ? innerSchema.required : [],
            // `additionalProperties` defaults to permissive unless explicitly set
            additionalProperties: innerSchema.additionalProperties === undefined ? true : Boolean(innerSchema.additionalProperties),
            description: innerSchema.description,
        },
    };
}
33000
/**
 * Maps OpenAI `response_format` payloads to AgentKit output types so the runner can forward
 * structured-output preferences to OpenAI while still reusing the same AgentKit agent instance.
 *
 * @param responseFormat - The OpenAI `response_format` payload from the user request.
 * @returns An Agent output type compatible with the requested schema or `undefined` when no impact is required.
 * @private utility of Open AI
 */
function mapResponseFormatToAgentOutputType(responseFormat) {
    // Nothing requested -> no output-type constraint
    if (!responseFormat) {
        return undefined;
    }
    // Shorthand string form: JSON-ish values get a schema, everything else is plain text
    if (typeof responseFormat === 'string') {
        const wantsJsonOutput = responseFormat === 'json_schema' || responseFormat === 'json_object';
        return wantsJsonOutput ? buildJsonSchemaDefinition() : 'text';
    }
    // Object form: dispatch on the declared type
    const formatType = responseFormat.type;
    if (formatType === 'text') {
        return 'text';
    }
    if (formatType === 'json_schema') {
        return buildJsonSchemaDefinition(responseFormat.json_schema);
    }
    if (formatType === 'json_object') {
        return buildJsonSchemaDefinition();
    }
    // Unknown object types impose no constraint
    return undefined;
}
32064
33032
  /**
32065
- * Execution Tools for calling OpenAI API using the Responses API (Agents)
33033
+ * Execution tools for OpenAI AgentKit (Agents SDK).
32066
33034
  *
32067
33035
  * @public exported from `@promptbook/openai`
32068
33036
  */
32069
- class OpenAiAgentExecutionTools extends OpenAiExecutionTools {
33037
+ class OpenAiAgentKitExecutionTools extends OpenAiVectorStoreHandler {
33038
+ /**
33039
+ * Creates OpenAI AgentKit execution tools.
33040
+ */
32070
33041
  constructor(options) {
33042
+ var _a;
33043
+ if (options.isProxied) {
33044
+ throw new NotYetImplementedError(`Proxy mode is not yet implemented for OpenAI AgentKit`);
33045
+ }
32071
33046
  super(options);
32072
- this.vectorStoreId = options.vectorStoreId;
33047
+ this.preparedAgentKitAgent = null;
33048
+ this.agentKitModelName = (_a = options.agentKitModelName) !== null && _a !== void 0 ? _a : DEFAULT_AGENT_KIT_MODEL_NAME;
32073
33049
  }
32074
33050
  get title() {
32075
- return 'OpenAI Agent';
33051
+ return 'OpenAI AgentKit';
32076
33052
  }
32077
33053
  get description() {
32078
- return 'Use OpenAI Responses API (Agentic)';
33054
+ return 'Use OpenAI AgentKit for agent-style chat with tools and knowledge';
32079
33055
  }
32080
33056
  /**
32081
- * Calls OpenAI API to use a chat model with streaming.
33057
+ * Calls OpenAI AgentKit with a chat prompt (non-streaming).
33058
+ */
33059
+ async callChatModel(prompt) {
33060
+ return this.callChatModelStream(prompt, () => { });
33061
+ }
33062
+ /**
33063
+ * Calls OpenAI AgentKit with a chat prompt (streaming).
32082
33064
  */
32083
33065
  async callChatModelStream(prompt, onProgress) {
32084
- if (this.options.isVerbose) {
32085
- console.info('💬 OpenAI Agent callChatModel call', { prompt });
32086
- }
32087
33066
  const { content, parameters, modelRequirements } = prompt;
32088
- const client = await this.getClient();
32089
33067
  if (modelRequirements.modelVariant !== 'CHAT') {
32090
33068
  throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
32091
33069
  }
33070
+ for (const key of ['maxTokens', 'modelName', 'seed', 'temperature']) {
33071
+ if (modelRequirements[key] !== undefined) {
33072
+ throw new NotYetImplementedError(`In \`OpenAiAgentKitExecutionTools\` you cannot specify \`${key}\``);
33073
+ }
33074
+ }
32092
33075
  const rawPromptContent = templateParameters(content, {
32093
33076
  ...parameters,
32094
- modelName: 'agent',
33077
+ modelName: this.agentKitModelName,
32095
33078
  });
32096
- // Build input items
32097
- const input = []; // TODO: Type properly when OpenAI types are updated
32098
- // Add previous messages from thread (if any)
32099
- if ('thread' in prompt && Array.isArray(prompt.thread)) {
32100
- const previousMessages = prompt.thread.map((msg) => ({
32101
- role: msg.sender === 'assistant' ? 'assistant' : 'user',
32102
- content: msg.content,
32103
- }));
32104
- input.push(...previousMessages);
32105
- }
32106
- // Add current user message
32107
- input.push({
32108
- role: 'user',
32109
- content: rawPromptContent,
33079
+ const responseFormatOutputType = mapResponseFormatToAgentOutputType(modelRequirements.responseFormat);
33080
+ const preparedAgentKitAgent = await this.prepareAgentKitAgent({
33081
+ name: (prompt.title || 'Agent'),
33082
+ instructions: modelRequirements.systemMessage || '',
33083
+ knowledgeSources: modelRequirements.knowledgeSources,
33084
+ tools: 'tools' in prompt && Array.isArray(prompt.tools) ? prompt.tools : modelRequirements.tools,
32110
33085
  });
32111
- // Prepare tools
32112
- const tools = modelRequirements.tools ? mapToolsToOpenAi(modelRequirements.tools) : undefined;
32113
- // Add file_search if vector store is present
32114
- const agentTools = tools ? [...tools] : [];
32115
- let toolResources = undefined;
32116
- if (this.vectorStoreId) {
32117
- agentTools.push({ type: 'file_search' });
32118
- toolResources = {
32119
- file_search: {
32120
- vector_store_ids: [this.vectorStoreId],
32121
- },
32122
- };
33086
+ return this.callChatModelStreamWithPreparedAgent({
33087
+ openAiAgentKitAgent: preparedAgentKitAgent.agent,
33088
+ prompt,
33089
+ rawPromptContent,
33090
+ onProgress,
33091
+ responseFormatOutputType,
33092
+ });
33093
+ }
33094
+ /**
33095
+ * Returns a prepared AgentKit agent when the server wants to manage caching externally.
33096
+ */
33097
+ getPreparedAgentKitAgent() {
33098
+ return this.preparedAgentKitAgent;
33099
+ }
33100
+ /**
33101
+ * Stores a prepared AgentKit agent for later reuse by external cache managers.
33102
+ */
33103
+ setPreparedAgentKitAgent(preparedAgent) {
33104
+ this.preparedAgentKitAgent = preparedAgent;
33105
+ }
33106
+ /**
33107
+ * Creates a new tools instance bound to a prepared AgentKit agent.
33108
+ */
33109
+ getPreparedAgentTools(preparedAgent) {
33110
+ const tools = new OpenAiAgentKitExecutionTools(this.agentKitOptions);
33111
+ tools.setPreparedAgentKitAgent(preparedAgent);
33112
+ return tools;
33113
+ }
33114
+ /**
33115
+ * Prepares an AgentKit agent with optional knowledge sources and tool definitions.
33116
+ */
33117
+ async prepareAgentKitAgent(options) {
33118
+ var _a, _b;
33119
+ const { name, instructions, knowledgeSources, tools, vectorStoreId: cachedVectorStoreId, storeAsPrepared, } = options;
33120
+ await this.ensureAgentKitDefaults();
33121
+ if (this.options.isVerbose) {
33122
+ console.info('[🤰]', 'Preparing OpenAI AgentKit agent', {
33123
+ name,
33124
+ instructionsLength: instructions.length,
33125
+ knowledgeSourcesCount: (_a = knowledgeSources === null || knowledgeSources === void 0 ? void 0 : knowledgeSources.length) !== null && _a !== void 0 ? _a : 0,
33126
+ toolsCount: (_b = tools === null || tools === void 0 ? void 0 : tools.length) !== null && _b !== void 0 ? _b : 0,
33127
+ });
32123
33128
  }
32124
- // Add file_search also if knowledgeSources are present in the prompt (passed via AgentLlmExecutionTools)
32125
- if (modelRequirements.knowledgeSources &&
32126
- modelRequirements.knowledgeSources.length > 0 &&
32127
- !this.vectorStoreId) {
32128
- // Note: Vector store should have been created by AgentLlmExecutionTools and passed via options.
32129
- // If we are here, it means we have knowledge sources but no vector store ID.
32130
- // We can't easily create one here without persisting it.
32131
- console.warn('Knowledge sources provided but no vector store ID. Creating temporary vector store is not implemented in callChatModelStream.');
33129
+ let vectorStoreId = cachedVectorStoreId;
33130
+ if (!vectorStoreId && knowledgeSources && knowledgeSources.length > 0) {
33131
+ const vectorStoreResult = await this.createVectorStoreWithKnowledgeSources({
33132
+ client: await this.getClient(),
33133
+ name,
33134
+ knowledgeSources,
33135
+ logLabel: 'agentkit preparation',
33136
+ });
33137
+ vectorStoreId = vectorStoreResult.vectorStoreId;
32132
33138
  }
32133
- const start = $getCurrentDate();
32134
- // Construct the request
32135
- const rawRequest = {
32136
- // TODO: Type properly as OpenAI.Responses.CreateResponseParams
32137
- model: modelRequirements.modelName || 'gpt-4o',
32138
- input,
32139
- instructions: modelRequirements.systemMessage,
32140
- tools: agentTools.length > 0 ? agentTools : undefined,
32141
- tool_resources: toolResources,
32142
- store: false, // Stateless by default as we pass full history
33139
+ else if (vectorStoreId && this.options.isVerbose) {
33140
+ console.info('[🤰]', 'Using cached vector store for AgentKit agent', {
33141
+ name,
33142
+ vectorStoreId,
33143
+ });
33144
+ }
33145
+ const agentKitTools = this.buildAgentKitTools({ tools, vectorStoreId });
33146
+ const openAiAgentKitAgent = new agents.Agent({
33147
+ name,
33148
+ model: this.agentKitModelName,
33149
+ instructions: instructions || 'You are a helpful assistant.',
33150
+ tools: agentKitTools,
33151
+ });
33152
+ const preparedAgent = {
33153
+ agent: openAiAgentKitAgent,
33154
+ vectorStoreId,
32143
33155
  };
32144
- if (this.options.isVerbose) {
32145
- console.info(colors__default["default"].bgWhite('rawRequest (Responses API)'), JSON.stringify(rawRequest, null, 4));
33156
+ if (storeAsPrepared) {
33157
+ this.setPreparedAgentKitAgent(preparedAgent);
32146
33158
  }
32147
- // Call Responses API
32148
- // Note: Using any cast because types might not be updated yet
32149
- const response = await client.responses.create(rawRequest);
32150
33159
  if (this.options.isVerbose) {
32151
- console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(response, null, 4));
33160
+ console.info('[🤰]', 'OpenAI AgentKit agent ready', {
33161
+ name,
33162
+ model: this.agentKitModelName,
33163
+ toolCount: agentKitTools.length,
33164
+ hasVectorStore: Boolean(vectorStoreId),
33165
+ });
32152
33166
  }
32153
- const complete = $getCurrentDate();
32154
- let resultContent = '';
32155
- const toolCalls = [];
32156
- // Parse output items
32157
- if (response.output) {
32158
- for (const item of response.output) {
32159
- if (item.type === 'message' && item.role === 'assistant') {
32160
- for (const contentPart of item.content) {
32161
- if (contentPart.type === 'output_text') {
32162
- // "output_text" based on migration guide, or "text"? Guide says "output_text" in example.
32163
- resultContent += contentPart.text;
33167
+ return preparedAgent;
33168
+ }
33169
+ /**
33170
+ * Ensures the AgentKit SDK is wired to the OpenAI client and API key.
33171
+ */
33172
+ async ensureAgentKitDefaults() {
33173
+ const client = await this.getClient();
33174
+ agents.setDefaultOpenAIClient(client);
33175
+ const apiKey = this.agentKitOptions.apiKey;
33176
+ if (apiKey && typeof apiKey === 'string') {
33177
+ agents.setDefaultOpenAIKey(apiKey);
33178
+ }
33179
+ }
33180
+ /**
33181
+ * Builds the tool list for AgentKit, including hosted file search when applicable.
33182
+ */
33183
+ buildAgentKitTools(options) {
33184
+ var _a;
33185
+ const { tools, vectorStoreId } = options;
33186
+ const agentKitTools = [];
33187
+ if (vectorStoreId) {
33188
+ agentKitTools.push(agents.fileSearchTool(vectorStoreId));
33189
+ }
33190
+ if (tools && tools.length > 0) {
33191
+ const scriptTools = this.resolveScriptTools();
33192
+ for (const toolDefinition of tools) {
33193
+ agentKitTools.push(agents.tool({
33194
+ name: toolDefinition.name,
33195
+ description: toolDefinition.description,
33196
+ parameters: toolDefinition.parameters
33197
+ ? {
33198
+ ...toolDefinition.parameters,
33199
+ additionalProperties: false,
33200
+ required: (_a = toolDefinition.parameters.required) !== null && _a !== void 0 ? _a : [],
33201
+ }
33202
+ : undefined,
33203
+ strict: false,
33204
+ execute: async (input, runContext, details) => {
33205
+ var _a, _b, _c;
33206
+ const scriptTool = scriptTools[0];
33207
+ const functionName = toolDefinition.name;
33208
+ const calledAt = $getCurrentDate();
33209
+ const callId = (_a = details === null || details === void 0 ? void 0 : details.toolCall) === null || _a === void 0 ? void 0 : _a.callId;
33210
+ const functionArgs = input !== null && input !== void 0 ? input : {};
33211
+ if (this.options.isVerbose) {
33212
+ console.info('[🤰]', 'Executing AgentKit tool', {
33213
+ functionName,
33214
+ callId,
33215
+ calledAt,
33216
+ });
33217
+ }
33218
+ try {
33219
+ return await scriptTool.execute({
33220
+ scriptLanguage: 'javascript',
33221
+ script: `
33222
+ const args = ${JSON.stringify(functionArgs)};
33223
+ return await ${functionName}(args);
33224
+ `,
33225
+ parameters: (_c = (_b = runContext === null || runContext === void 0 ? void 0 : runContext.context) === null || _b === void 0 ? void 0 : _b.parameters) !== null && _c !== void 0 ? _c : {},
33226
+ });
32164
33227
  }
32165
- else if (contentPart.type === 'text') {
32166
- resultContent += contentPart.text.value || contentPart.text;
33228
+ catch (error) {
33229
+ assertsError(error);
33230
+ const serializedError = serializeError(error);
33231
+ const errorMessage = spaceTrim__default["default"]((block) => `
33232
+
33233
+ The invoked tool \`${functionName}\` failed with error:
33234
+
33235
+ \`\`\`json
33236
+ ${block(JSON.stringify(serializedError, null, 4))}
33237
+ \`\`\`
33238
+
33239
+ `);
33240
+ console.error('[🤰]', 'AgentKit tool execution failed', {
33241
+ functionName,
33242
+ callId,
33243
+ error: serializedError,
33244
+ });
33245
+ return errorMessage;
32167
33246
  }
33247
+ },
33248
+ }));
33249
+ }
33250
+ }
33251
+ return agentKitTools;
33252
+ }
33253
+ /**
33254
+ * Resolves the configured script tools for tool execution.
33255
+ */
33256
+ resolveScriptTools() {
33257
+ const executionTools = this.options.executionTools;
33258
+ if (!executionTools || !executionTools.script) {
33259
+ throw new PipelineExecutionError(`Model requested tools but no executionTools.script were provided in OpenAiAgentKitExecutionTools options`);
33260
+ }
33261
+ return Array.isArray(executionTools.script) ? executionTools.script : [executionTools.script];
33262
+ }
33263
+ /**
33264
+ * Runs a prepared AgentKit agent and streams results back to the caller.
33265
+ */
33266
+ async callChatModelStreamWithPreparedAgent(options) {
33267
+ var _a, _b, _c, _d;
33268
+ const { openAiAgentKitAgent, prompt, onProgress } = options;
33269
+ const rawPromptContent = (_a = options.rawPromptContent) !== null && _a !== void 0 ? _a : templateParameters(prompt.content, {
33270
+ ...prompt.parameters,
33271
+ modelName: this.agentKitModelName,
33272
+ });
33273
+ const agentForRun = options.responseFormatOutputType !== undefined
33274
+ ? openAiAgentKitAgent.clone({
33275
+ outputType: options.responseFormatOutputType,
33276
+ })
33277
+ : openAiAgentKitAgent;
33278
+ const start = $getCurrentDate();
33279
+ let latestContent = '';
33280
+ const toolCalls = [];
33281
+ const toolCallIndexById = new Map();
33282
+ const inputItems = await this.buildAgentKitInputItems(prompt, rawPromptContent);
33283
+ const rawRequest = {
33284
+ agentName: agentForRun.name,
33285
+ input: inputItems,
33286
+ };
33287
+ const streamResult = await agents.run(agentForRun, inputItems, {
33288
+ stream: true,
33289
+ context: { parameters: prompt.parameters },
33290
+ });
33291
+ for await (const event of streamResult) {
33292
+ if (event.type === 'raw_model_stream_event' && ((_b = event.data) === null || _b === void 0 ? void 0 : _b.type) === 'output_text_delta') {
33293
+ latestContent += event.data.delta;
33294
+ onProgress({
33295
+ content: latestContent,
33296
+ modelName: this.agentKitModelName,
33297
+ timing: { start, complete: $getCurrentDate() },
33298
+ usage: UNCERTAIN_USAGE,
33299
+ rawPromptContent: rawPromptContent,
33300
+ rawRequest: null,
33301
+ rawResponse: {},
33302
+ });
33303
+ continue;
33304
+ }
33305
+ if (event.type === 'run_item_stream_event') {
33306
+ const rawItem = (_c = event.item) === null || _c === void 0 ? void 0 : _c.rawItem;
33307
+ if (event.name === 'tool_called' && (rawItem === null || rawItem === void 0 ? void 0 : rawItem.type) === 'function_call') {
33308
+ const toolCall = {
33309
+ name: rawItem.name,
33310
+ arguments: rawItem.arguments,
33311
+ rawToolCall: rawItem,
33312
+ createdAt: $getCurrentDate(),
33313
+ };
33314
+ toolCallIndexById.set(rawItem.callId, toolCalls.length);
33315
+ toolCalls.push(toolCall);
33316
+ onProgress({
33317
+ content: latestContent,
33318
+ modelName: this.agentKitModelName,
33319
+ timing: { start, complete: $getCurrentDate() },
33320
+ usage: UNCERTAIN_USAGE,
33321
+ rawPromptContent: rawPromptContent,
33322
+ rawRequest: null,
33323
+ rawResponse: {},
33324
+ toolCalls: [toolCall],
33325
+ });
33326
+ }
33327
+ if (event.name === 'tool_output' && (rawItem === null || rawItem === void 0 ? void 0 : rawItem.type) === 'function_call_result') {
33328
+ const index = toolCallIndexById.get(rawItem.callId);
33329
+ const result = this.formatAgentKitToolOutput(rawItem.output);
33330
+ if (index !== undefined) {
33331
+ const existingToolCall = toolCalls[index];
33332
+ const completedToolCall = {
33333
+ ...existingToolCall,
33334
+ result,
33335
+ rawToolCall: rawItem,
33336
+ };
33337
+ toolCalls[index] = completedToolCall;
33338
+ onProgress({
33339
+ content: latestContent,
33340
+ modelName: this.agentKitModelName,
33341
+ timing: { start, complete: $getCurrentDate() },
33342
+ usage: UNCERTAIN_USAGE,
33343
+ rawPromptContent: rawPromptContent,
33344
+ rawRequest: null,
33345
+ rawResponse: {},
33346
+ toolCalls: [completedToolCall],
33347
+ });
32168
33348
  }
32169
33349
  }
32170
- else if (item.type === 'function_call') ;
32171
33350
  }
32172
33351
  }
32173
- // Use output_text helper if available (mentioned in guide)
32174
- if (response.output_text) {
32175
- resultContent = response.output_text;
32176
- }
32177
- // TODO: Handle tool calls properly (Requires clearer docs or experimentation)
32178
- onProgress({
32179
- content: resultContent,
32180
- modelName: response.model || 'agent',
33352
+ await streamResult.completed;
33353
+ const complete = $getCurrentDate();
33354
+ const finalContent = ((_d = streamResult.finalOutput) !== null && _d !== void 0 ? _d : latestContent);
33355
+ const finalResult = {
33356
+ content: finalContent,
33357
+ modelName: this.agentKitModelName,
32181
33358
  timing: { start, complete },
32182
33359
  usage: UNCERTAIN_USAGE,
32183
- rawPromptContent,
33360
+ rawPromptContent: rawPromptContent,
32184
33361
  rawRequest,
32185
- rawResponse: response,
32186
- });
32187
- return exportJson({
32188
- name: 'promptResult',
32189
- message: `Result of \`OpenAiAgentExecutionTools.callChatModelStream\``,
32190
- order: [],
32191
- value: {
32192
- content: resultContent,
32193
- modelName: response.model || 'agent',
32194
- timing: { start, complete },
32195
- usage: UNCERTAIN_USAGE,
32196
- rawPromptContent,
32197
- rawRequest,
32198
- rawResponse: response,
32199
- toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
32200
- },
32201
- });
33362
+ rawResponse: { runResult: streamResult },
33363
+ toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
33364
+ };
33365
+ onProgress(finalResult);
33366
+ return finalResult;
32202
33367
  }
32203
33368
  /**
32204
- * Creates a vector store from knowledge sources
33369
+ * Builds AgentKit input items from the prompt and optional thread.
32205
33370
  */
32206
- static async createVectorStore(client, name, knowledgeSources) {
32207
- // Create a vector store
32208
- const vectorStore = await client.beta.vectorStores.create({
32209
- name: `${name} Knowledge Base`,
32210
- });
32211
- const vectorStoreId = vectorStore.id;
32212
- // Upload files from knowledge sources to the vector store
32213
- const fileStreams = [];
32214
- for (const source of knowledgeSources) {
32215
- try {
32216
- // Check if it's a URL
32217
- if (source.startsWith('http://') || source.startsWith('https://')) {
32218
- // Download the file
32219
- const response = await fetch(source);
32220
- if (!response.ok) {
32221
- console.error(`Failed to download ${source}: ${response.statusText}`);
32222
- continue;
32223
- }
32224
- const buffer = await response.arrayBuffer();
32225
- const filename = source.split('/').pop() || 'downloaded-file';
32226
- const blob = new Blob([buffer]);
32227
- const file = new File([blob], filename);
32228
- fileStreams.push(file);
33371
+ async buildAgentKitInputItems(prompt, rawPromptContent) {
33372
+ var _a;
33373
+ const inputItems = [];
33374
+ if ('thread' in prompt && Array.isArray(prompt.thread)) {
33375
+ for (const message of prompt.thread) {
33376
+ const sender = message.sender;
33377
+ const content = (_a = message.content) !== null && _a !== void 0 ? _a : '';
33378
+ if (sender === 'assistant' || sender === 'agent') {
33379
+ inputItems.push({
33380
+ role: 'assistant',
33381
+ status: 'completed',
33382
+ content: [{ type: 'output_text', text: content }],
33383
+ });
32229
33384
  }
32230
33385
  else {
32231
- // Local files not supported in browser env easily, same as before
33386
+ inputItems.push({
33387
+ role: 'user',
33388
+ content,
33389
+ });
32232
33390
  }
32233
33391
  }
32234
- catch (error) {
32235
- console.error(`Error processing knowledge source ${source}:`, error);
32236
- }
32237
33392
  }
32238
- // Batch upload files to the vector store
32239
- if (fileStreams.length > 0) {
32240
- try {
32241
- await client.beta.vectorStores.fileBatches.uploadAndPoll(vectorStoreId, {
32242
- files: fileStreams,
32243
- });
32244
- }
32245
- catch (error) {
32246
- console.error('Error uploading files to vector store:', error);
33393
+ const userContent = await this.buildAgentKitUserContent(prompt, rawPromptContent);
33394
+ inputItems.push({
33395
+ role: 'user',
33396
+ content: userContent,
33397
+ });
33398
+ return inputItems;
33399
+ }
33400
+ /**
33401
+ * Builds the user message content for AgentKit runs, including file inputs when provided.
33402
+ */
33403
+ async buildAgentKitUserContent(prompt, rawPromptContent) {
33404
+ if ('files' in prompt && Array.isArray(prompt.files) && prompt.files.length > 0) {
33405
+ const fileItems = await Promise.all(prompt.files.map(async (file) => {
33406
+ const arrayBuffer = await file.arrayBuffer();
33407
+ const base64 = Buffer.from(arrayBuffer).toString('base64');
33408
+ return {
33409
+ type: 'input_image',
33410
+ image: `data:${file.type};base64,${base64}`,
33411
+ };
33412
+ }));
33413
+ return [{ type: 'input_text', text: rawPromptContent }, ...fileItems];
33414
+ }
33415
+ return rawPromptContent;
33416
+ }
33417
+ /**
33418
+ * Normalizes AgentKit tool outputs into a string for Promptbook tool call results.
33419
+ */
33420
+ formatAgentKitToolOutput(output) {
33421
+ if (typeof output === 'string') {
33422
+ return output;
33423
+ }
33424
+ if (output && typeof output === 'object') {
33425
+ const textOutput = output;
33426
+ if (textOutput.type === 'text' && typeof textOutput.text === 'string') {
33427
+ return textOutput.text;
32247
33428
  }
32248
33429
  }
32249
- return vectorStoreId;
33430
+ return JSON.stringify(output !== null && output !== void 0 ? output : null);
32250
33431
  }
32251
33432
  /**
32252
- * Discriminant for type guards
33433
+ * Returns AgentKit-specific options.
33434
+ */
33435
+ get agentKitOptions() {
33436
+ return this.options;
33437
+ }
33438
+ /**
33439
+ * Discriminant for type guards.
32253
33440
  */
32254
33441
  get discriminant() {
32255
- return 'OPEN_AI_AGENT';
33442
+ return DISCRIMINANT;
32256
33443
  }
32257
33444
  /**
32258
- * Type guard to check if given `LlmExecutionTools` are instanceof `OpenAiAgentExecutionTools`
33445
+ * Type guard to check if given `LlmExecutionTools` are instanceof `OpenAiAgentKitExecutionTools`.
32259
33446
  */
32260
- static isOpenAiAgentExecutionTools(llmExecutionTools) {
32261
- return llmExecutionTools.discriminant === 'OPEN_AI_AGENT';
33447
+ static isOpenAiAgentKitExecutionTools(llmExecutionTools) {
33448
+ return llmExecutionTools.discriminant === DISCRIMINANT;
32262
33449
  }
32263
33450
  }
33451
+ /**
33452
+ * Discriminant for type guards.
33453
+ *
33454
+ * @private const of `OpenAiAgentKitExecutionTools`
33455
+ */
33456
+ const DISCRIMINANT = 'OPEN_AI_AGENT_KIT_V1';
32264
33457
 
32265
33458
  /**
32266
33459
  * Emits a progress update to signal assistant preparation before long setup work.
@@ -32297,8 +33490,8 @@
32297
33490
  * - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
32298
33491
  * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
32299
33492
  * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
32300
- * - `OpenAiAgentExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with agent capabilities (using Responses API), recommended for usage in `Agent` or `AgentLlmExecutionTools`
32301
33493
  * - `OpenAiAssistantExecutionTools` - (Deprecated) which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities
33494
+ * - `OpenAiAgentKitExecutionTools` - which is a specific implementation of `LlmExecutionTools` backed by OpenAI AgentKit
32302
33495
  * - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
32303
33496
  *
32304
33497
  * @public exported from `@promptbook/core`
@@ -32433,97 +33626,129 @@
32433
33626
  * Calls the chat model with agent-specific system prompt and requirements with streaming
32434
33627
  */
32435
33628
  async callChatModelStream(prompt, onProgress) {
33629
+ var _a, _b;
32436
33630
  // Ensure we're working with a chat prompt
32437
33631
  if (prompt.modelRequirements.modelVariant !== 'CHAT') {
32438
33632
  throw new Error('AgentLlmExecutionTools only supports chat prompts');
32439
33633
  }
32440
33634
  const modelRequirements = await this.getModelRequirements();
33635
+ const { _metadata, promptSuffix, ...sanitizedRequirements } = modelRequirements;
32441
33636
  const chatPrompt = prompt;
32442
33637
  let underlyingLlmResult;
32443
- // Create modified chat prompt with agent system message
33638
+ const chatPromptContentWithSuffix = promptSuffix
33639
+ ? `${chatPrompt.content}\n\n${promptSuffix}`
33640
+ : chatPrompt.content;
32444
33641
  const promptWithAgentModelRequirements = {
32445
33642
  ...chatPrompt,
33643
+ content: chatPromptContentWithSuffix,
32446
33644
  modelRequirements: {
32447
33645
  ...chatPrompt.modelRequirements,
32448
- ...modelRequirements,
33646
+ ...sanitizedRequirements,
32449
33647
  // Spread tools to convert readonly array to mutable
32450
- tools: modelRequirements.tools ? [...modelRequirements.tools] : chatPrompt.modelRequirements.tools,
33648
+ tools: sanitizedRequirements.tools
33649
+ ? [...sanitizedRequirements.tools]
33650
+ : chatPrompt.modelRequirements.tools,
32451
33651
  // Spread knowledgeSources to convert readonly array to mutable
32452
- knowledgeSources: modelRequirements.knowledgeSources
32453
- ? [...modelRequirements.knowledgeSources]
33652
+ knowledgeSources: sanitizedRequirements.knowledgeSources
33653
+ ? [...sanitizedRequirements.knowledgeSources]
32454
33654
  : undefined,
32455
33655
  // Prepend agent system message to existing system message
32456
- systemMessage: modelRequirements.systemMessage +
33656
+ systemMessage: sanitizedRequirements.systemMessage +
32457
33657
  (chatPrompt.modelRequirements.systemMessage
32458
33658
  ? `\n\n${chatPrompt.modelRequirements.systemMessage}`
32459
33659
  : ''),
32460
33660
  }, // Cast to avoid readonly mismatch from spread
32461
33661
  };
32462
33662
  console.log('!!!! promptWithAgentModelRequirements:', promptWithAgentModelRequirements);
32463
- if (OpenAiAgentExecutionTools.isOpenAiAgentExecutionTools(this.options.llmTools)) {
32464
- const requirementsHash = cryptoJs.SHA256(JSON.stringify(modelRequirements)).toString();
32465
- const cached = AgentLlmExecutionTools.vectorStoreCache.get(this.title);
32466
- let agentTools;
32467
- if (cached && cached.requirementsHash === requirementsHash) {
33663
+ if (OpenAiAgentKitExecutionTools.isOpenAiAgentKitExecutionTools(this.options.llmTools)) {
33664
+ const requirementsHash = cryptoJs.SHA256(JSON.stringify(sanitizedRequirements)).toString();
33665
+ const vectorStoreHash = cryptoJs.SHA256(JSON.stringify((_a = sanitizedRequirements.knowledgeSources) !== null && _a !== void 0 ? _a : [])).toString();
33666
+ const cachedVectorStore = AgentLlmExecutionTools.vectorStoreCache.get(this.title);
33667
+ const cachedAgentKit = AgentLlmExecutionTools.agentKitAgentCache.get(this.title);
33668
+ let preparedAgentKit = this.options.assistantPreparationMode === 'external'
33669
+ ? this.options.llmTools.getPreparedAgentKitAgent()
33670
+ : null;
33671
+ const vectorStoreId = (preparedAgentKit === null || preparedAgentKit === void 0 ? void 0 : preparedAgentKit.vectorStoreId) ||
33672
+ (cachedVectorStore && cachedVectorStore.requirementsHash === vectorStoreHash
33673
+ ? cachedVectorStore.vectorStoreId
33674
+ : undefined);
33675
+ if (!preparedAgentKit && cachedAgentKit && cachedAgentKit.requirementsHash === requirementsHash) {
32468
33676
  if (this.options.isVerbose) {
32469
- console.log(`1️⃣ Using cached OpenAI Agent Vector Store for agent ${this.title}...`);
33677
+ console.info('[🤰]', 'Using cached OpenAI AgentKit agent', {
33678
+ agent: this.title,
33679
+ });
32470
33680
  }
32471
- // Create new instance with cached vectorStoreId
32472
- // We need to access options from the original tool.
32473
- // We assume isOpenAiAgentExecutionTools implies it has options we can clone.
32474
- // But protected options are not accessible.
32475
- // We can cast to access options if they were public, or use a method to clone.
32476
- // OpenAiAgentExecutionTools doesn't have a clone method.
32477
- // However, we can just assume the passed tool *might* not have the vector store yet, or we are replacing it.
32478
- // Actually, if the passed tool IS OpenAiAgentExecutionTools, we should use it as a base.
32479
- // TODO: [🧠] This is a bit hacky, accessing protected options or recreating tools.
32480
- // Ideally OpenAiAgentExecutionTools should have a method `withVectorStoreId`.
32481
- agentTools = new OpenAiAgentExecutionTools({
32482
- ...this.options.llmTools.options,
32483
- vectorStoreId: cached.vectorStoreId,
32484
- });
33681
+ preparedAgentKit = {
33682
+ agent: cachedAgentKit.agent,
33683
+ vectorStoreId: cachedAgentKit.vectorStoreId,
33684
+ };
32485
33685
  }
32486
- else {
33686
+ if (!preparedAgentKit) {
32487
33687
  if (this.options.isVerbose) {
32488
- console.log(`1️⃣ Creating/Updating OpenAI Agent Vector Store for agent ${this.title}...`);
32489
- }
32490
- let vectorStoreId;
32491
- if (modelRequirements.knowledgeSources && modelRequirements.knowledgeSources.length > 0) {
32492
- const client = await this.options.llmTools.getClient();
32493
- vectorStoreId = await OpenAiAgentExecutionTools.createVectorStore(client, this.title, modelRequirements.knowledgeSources);
33688
+ console.info('[🤰]', 'Preparing OpenAI AgentKit agent', {
33689
+ agent: this.title,
33690
+ });
32494
33691
  }
32495
- if (vectorStoreId) {
32496
- AgentLlmExecutionTools.vectorStoreCache.set(this.title, {
32497
- vectorStoreId,
32498
- requirementsHash,
33692
+ if (!vectorStoreId && ((_b = sanitizedRequirements.knowledgeSources) === null || _b === void 0 ? void 0 : _b.length)) {
33693
+ emitAssistantPreparationProgress({
33694
+ onProgress,
33695
+ prompt,
33696
+ modelName: this.modelName,
33697
+ phase: 'Creating knowledge base',
32499
33698
  });
32500
33699
  }
32501
- agentTools = new OpenAiAgentExecutionTools({
32502
- ...this.options.llmTools.options,
33700
+ emitAssistantPreparationProgress({
33701
+ onProgress,
33702
+ prompt,
33703
+ modelName: this.modelName,
33704
+ phase: 'Preparing AgentKit agent',
33705
+ });
33706
+ preparedAgentKit = await this.options.llmTools.prepareAgentKitAgent({
33707
+ name: this.title,
33708
+ instructions: sanitizedRequirements.systemMessage || '',
33709
+ knowledgeSources: sanitizedRequirements.knowledgeSources,
33710
+ tools: sanitizedRequirements.tools ? [...sanitizedRequirements.tools] : undefined,
32503
33711
  vectorStoreId,
32504
33712
  });
32505
33713
  }
32506
- // Create modified chat prompt with agent system message specific to OpenAI Agent
32507
- // Note: Unlike Assistants API, Responses API expects instructions (system message) to be passed in the call.
32508
- // So we use promptWithAgentModelRequirements which has the system message prepended.
32509
- // But we need to make sure we pass knowledgeSources in modelRequirements so OpenAiAgentExecutionTools can fallback to warning if vectorStoreId is missing (though we just handled it).
32510
- const promptForAgent = {
32511
- ...promptWithAgentModelRequirements,
32512
- modelRequirements: {
32513
- ...promptWithAgentModelRequirements.modelRequirements,
32514
- knowledgeSources: modelRequirements.knowledgeSources
32515
- ? [...modelRequirements.knowledgeSources]
32516
- : undefined, // Pass knowledge sources explicitly
32517
- },
32518
- };
32519
- underlyingLlmResult = await agentTools.callChatModelStream(promptForAgent, onProgress);
33714
+ if (preparedAgentKit.vectorStoreId) {
33715
+ AgentLlmExecutionTools.vectorStoreCache.set(this.title, {
33716
+ vectorStoreId: preparedAgentKit.vectorStoreId,
33717
+ requirementsHash: vectorStoreHash,
33718
+ });
33719
+ }
33720
+ AgentLlmExecutionTools.agentKitAgentCache.set(this.title, {
33721
+ agent: preparedAgentKit.agent,
33722
+ requirementsHash,
33723
+ vectorStoreId: preparedAgentKit.vectorStoreId,
33724
+ });
33725
+ const responseFormatOutputType = mapResponseFormatToAgentOutputType(promptWithAgentModelRequirements.modelRequirements.responseFormat);
33726
+ underlyingLlmResult = await this.options.llmTools.callChatModelStreamWithPreparedAgent({
33727
+ openAiAgentKitAgent: preparedAgentKit.agent,
33728
+ prompt: promptWithAgentModelRequirements,
33729
+ onProgress,
33730
+ responseFormatOutputType,
33731
+ });
32520
33732
  }
32521
33733
  else if (OpenAiAssistantExecutionTools.isOpenAiAssistantExecutionTools(this.options.llmTools)) {
32522
33734
  // ... deprecated path ...
32523
- const requirementsHash = cryptoJs.SHA256(JSON.stringify(modelRequirements)).toString();
33735
+ const requirementsHash = cryptoJs.SHA256(JSON.stringify(sanitizedRequirements)).toString();
32524
33736
  const cached = AgentLlmExecutionTools.assistantCache.get(this.title);
32525
33737
  let assistant;
32526
- if (cached) {
33738
+ if (this.options.assistantPreparationMode === 'external') {
33739
+ assistant = this.options.llmTools;
33740
+ if (this.options.isVerbose) {
33741
+ console.info('[🤰]', 'Using externally managed OpenAI Assistant', {
33742
+ agent: this.title,
33743
+ assistantId: assistant.assistantId,
33744
+ });
33745
+ }
33746
+ AgentLlmExecutionTools.assistantCache.set(this.title, {
33747
+ assistantId: assistant.assistantId,
33748
+ requirementsHash,
33749
+ });
33750
+ }
33751
+ else if (cached) {
32527
33752
  if (cached.requirementsHash === requirementsHash) {
32528
33753
  if (this.options.isVerbose) {
32529
33754
  console.info('[🤰]', 'Using cached OpenAI Assistant', {
@@ -32549,9 +33774,9 @@
32549
33774
  assistant = await this.options.llmTools.updateAssistant({
32550
33775
  assistantId: cached.assistantId,
32551
33776
  name: this.title,
32552
- instructions: modelRequirements.systemMessage,
32553
- knowledgeSources: modelRequirements.knowledgeSources,
32554
- tools: modelRequirements.tools ? [...modelRequirements.tools] : undefined,
33777
+ instructions: sanitizedRequirements.systemMessage,
33778
+ knowledgeSources: sanitizedRequirements.knowledgeSources,
33779
+ tools: sanitizedRequirements.tools ? [...sanitizedRequirements.tools] : undefined,
32555
33780
  });
32556
33781
  AgentLlmExecutionTools.assistantCache.set(this.title, {
32557
33782
  assistantId: assistant.assistantId,
@@ -32574,9 +33799,9 @@
32574
33799
  });
32575
33800
  assistant = await this.options.llmTools.createNewAssistant({
32576
33801
  name: this.title,
32577
- instructions: modelRequirements.systemMessage,
32578
- knowledgeSources: modelRequirements.knowledgeSources,
32579
- tools: modelRequirements.tools ? [...modelRequirements.tools] : undefined,
33802
+ instructions: sanitizedRequirements.systemMessage,
33803
+ knowledgeSources: sanitizedRequirements.knowledgeSources,
33804
+ tools: sanitizedRequirements.tools ? [...sanitizedRequirements.tools] : undefined,
32580
33805
  /*
32581
33806
  !!!
32582
33807
  metadata: {
@@ -32618,18 +33843,28 @@
32618
33843
  }
32619
33844
  }
32620
33845
  let content = underlyingLlmResult.content;
32621
- // Note: Cleanup the AI artifacts from the content
32622
- content = humanizeAiText(content);
32623
- // Note: Make sure the content is Promptbook-like
32624
- content = promptbookifyAiText(content);
33846
+ if (typeof content === 'string') {
33847
+ // Note: Cleanup the AI artifacts from the content
33848
+ content = humanizeAiText(content);
33849
+ // Note: Make sure the content is Promptbook-like
33850
+ content = promptbookifyAiText(content);
33851
+ }
33852
+ else {
33853
+ // TODO: Maybe deep `humanizeAiText` + `promptbookifyAiText` inside of the object
33854
+ content = JSON.stringify(content);
33855
+ }
32625
33856
  const agentResult = {
32626
33857
  ...underlyingLlmResult,
32627
- content,
33858
+ content: content,
32628
33859
  modelName: this.modelName,
32629
33860
  };
32630
33861
  return agentResult;
32631
33862
  }
32632
33863
  }
33864
+ /**
33865
+ * Cached AgentKit agents to avoid rebuilding identical instances.
33866
+ */
33867
+ AgentLlmExecutionTools.agentKitAgentCache = new Map();
32633
33868
  /**
32634
33869
  * Cache of OpenAI assistants to avoid creating duplicates
32635
33870
  */
@@ -32710,8 +33945,8 @@
32710
33945
  * - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
32711
33946
  * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
32712
33947
  * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
32713
- * - `OpenAiAgentExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with agent capabilities (using Responses API), recommended for usage in `Agent` or `AgentLlmExecutionTools`
32714
33948
  * - `OpenAiAssistantExecutionTools` - (Deprecated) which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities
33949
+ * - `OpenAiAgentKitExecutionTools` - which is a specific implementation of `LlmExecutionTools` backed by OpenAI AgentKit
32715
33950
  * - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
32716
33951
  *
32717
33952
  * @public exported from `@promptbook/core`
@@ -32742,6 +33977,7 @@
32742
33977
  super({
32743
33978
  isVerbose: options.isVerbose,
32744
33979
  llmTools: getSingleLlmExecutionTools(options.executionTools.llm),
33980
+ assistantPreparationMode: options.assistantPreparationMode,
32745
33981
  agentSource: agentSource.value, // <- TODO: [🐱‍🚀] Allow to pass BehaviorSubject<string_book> OR refresh llmExecutionTools.callChat on agentSource change
32746
33982
  });
32747
33983
  _Agent_instances.add(this);
@@ -32808,7 +34044,6 @@
32808
34044
  * Note: This method also implements the learning mechanism
32809
34045
  */
32810
34046
  async callChatModelStream(prompt, onProgress) {
32811
- var _a;
32812
34047
  // [1] Check if the user is asking the same thing as in the samples
32813
34048
  const modelRequirements = await this.getModelRequirements();
32814
34049
  if (modelRequirements.samples) {
@@ -32856,7 +34091,7 @@
32856
34091
  if (result.rawResponse && 'sample' in result.rawResponse) {
32857
34092
  return result;
32858
34093
  }
32859
- if ((_a = modelRequirements.metadata) === null || _a === void 0 ? void 0 : _a.isClosed) {
34094
+ if (modelRequirements.isClosed) {
32860
34095
  return result;
32861
34096
  }
32862
34097
  // Note: [0] Notify start of self-learning
@@ -33017,6 +34252,63 @@
33017
34252
  * TODO: [🧠][😰]Agent is not working with the parameters, should it be?
33018
34253
  */
33019
34254
 
34255
+ /**
34256
+ * Resolve a remote META IMAGE value into an absolute URL when possible.
34257
+ */
34258
+ function resolveRemoteImageUrl(imageUrl, agentUrl) {
34259
+ if (!imageUrl) {
34260
+ return undefined;
34261
+ }
34262
+ if (imageUrl.startsWith('http://') ||
34263
+ imageUrl.startsWith('https://') ||
34264
+ imageUrl.startsWith('data:') ||
34265
+ imageUrl.startsWith('blob:')) {
34266
+ return imageUrl;
34267
+ }
34268
+ try {
34269
+ return new URL(imageUrl, agentUrl).href;
34270
+ }
34271
+ catch (_a) {
34272
+ return imageUrl;
34273
+ }
34274
+ }
34275
+ /**
34276
+ * Format a META commitment line when the value is provided.
34277
+ */
34278
+ function formatMetaLine(label, value) {
34279
+ if (!value) {
34280
+ return null;
34281
+ }
34282
+ return `META ${label} ${value}`;
34283
+ }
34284
+ /**
34285
+ * Build a minimal agent source snapshot for remote agents.
34286
+ */
34287
+ function buildRemoteAgentSource(profile, meta) {
34288
+ const metaLines = [
34289
+ formatMetaLine('FULLNAME', meta === null || meta === void 0 ? void 0 : meta.fullname),
34290
+ formatMetaLine('IMAGE', meta === null || meta === void 0 ? void 0 : meta.image),
34291
+ formatMetaLine('DESCRIPTION', meta === null || meta === void 0 ? void 0 : meta.description),
34292
+ formatMetaLine('COLOR', meta === null || meta === void 0 ? void 0 : meta.color),
34293
+ formatMetaLine('FONT', meta === null || meta === void 0 ? void 0 : meta.font),
34294
+ formatMetaLine('LINK', meta === null || meta === void 0 ? void 0 : meta.link),
34295
+ ]
34296
+ .filter((line) => Boolean(line))
34297
+ .join('\n');
34298
+ const personaBlock = profile.personaDescription
34299
+ ? spaceTrim__default["default"]((block) => `
34300
+ PERSONA
34301
+ ${block(profile.personaDescription || '')}
34302
+ `)
34303
+ : '';
34304
+ return book `
34305
+ ${profile.agentName}
34306
+
34307
+ ${metaLines}
34308
+
34309
+ ${personaBlock}
34310
+ `;
34311
+ }
33020
34312
  /**
33021
34313
  * Represents one AI Agent
33022
34314
  *
@@ -33024,13 +34316,15 @@
33024
34316
  * - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
33025
34317
  * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
33026
34318
  * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
33027
- * - `OpenAiAssistantExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities, recommended for usage in `Agent` or `AgentLlmExecutionTools`
34319
+ * - `OpenAiAssistantExecutionTools` - (Deprecated) which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities
34320
+ * - `OpenAiAgentKitExecutionTools` - which is a specific implementation of `LlmExecutionTools` backed by OpenAI AgentKit
33028
34321
  * - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
33029
34322
  *
33030
34323
  * @public exported from `@promptbook/core`
33031
34324
  */
33032
34325
  class RemoteAgent extends Agent {
33033
34326
  static async connect(options) {
34327
+ var _a, _b, _c;
33034
34328
  const agentProfileUrl = `${options.agentUrl}/api/profile`;
33035
34329
  const profileResponse = await fetch(agentProfileUrl);
33036
34330
  // <- TODO: [🐱‍🚀] What about closed-source agents?
@@ -33050,14 +34344,14 @@
33050
34344
 
33051
34345
  `));
33052
34346
  }
33053
- const profile = await profileResponse.json();
34347
+ const profile = (await profileResponse.json());
34348
+ const resolvedMeta = {
34349
+ ...(profile.meta || {}),
34350
+ image: resolveRemoteImageUrl((_a = profile.meta) === null || _a === void 0 ? void 0 : _a.image, options.agentUrl),
34351
+ };
33054
34352
  // Note: We are creating dummy agent source because we don't have the source from the remote agent
33055
34353
  // But we populate the metadata from the profile
33056
- const agentSource = new rxjs.BehaviorSubject(book `
33057
- ${profile.agentName}
33058
-
33059
- ${profile.personaDescription}
33060
- `);
34354
+ const agentSource = new rxjs.BehaviorSubject(buildRemoteAgentSource(profile, resolvedMeta));
33061
34355
  // <- TODO: [🐱‍🚀] createBookFromProfile
33062
34356
  // <- TODO: [🐱‍🚀] Support updating and self-updating
33063
34357
  const remoteAgent = new RemoteAgent({
@@ -33080,10 +34374,10 @@
33080
34374
  });
33081
34375
  remoteAgent._remoteAgentName = profile.agentName;
33082
34376
  remoteAgent._remoteAgentHash = profile.agentHash;
33083
- remoteAgent.personaDescription = profile.personaDescription;
33084
- remoteAgent.initialMessage = profile.initialMessage;
33085
- remoteAgent.links = profile.links;
33086
- remoteAgent.meta = profile.meta;
34377
+ remoteAgent.personaDescription = (_b = profile.personaDescription) !== null && _b !== void 0 ? _b : null;
34378
+ remoteAgent.initialMessage = (_c = profile.initialMessage) !== null && _c !== void 0 ? _c : null;
34379
+ remoteAgent.links = profile.links || [];
34380
+ remoteAgent.meta = resolvedMeta;
33087
34381
  remoteAgent.capabilities = profile.capabilities || [];
33088
34382
  remoteAgent.samples = profile.samples || [];
33089
34383
  remoteAgent.toolTitles = profile.toolTitles || {};
@@ -33187,26 +34481,7 @@
33187
34481
  };
33188
34482
  };
33189
34483
  const getToolCallKey = (toolCall) => {
33190
- var _a;
33191
- const rawId = (_a = toolCall.rawToolCall) === null || _a === void 0 ? void 0 : _a.id;
33192
- if (rawId) {
33193
- return `id:${rawId}`;
33194
- }
33195
- const argsKey = (() => {
33196
- if (typeof toolCall.arguments === 'string') {
33197
- return toolCall.arguments;
33198
- }
33199
- if (!toolCall.arguments) {
33200
- return '';
33201
- }
33202
- try {
33203
- return JSON.stringify(toolCall.arguments);
33204
- }
33205
- catch (_a) {
33206
- return '';
33207
- }
33208
- })();
33209
- return `${toolCall.name}:${toolCall.createdAt || ''}:${argsKey}`;
34484
+ return getToolCallIdentity(toolCall);
33210
34485
  };
33211
34486
  const mergeToolCall = (existing, incoming) => {
33212
34487
  const incomingResult = incoming.result;