@promptbook/remote-server 0.110.0-0 → 0.110.0-10
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/esm/index.es.js +1785 -510
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/components.index.d.ts +4 -0
- package/esm/typings/src/_packages/core.index.d.ts +2 -2
- package/esm/typings/src/_packages/openai.index.d.ts +8 -4
- package/esm/typings/src/_packages/types.index.d.ts +12 -4
- package/esm/typings/src/book-2.0/agent-source/AgentModelRequirements.d.ts +22 -21
- package/esm/typings/src/book-2.0/agent-source/AgentReferenceResolver.d.ts +18 -0
- package/esm/typings/src/book-2.0/agent-source/CreateAgentModelRequirementsOptions.d.ts +12 -0
- package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirements.d.ts +8 -2
- package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.agentReferenceResolver.test.d.ts +1 -0
- package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.d.ts +4 -5
- package/esm/typings/src/book-components/Chat/AgentChip/AgentChip.d.ts +5 -1
- package/esm/typings/src/book-components/Chat/Chat/ChatActionsBar.d.ts +4 -2
- package/esm/typings/src/book-components/Chat/Chat/ChatInputArea.d.ts +1 -0
- package/esm/typings/src/book-components/Chat/Chat/ChatMessageItem.d.ts +4 -0
- package/esm/typings/src/book-components/Chat/Chat/ChatMessageList.d.ts +1 -0
- package/esm/typings/src/book-components/Chat/Chat/ChatProps.d.ts +15 -0
- package/esm/typings/src/book-components/Chat/Chat/ChatSoundToggle.d.ts +31 -0
- package/esm/typings/src/book-components/Chat/LlmChat/LlmChatProps.d.ts +10 -1
- package/esm/typings/src/book-components/Chat/SourceChip/SourceChip.d.ts +5 -1
- package/esm/typings/src/book-components/Chat/utils/collectTeamToolCallSummary.d.ts +69 -0
- package/esm/typings/src/book-components/Chat/utils/getToolCallChipletInfo.d.ts +13 -13
- package/esm/typings/src/book-components/Chat/utils/parseCitationsFromContent.d.ts +9 -0
- package/esm/typings/src/book-components/Chat/utils/toolCallParsing.d.ts +4 -0
- package/esm/typings/src/collection/agent-collection/constructors/agent-collection-in-supabase/AgentsDatabaseSchema.d.ts +0 -3
- package/esm/typings/src/commitments/_base/BaseCommitmentDefinition.d.ts +9 -0
- package/esm/typings/src/execution/LlmExecutionTools.d.ts +2 -1
- package/esm/typings/src/llm-providers/agent/Agent.d.ts +1 -1
- package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.d.ts +5 -1
- package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.test.d.ts +1 -0
- package/esm/typings/src/llm-providers/agent/AgentOptions.d.ts +10 -0
- package/esm/typings/src/llm-providers/agent/CreateAgentLlmExecutionToolsOptions.d.ts +13 -2
- package/esm/typings/src/llm-providers/agent/RemoteAgent.d.ts +2 -1
- package/esm/typings/src/llm-providers/openai/OpenAiAgentKitExecutionTools.d.ts +150 -0
- package/esm/typings/src/llm-providers/openai/OpenAiAgentKitExecutionToolsOptions.d.ts +15 -0
- package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionTools.d.ts +3 -3
- package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionToolsOptions.d.ts +3 -4
- package/esm/typings/src/llm-providers/openai/OpenAiVectorStoreHandler.d.ts +135 -0
- package/esm/typings/src/llm-providers/openai/utils/mapToolsToOpenAi.d.ts +1 -1
- package/esm/typings/src/types/LlmToolDefinition.d.ts +1 -0
- package/esm/typings/src/types/ModelRequirements.d.ts +9 -0
- package/esm/typings/src/utils/DEFAULT_THINKING_MESSAGES.d.ts +8 -0
- package/esm/typings/src/utils/agents/resolveAgentAvatarImageUrl.d.ts +29 -0
- package/esm/typings/src/utils/knowledge/inlineKnowledgeSource.d.ts +38 -0
- package/esm/typings/src/utils/knowledge/inlineKnowledgeSource.test.d.ts +1 -0
- package/esm/typings/src/utils/language/getBrowserPreferredSpeechRecognitionLanguage.d.ts +35 -0
- package/esm/typings/src/utils/toolCalls/getToolCallIdentity.d.ts +10 -0
- package/esm/typings/src/version.d.ts +1 -1
- package/package.json +7 -3
- package/umd/index.umd.js +1788 -514
- package/umd/index.umd.js.map +1 -1
- package/esm/typings/src/llm-providers/openai/OpenAiAgentExecutionTools.d.ts +0 -43
- package/esm/typings/src/llm-providers/openai/createOpenAiAgentExecutionTools.d.ts +0 -11
package/esm/index.es.js
CHANGED
|
@@ -22,6 +22,7 @@ import moment from 'moment';
|
|
|
22
22
|
import sha256 from 'crypto-js/sha256';
|
|
23
23
|
import { lookup, extension } from 'mime-types';
|
|
24
24
|
import { parse, unparse } from 'papaparse';
|
|
25
|
+
import { Agent as Agent$1, setDefaultOpenAIClient, setDefaultOpenAIKey, fileSearchTool, tool, run } from '@openai/agents';
|
|
25
26
|
import Bottleneck from 'bottleneck';
|
|
26
27
|
import OpenAI from 'openai';
|
|
27
28
|
|
|
@@ -39,7 +40,7 @@ const BOOK_LANGUAGE_VERSION = '2.0.0';
|
|
|
39
40
|
* @generated
|
|
40
41
|
* @see https://github.com/webgptorg/promptbook
|
|
41
42
|
*/
|
|
42
|
-
const PROMPTBOOK_ENGINE_VERSION = '0.110.0-
|
|
43
|
+
const PROMPTBOOK_ENGINE_VERSION = '0.110.0-10';
|
|
43
44
|
/**
|
|
44
45
|
* TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
|
|
45
46
|
* Note: [💞] Ignore a discrepancy between file name and entity name
|
|
@@ -10800,6 +10801,28 @@ class BaseCommitmentDefinition {
|
|
|
10800
10801
|
return currentMessage + separator + content;
|
|
10801
10802
|
});
|
|
10802
10803
|
}
|
|
10804
|
+
/**
|
|
10805
|
+
* Helper method to create a new requirements object with updated prompt suffix
|
|
10806
|
+
*/
|
|
10807
|
+
updatePromptSuffix(requirements, contentUpdate) {
|
|
10808
|
+
const newSuffix = typeof contentUpdate === 'string' ? contentUpdate : contentUpdate(requirements.promptSuffix);
|
|
10809
|
+
return {
|
|
10810
|
+
...requirements,
|
|
10811
|
+
promptSuffix: newSuffix,
|
|
10812
|
+
};
|
|
10813
|
+
}
|
|
10814
|
+
/**
|
|
10815
|
+
* Helper method to append content to the prompt suffix
|
|
10816
|
+
* Default separator is a single newline for bullet lists.
|
|
10817
|
+
*/
|
|
10818
|
+
appendToPromptSuffix(requirements, content, separator = '\n') {
|
|
10819
|
+
return this.updatePromptSuffix(requirements, (currentSuffix) => {
|
|
10820
|
+
if (!currentSuffix.trim()) {
|
|
10821
|
+
return content;
|
|
10822
|
+
}
|
|
10823
|
+
return `${currentSuffix}${separator}${content}`;
|
|
10824
|
+
});
|
|
10825
|
+
}
|
|
10803
10826
|
/**
|
|
10804
10827
|
* Helper method to add a comment section to the system message
|
|
10805
10828
|
* Comments are lines starting with # that will be removed from the final system message
|
|
@@ -10977,13 +11000,9 @@ class ClosedCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
10977
11000
|
`);
|
|
10978
11001
|
}
|
|
10979
11002
|
applyToAgentModelRequirements(requirements, _content) {
|
|
10980
|
-
const updatedMetadata = {
|
|
10981
|
-
...requirements.metadata,
|
|
10982
|
-
isClosed: true,
|
|
10983
|
-
};
|
|
10984
11003
|
return {
|
|
10985
11004
|
...requirements,
|
|
10986
|
-
|
|
11005
|
+
isClosed: true,
|
|
10987
11006
|
};
|
|
10988
11007
|
}
|
|
10989
11008
|
}
|
|
@@ -11261,12 +11280,12 @@ class DictionaryCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
11261
11280
|
return requirements;
|
|
11262
11281
|
}
|
|
11263
11282
|
// Get existing dictionary entries from metadata
|
|
11264
|
-
const existingDictionary = ((_a = requirements.
|
|
11283
|
+
const existingDictionary = ((_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.DICTIONARY) || '';
|
|
11265
11284
|
// Merge the new dictionary entry with existing entries
|
|
11266
11285
|
const mergedDictionary = existingDictionary ? `${existingDictionary}\n${trimmedContent}` : trimmedContent;
|
|
11267
11286
|
// Store the merged dictionary in metadata for debugging and inspection
|
|
11268
11287
|
const updatedMetadata = {
|
|
11269
|
-
...requirements.
|
|
11288
|
+
...requirements._metadata,
|
|
11270
11289
|
DICTIONARY: mergedDictionary,
|
|
11271
11290
|
};
|
|
11272
11291
|
// Create the dictionary section for the system message
|
|
@@ -11274,7 +11293,7 @@ class DictionaryCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
11274
11293
|
const dictionarySection = `# DICTIONARY\n${mergedDictionary}`;
|
|
11275
11294
|
return {
|
|
11276
11295
|
...this.appendToSystemMessage(requirements, dictionarySection),
|
|
11277
|
-
|
|
11296
|
+
_metadata: updatedMetadata,
|
|
11278
11297
|
};
|
|
11279
11298
|
}
|
|
11280
11299
|
}
|
|
@@ -11414,10 +11433,7 @@ class FromCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
11414
11433
|
applyToAgentModelRequirements(requirements, content) {
|
|
11415
11434
|
const trimmedContent = content.trim();
|
|
11416
11435
|
if (!trimmedContent) {
|
|
11417
|
-
return
|
|
11418
|
-
...requirements,
|
|
11419
|
-
parentAgentUrl: undefined,
|
|
11420
|
-
};
|
|
11436
|
+
return requirements;
|
|
11421
11437
|
}
|
|
11422
11438
|
if (trimmedContent.toUpperCase() === 'VOID' ||
|
|
11423
11439
|
trimmedContent.toUpperCase() === 'NULL' ||
|
|
@@ -11631,6 +11647,136 @@ class ImportCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
11631
11647
|
* Note: [💞] Ignore a discrepancy between file name and entity name
|
|
11632
11648
|
*/
|
|
11633
11649
|
|
|
11650
|
+
/**
|
|
11651
|
+
* @@@
|
|
11652
|
+
*
|
|
11653
|
+
* @private thing of inline knowledge
|
|
11654
|
+
*/
|
|
11655
|
+
const INLINE_KNOWLEDGE_BASE_NAME = 'inline-knowledge';
|
|
11656
|
+
/**
|
|
11657
|
+
* @@@
|
|
11658
|
+
*
|
|
11659
|
+
* @private thing of inline knowledge
|
|
11660
|
+
*/
|
|
11661
|
+
const INLINE_KNOWLEDGE_EXTENSION = '.txt';
|
|
11662
|
+
/**
|
|
11663
|
+
* @@@
|
|
11664
|
+
*
|
|
11665
|
+
* @private thing of inline knowledge
|
|
11666
|
+
*/
|
|
11667
|
+
const DATA_URL_PREFIX = 'data:';
|
|
11668
|
+
/**
|
|
11669
|
+
* @@@
|
|
11670
|
+
*
|
|
11671
|
+
* @private thing of inline knowledge
|
|
11672
|
+
*/
|
|
11673
|
+
function getFirstNonEmptyLine(content) {
|
|
11674
|
+
const lines = content.split(/\r?\n/);
|
|
11675
|
+
for (const line of lines) {
|
|
11676
|
+
const trimmed = line.trim();
|
|
11677
|
+
if (trimmed) {
|
|
11678
|
+
return trimmed;
|
|
11679
|
+
}
|
|
11680
|
+
}
|
|
11681
|
+
return null;
|
|
11682
|
+
}
|
|
11683
|
+
/**
|
|
11684
|
+
* @@@
|
|
11685
|
+
*
|
|
11686
|
+
* @private thing of inline knowledge
|
|
11687
|
+
*/
|
|
11688
|
+
function deriveBaseFilename(content) {
|
|
11689
|
+
const firstLine = getFirstNonEmptyLine(content);
|
|
11690
|
+
if (!firstLine) {
|
|
11691
|
+
return INLINE_KNOWLEDGE_BASE_NAME;
|
|
11692
|
+
}
|
|
11693
|
+
const normalized = normalizeToKebabCase(firstLine);
|
|
11694
|
+
return normalized || INLINE_KNOWLEDGE_BASE_NAME;
|
|
11695
|
+
}
|
|
11696
|
+
/**
|
|
11697
|
+
* Creates a data URL that represents the inline knowledge content as a text file.
|
|
11698
|
+
*
|
|
11699
|
+
* @private thing of inline knowledge
|
|
11700
|
+
*/
|
|
11701
|
+
function createInlineKnowledgeSourceFile(content) {
|
|
11702
|
+
const trimmedContent = content.trim();
|
|
11703
|
+
const baseName = deriveBaseFilename(trimmedContent);
|
|
11704
|
+
const filename = `${baseName}${INLINE_KNOWLEDGE_EXTENSION}`;
|
|
11705
|
+
const mimeType = 'text/plain';
|
|
11706
|
+
const base64 = Buffer.from(trimmedContent, 'utf-8').toString('base64');
|
|
11707
|
+
const encodedFilename = encodeURIComponent(filename);
|
|
11708
|
+
const url = `${DATA_URL_PREFIX}${mimeType};name=${encodedFilename};charset=utf-8;base64,${base64}`;
|
|
11709
|
+
return {
|
|
11710
|
+
filename,
|
|
11711
|
+
mimeType,
|
|
11712
|
+
url,
|
|
11713
|
+
};
|
|
11714
|
+
}
|
|
11715
|
+
/**
|
|
11716
|
+
* Checks whether the provided source string is a data URL that can be decoded.
|
|
11717
|
+
*
|
|
11718
|
+
* @private thing of inline knowledge
|
|
11719
|
+
*/
|
|
11720
|
+
function isDataUrlKnowledgeSource(source) {
|
|
11721
|
+
return typeof source === 'string' && source.startsWith(DATA_URL_PREFIX);
|
|
11722
|
+
}
|
|
11723
|
+
/**
|
|
11724
|
+
* Parses a data URL-based knowledge source into its raw buffer, filename, and MIME type.
|
|
11725
|
+
*
|
|
11726
|
+
* @private thing of inline knowledge
|
|
11727
|
+
*/
|
|
11728
|
+
function parseDataUrlKnowledgeSource(source) {
|
|
11729
|
+
if (!isDataUrlKnowledgeSource(source)) {
|
|
11730
|
+
return null;
|
|
11731
|
+
}
|
|
11732
|
+
const commaIndex = source.indexOf(',');
|
|
11733
|
+
if (commaIndex === -1) {
|
|
11734
|
+
return null;
|
|
11735
|
+
}
|
|
11736
|
+
const header = source.slice(DATA_URL_PREFIX.length, commaIndex);
|
|
11737
|
+
const payload = source.slice(commaIndex + 1);
|
|
11738
|
+
const tokens = header.split(';');
|
|
11739
|
+
const mediaType = tokens[0] || 'text/plain';
|
|
11740
|
+
let filename = `${INLINE_KNOWLEDGE_BASE_NAME}${INLINE_KNOWLEDGE_EXTENSION}`;
|
|
11741
|
+
let isBase64 = false;
|
|
11742
|
+
for (let i = 1; i < tokens.length; i++) {
|
|
11743
|
+
const token = tokens[i];
|
|
11744
|
+
if (!token) {
|
|
11745
|
+
continue;
|
|
11746
|
+
}
|
|
11747
|
+
if (token.toLowerCase() === 'base64') {
|
|
11748
|
+
isBase64 = true;
|
|
11749
|
+
continue;
|
|
11750
|
+
}
|
|
11751
|
+
const [key, value] = token.split('=');
|
|
11752
|
+
if (key === 'name' && value !== undefined) {
|
|
11753
|
+
try {
|
|
11754
|
+
filename = decodeURIComponent(value);
|
|
11755
|
+
}
|
|
11756
|
+
catch (_a) {
|
|
11757
|
+
filename = value;
|
|
11758
|
+
}
|
|
11759
|
+
}
|
|
11760
|
+
}
|
|
11761
|
+
if (!isBase64) {
|
|
11762
|
+
return null;
|
|
11763
|
+
}
|
|
11764
|
+
try {
|
|
11765
|
+
const buffer = Buffer.from(payload, 'base64');
|
|
11766
|
+
return {
|
|
11767
|
+
buffer,
|
|
11768
|
+
filename,
|
|
11769
|
+
mimeType: mediaType,
|
|
11770
|
+
};
|
|
11771
|
+
}
|
|
11772
|
+
catch (_b) {
|
|
11773
|
+
return null;
|
|
11774
|
+
}
|
|
11775
|
+
}
|
|
11776
|
+
/**
|
|
11777
|
+
* Note: [💞] Ignore a discrepancy between file name and entity name
|
|
11778
|
+
*/
|
|
11779
|
+
|
|
11634
11780
|
/**
|
|
11635
11781
|
* KNOWLEDGE commitment definition
|
|
11636
11782
|
*
|
|
@@ -11729,9 +11875,13 @@ class KnowledgeCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
11729
11875
|
return this.appendToSystemMessage(updatedRequirements, knowledgeInfo, '\n\n');
|
|
11730
11876
|
}
|
|
11731
11877
|
else {
|
|
11732
|
-
|
|
11733
|
-
const
|
|
11734
|
-
|
|
11878
|
+
const inlineSource = createInlineKnowledgeSourceFile(trimmedContent);
|
|
11879
|
+
const updatedRequirements = {
|
|
11880
|
+
...requirements,
|
|
11881
|
+
knowledgeSources: [...(requirements.knowledgeSources || []), inlineSource.url],
|
|
11882
|
+
};
|
|
11883
|
+
const knowledgeInfo = `Knowledge Source Inline: ${inlineSource.filename} (derived from inline content and processed for retrieval during chat)`;
|
|
11884
|
+
return this.appendToSystemMessage(updatedRequirements, knowledgeInfo, '\n\n');
|
|
11735
11885
|
}
|
|
11736
11886
|
}
|
|
11737
11887
|
}
|
|
@@ -11978,16 +12128,16 @@ class AgentMessageCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
11978
12128
|
// and typically doesn't need to be added to the system prompt or model requirements directly.
|
|
11979
12129
|
// It is extracted separately for the chat interface.
|
|
11980
12130
|
var _a;
|
|
11981
|
-
const pendingUserMessage = (_a = requirements.
|
|
12131
|
+
const pendingUserMessage = (_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.pendingUserMessage;
|
|
11982
12132
|
if (pendingUserMessage) {
|
|
11983
12133
|
const newSample = { question: pendingUserMessage, answer: content };
|
|
11984
12134
|
const newSamples = [...(requirements.samples || []), newSample];
|
|
11985
|
-
const newMetadata = { ...requirements.
|
|
12135
|
+
const newMetadata = { ...requirements._metadata };
|
|
11986
12136
|
delete newMetadata.pendingUserMessage;
|
|
11987
12137
|
return {
|
|
11988
12138
|
...requirements,
|
|
11989
12139
|
samples: newSamples,
|
|
11990
|
-
|
|
12140
|
+
_metadata: newMetadata,
|
|
11991
12141
|
};
|
|
11992
12142
|
}
|
|
11993
12143
|
return requirements;
|
|
@@ -12235,8 +12385,8 @@ class UserMessageCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
12235
12385
|
applyToAgentModelRequirements(requirements, content) {
|
|
12236
12386
|
return {
|
|
12237
12387
|
...requirements,
|
|
12238
|
-
|
|
12239
|
-
...requirements.
|
|
12388
|
+
_metadata: {
|
|
12389
|
+
...requirements._metadata,
|
|
12240
12390
|
pendingUserMessage: content,
|
|
12241
12391
|
},
|
|
12242
12392
|
};
|
|
@@ -13094,11 +13244,7 @@ class NoteCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
13094
13244
|
if (trimmedContent === '') {
|
|
13095
13245
|
return requirements;
|
|
13096
13246
|
}
|
|
13097
|
-
|
|
13098
|
-
return {
|
|
13099
|
-
...requirements,
|
|
13100
|
-
notes: [...(requirements.notes || []), trimmedContent],
|
|
13101
|
-
};
|
|
13247
|
+
return requirements;
|
|
13102
13248
|
}
|
|
13103
13249
|
}
|
|
13104
13250
|
/**
|
|
@@ -13160,12 +13306,12 @@ class OpenCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
13160
13306
|
// Since OPEN is default, we can just ensure isClosed is false
|
|
13161
13307
|
// But to be explicit we can set it
|
|
13162
13308
|
const updatedMetadata = {
|
|
13163
|
-
...requirements.
|
|
13309
|
+
...requirements._metadata,
|
|
13164
13310
|
isClosed: false,
|
|
13165
13311
|
};
|
|
13166
13312
|
return {
|
|
13167
13313
|
...requirements,
|
|
13168
|
-
|
|
13314
|
+
_metadata: updatedMetadata,
|
|
13169
13315
|
};
|
|
13170
13316
|
}
|
|
13171
13317
|
}
|
|
@@ -13246,7 +13392,7 @@ class PersonaCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
13246
13392
|
return requirements;
|
|
13247
13393
|
}
|
|
13248
13394
|
// Get existing persona content from metadata
|
|
13249
|
-
const existingPersonaContent = ((_a = requirements.
|
|
13395
|
+
const existingPersonaContent = ((_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.PERSONA) || '';
|
|
13250
13396
|
// Merge the new content with existing persona content
|
|
13251
13397
|
// When multiple PERSONA commitments exist, they are merged into one
|
|
13252
13398
|
const mergedPersonaContent = existingPersonaContent
|
|
@@ -13254,12 +13400,12 @@ class PersonaCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
13254
13400
|
: trimmedContent;
|
|
13255
13401
|
// Store the merged persona content in metadata for debugging and inspection
|
|
13256
13402
|
const updatedMetadata = {
|
|
13257
|
-
...requirements.
|
|
13403
|
+
...requirements._metadata,
|
|
13258
13404
|
PERSONA: mergedPersonaContent,
|
|
13259
13405
|
};
|
|
13260
13406
|
// Get the agent name from metadata (which should contain the first line of agent source)
|
|
13261
13407
|
// If not available, extract from current system message as fallback
|
|
13262
|
-
let agentName = (_b = requirements.
|
|
13408
|
+
let agentName = (_b = requirements._metadata) === null || _b === void 0 ? void 0 : _b.agentName;
|
|
13263
13409
|
if (!agentName) {
|
|
13264
13410
|
// Fallback: extract from current system message
|
|
13265
13411
|
const currentMessage = requirements.systemMessage.trim();
|
|
@@ -13306,7 +13452,7 @@ class PersonaCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
13306
13452
|
return {
|
|
13307
13453
|
...requirements,
|
|
13308
13454
|
systemMessage: newSystemMessage,
|
|
13309
|
-
|
|
13455
|
+
_metadata: updatedMetadata,
|
|
13310
13456
|
};
|
|
13311
13457
|
}
|
|
13312
13458
|
}
|
|
@@ -13389,7 +13535,16 @@ class RuleCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
13389
13535
|
}
|
|
13390
13536
|
// Add rule to the system message
|
|
13391
13537
|
const ruleSection = `Rule: ${trimmedContent}`;
|
|
13392
|
-
|
|
13538
|
+
const requirementsWithRule = this.appendToSystemMessage(requirements, ruleSection, '\n\n');
|
|
13539
|
+
const ruleLines = trimmedContent
|
|
13540
|
+
.split(/\r?\n/)
|
|
13541
|
+
.map((line) => line.trim())
|
|
13542
|
+
.filter(Boolean)
|
|
13543
|
+
.map((line) => `- ${line}`);
|
|
13544
|
+
if (ruleLines.length === 0) {
|
|
13545
|
+
return requirementsWithRule;
|
|
13546
|
+
}
|
|
13547
|
+
return this.appendToPromptSuffix(requirementsWithRule, ruleLines.join('\n'));
|
|
13393
13548
|
}
|
|
13394
13549
|
}
|
|
13395
13550
|
/**
|
|
@@ -13895,7 +14050,7 @@ class TeamCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
13895
14050
|
if (teammates.length === 0) {
|
|
13896
14051
|
return requirements;
|
|
13897
14052
|
}
|
|
13898
|
-
const agentName = ((_a = requirements.
|
|
14053
|
+
const agentName = ((_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.agentName) || 'Agent';
|
|
13899
14054
|
const teamEntries = teammates.map((teammate) => ({
|
|
13900
14055
|
toolName: createTeamToolName(teammate.url),
|
|
13901
14056
|
teammate,
|
|
@@ -13935,7 +14090,7 @@ class TeamCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
13935
14090
|
},
|
|
13936
14091
|
});
|
|
13937
14092
|
}
|
|
13938
|
-
const existingTeammates = ((_b = requirements.
|
|
14093
|
+
const existingTeammates = ((_b = requirements._metadata) === null || _b === void 0 ? void 0 : _b.teammates) || [];
|
|
13939
14094
|
const updatedTeammates = [...existingTeammates];
|
|
13940
14095
|
for (const entry of teamEntries) {
|
|
13941
14096
|
if (updatedTeammates.some((existing) => existing.url === entry.teammate.url)) {
|
|
@@ -13964,8 +14119,8 @@ class TeamCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
13964
14119
|
return this.appendToSystemMessage({
|
|
13965
14120
|
...requirements,
|
|
13966
14121
|
tools: updatedTools,
|
|
13967
|
-
|
|
13968
|
-
...requirements.
|
|
14122
|
+
_metadata: {
|
|
14123
|
+
...requirements._metadata,
|
|
13969
14124
|
teammates: updatedTeammates,
|
|
13970
14125
|
},
|
|
13971
14126
|
}, teamSystemMessage);
|
|
@@ -14065,11 +14220,16 @@ function createTeamToolFunction(entry) {
|
|
|
14065
14220
|
const request = buildTeammateRequest(message, args.context);
|
|
14066
14221
|
let response = '';
|
|
14067
14222
|
let error = null;
|
|
14223
|
+
let toolCalls;
|
|
14068
14224
|
try {
|
|
14069
14225
|
const remoteAgent = await getRemoteTeammateAgent(entry.teammate.url);
|
|
14070
14226
|
const prompt = buildTeammatePrompt(request);
|
|
14071
14227
|
const teammateResult = await remoteAgent.callChatModel(prompt);
|
|
14072
14228
|
response = teammateResult.content || '';
|
|
14229
|
+
toolCalls =
|
|
14230
|
+
'toolCalls' in teammateResult && Array.isArray(teammateResult.toolCalls)
|
|
14231
|
+
? teammateResult.toolCalls
|
|
14232
|
+
: undefined;
|
|
14073
14233
|
}
|
|
14074
14234
|
catch (err) {
|
|
14075
14235
|
error = err instanceof Error ? err.message : String(err);
|
|
@@ -14079,6 +14239,7 @@ function createTeamToolFunction(entry) {
|
|
|
14079
14239
|
teammate: teammateMetadata,
|
|
14080
14240
|
request,
|
|
14081
14241
|
response: teammateReply,
|
|
14242
|
+
toolCalls: toolCalls && toolCalls.length > 0 ? toolCalls : undefined,
|
|
14082
14243
|
error,
|
|
14083
14244
|
conversation: [
|
|
14084
14245
|
{
|
|
@@ -14191,7 +14352,7 @@ class TemplateCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
14191
14352
|
if (!trimmedContent) {
|
|
14192
14353
|
// Store template mode flag in metadata
|
|
14193
14354
|
const updatedMetadata = {
|
|
14194
|
-
...requirements.
|
|
14355
|
+
...requirements._metadata,
|
|
14195
14356
|
templateMode: true,
|
|
14196
14357
|
};
|
|
14197
14358
|
// Add a general instruction about using structured templates
|
|
@@ -14201,21 +14362,21 @@ class TemplateCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
14201
14362
|
`);
|
|
14202
14363
|
return {
|
|
14203
14364
|
...this.appendToSystemMessage(requirements, templateModeInstruction, '\n\n'),
|
|
14204
|
-
|
|
14365
|
+
_metadata: updatedMetadata,
|
|
14205
14366
|
};
|
|
14206
14367
|
}
|
|
14207
14368
|
// If content is provided, add the specific template instructions
|
|
14208
14369
|
const templateSection = `Response Template: ${trimmedContent}`;
|
|
14209
14370
|
// Store the template in metadata for potential programmatic access
|
|
14210
|
-
const existingTemplates = ((_a = requirements.
|
|
14371
|
+
const existingTemplates = ((_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.templates) || [];
|
|
14211
14372
|
const updatedMetadata = {
|
|
14212
|
-
...requirements.
|
|
14373
|
+
...requirements._metadata,
|
|
14213
14374
|
templates: [...existingTemplates, trimmedContent],
|
|
14214
14375
|
templateMode: true,
|
|
14215
14376
|
};
|
|
14216
14377
|
return {
|
|
14217
14378
|
...this.appendToSystemMessage(requirements, templateSection, '\n\n'),
|
|
14218
|
-
|
|
14379
|
+
_metadata: updatedMetadata,
|
|
14219
14380
|
};
|
|
14220
14381
|
}
|
|
14221
14382
|
}
|
|
@@ -14552,8 +14713,8 @@ class UseBrowserCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
14552
14713
|
return this.appendToSystemMessage({
|
|
14553
14714
|
...requirements,
|
|
14554
14715
|
tools: updatedTools,
|
|
14555
|
-
|
|
14556
|
-
...requirements.
|
|
14716
|
+
_metadata: {
|
|
14717
|
+
...requirements._metadata,
|
|
14557
14718
|
useBrowser: true,
|
|
14558
14719
|
},
|
|
14559
14720
|
}, spaceTrim$1(`
|
|
@@ -14782,8 +14943,8 @@ class UseEmailCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
14782
14943
|
return this.appendToSystemMessage({
|
|
14783
14944
|
...requirements,
|
|
14784
14945
|
tools: updatedTools,
|
|
14785
|
-
|
|
14786
|
-
...requirements.
|
|
14946
|
+
_metadata: {
|
|
14947
|
+
...requirements._metadata,
|
|
14787
14948
|
useEmail: content || true,
|
|
14788
14949
|
},
|
|
14789
14950
|
}, spaceTrim$1((block) => `
|
|
@@ -14918,8 +15079,8 @@ class UseImageGeneratorCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
14918
15079
|
return this.appendToSystemMessage({
|
|
14919
15080
|
...requirements,
|
|
14920
15081
|
tools: updatedTools,
|
|
14921
|
-
|
|
14922
|
-
...requirements.
|
|
15082
|
+
_metadata: {
|
|
15083
|
+
...requirements._metadata,
|
|
14923
15084
|
useImageGenerator: content || true,
|
|
14924
15085
|
},
|
|
14925
15086
|
}, spaceTrim$1(`
|
|
@@ -15210,8 +15371,8 @@ class UseSearchEngineCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
15210
15371
|
return this.appendToSystemMessage({
|
|
15211
15372
|
...requirements,
|
|
15212
15373
|
tools: updatedTools,
|
|
15213
|
-
|
|
15214
|
-
...requirements.
|
|
15374
|
+
_metadata: {
|
|
15375
|
+
...requirements._metadata,
|
|
15215
15376
|
useSearchEngine: content || true,
|
|
15216
15377
|
},
|
|
15217
15378
|
}, spaceTrim$1((block) => `
|
|
@@ -15359,8 +15520,8 @@ class UseTimeCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
15359
15520
|
return this.appendToSystemMessage({
|
|
15360
15521
|
...requirements,
|
|
15361
15522
|
tools: updatedTools,
|
|
15362
|
-
|
|
15363
|
-
...requirements.
|
|
15523
|
+
_metadata: {
|
|
15524
|
+
...requirements._metadata,
|
|
15364
15525
|
},
|
|
15365
15526
|
}, spaceTrim$1((block) => `
|
|
15366
15527
|
Time and date context:
|
|
@@ -17407,6 +17568,40 @@ function isAssistantPreparationToolCall(toolCall) {
|
|
|
17407
17568
|
return toolCall.name === ASSISTANT_PREPARATION_TOOL_CALL_NAME;
|
|
17408
17569
|
}
|
|
17409
17570
|
|
|
17571
|
+
/**
|
|
17572
|
+
* Builds a stable identity string for tool calls across partial updates.
|
|
17573
|
+
*
|
|
17574
|
+
* @param toolCall - Tool call entry to identify.
|
|
17575
|
+
* @returns Stable identity string for deduplication.
|
|
17576
|
+
*
|
|
17577
|
+
* @private function of <Chat/>
|
|
17578
|
+
*/
|
|
17579
|
+
function getToolCallIdentity(toolCall) {
|
|
17580
|
+
const rawToolCall = toolCall.rawToolCall;
|
|
17581
|
+
const rawId = (rawToolCall === null || rawToolCall === void 0 ? void 0 : rawToolCall.id) || (rawToolCall === null || rawToolCall === void 0 ? void 0 : rawToolCall.callId) || (rawToolCall === null || rawToolCall === void 0 ? void 0 : rawToolCall.call_id);
|
|
17582
|
+
if (rawId) {
|
|
17583
|
+
return `id:${rawId}`;
|
|
17584
|
+
}
|
|
17585
|
+
if (toolCall.createdAt) {
|
|
17586
|
+
return `time:${toolCall.createdAt}:${toolCall.name}`;
|
|
17587
|
+
}
|
|
17588
|
+
const argsKey = (() => {
|
|
17589
|
+
if (typeof toolCall.arguments === 'string') {
|
|
17590
|
+
return toolCall.arguments;
|
|
17591
|
+
}
|
|
17592
|
+
if (!toolCall.arguments) {
|
|
17593
|
+
return '';
|
|
17594
|
+
}
|
|
17595
|
+
try {
|
|
17596
|
+
return JSON.stringify(toolCall.arguments);
|
|
17597
|
+
}
|
|
17598
|
+
catch (_a) {
|
|
17599
|
+
return '';
|
|
17600
|
+
}
|
|
17601
|
+
})();
|
|
17602
|
+
return `fallback:${toolCall.name}:${argsKey}`;
|
|
17603
|
+
}
|
|
17604
|
+
|
|
17410
17605
|
/*! *****************************************************************************
|
|
17411
17606
|
Copyright (c) Microsoft Corporation.
|
|
17412
17607
|
|
|
@@ -18045,11 +18240,14 @@ function asUpdatableSubject(value) {
|
|
|
18045
18240
|
function createEmptyAgentModelRequirements() {
|
|
18046
18241
|
return {
|
|
18047
18242
|
systemMessage: '',
|
|
18243
|
+
promptSuffix: '',
|
|
18048
18244
|
// modelName: 'gpt-5',
|
|
18049
18245
|
modelName: 'gemini-2.5-flash-lite',
|
|
18050
18246
|
temperature: 0.7,
|
|
18051
18247
|
topP: 0.9,
|
|
18052
18248
|
topK: 50,
|
|
18249
|
+
parentAgentUrl: null,
|
|
18250
|
+
isClosed: false,
|
|
18053
18251
|
};
|
|
18054
18252
|
}
|
|
18055
18253
|
/**
|
|
@@ -18195,14 +18393,26 @@ function removeCommentsFromSystemMessage(systemMessage) {
|
|
|
18195
18393
|
}
|
|
18196
18394
|
|
|
18197
18395
|
/**
|
|
18198
|
-
* Creates agent model requirements using the new commitment system
|
|
18396
|
+
* Creates agent model requirements using the new commitment system.
|
|
18397
|
+
*
|
|
18199
18398
|
* This function uses a reduce-like pattern where each commitment applies its changes
|
|
18200
|
-
* to build the final requirements starting from a basic empty model
|
|
18399
|
+
* to build the final requirements starting from a basic empty model.
|
|
18201
18400
|
*
|
|
18202
|
-
* @
|
|
18401
|
+
* @param agentSource - Agent source book to parse.
|
|
18402
|
+
* @param modelName - Optional override for the agent model name.
|
|
18403
|
+
* @param options - Additional options such as the agent reference resolver.
|
|
18404
|
+
*
|
|
18405
|
+
* @private @@@
|
|
18406
|
+
*/
|
|
18407
|
+
const COMMITMENTS_WITH_AGENT_REFERENCES = new Set(['FROM', 'IMPORT', 'IMPORTS', 'TEAM']);
|
|
18408
|
+
/**
|
|
18409
|
+
* @@@
|
|
18410
|
+
*
|
|
18411
|
+
* @private @@@
|
|
18203
18412
|
*/
|
|
18204
|
-
async function createAgentModelRequirementsWithCommitments(agentSource, modelName) {
|
|
18413
|
+
async function createAgentModelRequirementsWithCommitments(agentSource, modelName, options) {
|
|
18205
18414
|
var _a;
|
|
18415
|
+
const agentReferenceResolver = options === null || options === void 0 ? void 0 : options.agentReferenceResolver;
|
|
18206
18416
|
// Parse the agent source to extract commitments
|
|
18207
18417
|
const parseResult = parseAgentSourceWithCommitments(agentSource);
|
|
18208
18418
|
// Apply DELETE filtering: remove prior commitments tagged by parameters targeted by DELETE/CANCEL/DISCARD/REMOVE
|
|
@@ -18239,8 +18449,8 @@ async function createAgentModelRequirementsWithCommitments(agentSource, modelNam
|
|
|
18239
18449
|
// Store the agent name in metadata so commitments can access it
|
|
18240
18450
|
requirements = {
|
|
18241
18451
|
...requirements,
|
|
18242
|
-
|
|
18243
|
-
...requirements.
|
|
18452
|
+
_metadata: {
|
|
18453
|
+
...requirements._metadata,
|
|
18244
18454
|
agentName: parseResult.agentName,
|
|
18245
18455
|
},
|
|
18246
18456
|
};
|
|
@@ -18254,6 +18464,11 @@ async function createAgentModelRequirementsWithCommitments(agentSource, modelNam
|
|
|
18254
18464
|
// Apply each commitment in order using reduce-like pattern
|
|
18255
18465
|
for (let i = 0; i < filteredCommitments.length; i++) {
|
|
18256
18466
|
const commitment = filteredCommitments[i];
|
|
18467
|
+
const isReferenceCommitment = Boolean(agentReferenceResolver && COMMITMENTS_WITH_AGENT_REFERENCES.has(commitment.type));
|
|
18468
|
+
let commitmentContent = commitment.content;
|
|
18469
|
+
if (isReferenceCommitment && agentReferenceResolver) {
|
|
18470
|
+
commitmentContent = await agentReferenceResolver.resolveCommitmentContent(commitment.type, commitment.content);
|
|
18471
|
+
}
|
|
18257
18472
|
// CLOSED commitment should work only if its the last commitment in the book
|
|
18258
18473
|
if (commitment.type === 'CLOSED' && i !== filteredCommitments.length - 1) {
|
|
18259
18474
|
continue;
|
|
@@ -18261,7 +18476,7 @@ async function createAgentModelRequirementsWithCommitments(agentSource, modelNam
|
|
|
18261
18476
|
const definition = getCommitmentDefinition(commitment.type);
|
|
18262
18477
|
if (definition) {
|
|
18263
18478
|
try {
|
|
18264
|
-
requirements = definition.applyToAgentModelRequirements(requirements,
|
|
18479
|
+
requirements = definition.applyToAgentModelRequirements(requirements, commitmentContent);
|
|
18265
18480
|
}
|
|
18266
18481
|
catch (error) {
|
|
18267
18482
|
console.warn(`Failed to apply commitment ${commitment.type}:`, error);
|
|
@@ -18409,23 +18624,28 @@ function isBinaryMimeType(mimeType) {
|
|
|
18409
18624
|
}
|
|
18410
18625
|
|
|
18411
18626
|
/**
|
|
18412
|
-
* Creates model requirements for an agent based on its source
|
|
18627
|
+
* Creates model requirements for an agent based on its source.
|
|
18413
18628
|
*
|
|
18414
18629
|
* There are 2 similar functions:
|
|
18415
18630
|
* - `parseAgentSource` which is a lightweight parser for agent source, it parses basic information and its purpose is to be quick and synchronous. The commitments there are hardcoded.
|
|
18416
18631
|
* - `createAgentModelRequirements` which is an asynchronous function that creates model requirements it applies each commitment one by one and works asynchronous.
|
|
18417
18632
|
*
|
|
18633
|
+
* @param agentSource - Book describing the agent.
|
|
18634
|
+
* @param modelName - Optional override for the agent's model.
|
|
18635
|
+
* @param availableModels - Models that could fulfill the agent.
|
|
18636
|
+
* @param llmTools - Execution tools used when selecting a best model.
|
|
18637
|
+
* @param options - Optional hooks such as the agent reference resolver.
|
|
18418
18638
|
* @public exported from `@promptbook/core`
|
|
18419
18639
|
*/
|
|
18420
|
-
async function createAgentModelRequirements(agentSource, modelName, availableModels, llmTools) {
|
|
18640
|
+
async function createAgentModelRequirements(agentSource, modelName, availableModels, llmTools, options) {
|
|
18421
18641
|
// If availableModels are provided and no specific modelName is given,
|
|
18422
18642
|
// use preparePersona to select the best model
|
|
18423
18643
|
if (availableModels && !modelName && llmTools) {
|
|
18424
18644
|
const selectedModelName = await selectBestModelUsingPersona(agentSource, llmTools);
|
|
18425
|
-
return createAgentModelRequirementsWithCommitments(agentSource, selectedModelName);
|
|
18645
|
+
return createAgentModelRequirementsWithCommitments(agentSource, selectedModelName, options);
|
|
18426
18646
|
}
|
|
18427
18647
|
// Use the new commitment-based system with provided or default model
|
|
18428
|
-
return createAgentModelRequirementsWithCommitments(agentSource, modelName);
|
|
18648
|
+
return createAgentModelRequirementsWithCommitments(agentSource, modelName, options);
|
|
18429
18649
|
}
|
|
18430
18650
|
/**
|
|
18431
18651
|
* Selects the best model using the preparePersona function
|
|
@@ -18723,6 +18943,66 @@ const OPENAI_MODELS = exportJson({
|
|
|
18723
18943
|
},
|
|
18724
18944
|
/**/
|
|
18725
18945
|
/**/
|
|
18946
|
+
{
|
|
18947
|
+
modelVariant: 'CHAT',
|
|
18948
|
+
modelTitle: 'gpt-5.2-codex',
|
|
18949
|
+
modelName: 'gpt-5.2-codex',
|
|
18950
|
+
modelDescription: 'High-capability Codex variant tuned for agentic code generation with large contexts and reasoning effort controls. Ideal for long-horizon coding workflows and multi-step reasoning.',
|
|
18951
|
+
pricing: {
|
|
18952
|
+
prompt: pricing(`$1.75 / 1M tokens`),
|
|
18953
|
+
output: pricing(`$14.00 / 1M tokens`),
|
|
18954
|
+
},
|
|
18955
|
+
},
|
|
18956
|
+
/**/
|
|
18957
|
+
/**/
|
|
18958
|
+
{
|
|
18959
|
+
modelVariant: 'CHAT',
|
|
18960
|
+
modelTitle: 'gpt-5.1-codex-max',
|
|
18961
|
+
modelName: 'gpt-5.1-codex-max',
|
|
18962
|
+
modelDescription: 'Premium GPT-5.1 Codex flavor that mirrors gpt-5.1 in capability and pricing while adding Codex tooling optimizations.',
|
|
18963
|
+
pricing: {
|
|
18964
|
+
prompt: pricing(`$1.25 / 1M tokens`),
|
|
18965
|
+
output: pricing(`$10.00 / 1M tokens`),
|
|
18966
|
+
},
|
|
18967
|
+
},
|
|
18968
|
+
/**/
|
|
18969
|
+
/**/
|
|
18970
|
+
{
|
|
18971
|
+
modelVariant: 'CHAT',
|
|
18972
|
+
modelTitle: 'gpt-5.1-codex',
|
|
18973
|
+
modelName: 'gpt-5.1-codex',
|
|
18974
|
+
modelDescription: 'Core GPT-5.1 Codex model focused on agentic coding tasks with a balanced trade-off between reasoning and cost.',
|
|
18975
|
+
pricing: {
|
|
18976
|
+
prompt: pricing(`$1.25 / 1M tokens`),
|
|
18977
|
+
output: pricing(`$10.00 / 1M tokens`),
|
|
18978
|
+
},
|
|
18979
|
+
},
|
|
18980
|
+
/**/
|
|
18981
|
+
/**/
|
|
18982
|
+
{
|
|
18983
|
+
modelVariant: 'CHAT',
|
|
18984
|
+
modelTitle: 'gpt-5.1-codex-mini',
|
|
18985
|
+
modelName: 'gpt-5.1-codex-mini',
|
|
18986
|
+
modelDescription: 'Compact, cost-effective GPT-5.1 Codex variant with a smaller context window ideal for cheap assistant iterations that still require coding awareness.',
|
|
18987
|
+
pricing: {
|
|
18988
|
+
prompt: pricing(`$0.25 / 1M tokens`),
|
|
18989
|
+
output: pricing(`$2.00 / 1M tokens`),
|
|
18990
|
+
},
|
|
18991
|
+
},
|
|
18992
|
+
/**/
|
|
18993
|
+
/**/
|
|
18994
|
+
{
|
|
18995
|
+
modelVariant: 'CHAT',
|
|
18996
|
+
modelTitle: 'gpt-5-codex',
|
|
18997
|
+
modelName: 'gpt-5-codex',
|
|
18998
|
+
modelDescription: 'Legacy GPT-5 Codex model built for agentic coding workloads with the same pricing as GPT-5 and a focus on stability.',
|
|
18999
|
+
pricing: {
|
|
19000
|
+
prompt: pricing(`$1.25 / 1M tokens`),
|
|
19001
|
+
output: pricing(`$10.00 / 1M tokens`),
|
|
19002
|
+
},
|
|
19003
|
+
},
|
|
19004
|
+
/**/
|
|
19005
|
+
/**/
|
|
18726
19006
|
{
|
|
18727
19007
|
modelVariant: 'CHAT',
|
|
18728
19008
|
modelTitle: 'gpt-5-mini',
|
|
@@ -19427,6 +19707,32 @@ function isUnsupportedParameterError(error) {
|
|
|
19427
19707
|
errorMessage.includes('does not support'));
|
|
19428
19708
|
}
|
|
19429
19709
|
|
|
19710
|
+
/**
|
|
19711
|
+
* Provides access to the structured clone implementation when available.
|
|
19712
|
+
*/
|
|
19713
|
+
function getStructuredCloneFunction() {
|
|
19714
|
+
return globalThis.structuredClone;
|
|
19715
|
+
}
|
|
19716
|
+
/**
|
|
19717
|
+
* Checks whether the prompt is a chat prompt that carries file attachments.
|
|
19718
|
+
*/
|
|
19719
|
+
function hasChatPromptFiles(prompt) {
|
|
19720
|
+
return 'files' in prompt && Array.isArray(prompt.files);
|
|
19721
|
+
}
|
|
19722
|
+
/**
|
|
19723
|
+
* Creates a deep copy of the prompt while keeping attached files intact when structured clone is not available.
|
|
19724
|
+
*/
|
|
19725
|
+
function clonePromptPreservingFiles(prompt) {
|
|
19726
|
+
const structuredCloneFn = getStructuredCloneFunction();
|
|
19727
|
+
if (typeof structuredCloneFn === 'function') {
|
|
19728
|
+
return structuredCloneFn(prompt);
|
|
19729
|
+
}
|
|
19730
|
+
const clonedPrompt = JSON.parse(JSON.stringify(prompt));
|
|
19731
|
+
if (hasChatPromptFiles(prompt)) {
|
|
19732
|
+
clonedPrompt.files = prompt.files;
|
|
19733
|
+
}
|
|
19734
|
+
return clonedPrompt;
|
|
19735
|
+
}
|
|
19430
19736
|
/**
|
|
19431
19737
|
* Execution Tools for calling OpenAI API or other OpenAI compatible provider
|
|
19432
19738
|
*
|
|
@@ -19456,16 +19762,11 @@ class OpenAiCompatibleExecutionTools {
|
|
|
19456
19762
|
const openAiOptions = { ...this.options };
|
|
19457
19763
|
delete openAiOptions.isVerbose;
|
|
19458
19764
|
delete openAiOptions.userId;
|
|
19459
|
-
// Enhanced configuration
|
|
19765
|
+
// Enhanced configuration with retries and timeouts.
|
|
19460
19766
|
const enhancedOptions = {
|
|
19461
19767
|
...openAiOptions,
|
|
19462
19768
|
timeout: API_REQUEST_TIMEOUT,
|
|
19463
19769
|
maxRetries: CONNECTION_RETRIES_LIMIT,
|
|
19464
|
-
defaultHeaders: {
|
|
19465
|
-
Connection: 'keep-alive',
|
|
19466
|
-
'Keep-Alive': 'timeout=30, max=100',
|
|
19467
|
-
...openAiOptions.defaultHeaders,
|
|
19468
|
-
},
|
|
19469
19770
|
};
|
|
19470
19771
|
this.client = new OpenAI(enhancedOptions);
|
|
19471
19772
|
}
|
|
@@ -19516,7 +19817,7 @@ class OpenAiCompatibleExecutionTools {
|
|
|
19516
19817
|
*/
|
|
19517
19818
|
async callChatModelStream(prompt, onProgress) {
|
|
19518
19819
|
// Deep clone prompt and modelRequirements to avoid mutation across calls
|
|
19519
|
-
const clonedPrompt =
|
|
19820
|
+
const clonedPrompt = clonePromptPreservingFiles(prompt);
|
|
19520
19821
|
// Use local Set for retried parameters to ensure independence and thread safety
|
|
19521
19822
|
const retriedUnsupportedParameters = new Set();
|
|
19522
19823
|
return this.callChatModelWithRetry(clonedPrompt, clonedPrompt.modelRequirements, [], retriedUnsupportedParameters, onProgress);
|
|
@@ -19543,7 +19844,10 @@ class OpenAiCompatibleExecutionTools {
|
|
|
19543
19844
|
// <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
|
|
19544
19845
|
// <- Note: [🧆]
|
|
19545
19846
|
}; // <- TODO: [💩] Guard here types better
|
|
19546
|
-
if (
|
|
19847
|
+
if (currentModelRequirements.responseFormat !== undefined) {
|
|
19848
|
+
modelSettings.response_format = currentModelRequirements.responseFormat;
|
|
19849
|
+
}
|
|
19850
|
+
else if (format === 'JSON') {
|
|
19547
19851
|
modelSettings.response_format = {
|
|
19548
19852
|
type: 'json_object',
|
|
19549
19853
|
};
|
|
@@ -20354,18 +20658,6 @@ class OpenAiExecutionTools extends OpenAiCompatibleExecutionTools {
|
|
|
20354
20658
|
get profile() {
|
|
20355
20659
|
return OPENAI_PROVIDER_PROFILE;
|
|
20356
20660
|
}
|
|
20357
|
-
/*
|
|
20358
|
-
Note: Commenting this out to avoid circular dependency
|
|
20359
|
-
/**
|
|
20360
|
-
* Create (sub)tools for calling OpenAI API Assistants
|
|
20361
|
-
*
|
|
20362
|
-
* @param assistantId Which assistant to use
|
|
20363
|
-
* @returns Tools for calling OpenAI API Assistants with same token
|
|
20364
|
-
* /
|
|
20365
|
-
public createAssistantSubtools(assistantId: string_token): OpenAiAssistantExecutionTools {
|
|
20366
|
-
return new OpenAiAssistantExecutionTools({ ...this.options, assistantId });
|
|
20367
|
-
}
|
|
20368
|
-
*/
|
|
20369
20661
|
/**
|
|
20370
20662
|
* List all available models (non dynamically)
|
|
20371
20663
|
*
|
|
@@ -20400,206 +20692,1259 @@ class OpenAiExecutionTools extends OpenAiCompatibleExecutionTools {
|
|
|
20400
20692
|
}
|
|
20401
20693
|
}
|
|
20402
20694
|
|
|
20695
|
+
const DEFAULT_KNOWLEDGE_SOURCE_DOWNLOAD_TIMEOUT_MS = 30000;
|
|
20696
|
+
const DEFAULT_KNOWLEDGE_SOURCE_UPLOAD_TIMEOUT_MS = 900000;
|
|
20697
|
+
const VECTOR_STORE_PROGRESS_LOG_INTERVAL_MIN_MS = 15000;
|
|
20698
|
+
const VECTOR_STORE_STALL_LOG_THRESHOLD_MS = 30000;
|
|
20403
20699
|
/**
|
|
20404
|
-
*
|
|
20700
|
+
* Base class for OpenAI execution tools that need hosted vector stores.
|
|
20405
20701
|
*
|
|
20406
20702
|
* @public exported from `@promptbook/openai`
|
|
20407
20703
|
*/
|
|
20408
|
-
class
|
|
20409
|
-
|
|
20410
|
-
|
|
20411
|
-
|
|
20704
|
+
class OpenAiVectorStoreHandler extends OpenAiExecutionTools {
|
|
20705
|
+
/**
|
|
20706
|
+
* Returns the per-knowledge-source download timeout in milliseconds.
|
|
20707
|
+
*/
|
|
20708
|
+
getKnowledgeSourceDownloadTimeoutMs() {
|
|
20709
|
+
var _a;
|
|
20710
|
+
return (_a = this.vectorStoreOptions.knowledgeSourceDownloadTimeoutMs) !== null && _a !== void 0 ? _a : DEFAULT_KNOWLEDGE_SOURCE_DOWNLOAD_TIMEOUT_MS;
|
|
20412
20711
|
}
|
|
20413
|
-
|
|
20414
|
-
|
|
20712
|
+
/**
|
|
20713
|
+
* Returns the max concurrency for knowledge source uploads.
|
|
20714
|
+
*/
|
|
20715
|
+
getKnowledgeSourceUploadMaxConcurrency() {
|
|
20716
|
+
var _a;
|
|
20717
|
+
return (_a = this.vectorStoreOptions.knowledgeSourceUploadMaxConcurrency) !== null && _a !== void 0 ? _a : 5;
|
|
20415
20718
|
}
|
|
20416
|
-
|
|
20417
|
-
|
|
20719
|
+
/**
|
|
20720
|
+
* Returns the polling interval in milliseconds for vector store uploads.
|
|
20721
|
+
*/
|
|
20722
|
+
getKnowledgeSourceUploadPollIntervalMs() {
|
|
20723
|
+
var _a;
|
|
20724
|
+
return (_a = this.vectorStoreOptions.knowledgeSourceUploadPollIntervalMs) !== null && _a !== void 0 ? _a : 5000;
|
|
20418
20725
|
}
|
|
20419
20726
|
/**
|
|
20420
|
-
*
|
|
20727
|
+
* Returns the overall upload timeout in milliseconds for vector store uploads.
|
|
20421
20728
|
*/
|
|
20422
|
-
|
|
20729
|
+
getKnowledgeSourceUploadTimeoutMs() {
|
|
20730
|
+
var _a;
|
|
20731
|
+
return (_a = this.vectorStoreOptions.knowledgeSourceUploadTimeoutMs) !== null && _a !== void 0 ? _a : DEFAULT_KNOWLEDGE_SOURCE_UPLOAD_TIMEOUT_MS;
|
|
20732
|
+
}
|
|
20733
|
+
/**
|
|
20734
|
+
* Returns true if we should continue even if vector store ingestion stalls.
|
|
20735
|
+
*/
|
|
20736
|
+
shouldContinueOnVectorStoreStall() {
|
|
20737
|
+
var _a;
|
|
20738
|
+
return (_a = this.vectorStoreOptions.shouldContinueOnVectorStoreStall) !== null && _a !== void 0 ? _a : true;
|
|
20739
|
+
}
|
|
20740
|
+
/**
|
|
20741
|
+
* Returns vector-store-specific options with extended settings.
|
|
20742
|
+
*/
|
|
20743
|
+
get vectorStoreOptions() {
|
|
20744
|
+
return this.options;
|
|
20745
|
+
}
|
|
20746
|
+
/**
|
|
20747
|
+
* Returns the OpenAI vector stores API surface, supporting stable and beta SDKs.
|
|
20748
|
+
*/
|
|
20749
|
+
getVectorStoresApi(client) {
|
|
20750
|
+
var _a, _b;
|
|
20751
|
+
const vectorStores = (_a = client.vectorStores) !== null && _a !== void 0 ? _a : (_b = client.beta) === null || _b === void 0 ? void 0 : _b.vectorStores;
|
|
20752
|
+
if (!vectorStores) {
|
|
20753
|
+
throw new Error('OpenAI client does not support vector stores. Please ensure you are using a compatible version of the OpenAI SDK with vector store support.');
|
|
20754
|
+
}
|
|
20755
|
+
return vectorStores;
|
|
20756
|
+
}
|
|
20757
|
+
/**
|
|
20758
|
+
* Downloads a knowledge source URL into a File for vector store upload.
|
|
20759
|
+
*/
|
|
20760
|
+
async downloadKnowledgeSourceFile(options) {
|
|
20761
|
+
var _a;
|
|
20762
|
+
const { source, timeoutMs, logLabel } = options;
|
|
20763
|
+
const startedAtMs = Date.now();
|
|
20764
|
+
const controller = new AbortController();
|
|
20765
|
+
const timeoutId = setTimeout(() => controller.abort(), timeoutMs);
|
|
20423
20766
|
if (this.options.isVerbose) {
|
|
20424
|
-
console.info('
|
|
20767
|
+
console.info('[🤰]', 'Downloading knowledge source', {
|
|
20768
|
+
source,
|
|
20769
|
+
timeoutMs,
|
|
20770
|
+
logLabel,
|
|
20771
|
+
});
|
|
20425
20772
|
}
|
|
20426
|
-
|
|
20427
|
-
|
|
20428
|
-
|
|
20429
|
-
|
|
20773
|
+
try {
|
|
20774
|
+
const response = await fetch(source, { signal: controller.signal });
|
|
20775
|
+
const contentType = (_a = response.headers.get('content-type')) !== null && _a !== void 0 ? _a : undefined;
|
|
20776
|
+
if (!response.ok) {
|
|
20777
|
+
console.error('[🤰]', 'Failed to download knowledge source', {
|
|
20778
|
+
source,
|
|
20779
|
+
status: response.status,
|
|
20780
|
+
statusText: response.statusText,
|
|
20781
|
+
contentType,
|
|
20782
|
+
elapsedMs: Date.now() - startedAtMs,
|
|
20783
|
+
logLabel,
|
|
20784
|
+
});
|
|
20785
|
+
return null;
|
|
20786
|
+
}
|
|
20787
|
+
const buffer = await response.arrayBuffer();
|
|
20788
|
+
let filename = source.split('/').pop() || 'downloaded-file';
|
|
20789
|
+
try {
|
|
20790
|
+
const url = new URL(source);
|
|
20791
|
+
filename = url.pathname.split('/').pop() || filename;
|
|
20792
|
+
}
|
|
20793
|
+
catch (error) {
|
|
20794
|
+
// Keep default filename
|
|
20795
|
+
}
|
|
20796
|
+
const file = new File([buffer], filename, contentType ? { type: contentType } : undefined);
|
|
20797
|
+
const elapsedMs = Date.now() - startedAtMs;
|
|
20798
|
+
const sizeBytes = buffer.byteLength;
|
|
20799
|
+
if (this.options.isVerbose) {
|
|
20800
|
+
console.info('[🤰]', 'Downloaded knowledge source', {
|
|
20801
|
+
source,
|
|
20802
|
+
filename,
|
|
20803
|
+
sizeBytes,
|
|
20804
|
+
contentType,
|
|
20805
|
+
elapsedMs,
|
|
20806
|
+
logLabel,
|
|
20807
|
+
});
|
|
20808
|
+
}
|
|
20809
|
+
return { file, sizeBytes, filename, elapsedMs };
|
|
20430
20810
|
}
|
|
20431
|
-
|
|
20432
|
-
|
|
20433
|
-
|
|
20434
|
-
|
|
20435
|
-
|
|
20436
|
-
|
|
20437
|
-
|
|
20438
|
-
|
|
20439
|
-
|
|
20440
|
-
role: msg.sender === 'assistant' ? 'assistant' : 'user',
|
|
20441
|
-
content: msg.content,
|
|
20442
|
-
}));
|
|
20443
|
-
input.push(...previousMessages);
|
|
20811
|
+
catch (error) {
|
|
20812
|
+
assertsError(error);
|
|
20813
|
+
console.error('[🤰]', 'Error downloading knowledge source', {
|
|
20814
|
+
source,
|
|
20815
|
+
elapsedMs: Date.now() - startedAtMs,
|
|
20816
|
+
logLabel,
|
|
20817
|
+
error: serializeError(error),
|
|
20818
|
+
});
|
|
20819
|
+
return null;
|
|
20444
20820
|
}
|
|
20445
|
-
|
|
20446
|
-
|
|
20447
|
-
|
|
20448
|
-
|
|
20449
|
-
|
|
20450
|
-
|
|
20451
|
-
|
|
20452
|
-
|
|
20453
|
-
|
|
20454
|
-
|
|
20455
|
-
if (this.
|
|
20456
|
-
|
|
20457
|
-
|
|
20458
|
-
|
|
20459
|
-
|
|
20460
|
-
|
|
20821
|
+
finally {
|
|
20822
|
+
clearTimeout(timeoutId);
|
|
20823
|
+
}
|
|
20824
|
+
}
|
|
20825
|
+
/**
|
|
20826
|
+
* Logs vector store file batch diagnostics to help trace ingestion stalls or failures.
|
|
20827
|
+
*/
|
|
20828
|
+
async logVectorStoreFileBatchDiagnostics(options) {
|
|
20829
|
+
var _a, _b, _c, _d, _e;
|
|
20830
|
+
const { client, vectorStoreId, batchId, uploadedFiles, logLabel, reason } = options;
|
|
20831
|
+
if (reason === 'stalled' && !this.options.isVerbose) {
|
|
20832
|
+
return;
|
|
20833
|
+
}
|
|
20834
|
+
if (!batchId.startsWith('vsfb_')) {
|
|
20835
|
+
console.error('[🤰]', 'Vector store file batch diagnostics skipped (invalid batch id)', {
|
|
20836
|
+
vectorStoreId,
|
|
20837
|
+
batchId,
|
|
20838
|
+
reason,
|
|
20839
|
+
logLabel,
|
|
20840
|
+
});
|
|
20841
|
+
return;
|
|
20842
|
+
}
|
|
20843
|
+
const fileIdToMetadata = new Map();
|
|
20844
|
+
for (const file of uploadedFiles) {
|
|
20845
|
+
fileIdToMetadata.set(file.fileId, file);
|
|
20846
|
+
}
|
|
20847
|
+
try {
|
|
20848
|
+
const vectorStores = this.getVectorStoresApi(client);
|
|
20849
|
+
const limit = Math.min(100, Math.max(10, uploadedFiles.length));
|
|
20850
|
+
const batchFilesPage = await vectorStores.fileBatches.listFiles(batchId, {
|
|
20851
|
+
vector_store_id: vectorStoreId,
|
|
20852
|
+
limit,
|
|
20853
|
+
});
|
|
20854
|
+
const batchFiles = (_a = batchFilesPage.data) !== null && _a !== void 0 ? _a : [];
|
|
20855
|
+
const statusCounts = {
|
|
20856
|
+
in_progress: 0,
|
|
20857
|
+
completed: 0,
|
|
20858
|
+
failed: 0,
|
|
20859
|
+
cancelled: 0,
|
|
20860
|
+
};
|
|
20861
|
+
const errorSamples = [];
|
|
20862
|
+
const inProgressSamples = [];
|
|
20863
|
+
const batchFileIds = new Set();
|
|
20864
|
+
for (const file of batchFiles) {
|
|
20865
|
+
const status = (_b = file.status) !== null && _b !== void 0 ? _b : 'unknown';
|
|
20866
|
+
statusCounts[status] = ((_c = statusCounts[status]) !== null && _c !== void 0 ? _c : 0) + 1;
|
|
20867
|
+
const vectorStoreFileId = file.id;
|
|
20868
|
+
const uploadedFileId = (_d = file.file_id) !== null && _d !== void 0 ? _d : file.fileId;
|
|
20869
|
+
const fileId = uploadedFileId !== null && uploadedFileId !== void 0 ? uploadedFileId : vectorStoreFileId;
|
|
20870
|
+
batchFileIds.add(fileId);
|
|
20871
|
+
const metadata = fileIdToMetadata.get(fileId);
|
|
20872
|
+
if (status === 'failed') {
|
|
20873
|
+
errorSamples.push({
|
|
20874
|
+
fileId,
|
|
20875
|
+
status,
|
|
20876
|
+
error: (_e = file.last_error) === null || _e === void 0 ? void 0 : _e.message,
|
|
20877
|
+
filename: metadata === null || metadata === void 0 ? void 0 : metadata.filename,
|
|
20878
|
+
vectorStoreFileId: uploadedFileId ? vectorStoreFileId : undefined,
|
|
20879
|
+
});
|
|
20880
|
+
}
|
|
20881
|
+
if (status === 'in_progress') {
|
|
20882
|
+
inProgressSamples.push({
|
|
20883
|
+
fileId,
|
|
20884
|
+
filename: metadata === null || metadata === void 0 ? void 0 : metadata.filename,
|
|
20885
|
+
vectorStoreFileId: uploadedFileId ? vectorStoreFileId : undefined,
|
|
20886
|
+
});
|
|
20887
|
+
}
|
|
20888
|
+
}
|
|
20889
|
+
const missingSamples = uploadedFiles
|
|
20890
|
+
.filter((file) => !batchFileIds.has(file.fileId))
|
|
20891
|
+
.slice(0, 5)
|
|
20892
|
+
.map((file) => ({
|
|
20893
|
+
fileId: file.fileId,
|
|
20894
|
+
filename: file.filename,
|
|
20895
|
+
sizeBytes: file.sizeBytes,
|
|
20896
|
+
}));
|
|
20897
|
+
const vectorStore = await vectorStores.retrieve(vectorStoreId);
|
|
20898
|
+
const logPayload = {
|
|
20899
|
+
vectorStoreId,
|
|
20900
|
+
batchId,
|
|
20901
|
+
reason,
|
|
20902
|
+
vectorStoreStatus: vectorStore.status,
|
|
20903
|
+
vectorStoreFileCounts: vectorStore.file_counts,
|
|
20904
|
+
vectorStoreUsageBytes: vectorStore.usage_bytes,
|
|
20905
|
+
batchFileCount: batchFiles.length,
|
|
20906
|
+
statusCounts,
|
|
20907
|
+
errorSamples: errorSamples.slice(0, 5),
|
|
20908
|
+
inProgressSamples,
|
|
20909
|
+
missingFileCount: uploadedFiles.length - batchFileIds.size,
|
|
20910
|
+
missingSamples,
|
|
20911
|
+
logLabel,
|
|
20461
20912
|
};
|
|
20913
|
+
const logFunction = reason === 'stalled' ? console.info : console.error;
|
|
20914
|
+
logFunction('[🤰]', 'Vector store file batch diagnostics', logPayload);
|
|
20915
|
+
}
|
|
20916
|
+
catch (error) {
|
|
20917
|
+
assertsError(error);
|
|
20918
|
+
console.error('[🤰]', 'Vector store file batch diagnostics failed', {
|
|
20919
|
+
vectorStoreId,
|
|
20920
|
+
batchId,
|
|
20921
|
+
reason,
|
|
20922
|
+
logLabel,
|
|
20923
|
+
error: serializeError(error),
|
|
20924
|
+
});
|
|
20925
|
+
}
|
|
20926
|
+
}
|
|
20927
|
+
/**
|
|
20928
|
+
* Uploads knowledge source files to the vector store and polls until processing completes.
|
|
20929
|
+
*/
|
|
20930
|
+
async uploadKnowledgeSourceFilesToVectorStore(options) {
|
|
20931
|
+
var _a, _b, _c, _d, _e, _f;
|
|
20932
|
+
const { client, vectorStoreId, files, totalBytes, logLabel } = options;
|
|
20933
|
+
const vectorStores = this.getVectorStoresApi(client);
|
|
20934
|
+
const uploadStartedAtMs = Date.now();
|
|
20935
|
+
const maxConcurrency = Math.max(1, this.getKnowledgeSourceUploadMaxConcurrency());
|
|
20936
|
+
const pollIntervalMs = Math.max(1000, this.getKnowledgeSourceUploadPollIntervalMs());
|
|
20937
|
+
const uploadTimeoutMs = Math.max(1000, this.getKnowledgeSourceUploadTimeoutMs());
|
|
20938
|
+
if (this.options.isVerbose) {
|
|
20939
|
+
console.info('[🤰]', 'Uploading knowledge source files to OpenAI', {
|
|
20940
|
+
vectorStoreId,
|
|
20941
|
+
fileCount: files.length,
|
|
20942
|
+
totalBytes,
|
|
20943
|
+
maxConcurrency,
|
|
20944
|
+
pollIntervalMs,
|
|
20945
|
+
uploadTimeoutMs,
|
|
20946
|
+
logLabel,
|
|
20947
|
+
});
|
|
20948
|
+
}
|
|
20949
|
+
const fileTypeSummary = {};
|
|
20950
|
+
for (const file of files) {
|
|
20951
|
+
const filename = (_a = file.name) !== null && _a !== void 0 ? _a : '';
|
|
20952
|
+
const extension = filename.includes('.')
|
|
20953
|
+
? (_c = (_b = filename.split('.').pop()) === null || _b === void 0 ? void 0 : _b.toLowerCase()) !== null && _c !== void 0 ? _c : 'unknown'
|
|
20954
|
+
: 'unknown';
|
|
20955
|
+
const sizeBytes = typeof file.size === 'number' ? file.size : 0;
|
|
20956
|
+
const summary = (_d = fileTypeSummary[extension]) !== null && _d !== void 0 ? _d : { count: 0, totalBytes: 0 };
|
|
20957
|
+
summary.count += 1;
|
|
20958
|
+
summary.totalBytes += sizeBytes;
|
|
20959
|
+
fileTypeSummary[extension] = summary;
|
|
20960
|
+
}
|
|
20961
|
+
if (this.options.isVerbose) {
|
|
20962
|
+
console.info('[🤰]', 'Knowledge source file summary', {
|
|
20963
|
+
vectorStoreId,
|
|
20964
|
+
fileCount: files.length,
|
|
20965
|
+
totalBytes,
|
|
20966
|
+
fileTypeSummary,
|
|
20967
|
+
logLabel,
|
|
20968
|
+
});
|
|
20969
|
+
}
|
|
20970
|
+
const fileEntries = files.map((file, index) => ({ file, index }));
|
|
20971
|
+
const fileIterator = fileEntries.values();
|
|
20972
|
+
const fileIds = [];
|
|
20973
|
+
const uploadedFiles = [];
|
|
20974
|
+
const failedUploads = [];
|
|
20975
|
+
let uploadedCount = 0;
|
|
20976
|
+
const processFiles = async (iterator) => {
|
|
20977
|
+
var _a, _b;
|
|
20978
|
+
for (const { file, index } of iterator) {
|
|
20979
|
+
const uploadIndex = index + 1;
|
|
20980
|
+
const filename = file.name || `knowledge-source-${uploadIndex}`;
|
|
20981
|
+
const extension = filename.includes('.')
|
|
20982
|
+
? (_b = (_a = filename.split('.').pop()) === null || _a === void 0 ? void 0 : _a.toLowerCase()) !== null && _b !== void 0 ? _b : 'unknown'
|
|
20983
|
+
: 'unknown';
|
|
20984
|
+
const sizeBytes = typeof file.size === 'number' ? file.size : undefined;
|
|
20985
|
+
const fileUploadStartedAtMs = Date.now();
|
|
20986
|
+
if (this.options.isVerbose) {
|
|
20987
|
+
console.info('[🤰]', 'Uploading knowledge source file', {
|
|
20988
|
+
index: uploadIndex,
|
|
20989
|
+
total: files.length,
|
|
20990
|
+
filename,
|
|
20991
|
+
extension,
|
|
20992
|
+
sizeBytes,
|
|
20993
|
+
logLabel,
|
|
20994
|
+
});
|
|
20995
|
+
}
|
|
20996
|
+
try {
|
|
20997
|
+
const uploaded = await client.files.create({ file, purpose: 'assistants' });
|
|
20998
|
+
fileIds.push(uploaded.id);
|
|
20999
|
+
uploadedFiles.push({ fileId: uploaded.id, filename, sizeBytes });
|
|
21000
|
+
uploadedCount += 1;
|
|
21001
|
+
if (this.options.isVerbose) {
|
|
21002
|
+
console.info('[🤰]', 'Uploaded knowledge source file', {
|
|
21003
|
+
index: uploadIndex,
|
|
21004
|
+
total: files.length,
|
|
21005
|
+
filename,
|
|
21006
|
+
sizeBytes,
|
|
21007
|
+
fileId: uploaded.id,
|
|
21008
|
+
elapsedMs: Date.now() - fileUploadStartedAtMs,
|
|
21009
|
+
logLabel,
|
|
21010
|
+
});
|
|
21011
|
+
}
|
|
21012
|
+
}
|
|
21013
|
+
catch (error) {
|
|
21014
|
+
assertsError(error);
|
|
21015
|
+
const serializedError = serializeError(error);
|
|
21016
|
+
failedUploads.push({ index: uploadIndex, filename, error: serializedError });
|
|
21017
|
+
console.error('[🤰]', 'Failed to upload knowledge source file', {
|
|
21018
|
+
index: uploadIndex,
|
|
21019
|
+
total: files.length,
|
|
21020
|
+
filename,
|
|
21021
|
+
sizeBytes,
|
|
21022
|
+
elapsedMs: Date.now() - fileUploadStartedAtMs,
|
|
21023
|
+
logLabel,
|
|
21024
|
+
error: serializedError,
|
|
21025
|
+
});
|
|
21026
|
+
}
|
|
21027
|
+
}
|
|
21028
|
+
};
|
|
21029
|
+
const workerCount = Math.min(maxConcurrency, files.length);
|
|
21030
|
+
const workers = Array.from({ length: workerCount }, () => processFiles(fileIterator));
|
|
21031
|
+
await Promise.all(workers);
|
|
21032
|
+
if (this.options.isVerbose) {
|
|
21033
|
+
console.info('[🤰]', 'Finished uploading knowledge source files', {
|
|
21034
|
+
vectorStoreId,
|
|
21035
|
+
fileCount: files.length,
|
|
21036
|
+
uploadedCount,
|
|
21037
|
+
failedCount: failedUploads.length,
|
|
21038
|
+
elapsedMs: Date.now() - uploadStartedAtMs,
|
|
21039
|
+
failedSamples: failedUploads.slice(0, 3),
|
|
21040
|
+
logLabel,
|
|
21041
|
+
});
|
|
21042
|
+
}
|
|
21043
|
+
if (fileIds.length === 0) {
|
|
21044
|
+
console.error('[🤰]', 'No knowledge source files were uploaded', {
|
|
21045
|
+
vectorStoreId,
|
|
21046
|
+
fileCount: files.length,
|
|
21047
|
+
failedCount: failedUploads.length,
|
|
21048
|
+
logLabel,
|
|
21049
|
+
});
|
|
21050
|
+
return null;
|
|
21051
|
+
}
|
|
21052
|
+
const batch = await vectorStores.fileBatches.create(vectorStoreId, {
|
|
21053
|
+
file_ids: fileIds,
|
|
21054
|
+
});
|
|
21055
|
+
const expectedBatchId = batch.id;
|
|
21056
|
+
const expectedBatchIdValid = expectedBatchId.startsWith('vsfb_');
|
|
21057
|
+
if (!expectedBatchIdValid) {
|
|
21058
|
+
console.error('[🤰]', 'Vector store file batch id looks invalid', {
|
|
21059
|
+
vectorStoreId,
|
|
21060
|
+
batchId: expectedBatchId,
|
|
21061
|
+
batchVectorStoreId: batch.vector_store_id,
|
|
21062
|
+
logLabel,
|
|
21063
|
+
});
|
|
21064
|
+
}
|
|
21065
|
+
else if (batch.vector_store_id !== vectorStoreId) {
|
|
21066
|
+
console.error('[🤰]', 'Vector store file batch vector store id mismatch', {
|
|
21067
|
+
vectorStoreId,
|
|
21068
|
+
batchId: expectedBatchId,
|
|
21069
|
+
batchVectorStoreId: batch.vector_store_id,
|
|
21070
|
+
logLabel,
|
|
21071
|
+
});
|
|
21072
|
+
}
|
|
21073
|
+
if (this.options.isVerbose) {
|
|
21074
|
+
console.info('[🤰]', 'Created vector store file batch', {
|
|
21075
|
+
vectorStoreId,
|
|
21076
|
+
batchId: expectedBatchId,
|
|
21077
|
+
fileCount: fileIds.length,
|
|
21078
|
+
logLabel,
|
|
21079
|
+
});
|
|
21080
|
+
}
|
|
21081
|
+
const pollStartedAtMs = Date.now();
|
|
21082
|
+
const progressLogIntervalMs = Math.max(VECTOR_STORE_PROGRESS_LOG_INTERVAL_MIN_MS, pollIntervalMs);
|
|
21083
|
+
const diagnosticsIntervalMs = Math.max(60000, pollIntervalMs * 5);
|
|
21084
|
+
// let lastStatus: string | undefined;
|
|
21085
|
+
let lastCountsKey = '';
|
|
21086
|
+
let lastProgressKey = '';
|
|
21087
|
+
let lastLogAtMs = 0;
|
|
21088
|
+
let lastProgressAtMs = pollStartedAtMs;
|
|
21089
|
+
let lastDiagnosticsAtMs = pollStartedAtMs;
|
|
21090
|
+
let latestBatch = batch;
|
|
21091
|
+
let loggedBatchIdMismatch = false;
|
|
21092
|
+
let loggedBatchIdFallback = false;
|
|
21093
|
+
let loggedBatchIdInvalid = false;
|
|
21094
|
+
let shouldPoll = true;
|
|
21095
|
+
while (shouldPoll) {
|
|
21096
|
+
const nowMs = Date.now();
|
|
21097
|
+
// [🤰] Note: Sometimes OpenAI returns Vector Store object instead of Batch object, or IDs get swapped.
|
|
21098
|
+
const rawBatchId = typeof latestBatch.id === 'string' ? latestBatch.id : '';
|
|
21099
|
+
const rawVectorStoreId = latestBatch.vector_store_id;
|
|
21100
|
+
let returnedBatchId = rawBatchId;
|
|
21101
|
+
let returnedBatchIdValid = typeof returnedBatchId === 'string' && returnedBatchId.startsWith('vsfb_');
|
|
21102
|
+
if (!returnedBatchIdValid && expectedBatchIdValid) {
|
|
21103
|
+
if (!loggedBatchIdFallback) {
|
|
21104
|
+
console.error('[🤰]', 'Vector store file batch id missing from response; falling back to expected', {
|
|
21105
|
+
vectorStoreId,
|
|
21106
|
+
expectedBatchId,
|
|
21107
|
+
returnedBatchId,
|
|
21108
|
+
rawVectorStoreId,
|
|
21109
|
+
logLabel,
|
|
21110
|
+
});
|
|
21111
|
+
loggedBatchIdFallback = true;
|
|
21112
|
+
}
|
|
21113
|
+
returnedBatchId = expectedBatchId;
|
|
21114
|
+
returnedBatchIdValid = true;
|
|
21115
|
+
}
|
|
21116
|
+
if (!returnedBatchIdValid && !loggedBatchIdInvalid) {
|
|
21117
|
+
console.error('[🤰]', 'Vector store file batch id is invalid; stopping polling', {
|
|
21118
|
+
vectorStoreId,
|
|
21119
|
+
expectedBatchId,
|
|
21120
|
+
returnedBatchId,
|
|
21121
|
+
rawVectorStoreId,
|
|
21122
|
+
logLabel,
|
|
21123
|
+
});
|
|
21124
|
+
loggedBatchIdInvalid = true;
|
|
21125
|
+
}
|
|
21126
|
+
const batchIdMismatch = expectedBatchIdValid && returnedBatchIdValid && returnedBatchId !== expectedBatchId;
|
|
21127
|
+
if (batchIdMismatch && !loggedBatchIdMismatch) {
|
|
21128
|
+
console.error('[🤰]', 'Vector store file batch id mismatch', {
|
|
21129
|
+
vectorStoreId,
|
|
21130
|
+
expectedBatchId,
|
|
21131
|
+
returnedBatchId,
|
|
21132
|
+
logLabel,
|
|
21133
|
+
});
|
|
21134
|
+
loggedBatchIdMismatch = true;
|
|
21135
|
+
}
|
|
21136
|
+
if (returnedBatchIdValid) {
|
|
21137
|
+
latestBatch = await vectorStores.fileBatches.retrieve(returnedBatchId, {
|
|
21138
|
+
vector_store_id: vectorStoreId,
|
|
21139
|
+
});
|
|
21140
|
+
}
|
|
21141
|
+
else {
|
|
21142
|
+
shouldPoll = false;
|
|
21143
|
+
continue;
|
|
21144
|
+
}
|
|
21145
|
+
const status = (_e = latestBatch.status) !== null && _e !== void 0 ? _e : 'unknown';
|
|
21146
|
+
const fileCounts = (_f = latestBatch.file_counts) !== null && _f !== void 0 ? _f : {};
|
|
21147
|
+
const progressKey = JSON.stringify(fileCounts);
|
|
21148
|
+
const statusCountsKey = `${status}-${progressKey}`;
|
|
21149
|
+
const isProgressing = progressKey !== lastProgressKey;
|
|
21150
|
+
if (isProgressing) {
|
|
21151
|
+
lastProgressAtMs = nowMs;
|
|
21152
|
+
lastProgressKey = progressKey;
|
|
21153
|
+
}
|
|
21154
|
+
if (this.options.isVerbose &&
|
|
21155
|
+
(statusCountsKey !== lastCountsKey || nowMs - lastLogAtMs >= progressLogIntervalMs)) {
|
|
21156
|
+
console.info('[🤰]', 'Vector store file batch status', {
|
|
21157
|
+
vectorStoreId,
|
|
21158
|
+
batchId: returnedBatchId,
|
|
21159
|
+
status,
|
|
21160
|
+
fileCounts,
|
|
21161
|
+
elapsedMs: nowMs - pollStartedAtMs,
|
|
21162
|
+
logLabel,
|
|
21163
|
+
});
|
|
21164
|
+
lastCountsKey = statusCountsKey;
|
|
21165
|
+
lastLogAtMs = nowMs;
|
|
21166
|
+
}
|
|
21167
|
+
if (status === 'in_progress' &&
|
|
21168
|
+
nowMs - lastProgressAtMs >= VECTOR_STORE_STALL_LOG_THRESHOLD_MS &&
|
|
21169
|
+
nowMs - lastDiagnosticsAtMs >= diagnosticsIntervalMs) {
|
|
21170
|
+
lastDiagnosticsAtMs = nowMs;
|
|
21171
|
+
await this.logVectorStoreFileBatchDiagnostics({
|
|
21172
|
+
client,
|
|
21173
|
+
vectorStoreId,
|
|
21174
|
+
batchId: returnedBatchId,
|
|
21175
|
+
uploadedFiles,
|
|
21176
|
+
logLabel,
|
|
21177
|
+
reason: 'stalled',
|
|
21178
|
+
});
|
|
21179
|
+
}
|
|
21180
|
+
if (status === 'completed') {
|
|
21181
|
+
if (this.options.isVerbose) {
|
|
21182
|
+
console.info('[🤰]', 'Vector store file batch completed', {
|
|
21183
|
+
vectorStoreId,
|
|
21184
|
+
batchId: returnedBatchId,
|
|
21185
|
+
fileCounts,
|
|
21186
|
+
elapsedMs: nowMs - pollStartedAtMs,
|
|
21187
|
+
logLabel,
|
|
21188
|
+
});
|
|
21189
|
+
}
|
|
21190
|
+
shouldPoll = false;
|
|
21191
|
+
continue;
|
|
21192
|
+
}
|
|
21193
|
+
if (status === 'failed') {
|
|
21194
|
+
console.error('[🤰]', 'Vector store file batch completed with failures', {
|
|
21195
|
+
vectorStoreId,
|
|
21196
|
+
batchId: returnedBatchId,
|
|
21197
|
+
fileCounts,
|
|
21198
|
+
elapsedMs: nowMs - pollStartedAtMs,
|
|
21199
|
+
logLabel,
|
|
21200
|
+
});
|
|
21201
|
+
await this.logVectorStoreFileBatchDiagnostics({
|
|
21202
|
+
client,
|
|
21203
|
+
vectorStoreId,
|
|
21204
|
+
batchId: returnedBatchId,
|
|
21205
|
+
uploadedFiles,
|
|
21206
|
+
logLabel,
|
|
21207
|
+
reason: 'failed',
|
|
21208
|
+
});
|
|
21209
|
+
shouldPoll = false;
|
|
21210
|
+
continue;
|
|
21211
|
+
}
|
|
21212
|
+
if (status === 'cancelled') {
|
|
21213
|
+
console.error('[🤰]', 'Vector store file batch did not complete', {
|
|
21214
|
+
vectorStoreId,
|
|
21215
|
+
batchId: returnedBatchId,
|
|
21216
|
+
status,
|
|
21217
|
+
fileCounts,
|
|
21218
|
+
elapsedMs: nowMs - pollStartedAtMs,
|
|
21219
|
+
logLabel,
|
|
21220
|
+
});
|
|
21221
|
+
await this.logVectorStoreFileBatchDiagnostics({
|
|
21222
|
+
client,
|
|
21223
|
+
vectorStoreId,
|
|
21224
|
+
batchId: returnedBatchId,
|
|
21225
|
+
uploadedFiles,
|
|
21226
|
+
logLabel,
|
|
21227
|
+
reason: 'failed',
|
|
21228
|
+
});
|
|
21229
|
+
shouldPoll = false;
|
|
21230
|
+
continue;
|
|
21231
|
+
}
|
|
21232
|
+
if (nowMs - pollStartedAtMs >= uploadTimeoutMs) {
|
|
21233
|
+
console.error('[🤰]', 'Timed out waiting for vector store file batch', {
|
|
21234
|
+
vectorStoreId,
|
|
21235
|
+
batchId: returnedBatchId,
|
|
21236
|
+
fileCounts,
|
|
21237
|
+
elapsedMs: nowMs - pollStartedAtMs,
|
|
21238
|
+
uploadTimeoutMs,
|
|
21239
|
+
logLabel,
|
|
21240
|
+
});
|
|
21241
|
+
await this.logVectorStoreFileBatchDiagnostics({
|
|
21242
|
+
client,
|
|
21243
|
+
vectorStoreId,
|
|
21244
|
+
batchId: returnedBatchId,
|
|
21245
|
+
uploadedFiles,
|
|
21246
|
+
logLabel,
|
|
21247
|
+
reason: 'timeout',
|
|
21248
|
+
});
|
|
21249
|
+
if (this.shouldContinueOnVectorStoreStall()) {
|
|
21250
|
+
console.warn('[🤰]', 'Continuing despite vector store timeout as requested', {
|
|
21251
|
+
vectorStoreId,
|
|
21252
|
+
logLabel,
|
|
21253
|
+
});
|
|
21254
|
+
shouldPoll = false;
|
|
21255
|
+
continue;
|
|
21256
|
+
}
|
|
21257
|
+
try {
|
|
21258
|
+
const cancelBatchId = batchIdMismatch && returnedBatchId.startsWith('vsfb_') ? returnedBatchId : expectedBatchId;
|
|
21259
|
+
if (!cancelBatchId.startsWith('vsfb_')) {
|
|
21260
|
+
console.error('[🤰]', 'Skipping vector store file batch cancel (invalid batch id)', {
|
|
21261
|
+
vectorStoreId,
|
|
21262
|
+
batchId: cancelBatchId,
|
|
21263
|
+
logLabel,
|
|
21264
|
+
});
|
|
21265
|
+
}
|
|
21266
|
+
else {
|
|
21267
|
+
await vectorStores.fileBatches.cancel(cancelBatchId, {
|
|
21268
|
+
vector_store_id: vectorStoreId,
|
|
21269
|
+
});
|
|
21270
|
+
}
|
|
21271
|
+
if (this.options.isVerbose) {
|
|
21272
|
+
console.info('[🤰]', 'Cancelled vector store file batch after timeout', {
|
|
21273
|
+
vectorStoreId,
|
|
21274
|
+
batchId: batchIdMismatch && returnedBatchId.startsWith('vsfb_')
|
|
21275
|
+
? returnedBatchId
|
|
21276
|
+
: expectedBatchId,
|
|
21277
|
+
...(batchIdMismatch ? { returnedBatchId } : {}),
|
|
21278
|
+
logLabel,
|
|
21279
|
+
});
|
|
21280
|
+
}
|
|
21281
|
+
}
|
|
21282
|
+
catch (error) {
|
|
21283
|
+
assertsError(error);
|
|
21284
|
+
console.error('[🤰]', 'Failed to cancel vector store file batch after timeout', {
|
|
21285
|
+
vectorStoreId,
|
|
21286
|
+
batchId: expectedBatchId,
|
|
21287
|
+
...(batchIdMismatch ? { returnedBatchId } : {}),
|
|
21288
|
+
logLabel,
|
|
21289
|
+
error: serializeError(error),
|
|
21290
|
+
});
|
|
21291
|
+
}
|
|
21292
|
+
shouldPoll = false;
|
|
21293
|
+
continue;
|
|
21294
|
+
}
|
|
21295
|
+
await new Promise((resolve) => setTimeout(resolve, pollIntervalMs));
|
|
21296
|
+
}
|
|
21297
|
+
return latestBatch;
|
|
21298
|
+
}
|
|
21299
|
+
/**
|
|
21300
|
+
* Creates a vector store and uploads knowledge sources, returning its ID.
|
|
21301
|
+
*/
|
|
21302
|
+
async createVectorStoreWithKnowledgeSources(options) {
|
|
21303
|
+
const { client, name, knowledgeSources, logLabel } = options;
|
|
21304
|
+
const vectorStores = this.getVectorStoresApi(client);
|
|
21305
|
+
const knowledgeSourcesCount = knowledgeSources.length;
|
|
21306
|
+
const downloadTimeoutMs = this.getKnowledgeSourceDownloadTimeoutMs();
|
|
21307
|
+
if (this.options.isVerbose) {
|
|
21308
|
+
console.info('[🤰]', 'Creating vector store with knowledge sources', {
|
|
21309
|
+
name,
|
|
21310
|
+
knowledgeSourcesCount,
|
|
21311
|
+
downloadTimeoutMs,
|
|
21312
|
+
logLabel,
|
|
21313
|
+
});
|
|
21314
|
+
}
|
|
21315
|
+
const vectorStore = await vectorStores.create({
|
|
21316
|
+
name: `${name} Knowledge Base`,
|
|
21317
|
+
});
|
|
21318
|
+
const vectorStoreId = vectorStore.id;
|
|
21319
|
+
if (this.options.isVerbose) {
|
|
21320
|
+
console.info('[🤰]', 'Vector store created', {
|
|
21321
|
+
vectorStoreId,
|
|
21322
|
+
logLabel,
|
|
21323
|
+
});
|
|
21324
|
+
}
|
|
21325
|
+
const fileStreams = [];
|
|
21326
|
+
const skippedSources = [];
|
|
21327
|
+
let totalBytes = 0;
|
|
21328
|
+
const processingStartedAtMs = Date.now();
|
|
21329
|
+
for (const [index, source] of knowledgeSources.entries()) {
|
|
21330
|
+
try {
|
|
21331
|
+
const isDataUrl = isDataUrlKnowledgeSource(source);
|
|
21332
|
+
const isHttp = source.startsWith('http://') || source.startsWith('https://');
|
|
21333
|
+
const sourceType = isDataUrl ? 'data_url' : isHttp ? 'url' : 'file';
|
|
21334
|
+
if (this.options.isVerbose) {
|
|
21335
|
+
console.info('[🤰]', 'Processing knowledge source', {
|
|
21336
|
+
index: index + 1,
|
|
21337
|
+
total: knowledgeSourcesCount,
|
|
21338
|
+
source,
|
|
21339
|
+
sourceType,
|
|
21340
|
+
logLabel,
|
|
21341
|
+
});
|
|
21342
|
+
}
|
|
21343
|
+
if (isDataUrl) {
|
|
21344
|
+
const parsed = parseDataUrlKnowledgeSource(source);
|
|
21345
|
+
if (!parsed) {
|
|
21346
|
+
skippedSources.push({ source, reason: 'invalid_data_url' });
|
|
21347
|
+
if (this.options.isVerbose) {
|
|
21348
|
+
console.info('[🤰]', 'Skipping knowledge source (invalid data URL)', {
|
|
21349
|
+
source,
|
|
21350
|
+
sourceType,
|
|
21351
|
+
logLabel,
|
|
21352
|
+
});
|
|
21353
|
+
}
|
|
21354
|
+
continue;
|
|
21355
|
+
}
|
|
21356
|
+
const dataUrlFile = new File([parsed.buffer], parsed.filename, {
|
|
21357
|
+
type: parsed.mimeType,
|
|
21358
|
+
});
|
|
21359
|
+
fileStreams.push(dataUrlFile);
|
|
21360
|
+
totalBytes += parsed.buffer.length;
|
|
21361
|
+
continue;
|
|
21362
|
+
}
|
|
21363
|
+
if (isHttp) {
|
|
21364
|
+
const downloadResult = await this.downloadKnowledgeSourceFile({
|
|
21365
|
+
source,
|
|
21366
|
+
timeoutMs: downloadTimeoutMs,
|
|
21367
|
+
logLabel,
|
|
21368
|
+
});
|
|
21369
|
+
if (downloadResult) {
|
|
21370
|
+
fileStreams.push(downloadResult.file);
|
|
21371
|
+
totalBytes += downloadResult.sizeBytes;
|
|
21372
|
+
}
|
|
21373
|
+
else {
|
|
21374
|
+
skippedSources.push({ source, reason: 'download_failed' });
|
|
21375
|
+
}
|
|
21376
|
+
}
|
|
21377
|
+
else {
|
|
21378
|
+
skippedSources.push({ source, reason: 'unsupported_source_type' });
|
|
21379
|
+
if (this.options.isVerbose) {
|
|
21380
|
+
console.info('[🤰]', 'Skipping knowledge source (unsupported type)', {
|
|
21381
|
+
source,
|
|
21382
|
+
sourceType,
|
|
21383
|
+
logLabel,
|
|
21384
|
+
});
|
|
21385
|
+
}
|
|
21386
|
+
/*
|
|
21387
|
+
TODO: [🤰] Resolve problem with browser environment
|
|
21388
|
+
// Assume it's a local file path
|
|
21389
|
+
// Note: This will work in Node.js environment
|
|
21390
|
+
// For browser environments, this would need different handling
|
|
21391
|
+
const fs = await import('fs');
|
|
21392
|
+
const fileStream = fs.createReadStream(source);
|
|
21393
|
+
fileStreams.push(fileStream);
|
|
21394
|
+
*/
|
|
21395
|
+
}
|
|
21396
|
+
}
|
|
21397
|
+
catch (error) {
|
|
21398
|
+
assertsError(error);
|
|
21399
|
+
skippedSources.push({ source, reason: 'processing_error' });
|
|
21400
|
+
console.error('[🤰]', 'Error processing knowledge source', {
|
|
21401
|
+
source,
|
|
21402
|
+
logLabel,
|
|
21403
|
+
error: serializeError(error),
|
|
21404
|
+
});
|
|
21405
|
+
}
|
|
21406
|
+
}
|
|
21407
|
+
if (this.options.isVerbose) {
|
|
21408
|
+
console.info('[🤰]', 'Finished processing knowledge sources', {
|
|
21409
|
+
total: knowledgeSourcesCount,
|
|
21410
|
+
downloadedCount: fileStreams.length,
|
|
21411
|
+
skippedCount: skippedSources.length,
|
|
21412
|
+
totalBytes,
|
|
21413
|
+
elapsedMs: Date.now() - processingStartedAtMs,
|
|
21414
|
+
skippedSamples: skippedSources.slice(0, 3),
|
|
21415
|
+
logLabel,
|
|
21416
|
+
});
|
|
21417
|
+
}
|
|
21418
|
+
if (fileStreams.length > 0) {
|
|
21419
|
+
if (this.options.isVerbose) {
|
|
21420
|
+
console.info('[🤰]', 'Uploading files to vector store', {
|
|
21421
|
+
vectorStoreId,
|
|
21422
|
+
fileCount: fileStreams.length,
|
|
21423
|
+
totalBytes,
|
|
21424
|
+
maxConcurrency: this.getKnowledgeSourceUploadMaxConcurrency(),
|
|
21425
|
+
pollIntervalMs: this.getKnowledgeSourceUploadPollIntervalMs(),
|
|
21426
|
+
uploadTimeoutMs: this.getKnowledgeSourceUploadTimeoutMs(),
|
|
21427
|
+
logLabel,
|
|
21428
|
+
});
|
|
21429
|
+
}
|
|
21430
|
+
try {
|
|
21431
|
+
await this.uploadKnowledgeSourceFilesToVectorStore({
|
|
21432
|
+
client,
|
|
21433
|
+
vectorStoreId,
|
|
21434
|
+
files: fileStreams,
|
|
21435
|
+
totalBytes,
|
|
21436
|
+
logLabel,
|
|
21437
|
+
});
|
|
21438
|
+
}
|
|
21439
|
+
catch (error) {
|
|
21440
|
+
assertsError(error);
|
|
21441
|
+
console.error('[🤰]', 'Error uploading files to vector store', {
|
|
21442
|
+
vectorStoreId,
|
|
21443
|
+
logLabel,
|
|
21444
|
+
error: serializeError(error),
|
|
21445
|
+
});
|
|
21446
|
+
}
|
|
21447
|
+
}
|
|
21448
|
+
else if (this.options.isVerbose) {
|
|
21449
|
+
console.info('[🤰]', 'No knowledge source files to upload', {
|
|
21450
|
+
vectorStoreId,
|
|
21451
|
+
skippedCount: skippedSources.length,
|
|
21452
|
+
logLabel,
|
|
21453
|
+
});
|
|
21454
|
+
}
|
|
21455
|
+
return {
|
|
21456
|
+
vectorStoreId,
|
|
21457
|
+
uploadedFileCount: fileStreams.length,
|
|
21458
|
+
skippedCount: skippedSources.length,
|
|
21459
|
+
totalBytes,
|
|
21460
|
+
};
|
|
21461
|
+
}
|
|
21462
|
+
}
|
|
21463
|
+
|
|
21464
|
+
const DEFAULT_AGENT_KIT_MODEL_NAME = 'gpt-5.2';
|
|
21465
|
+
const DEFAULT_JSON_SCHEMA_NAME = 'StructuredOutput';
|
|
21466
|
+
/*
|
|
21467
|
+
TODO: Use or remove
|
|
21468
|
+
const EMPTY_JSON_SCHEMA: JsonSchemaDefinition['schema'] = {
|
|
21469
|
+
type: 'object',
|
|
21470
|
+
properties: {},
|
|
21471
|
+
required: [],
|
|
21472
|
+
additionalProperties: true,
|
|
21473
|
+
};
|
|
21474
|
+
*/
|
|
21475
|
+
function buildJsonSchemaDefinition(jsonSchema) {
|
|
21476
|
+
var _a, _b, _c;
|
|
21477
|
+
const schema = (_a = jsonSchema === null || jsonSchema === void 0 ? void 0 : jsonSchema.schema) !== null && _a !== void 0 ? _a : {};
|
|
21478
|
+
return {
|
|
21479
|
+
type: 'json_schema',
|
|
21480
|
+
name: (_b = jsonSchema === null || jsonSchema === void 0 ? void 0 : jsonSchema.name) !== null && _b !== void 0 ? _b : DEFAULT_JSON_SCHEMA_NAME,
|
|
21481
|
+
strict: Boolean(jsonSchema === null || jsonSchema === void 0 ? void 0 : jsonSchema.strict),
|
|
21482
|
+
schema: {
|
|
21483
|
+
type: 'object',
|
|
21484
|
+
properties: ((_c = schema.properties) !== null && _c !== void 0 ? _c : {}),
|
|
21485
|
+
required: Array.isArray(schema.required) ? schema.required : [],
|
|
21486
|
+
additionalProperties: schema.additionalProperties === undefined ? true : Boolean(schema.additionalProperties),
|
|
21487
|
+
description: schema.description,
|
|
21488
|
+
},
|
|
21489
|
+
};
|
|
21490
|
+
}
|
|
21491
|
+
/**
|
|
21492
|
+
* Maps OpenAI `response_format` payloads to AgentKit output types so the runner can forward
|
|
21493
|
+
* structured-output preferences to OpenAI while still reusing the same AgentKit agent instance.
|
|
21494
|
+
*
|
|
21495
|
+
* @param responseFormat - The OpenAI `response_format` payload from the user request.
|
|
21496
|
+
* @returns An Agent output type compatible with the requested schema or `undefined` when no impact is required.
|
|
21497
|
+
* @private utility of Open AI
|
|
21498
|
+
*/
|
|
21499
|
+
function mapResponseFormatToAgentOutputType(responseFormat) {
|
|
21500
|
+
if (!responseFormat) {
|
|
21501
|
+
return undefined;
|
|
21502
|
+
}
|
|
21503
|
+
if (typeof responseFormat === 'string') {
|
|
21504
|
+
if (responseFormat === 'text') {
|
|
21505
|
+
return 'text';
|
|
21506
|
+
}
|
|
21507
|
+
if (responseFormat === 'json_schema' || responseFormat === 'json_object') {
|
|
21508
|
+
return buildJsonSchemaDefinition();
|
|
21509
|
+
}
|
|
21510
|
+
return 'text';
|
|
21511
|
+
}
|
|
21512
|
+
switch (responseFormat.type) {
|
|
21513
|
+
case 'text':
|
|
21514
|
+
return 'text';
|
|
21515
|
+
case 'json_schema':
|
|
21516
|
+
return buildJsonSchemaDefinition(responseFormat.json_schema);
|
|
21517
|
+
case 'json_object':
|
|
21518
|
+
return buildJsonSchemaDefinition();
|
|
21519
|
+
default:
|
|
21520
|
+
return undefined;
|
|
21521
|
+
}
|
|
21522
|
+
}
|
|
21523
|
+
/**
|
|
21524
|
+
* Execution tools for OpenAI AgentKit (Agents SDK).
|
|
21525
|
+
*
|
|
21526
|
+
* @public exported from `@promptbook/openai`
|
|
21527
|
+
*/
|
|
21528
|
+
class OpenAiAgentKitExecutionTools extends OpenAiVectorStoreHandler {
|
|
21529
|
+
/**
|
|
21530
|
+
* Creates OpenAI AgentKit execution tools.
|
|
21531
|
+
*/
|
|
21532
|
+
constructor(options) {
|
|
21533
|
+
var _a;
|
|
21534
|
+
if (options.isProxied) {
|
|
21535
|
+
throw new NotYetImplementedError(`Proxy mode is not yet implemented for OpenAI AgentKit`);
|
|
21536
|
+
}
|
|
21537
|
+
super(options);
|
|
21538
|
+
this.preparedAgentKitAgent = null;
|
|
21539
|
+
this.agentKitModelName = (_a = options.agentKitModelName) !== null && _a !== void 0 ? _a : DEFAULT_AGENT_KIT_MODEL_NAME;
|
|
21540
|
+
}
|
|
21541
|
+
get title() {
|
|
21542
|
+
return 'OpenAI AgentKit';
|
|
21543
|
+
}
|
|
21544
|
+
get description() {
|
|
21545
|
+
return 'Use OpenAI AgentKit for agent-style chat with tools and knowledge';
|
|
21546
|
+
}
|
|
21547
|
+
/**
|
|
21548
|
+
* Calls OpenAI AgentKit with a chat prompt (non-streaming).
|
|
21549
|
+
*/
|
|
21550
|
+
async callChatModel(prompt) {
|
|
21551
|
+
return this.callChatModelStream(prompt, () => { });
|
|
21552
|
+
}
|
|
21553
|
+
/**
|
|
21554
|
+
* Calls OpenAI AgentKit with a chat prompt (streaming).
|
|
21555
|
+
*/
|
|
21556
|
+
async callChatModelStream(prompt, onProgress) {
|
|
21557
|
+
const { content, parameters, modelRequirements } = prompt;
|
|
21558
|
+
if (modelRequirements.modelVariant !== 'CHAT') {
|
|
21559
|
+
throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
|
|
21560
|
+
}
|
|
21561
|
+
for (const key of ['maxTokens', 'modelName', 'seed', 'temperature']) {
|
|
21562
|
+
if (modelRequirements[key] !== undefined) {
|
|
21563
|
+
throw new NotYetImplementedError(`In \`OpenAiAgentKitExecutionTools\` you cannot specify \`${key}\``);
|
|
21564
|
+
}
|
|
21565
|
+
}
|
|
21566
|
+
const rawPromptContent = templateParameters(content, {
|
|
21567
|
+
...parameters,
|
|
21568
|
+
modelName: this.agentKitModelName,
|
|
21569
|
+
});
|
|
21570
|
+
const responseFormatOutputType = mapResponseFormatToAgentOutputType(modelRequirements.responseFormat);
|
|
21571
|
+
const preparedAgentKitAgent = await this.prepareAgentKitAgent({
|
|
21572
|
+
name: (prompt.title || 'Agent'),
|
|
21573
|
+
instructions: modelRequirements.systemMessage || '',
|
|
21574
|
+
knowledgeSources: modelRequirements.knowledgeSources,
|
|
21575
|
+
tools: 'tools' in prompt && Array.isArray(prompt.tools) ? prompt.tools : modelRequirements.tools,
|
|
21576
|
+
});
|
|
21577
|
+
return this.callChatModelStreamWithPreparedAgent({
|
|
21578
|
+
openAiAgentKitAgent: preparedAgentKitAgent.agent,
|
|
21579
|
+
prompt,
|
|
21580
|
+
rawPromptContent,
|
|
21581
|
+
onProgress,
|
|
21582
|
+
responseFormatOutputType,
|
|
21583
|
+
});
|
|
21584
|
+
}
|
|
21585
|
+
/**
|
|
21586
|
+
* Returns a prepared AgentKit agent when the server wants to manage caching externally.
|
|
21587
|
+
*/
|
|
21588
|
+
getPreparedAgentKitAgent() {
|
|
21589
|
+
return this.preparedAgentKitAgent;
|
|
21590
|
+
}
|
|
21591
|
+
/**
|
|
21592
|
+
* Stores a prepared AgentKit agent for later reuse by external cache managers.
|
|
21593
|
+
*/
|
|
21594
|
+
setPreparedAgentKitAgent(preparedAgent) {
|
|
21595
|
+
this.preparedAgentKitAgent = preparedAgent;
|
|
21596
|
+
}
|
|
21597
|
+
/**
|
|
21598
|
+
* Creates a new tools instance bound to a prepared AgentKit agent.
|
|
21599
|
+
*/
|
|
21600
|
+
getPreparedAgentTools(preparedAgent) {
|
|
21601
|
+
const tools = new OpenAiAgentKitExecutionTools(this.agentKitOptions);
|
|
21602
|
+
tools.setPreparedAgentKitAgent(preparedAgent);
|
|
21603
|
+
return tools;
|
|
21604
|
+
}
|
|
21605
|
+
/**
|
|
21606
|
+
* Prepares an AgentKit agent with optional knowledge sources and tool definitions.
|
|
21607
|
+
*/
|
|
21608
|
+
async prepareAgentKitAgent(options) {
|
|
21609
|
+
var _a, _b;
|
|
21610
|
+
const { name, instructions, knowledgeSources, tools, vectorStoreId: cachedVectorStoreId, storeAsPrepared, } = options;
|
|
21611
|
+
await this.ensureAgentKitDefaults();
|
|
21612
|
+
if (this.options.isVerbose) {
|
|
21613
|
+
console.info('[🤰]', 'Preparing OpenAI AgentKit agent', {
|
|
21614
|
+
name,
|
|
21615
|
+
instructionsLength: instructions.length,
|
|
21616
|
+
knowledgeSourcesCount: (_a = knowledgeSources === null || knowledgeSources === void 0 ? void 0 : knowledgeSources.length) !== null && _a !== void 0 ? _a : 0,
|
|
21617
|
+
toolsCount: (_b = tools === null || tools === void 0 ? void 0 : tools.length) !== null && _b !== void 0 ? _b : 0,
|
|
21618
|
+
});
|
|
21619
|
+
}
|
|
21620
|
+
let vectorStoreId = cachedVectorStoreId;
|
|
21621
|
+
if (!vectorStoreId && knowledgeSources && knowledgeSources.length > 0) {
|
|
21622
|
+
const vectorStoreResult = await this.createVectorStoreWithKnowledgeSources({
|
|
21623
|
+
client: await this.getClient(),
|
|
21624
|
+
name,
|
|
21625
|
+
knowledgeSources,
|
|
21626
|
+
logLabel: 'agentkit preparation',
|
|
21627
|
+
});
|
|
21628
|
+
vectorStoreId = vectorStoreResult.vectorStoreId;
|
|
21629
|
+
}
|
|
21630
|
+
else if (vectorStoreId && this.options.isVerbose) {
|
|
21631
|
+
console.info('[🤰]', 'Using cached vector store for AgentKit agent', {
|
|
21632
|
+
name,
|
|
21633
|
+
vectorStoreId,
|
|
21634
|
+
});
|
|
21635
|
+
}
|
|
21636
|
+
const agentKitTools = this.buildAgentKitTools({ tools, vectorStoreId });
|
|
21637
|
+
const openAiAgentKitAgent = new Agent$1({
|
|
21638
|
+
name,
|
|
21639
|
+
model: this.agentKitModelName,
|
|
21640
|
+
instructions: instructions || 'You are a helpful assistant.',
|
|
21641
|
+
tools: agentKitTools,
|
|
21642
|
+
});
|
|
21643
|
+
const preparedAgent = {
|
|
21644
|
+
agent: openAiAgentKitAgent,
|
|
21645
|
+
vectorStoreId,
|
|
21646
|
+
};
|
|
21647
|
+
if (storeAsPrepared) {
|
|
21648
|
+
this.setPreparedAgentKitAgent(preparedAgent);
|
|
21649
|
+
}
|
|
21650
|
+
if (this.options.isVerbose) {
|
|
21651
|
+
console.info('[🤰]', 'OpenAI AgentKit agent ready', {
|
|
21652
|
+
name,
|
|
21653
|
+
model: this.agentKitModelName,
|
|
21654
|
+
toolCount: agentKitTools.length,
|
|
21655
|
+
hasVectorStore: Boolean(vectorStoreId),
|
|
21656
|
+
});
|
|
21657
|
+
}
|
|
21658
|
+
return preparedAgent;
|
|
21659
|
+
}
|
|
21660
|
+
/**
|
|
21661
|
+
* Ensures the AgentKit SDK is wired to the OpenAI client and API key.
|
|
21662
|
+
*/
|
|
21663
|
+
async ensureAgentKitDefaults() {
|
|
21664
|
+
const client = await this.getClient();
|
|
21665
|
+
setDefaultOpenAIClient(client);
|
|
21666
|
+
const apiKey = this.agentKitOptions.apiKey;
|
|
21667
|
+
if (apiKey && typeof apiKey === 'string') {
|
|
21668
|
+
setDefaultOpenAIKey(apiKey);
|
|
21669
|
+
}
|
|
21670
|
+
}
|
|
21671
|
+
/**
|
|
21672
|
+
* Builds the tool list for AgentKit, including hosted file search when applicable.
|
|
21673
|
+
*/
|
|
21674
|
+
buildAgentKitTools(options) {
|
|
21675
|
+
var _a;
|
|
21676
|
+
const { tools, vectorStoreId } = options;
|
|
21677
|
+
const agentKitTools = [];
|
|
21678
|
+
if (vectorStoreId) {
|
|
21679
|
+
agentKitTools.push(fileSearchTool(vectorStoreId));
|
|
21680
|
+
}
|
|
21681
|
+
if (tools && tools.length > 0) {
|
|
21682
|
+
const scriptTools = this.resolveScriptTools();
|
|
21683
|
+
for (const toolDefinition of tools) {
|
|
21684
|
+
agentKitTools.push(tool({
|
|
21685
|
+
name: toolDefinition.name,
|
|
21686
|
+
description: toolDefinition.description,
|
|
21687
|
+
parameters: toolDefinition.parameters
|
|
21688
|
+
? {
|
|
21689
|
+
...toolDefinition.parameters,
|
|
21690
|
+
additionalProperties: false,
|
|
21691
|
+
required: (_a = toolDefinition.parameters.required) !== null && _a !== void 0 ? _a : [],
|
|
21692
|
+
}
|
|
21693
|
+
: undefined,
|
|
21694
|
+
strict: false,
|
|
21695
|
+
execute: async (input, runContext, details) => {
|
|
21696
|
+
var _a, _b, _c;
|
|
21697
|
+
const scriptTool = scriptTools[0];
|
|
21698
|
+
const functionName = toolDefinition.name;
|
|
21699
|
+
const calledAt = $getCurrentDate();
|
|
21700
|
+
const callId = (_a = details === null || details === void 0 ? void 0 : details.toolCall) === null || _a === void 0 ? void 0 : _a.callId;
|
|
21701
|
+
const functionArgs = input !== null && input !== void 0 ? input : {};
|
|
21702
|
+
if (this.options.isVerbose) {
|
|
21703
|
+
console.info('[🤰]', 'Executing AgentKit tool', {
|
|
21704
|
+
functionName,
|
|
21705
|
+
callId,
|
|
21706
|
+
calledAt,
|
|
21707
|
+
});
|
|
21708
|
+
}
|
|
21709
|
+
try {
|
|
21710
|
+
return await scriptTool.execute({
|
|
21711
|
+
scriptLanguage: 'javascript',
|
|
21712
|
+
script: `
|
|
21713
|
+
const args = ${JSON.stringify(functionArgs)};
|
|
21714
|
+
return await ${functionName}(args);
|
|
21715
|
+
`,
|
|
21716
|
+
parameters: (_c = (_b = runContext === null || runContext === void 0 ? void 0 : runContext.context) === null || _b === void 0 ? void 0 : _b.parameters) !== null && _c !== void 0 ? _c : {},
|
|
21717
|
+
});
|
|
21718
|
+
}
|
|
21719
|
+
catch (error) {
|
|
21720
|
+
assertsError(error);
|
|
21721
|
+
const serializedError = serializeError(error);
|
|
21722
|
+
const errorMessage = spaceTrim$2((block) => `
|
|
21723
|
+
|
|
21724
|
+
The invoked tool \`${functionName}\` failed with error:
|
|
21725
|
+
|
|
21726
|
+
\`\`\`json
|
|
21727
|
+
${block(JSON.stringify(serializedError, null, 4))}
|
|
21728
|
+
\`\`\`
|
|
21729
|
+
|
|
21730
|
+
`);
|
|
21731
|
+
console.error('[🤰]', 'AgentKit tool execution failed', {
|
|
21732
|
+
functionName,
|
|
21733
|
+
callId,
|
|
21734
|
+
error: serializedError,
|
|
21735
|
+
});
|
|
21736
|
+
return errorMessage;
|
|
21737
|
+
}
|
|
21738
|
+
},
|
|
21739
|
+
}));
|
|
21740
|
+
}
|
|
20462
21741
|
}
|
|
20463
|
-
|
|
20464
|
-
|
|
20465
|
-
|
|
20466
|
-
|
|
20467
|
-
|
|
20468
|
-
|
|
20469
|
-
|
|
20470
|
-
|
|
21742
|
+
return agentKitTools;
|
|
21743
|
+
}
|
|
21744
|
+
/**
|
|
21745
|
+
* Resolves the configured script tools for tool execution.
|
|
21746
|
+
*/
|
|
21747
|
+
resolveScriptTools() {
|
|
21748
|
+
const executionTools = this.options.executionTools;
|
|
21749
|
+
if (!executionTools || !executionTools.script) {
|
|
21750
|
+
throw new PipelineExecutionError(`Model requested tools but no executionTools.script were provided in OpenAiAgentKitExecutionTools options`);
|
|
20471
21751
|
}
|
|
21752
|
+
return Array.isArray(executionTools.script) ? executionTools.script : [executionTools.script];
|
|
21753
|
+
}
|
|
21754
|
+
/**
|
|
21755
|
+
* Runs a prepared AgentKit agent and streams results back to the caller.
|
|
21756
|
+
*/
|
|
21757
|
+
async callChatModelStreamWithPreparedAgent(options) {
|
|
21758
|
+
var _a, _b, _c, _d;
|
|
21759
|
+
const { openAiAgentKitAgent, prompt, onProgress } = options;
|
|
21760
|
+
const rawPromptContent = (_a = options.rawPromptContent) !== null && _a !== void 0 ? _a : templateParameters(prompt.content, {
|
|
21761
|
+
...prompt.parameters,
|
|
21762
|
+
modelName: this.agentKitModelName,
|
|
21763
|
+
});
|
|
21764
|
+
const agentForRun = options.responseFormatOutputType !== undefined
|
|
21765
|
+
? openAiAgentKitAgent.clone({
|
|
21766
|
+
outputType: options.responseFormatOutputType,
|
|
21767
|
+
})
|
|
21768
|
+
: openAiAgentKitAgent;
|
|
20472
21769
|
const start = $getCurrentDate();
|
|
20473
|
-
|
|
21770
|
+
let latestContent = '';
|
|
21771
|
+
const toolCalls = [];
|
|
21772
|
+
const toolCallIndexById = new Map();
|
|
21773
|
+
const inputItems = await this.buildAgentKitInputItems(prompt, rawPromptContent);
|
|
20474
21774
|
const rawRequest = {
|
|
20475
|
-
|
|
20476
|
-
|
|
20477
|
-
input,
|
|
20478
|
-
instructions: modelRequirements.systemMessage,
|
|
20479
|
-
tools: agentTools.length > 0 ? agentTools : undefined,
|
|
20480
|
-
tool_resources: toolResources,
|
|
20481
|
-
store: false, // Stateless by default as we pass full history
|
|
21775
|
+
agentName: agentForRun.name,
|
|
21776
|
+
input: inputItems,
|
|
20482
21777
|
};
|
|
20483
|
-
|
|
20484
|
-
|
|
20485
|
-
|
|
20486
|
-
|
|
20487
|
-
|
|
20488
|
-
|
|
20489
|
-
|
|
20490
|
-
|
|
20491
|
-
|
|
20492
|
-
|
|
20493
|
-
|
|
20494
|
-
|
|
20495
|
-
|
|
20496
|
-
|
|
20497
|
-
|
|
20498
|
-
|
|
20499
|
-
|
|
20500
|
-
|
|
20501
|
-
|
|
20502
|
-
|
|
20503
|
-
|
|
20504
|
-
|
|
20505
|
-
|
|
20506
|
-
|
|
21778
|
+
const streamResult = await run(agentForRun, inputItems, {
|
|
21779
|
+
stream: true,
|
|
21780
|
+
context: { parameters: prompt.parameters },
|
|
21781
|
+
});
|
|
21782
|
+
for await (const event of streamResult) {
|
|
21783
|
+
if (event.type === 'raw_model_stream_event' && ((_b = event.data) === null || _b === void 0 ? void 0 : _b.type) === 'output_text_delta') {
|
|
21784
|
+
latestContent += event.data.delta;
|
|
21785
|
+
onProgress({
|
|
21786
|
+
content: latestContent,
|
|
21787
|
+
modelName: this.agentKitModelName,
|
|
21788
|
+
timing: { start, complete: $getCurrentDate() },
|
|
21789
|
+
usage: UNCERTAIN_USAGE,
|
|
21790
|
+
rawPromptContent: rawPromptContent,
|
|
21791
|
+
rawRequest: null,
|
|
21792
|
+
rawResponse: {},
|
|
21793
|
+
});
|
|
21794
|
+
continue;
|
|
21795
|
+
}
|
|
21796
|
+
if (event.type === 'run_item_stream_event') {
|
|
21797
|
+
const rawItem = (_c = event.item) === null || _c === void 0 ? void 0 : _c.rawItem;
|
|
21798
|
+
if (event.name === 'tool_called' && (rawItem === null || rawItem === void 0 ? void 0 : rawItem.type) === 'function_call') {
|
|
21799
|
+
const toolCall = {
|
|
21800
|
+
name: rawItem.name,
|
|
21801
|
+
arguments: rawItem.arguments,
|
|
21802
|
+
rawToolCall: rawItem,
|
|
21803
|
+
createdAt: $getCurrentDate(),
|
|
21804
|
+
};
|
|
21805
|
+
toolCallIndexById.set(rawItem.callId, toolCalls.length);
|
|
21806
|
+
toolCalls.push(toolCall);
|
|
21807
|
+
onProgress({
|
|
21808
|
+
content: latestContent,
|
|
21809
|
+
modelName: this.agentKitModelName,
|
|
21810
|
+
timing: { start, complete: $getCurrentDate() },
|
|
21811
|
+
usage: UNCERTAIN_USAGE,
|
|
21812
|
+
rawPromptContent: rawPromptContent,
|
|
21813
|
+
rawRequest: null,
|
|
21814
|
+
rawResponse: {},
|
|
21815
|
+
toolCalls: [toolCall],
|
|
21816
|
+
});
|
|
21817
|
+
}
|
|
21818
|
+
if (event.name === 'tool_output' && (rawItem === null || rawItem === void 0 ? void 0 : rawItem.type) === 'function_call_result') {
|
|
21819
|
+
const index = toolCallIndexById.get(rawItem.callId);
|
|
21820
|
+
const result = this.formatAgentKitToolOutput(rawItem.output);
|
|
21821
|
+
if (index !== undefined) {
|
|
21822
|
+
const existingToolCall = toolCalls[index];
|
|
21823
|
+
const completedToolCall = {
|
|
21824
|
+
...existingToolCall,
|
|
21825
|
+
result,
|
|
21826
|
+
rawToolCall: rawItem,
|
|
21827
|
+
};
|
|
21828
|
+
toolCalls[index] = completedToolCall;
|
|
21829
|
+
onProgress({
|
|
21830
|
+
content: latestContent,
|
|
21831
|
+
modelName: this.agentKitModelName,
|
|
21832
|
+
timing: { start, complete: $getCurrentDate() },
|
|
21833
|
+
usage: UNCERTAIN_USAGE,
|
|
21834
|
+
rawPromptContent: rawPromptContent,
|
|
21835
|
+
rawRequest: null,
|
|
21836
|
+
rawResponse: {},
|
|
21837
|
+
toolCalls: [completedToolCall],
|
|
21838
|
+
});
|
|
20507
21839
|
}
|
|
20508
21840
|
}
|
|
20509
|
-
else if (item.type === 'function_call') ;
|
|
20510
21841
|
}
|
|
20511
21842
|
}
|
|
20512
|
-
|
|
20513
|
-
|
|
20514
|
-
|
|
20515
|
-
|
|
20516
|
-
|
|
20517
|
-
|
|
20518
|
-
content: resultContent,
|
|
20519
|
-
modelName: response.model || 'agent',
|
|
21843
|
+
await streamResult.completed;
|
|
21844
|
+
const complete = $getCurrentDate();
|
|
21845
|
+
const finalContent = ((_d = streamResult.finalOutput) !== null && _d !== void 0 ? _d : latestContent);
|
|
21846
|
+
const finalResult = {
|
|
21847
|
+
content: finalContent,
|
|
21848
|
+
modelName: this.agentKitModelName,
|
|
20520
21849
|
timing: { start, complete },
|
|
20521
21850
|
usage: UNCERTAIN_USAGE,
|
|
20522
|
-
rawPromptContent,
|
|
21851
|
+
rawPromptContent: rawPromptContent,
|
|
20523
21852
|
rawRequest,
|
|
20524
|
-
rawResponse:
|
|
20525
|
-
|
|
20526
|
-
|
|
20527
|
-
|
|
20528
|
-
|
|
20529
|
-
order: [],
|
|
20530
|
-
value: {
|
|
20531
|
-
content: resultContent,
|
|
20532
|
-
modelName: response.model || 'agent',
|
|
20533
|
-
timing: { start, complete },
|
|
20534
|
-
usage: UNCERTAIN_USAGE,
|
|
20535
|
-
rawPromptContent,
|
|
20536
|
-
rawRequest,
|
|
20537
|
-
rawResponse: response,
|
|
20538
|
-
toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
|
|
20539
|
-
},
|
|
20540
|
-
});
|
|
21853
|
+
rawResponse: { runResult: streamResult },
|
|
21854
|
+
toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
|
|
21855
|
+
};
|
|
21856
|
+
onProgress(finalResult);
|
|
21857
|
+
return finalResult;
|
|
20541
21858
|
}
|
|
20542
21859
|
/**
|
|
20543
|
-
*
|
|
21860
|
+
* Builds AgentKit input items from the prompt and optional thread.
|
|
20544
21861
|
*/
|
|
20545
|
-
|
|
20546
|
-
|
|
20547
|
-
const
|
|
20548
|
-
|
|
20549
|
-
|
|
20550
|
-
|
|
20551
|
-
|
|
20552
|
-
|
|
20553
|
-
|
|
20554
|
-
|
|
20555
|
-
|
|
20556
|
-
|
|
20557
|
-
|
|
20558
|
-
const response = await fetch(source);
|
|
20559
|
-
if (!response.ok) {
|
|
20560
|
-
console.error(`Failed to download ${source}: ${response.statusText}`);
|
|
20561
|
-
continue;
|
|
20562
|
-
}
|
|
20563
|
-
const buffer = await response.arrayBuffer();
|
|
20564
|
-
const filename = source.split('/').pop() || 'downloaded-file';
|
|
20565
|
-
const blob = new Blob([buffer]);
|
|
20566
|
-
const file = new File([blob], filename);
|
|
20567
|
-
fileStreams.push(file);
|
|
21862
|
+
async buildAgentKitInputItems(prompt, rawPromptContent) {
|
|
21863
|
+
var _a;
|
|
21864
|
+
const inputItems = [];
|
|
21865
|
+
if ('thread' in prompt && Array.isArray(prompt.thread)) {
|
|
21866
|
+
for (const message of prompt.thread) {
|
|
21867
|
+
const sender = message.sender;
|
|
21868
|
+
const content = (_a = message.content) !== null && _a !== void 0 ? _a : '';
|
|
21869
|
+
if (sender === 'assistant' || sender === 'agent') {
|
|
21870
|
+
inputItems.push({
|
|
21871
|
+
role: 'assistant',
|
|
21872
|
+
status: 'completed',
|
|
21873
|
+
content: [{ type: 'output_text', text: content }],
|
|
21874
|
+
});
|
|
20568
21875
|
}
|
|
20569
21876
|
else {
|
|
20570
|
-
|
|
21877
|
+
inputItems.push({
|
|
21878
|
+
role: 'user',
|
|
21879
|
+
content,
|
|
21880
|
+
});
|
|
20571
21881
|
}
|
|
20572
21882
|
}
|
|
20573
|
-
catch (error) {
|
|
20574
|
-
console.error(`Error processing knowledge source ${source}:`, error);
|
|
20575
|
-
}
|
|
20576
21883
|
}
|
|
20577
|
-
|
|
20578
|
-
|
|
20579
|
-
|
|
20580
|
-
|
|
20581
|
-
|
|
20582
|
-
|
|
20583
|
-
|
|
20584
|
-
|
|
20585
|
-
|
|
21884
|
+
const userContent = await this.buildAgentKitUserContent(prompt, rawPromptContent);
|
|
21885
|
+
inputItems.push({
|
|
21886
|
+
role: 'user',
|
|
21887
|
+
content: userContent,
|
|
21888
|
+
});
|
|
21889
|
+
return inputItems;
|
|
21890
|
+
}
|
|
21891
|
+
/**
|
|
21892
|
+
* Builds the user message content for AgentKit runs, including file inputs when provided.
|
|
21893
|
+
*/
|
|
21894
|
+
async buildAgentKitUserContent(prompt, rawPromptContent) {
|
|
21895
|
+
if ('files' in prompt && Array.isArray(prompt.files) && prompt.files.length > 0) {
|
|
21896
|
+
const fileItems = await Promise.all(prompt.files.map(async (file) => {
|
|
21897
|
+
const arrayBuffer = await file.arrayBuffer();
|
|
21898
|
+
const base64 = Buffer.from(arrayBuffer).toString('base64');
|
|
21899
|
+
return {
|
|
21900
|
+
type: 'input_image',
|
|
21901
|
+
image: `data:${file.type};base64,${base64}`,
|
|
21902
|
+
};
|
|
21903
|
+
}));
|
|
21904
|
+
return [{ type: 'input_text', text: rawPromptContent }, ...fileItems];
|
|
21905
|
+
}
|
|
21906
|
+
return rawPromptContent;
|
|
21907
|
+
}
|
|
21908
|
+
/**
|
|
21909
|
+
* Normalizes AgentKit tool outputs into a string for Promptbook tool call results.
|
|
21910
|
+
*/
|
|
21911
|
+
formatAgentKitToolOutput(output) {
|
|
21912
|
+
if (typeof output === 'string') {
|
|
21913
|
+
return output;
|
|
21914
|
+
}
|
|
21915
|
+
if (output && typeof output === 'object') {
|
|
21916
|
+
const textOutput = output;
|
|
21917
|
+
if (textOutput.type === 'text' && typeof textOutput.text === 'string') {
|
|
21918
|
+
return textOutput.text;
|
|
20586
21919
|
}
|
|
20587
21920
|
}
|
|
20588
|
-
return
|
|
21921
|
+
return JSON.stringify(output !== null && output !== void 0 ? output : null);
|
|
20589
21922
|
}
|
|
20590
21923
|
/**
|
|
20591
|
-
*
|
|
21924
|
+
* Returns AgentKit-specific options.
|
|
21925
|
+
*/
|
|
21926
|
+
get agentKitOptions() {
|
|
21927
|
+
return this.options;
|
|
21928
|
+
}
|
|
21929
|
+
/**
|
|
21930
|
+
* Discriminant for type guards.
|
|
20592
21931
|
*/
|
|
20593
21932
|
get discriminant() {
|
|
20594
|
-
return
|
|
21933
|
+
return DISCRIMINANT$1;
|
|
20595
21934
|
}
|
|
20596
21935
|
/**
|
|
20597
|
-
* Type guard to check if given `LlmExecutionTools` are instanceof `
|
|
21936
|
+
* Type guard to check if given `LlmExecutionTools` are instanceof `OpenAiAgentKitExecutionTools`.
|
|
20598
21937
|
*/
|
|
20599
|
-
static
|
|
20600
|
-
return llmExecutionTools.discriminant ===
|
|
21938
|
+
static isOpenAiAgentKitExecutionTools(llmExecutionTools) {
|
|
21939
|
+
return llmExecutionTools.discriminant === DISCRIMINANT$1;
|
|
20601
21940
|
}
|
|
20602
21941
|
}
|
|
21942
|
+
/**
|
|
21943
|
+
* Discriminant for type guards.
|
|
21944
|
+
*
|
|
21945
|
+
* @private const of `OpenAiAgentKitExecutionTools`
|
|
21946
|
+
*/
|
|
21947
|
+
const DISCRIMINANT$1 = 'OPEN_AI_AGENT_KIT_V1';
|
|
20603
21948
|
|
|
20604
21949
|
/**
|
|
20605
21950
|
* Uploads files to OpenAI and returns their IDs
|
|
@@ -20634,10 +21979,10 @@ async function uploadFilesToOpenAi(client, files) {
|
|
|
20634
21979
|
* - `OpenAiAssistantExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities, recommended for usage in `Agent` or `AgentLlmExecutionTools`
|
|
20635
21980
|
* - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
|
|
20636
21981
|
*
|
|
21982
|
+
* @deprecated Use `OpenAiAgentKitExecutionTools` instead.
|
|
20637
21983
|
* @public exported from `@promptbook/openai`
|
|
20638
|
-
* @deprecated Use `OpenAiAgentExecutionTools` instead which uses the new OpenAI Responses API
|
|
20639
21984
|
*/
|
|
20640
|
-
class OpenAiAssistantExecutionTools extends
|
|
21985
|
+
class OpenAiAssistantExecutionTools extends OpenAiVectorStoreHandler {
|
|
20641
21986
|
/**
|
|
20642
21987
|
* Creates OpenAI Execution Tools.
|
|
20643
21988
|
*
|
|
@@ -20766,8 +22111,7 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
|
|
|
20766
22111
|
console.info(colors.bgWhite('rawRequest (non-streaming with tools)'), JSON.stringify(rawRequest, null, 4));
|
|
20767
22112
|
}
|
|
20768
22113
|
// Create thread and run
|
|
20769
|
-
|
|
20770
|
-
let run = threadAndRun;
|
|
22114
|
+
let run = (await client.beta.threads.createAndRun(rawRequest));
|
|
20771
22115
|
const completedToolCalls = [];
|
|
20772
22116
|
const toolCallStartedAt = new Map();
|
|
20773
22117
|
// Poll until run completes or requires action
|
|
@@ -20862,14 +22206,14 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
|
|
|
20862
22206
|
}
|
|
20863
22207
|
}
|
|
20864
22208
|
// Submit tool outputs
|
|
20865
|
-
run = await client.beta.threads.runs.submitToolOutputs(run.thread_id, run.id, {
|
|
22209
|
+
run = (await client.beta.threads.runs.submitToolOutputs(run.thread_id, run.id, {
|
|
20866
22210
|
tool_outputs: toolOutputs,
|
|
20867
|
-
});
|
|
22211
|
+
}));
|
|
20868
22212
|
}
|
|
20869
22213
|
else {
|
|
20870
22214
|
// Wait a bit before polling again
|
|
20871
22215
|
await new Promise((resolve) => setTimeout(resolve, 500));
|
|
20872
|
-
run = await client.beta.threads.runs.retrieve(run.thread_id, run.id);
|
|
22216
|
+
run = (await client.beta.threads.runs.retrieve(run.thread_id, run.id));
|
|
20873
22217
|
}
|
|
20874
22218
|
}
|
|
20875
22219
|
if (run.status !== 'completed') {
|
|
@@ -21068,6 +22412,7 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
|
|
|
21068
22412
|
getAssistant(assistantId) {
|
|
21069
22413
|
return new OpenAiAssistantExecutionTools({
|
|
21070
22414
|
...this.options,
|
|
22415
|
+
isCreatingNewAssistantsAllowed: this.isCreatingNewAssistantsAllowed,
|
|
21071
22416
|
assistantId,
|
|
21072
22417
|
});
|
|
21073
22418
|
}
|
|
@@ -21093,88 +22438,13 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
|
|
|
21093
22438
|
let vectorStoreId;
|
|
21094
22439
|
// If knowledge sources are provided, create a vector store with them
|
|
21095
22440
|
if (knowledgeSources && knowledgeSources.length > 0) {
|
|
21096
|
-
|
|
21097
|
-
|
|
21098
|
-
|
|
21099
|
-
|
|
21100
|
-
|
|
21101
|
-
}
|
|
21102
|
-
// Create a vector store
|
|
21103
|
-
const vectorStore = await client.beta.vectorStores.create({
|
|
21104
|
-
name: `${name} Knowledge Base`,
|
|
22441
|
+
const vectorStoreResult = await this.createVectorStoreWithKnowledgeSources({
|
|
22442
|
+
client,
|
|
22443
|
+
name,
|
|
22444
|
+
knowledgeSources,
|
|
22445
|
+
logLabel: 'assistant creation',
|
|
21105
22446
|
});
|
|
21106
|
-
vectorStoreId =
|
|
21107
|
-
if (this.options.isVerbose) {
|
|
21108
|
-
console.info('[🤰]', 'Vector store created', {
|
|
21109
|
-
vectorStoreId,
|
|
21110
|
-
});
|
|
21111
|
-
}
|
|
21112
|
-
// Upload files from knowledge sources to the vector store
|
|
21113
|
-
const fileStreams = [];
|
|
21114
|
-
for (const [index, source] of knowledgeSources.entries()) {
|
|
21115
|
-
try {
|
|
21116
|
-
if (this.options.isVerbose) {
|
|
21117
|
-
console.info('[🤰]', 'Processing knowledge source', {
|
|
21118
|
-
index: index + 1,
|
|
21119
|
-
total: knowledgeSources.length,
|
|
21120
|
-
source,
|
|
21121
|
-
sourceType: source.startsWith('http') || source.startsWith('https') ? 'url' : 'file',
|
|
21122
|
-
});
|
|
21123
|
-
}
|
|
21124
|
-
// Check if it's a URL
|
|
21125
|
-
if (source.startsWith('http://') || source.startsWith('https://')) {
|
|
21126
|
-
// Download the file
|
|
21127
|
-
const response = await fetch(source);
|
|
21128
|
-
if (!response.ok) {
|
|
21129
|
-
console.error(`Failed to download ${source}: ${response.statusText}`);
|
|
21130
|
-
continue;
|
|
21131
|
-
}
|
|
21132
|
-
const buffer = await response.arrayBuffer();
|
|
21133
|
-
let filename = source.split('/').pop() || 'downloaded-file';
|
|
21134
|
-
try {
|
|
21135
|
-
const url = new URL(source);
|
|
21136
|
-
filename = url.pathname.split('/').pop() || filename;
|
|
21137
|
-
}
|
|
21138
|
-
catch (error) {
|
|
21139
|
-
// Keep default filename
|
|
21140
|
-
}
|
|
21141
|
-
const blob = new Blob([buffer]);
|
|
21142
|
-
const file = new File([blob], filename);
|
|
21143
|
-
fileStreams.push(file);
|
|
21144
|
-
}
|
|
21145
|
-
else {
|
|
21146
|
-
/*
|
|
21147
|
-
TODO: [🐱🚀] Resolve problem with browser environment
|
|
21148
|
-
// Assume it's a local file path
|
|
21149
|
-
// Note: This will work in Node.js environment
|
|
21150
|
-
// For browser environments, this would need different handling
|
|
21151
|
-
const fs = await import('fs');
|
|
21152
|
-
const fileStream = fs.createReadStream(source);
|
|
21153
|
-
fileStreams.push(fileStream);
|
|
21154
|
-
*/
|
|
21155
|
-
}
|
|
21156
|
-
}
|
|
21157
|
-
catch (error) {
|
|
21158
|
-
console.error(`Error processing knowledge source ${source}:`, error);
|
|
21159
|
-
}
|
|
21160
|
-
}
|
|
21161
|
-
// Batch upload files to the vector store
|
|
21162
|
-
if (fileStreams.length > 0) {
|
|
21163
|
-
try {
|
|
21164
|
-
await client.beta.vectorStores.fileBatches.uploadAndPoll(vectorStoreId, {
|
|
21165
|
-
files: fileStreams,
|
|
21166
|
-
});
|
|
21167
|
-
if (this.options.isVerbose) {
|
|
21168
|
-
console.info('[🤰]', 'Uploaded files to vector store', {
|
|
21169
|
-
vectorStoreId,
|
|
21170
|
-
fileCount: fileStreams.length,
|
|
21171
|
-
});
|
|
21172
|
-
}
|
|
21173
|
-
}
|
|
21174
|
-
catch (error) {
|
|
21175
|
-
console.error('Error uploading files to vector store:', error);
|
|
21176
|
-
}
|
|
21177
|
-
}
|
|
22447
|
+
vectorStoreId = vectorStoreResult.vectorStoreId;
|
|
21178
22448
|
}
|
|
21179
22449
|
// Create assistant with vector store attached
|
|
21180
22450
|
const assistantConfig = {
|
|
@@ -21241,91 +22511,14 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
|
|
|
21241
22511
|
const client = await this.getClient();
|
|
21242
22512
|
let vectorStoreId;
|
|
21243
22513
|
// If knowledge sources are provided, create a vector store with them
|
|
21244
|
-
// TODO: [🧠] Reuse vector store creation logic from createNewAssistant
|
|
21245
22514
|
if (knowledgeSources && knowledgeSources.length > 0) {
|
|
21246
|
-
|
|
21247
|
-
|
|
21248
|
-
|
|
21249
|
-
|
|
21250
|
-
|
|
21251
|
-
});
|
|
21252
|
-
}
|
|
21253
|
-
// Create a vector store
|
|
21254
|
-
const vectorStore = await client.beta.vectorStores.create({
|
|
21255
|
-
name: `${name} Knowledge Base`,
|
|
22515
|
+
const vectorStoreResult = await this.createVectorStoreWithKnowledgeSources({
|
|
22516
|
+
client,
|
|
22517
|
+
name: name !== null && name !== void 0 ? name : assistantId,
|
|
22518
|
+
knowledgeSources,
|
|
22519
|
+
logLabel: 'assistant update',
|
|
21256
22520
|
});
|
|
21257
|
-
vectorStoreId =
|
|
21258
|
-
if (this.options.isVerbose) {
|
|
21259
|
-
console.info('[🤰]', 'Vector store created for assistant update', {
|
|
21260
|
-
vectorStoreId,
|
|
21261
|
-
});
|
|
21262
|
-
}
|
|
21263
|
-
// Upload files from knowledge sources to the vector store
|
|
21264
|
-
const fileStreams = [];
|
|
21265
|
-
for (const [index, source] of knowledgeSources.entries()) {
|
|
21266
|
-
try {
|
|
21267
|
-
if (this.options.isVerbose) {
|
|
21268
|
-
console.info('[🤰]', 'Processing knowledge source for update', {
|
|
21269
|
-
index: index + 1,
|
|
21270
|
-
total: knowledgeSources.length,
|
|
21271
|
-
source,
|
|
21272
|
-
sourceType: source.startsWith('http') || source.startsWith('https') ? 'url' : 'file',
|
|
21273
|
-
});
|
|
21274
|
-
}
|
|
21275
|
-
// Check if it's a URL
|
|
21276
|
-
if (source.startsWith('http://') || source.startsWith('https://')) {
|
|
21277
|
-
// Download the file
|
|
21278
|
-
const response = await fetch(source);
|
|
21279
|
-
if (!response.ok) {
|
|
21280
|
-
console.error(`Failed to download ${source}: ${response.statusText}`);
|
|
21281
|
-
continue;
|
|
21282
|
-
}
|
|
21283
|
-
const buffer = await response.arrayBuffer();
|
|
21284
|
-
let filename = source.split('/').pop() || 'downloaded-file';
|
|
21285
|
-
try {
|
|
21286
|
-
const url = new URL(source);
|
|
21287
|
-
filename = url.pathname.split('/').pop() || filename;
|
|
21288
|
-
}
|
|
21289
|
-
catch (error) {
|
|
21290
|
-
// Keep default filename
|
|
21291
|
-
}
|
|
21292
|
-
const blob = new Blob([buffer]);
|
|
21293
|
-
const file = new File([blob], filename);
|
|
21294
|
-
fileStreams.push(file);
|
|
21295
|
-
}
|
|
21296
|
-
else {
|
|
21297
|
-
/*
|
|
21298
|
-
TODO: [🐱🚀] Resolve problem with browser environment
|
|
21299
|
-
// Assume it's a local file path
|
|
21300
|
-
// Note: This will work in Node.js environment
|
|
21301
|
-
// For browser environments, this would need different handling
|
|
21302
|
-
const fs = await import('fs');
|
|
21303
|
-
const fileStream = fs.createReadStream(source);
|
|
21304
|
-
fileStreams.push(fileStream);
|
|
21305
|
-
*/
|
|
21306
|
-
}
|
|
21307
|
-
}
|
|
21308
|
-
catch (error) {
|
|
21309
|
-
console.error(`Error processing knowledge source ${source}:`, error);
|
|
21310
|
-
}
|
|
21311
|
-
}
|
|
21312
|
-
// Batch upload files to the vector store
|
|
21313
|
-
if (fileStreams.length > 0) {
|
|
21314
|
-
try {
|
|
21315
|
-
await client.beta.vectorStores.fileBatches.uploadAndPoll(vectorStoreId, {
|
|
21316
|
-
files: fileStreams,
|
|
21317
|
-
});
|
|
21318
|
-
if (this.options.isVerbose) {
|
|
21319
|
-
console.info('[🤰]', 'Uploaded files to vector store for update', {
|
|
21320
|
-
vectorStoreId,
|
|
21321
|
-
fileCount: fileStreams.length,
|
|
21322
|
-
});
|
|
21323
|
-
}
|
|
21324
|
-
}
|
|
21325
|
-
catch (error) {
|
|
21326
|
-
console.error('Error uploading files to vector store:', error);
|
|
21327
|
-
}
|
|
21328
|
-
}
|
|
22521
|
+
vectorStoreId = vectorStoreResult.vectorStoreId;
|
|
21329
22522
|
}
|
|
21330
22523
|
const assistantUpdate = {
|
|
21331
22524
|
name,
|
|
@@ -21429,8 +22622,8 @@ function emitAssistantPreparationProgress(options) {
|
|
|
21429
22622
|
* - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
|
|
21430
22623
|
* - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
|
|
21431
22624
|
* - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
|
|
21432
|
-
* - `OpenAiAgentExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with agent capabilities (using Responses API), recommended for usage in `Agent` or `AgentLlmExecutionTools`
|
|
21433
22625
|
* - `OpenAiAssistantExecutionTools` - (Deprecated) which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities
|
|
22626
|
+
* - `OpenAiAgentKitExecutionTools` - which is a specific implementation of `LlmExecutionTools` backed by OpenAI AgentKit
|
|
21434
22627
|
* - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
|
|
21435
22628
|
*
|
|
21436
22629
|
* @public exported from `@promptbook/core`
|
|
@@ -21565,97 +22758,129 @@ class AgentLlmExecutionTools {
|
|
|
21565
22758
|
* Calls the chat model with agent-specific system prompt and requirements with streaming
|
|
21566
22759
|
*/
|
|
21567
22760
|
async callChatModelStream(prompt, onProgress) {
|
|
22761
|
+
var _a, _b;
|
|
21568
22762
|
// Ensure we're working with a chat prompt
|
|
21569
22763
|
if (prompt.modelRequirements.modelVariant !== 'CHAT') {
|
|
21570
22764
|
throw new Error('AgentLlmExecutionTools only supports chat prompts');
|
|
21571
22765
|
}
|
|
21572
22766
|
const modelRequirements = await this.getModelRequirements();
|
|
22767
|
+
const { _metadata, promptSuffix, ...sanitizedRequirements } = modelRequirements;
|
|
21573
22768
|
const chatPrompt = prompt;
|
|
21574
22769
|
let underlyingLlmResult;
|
|
21575
|
-
|
|
22770
|
+
const chatPromptContentWithSuffix = promptSuffix
|
|
22771
|
+
? `${chatPrompt.content}\n\n${promptSuffix}`
|
|
22772
|
+
: chatPrompt.content;
|
|
21576
22773
|
const promptWithAgentModelRequirements = {
|
|
21577
22774
|
...chatPrompt,
|
|
22775
|
+
content: chatPromptContentWithSuffix,
|
|
21578
22776
|
modelRequirements: {
|
|
21579
22777
|
...chatPrompt.modelRequirements,
|
|
21580
|
-
...
|
|
22778
|
+
...sanitizedRequirements,
|
|
21581
22779
|
// Spread tools to convert readonly array to mutable
|
|
21582
|
-
tools:
|
|
22780
|
+
tools: sanitizedRequirements.tools
|
|
22781
|
+
? [...sanitizedRequirements.tools]
|
|
22782
|
+
: chatPrompt.modelRequirements.tools,
|
|
21583
22783
|
// Spread knowledgeSources to convert readonly array to mutable
|
|
21584
|
-
knowledgeSources:
|
|
21585
|
-
? [...
|
|
22784
|
+
knowledgeSources: sanitizedRequirements.knowledgeSources
|
|
22785
|
+
? [...sanitizedRequirements.knowledgeSources]
|
|
21586
22786
|
: undefined,
|
|
21587
22787
|
// Prepend agent system message to existing system message
|
|
21588
|
-
systemMessage:
|
|
22788
|
+
systemMessage: sanitizedRequirements.systemMessage +
|
|
21589
22789
|
(chatPrompt.modelRequirements.systemMessage
|
|
21590
22790
|
? `\n\n${chatPrompt.modelRequirements.systemMessage}`
|
|
21591
22791
|
: ''),
|
|
21592
22792
|
}, // Cast to avoid readonly mismatch from spread
|
|
21593
22793
|
};
|
|
21594
22794
|
console.log('!!!! promptWithAgentModelRequirements:', promptWithAgentModelRequirements);
|
|
21595
|
-
if (
|
|
21596
|
-
const requirementsHash = SHA256(JSON.stringify(
|
|
21597
|
-
const
|
|
21598
|
-
|
|
21599
|
-
|
|
22795
|
+
if (OpenAiAgentKitExecutionTools.isOpenAiAgentKitExecutionTools(this.options.llmTools)) {
|
|
22796
|
+
const requirementsHash = SHA256(JSON.stringify(sanitizedRequirements)).toString();
|
|
22797
|
+
const vectorStoreHash = SHA256(JSON.stringify((_a = sanitizedRequirements.knowledgeSources) !== null && _a !== void 0 ? _a : [])).toString();
|
|
22798
|
+
const cachedVectorStore = AgentLlmExecutionTools.vectorStoreCache.get(this.title);
|
|
22799
|
+
const cachedAgentKit = AgentLlmExecutionTools.agentKitAgentCache.get(this.title);
|
|
22800
|
+
let preparedAgentKit = this.options.assistantPreparationMode === 'external'
|
|
22801
|
+
? this.options.llmTools.getPreparedAgentKitAgent()
|
|
22802
|
+
: null;
|
|
22803
|
+
const vectorStoreId = (preparedAgentKit === null || preparedAgentKit === void 0 ? void 0 : preparedAgentKit.vectorStoreId) ||
|
|
22804
|
+
(cachedVectorStore && cachedVectorStore.requirementsHash === vectorStoreHash
|
|
22805
|
+
? cachedVectorStore.vectorStoreId
|
|
22806
|
+
: undefined);
|
|
22807
|
+
if (!preparedAgentKit && cachedAgentKit && cachedAgentKit.requirementsHash === requirementsHash) {
|
|
21600
22808
|
if (this.options.isVerbose) {
|
|
21601
|
-
console.
|
|
22809
|
+
console.info('[🤰]', 'Using cached OpenAI AgentKit agent', {
|
|
22810
|
+
agent: this.title,
|
|
22811
|
+
});
|
|
21602
22812
|
}
|
|
21603
|
-
|
|
21604
|
-
|
|
21605
|
-
|
|
21606
|
-
|
|
21607
|
-
// We can cast to access options if they were public, or use a method to clone.
|
|
21608
|
-
// OpenAiAgentExecutionTools doesn't have a clone method.
|
|
21609
|
-
// However, we can just assume the passed tool *might* not have the vector store yet, or we are replacing it.
|
|
21610
|
-
// Actually, if the passed tool IS OpenAiAgentExecutionTools, we should use it as a base.
|
|
21611
|
-
// TODO: [🧠] This is a bit hacky, accessing protected options or recreating tools.
|
|
21612
|
-
// Ideally OpenAiAgentExecutionTools should have a method `withVectorStoreId`.
|
|
21613
|
-
agentTools = new OpenAiAgentExecutionTools({
|
|
21614
|
-
...this.options.llmTools.options,
|
|
21615
|
-
vectorStoreId: cached.vectorStoreId,
|
|
21616
|
-
});
|
|
22813
|
+
preparedAgentKit = {
|
|
22814
|
+
agent: cachedAgentKit.agent,
|
|
22815
|
+
vectorStoreId: cachedAgentKit.vectorStoreId,
|
|
22816
|
+
};
|
|
21617
22817
|
}
|
|
21618
|
-
|
|
22818
|
+
if (!preparedAgentKit) {
|
|
21619
22819
|
if (this.options.isVerbose) {
|
|
21620
|
-
console.
|
|
21621
|
-
|
|
21622
|
-
|
|
21623
|
-
if (modelRequirements.knowledgeSources && modelRequirements.knowledgeSources.length > 0) {
|
|
21624
|
-
const client = await this.options.llmTools.getClient();
|
|
21625
|
-
vectorStoreId = await OpenAiAgentExecutionTools.createVectorStore(client, this.title, modelRequirements.knowledgeSources);
|
|
22820
|
+
console.info('[🤰]', 'Preparing OpenAI AgentKit agent', {
|
|
22821
|
+
agent: this.title,
|
|
22822
|
+
});
|
|
21626
22823
|
}
|
|
21627
|
-
if (vectorStoreId) {
|
|
21628
|
-
|
|
21629
|
-
|
|
21630
|
-
|
|
22824
|
+
if (!vectorStoreId && ((_b = sanitizedRequirements.knowledgeSources) === null || _b === void 0 ? void 0 : _b.length)) {
|
|
22825
|
+
emitAssistantPreparationProgress({
|
|
22826
|
+
onProgress,
|
|
22827
|
+
prompt,
|
|
22828
|
+
modelName: this.modelName,
|
|
22829
|
+
phase: 'Creating knowledge base',
|
|
21631
22830
|
});
|
|
21632
22831
|
}
|
|
21633
|
-
|
|
21634
|
-
|
|
22832
|
+
emitAssistantPreparationProgress({
|
|
22833
|
+
onProgress,
|
|
22834
|
+
prompt,
|
|
22835
|
+
modelName: this.modelName,
|
|
22836
|
+
phase: 'Preparing AgentKit agent',
|
|
22837
|
+
});
|
|
22838
|
+
preparedAgentKit = await this.options.llmTools.prepareAgentKitAgent({
|
|
22839
|
+
name: this.title,
|
|
22840
|
+
instructions: sanitizedRequirements.systemMessage || '',
|
|
22841
|
+
knowledgeSources: sanitizedRequirements.knowledgeSources,
|
|
22842
|
+
tools: sanitizedRequirements.tools ? [...sanitizedRequirements.tools] : undefined,
|
|
21635
22843
|
vectorStoreId,
|
|
21636
22844
|
});
|
|
21637
22845
|
}
|
|
21638
|
-
|
|
21639
|
-
|
|
21640
|
-
|
|
21641
|
-
|
|
21642
|
-
|
|
21643
|
-
|
|
21644
|
-
|
|
21645
|
-
|
|
21646
|
-
|
|
21647
|
-
|
|
21648
|
-
|
|
21649
|
-
|
|
21650
|
-
|
|
21651
|
-
|
|
22846
|
+
if (preparedAgentKit.vectorStoreId) {
|
|
22847
|
+
AgentLlmExecutionTools.vectorStoreCache.set(this.title, {
|
|
22848
|
+
vectorStoreId: preparedAgentKit.vectorStoreId,
|
|
22849
|
+
requirementsHash: vectorStoreHash,
|
|
22850
|
+
});
|
|
22851
|
+
}
|
|
22852
|
+
AgentLlmExecutionTools.agentKitAgentCache.set(this.title, {
|
|
22853
|
+
agent: preparedAgentKit.agent,
|
|
22854
|
+
requirementsHash,
|
|
22855
|
+
vectorStoreId: preparedAgentKit.vectorStoreId,
|
|
22856
|
+
});
|
|
22857
|
+
const responseFormatOutputType = mapResponseFormatToAgentOutputType(promptWithAgentModelRequirements.modelRequirements.responseFormat);
|
|
22858
|
+
underlyingLlmResult = await this.options.llmTools.callChatModelStreamWithPreparedAgent({
|
|
22859
|
+
openAiAgentKitAgent: preparedAgentKit.agent,
|
|
22860
|
+
prompt: promptWithAgentModelRequirements,
|
|
22861
|
+
onProgress,
|
|
22862
|
+
responseFormatOutputType,
|
|
22863
|
+
});
|
|
21652
22864
|
}
|
|
21653
22865
|
else if (OpenAiAssistantExecutionTools.isOpenAiAssistantExecutionTools(this.options.llmTools)) {
|
|
21654
22866
|
// ... deprecated path ...
|
|
21655
|
-
const requirementsHash = SHA256(JSON.stringify(
|
|
22867
|
+
const requirementsHash = SHA256(JSON.stringify(sanitizedRequirements)).toString();
|
|
21656
22868
|
const cached = AgentLlmExecutionTools.assistantCache.get(this.title);
|
|
21657
22869
|
let assistant;
|
|
21658
|
-
if (
|
|
22870
|
+
if (this.options.assistantPreparationMode === 'external') {
|
|
22871
|
+
assistant = this.options.llmTools;
|
|
22872
|
+
if (this.options.isVerbose) {
|
|
22873
|
+
console.info('[🤰]', 'Using externally managed OpenAI Assistant', {
|
|
22874
|
+
agent: this.title,
|
|
22875
|
+
assistantId: assistant.assistantId,
|
|
22876
|
+
});
|
|
22877
|
+
}
|
|
22878
|
+
AgentLlmExecutionTools.assistantCache.set(this.title, {
|
|
22879
|
+
assistantId: assistant.assistantId,
|
|
22880
|
+
requirementsHash,
|
|
22881
|
+
});
|
|
22882
|
+
}
|
|
22883
|
+
else if (cached) {
|
|
21659
22884
|
if (cached.requirementsHash === requirementsHash) {
|
|
21660
22885
|
if (this.options.isVerbose) {
|
|
21661
22886
|
console.info('[🤰]', 'Using cached OpenAI Assistant', {
|
|
@@ -21681,9 +22906,9 @@ class AgentLlmExecutionTools {
|
|
|
21681
22906
|
assistant = await this.options.llmTools.updateAssistant({
|
|
21682
22907
|
assistantId: cached.assistantId,
|
|
21683
22908
|
name: this.title,
|
|
21684
|
-
instructions:
|
|
21685
|
-
knowledgeSources:
|
|
21686
|
-
tools:
|
|
22909
|
+
instructions: sanitizedRequirements.systemMessage,
|
|
22910
|
+
knowledgeSources: sanitizedRequirements.knowledgeSources,
|
|
22911
|
+
tools: sanitizedRequirements.tools ? [...sanitizedRequirements.tools] : undefined,
|
|
21687
22912
|
});
|
|
21688
22913
|
AgentLlmExecutionTools.assistantCache.set(this.title, {
|
|
21689
22914
|
assistantId: assistant.assistantId,
|
|
@@ -21706,9 +22931,9 @@ class AgentLlmExecutionTools {
|
|
|
21706
22931
|
});
|
|
21707
22932
|
assistant = await this.options.llmTools.createNewAssistant({
|
|
21708
22933
|
name: this.title,
|
|
21709
|
-
instructions:
|
|
21710
|
-
knowledgeSources:
|
|
21711
|
-
tools:
|
|
22934
|
+
instructions: sanitizedRequirements.systemMessage,
|
|
22935
|
+
knowledgeSources: sanitizedRequirements.knowledgeSources,
|
|
22936
|
+
tools: sanitizedRequirements.tools ? [...sanitizedRequirements.tools] : undefined,
|
|
21712
22937
|
/*
|
|
21713
22938
|
!!!
|
|
21714
22939
|
metadata: {
|
|
@@ -21750,18 +22975,28 @@ class AgentLlmExecutionTools {
|
|
|
21750
22975
|
}
|
|
21751
22976
|
}
|
|
21752
22977
|
let content = underlyingLlmResult.content;
|
|
21753
|
-
|
|
21754
|
-
|
|
21755
|
-
|
|
21756
|
-
|
|
22978
|
+
if (typeof content === 'string') {
|
|
22979
|
+
// Note: Cleanup the AI artifacts from the content
|
|
22980
|
+
content = humanizeAiText(content);
|
|
22981
|
+
// Note: Make sure the content is Promptbook-like
|
|
22982
|
+
content = promptbookifyAiText(content);
|
|
22983
|
+
}
|
|
22984
|
+
else {
|
|
22985
|
+
// TODO: Maybe deep `humanizeAiText` + `promptbookifyAiText` inside of the object
|
|
22986
|
+
content = JSON.stringify(content);
|
|
22987
|
+
}
|
|
21757
22988
|
const agentResult = {
|
|
21758
22989
|
...underlyingLlmResult,
|
|
21759
|
-
content,
|
|
22990
|
+
content: content,
|
|
21760
22991
|
modelName: this.modelName,
|
|
21761
22992
|
};
|
|
21762
22993
|
return agentResult;
|
|
21763
22994
|
}
|
|
21764
22995
|
}
|
|
22996
|
+
/**
|
|
22997
|
+
* Cached AgentKit agents to avoid rebuilding identical instances.
|
|
22998
|
+
*/
|
|
22999
|
+
AgentLlmExecutionTools.agentKitAgentCache = new Map();
|
|
21765
23000
|
/**
|
|
21766
23001
|
* Cache of OpenAI assistants to avoid creating duplicates
|
|
21767
23002
|
*/
|
|
@@ -21842,8 +23077,8 @@ function buildTeacherSummary(commitments, used) {
|
|
|
21842
23077
|
* - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
|
|
21843
23078
|
* - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
|
|
21844
23079
|
* - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
|
|
21845
|
-
* - `OpenAiAgentExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with agent capabilities (using Responses API), recommended for usage in `Agent` or `AgentLlmExecutionTools`
|
|
21846
23080
|
* - `OpenAiAssistantExecutionTools` - (Deprecated) which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities
|
|
23081
|
+
* - `OpenAiAgentKitExecutionTools` - which is a specific implementation of `LlmExecutionTools` backed by OpenAI AgentKit
|
|
21847
23082
|
* - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
|
|
21848
23083
|
*
|
|
21849
23084
|
* @public exported from `@promptbook/core`
|
|
@@ -21874,6 +23109,7 @@ class Agent extends AgentLlmExecutionTools {
|
|
|
21874
23109
|
super({
|
|
21875
23110
|
isVerbose: options.isVerbose,
|
|
21876
23111
|
llmTools: getSingleLlmExecutionTools(options.executionTools.llm),
|
|
23112
|
+
assistantPreparationMode: options.assistantPreparationMode,
|
|
21877
23113
|
agentSource: agentSource.value, // <- TODO: [🐱🚀] Allow to pass BehaviorSubject<string_book> OR refresh llmExecutionTools.callChat on agentSource change
|
|
21878
23114
|
});
|
|
21879
23115
|
_Agent_instances.add(this);
|
|
@@ -21940,7 +23176,6 @@ class Agent extends AgentLlmExecutionTools {
|
|
|
21940
23176
|
* Note: This method also implements the learning mechanism
|
|
21941
23177
|
*/
|
|
21942
23178
|
async callChatModelStream(prompt, onProgress) {
|
|
21943
|
-
var _a;
|
|
21944
23179
|
// [1] Check if the user is asking the same thing as in the samples
|
|
21945
23180
|
const modelRequirements = await this.getModelRequirements();
|
|
21946
23181
|
if (modelRequirements.samples) {
|
|
@@ -21988,7 +23223,7 @@ class Agent extends AgentLlmExecutionTools {
|
|
|
21988
23223
|
if (result.rawResponse && 'sample' in result.rawResponse) {
|
|
21989
23224
|
return result;
|
|
21990
23225
|
}
|
|
21991
|
-
if (
|
|
23226
|
+
if (modelRequirements.isClosed) {
|
|
21992
23227
|
return result;
|
|
21993
23228
|
}
|
|
21994
23229
|
// Note: [0] Notify start of self-learning
|
|
@@ -22149,6 +23384,63 @@ async function _Agent_selfLearnTeacher(prompt, result) {
|
|
|
22149
23384
|
* TODO: [🧠][😰]Agent is not working with the parameters, should it be?
|
|
22150
23385
|
*/
|
|
22151
23386
|
|
|
23387
|
+
/**
|
|
23388
|
+
* Resolve a remote META IMAGE value into an absolute URL when possible.
|
|
23389
|
+
*/
|
|
23390
|
+
function resolveRemoteImageUrl(imageUrl, agentUrl) {
|
|
23391
|
+
if (!imageUrl) {
|
|
23392
|
+
return undefined;
|
|
23393
|
+
}
|
|
23394
|
+
if (imageUrl.startsWith('http://') ||
|
|
23395
|
+
imageUrl.startsWith('https://') ||
|
|
23396
|
+
imageUrl.startsWith('data:') ||
|
|
23397
|
+
imageUrl.startsWith('blob:')) {
|
|
23398
|
+
return imageUrl;
|
|
23399
|
+
}
|
|
23400
|
+
try {
|
|
23401
|
+
return new URL(imageUrl, agentUrl).href;
|
|
23402
|
+
}
|
|
23403
|
+
catch (_a) {
|
|
23404
|
+
return imageUrl;
|
|
23405
|
+
}
|
|
23406
|
+
}
|
|
23407
|
+
/**
|
|
23408
|
+
* Format a META commitment line when the value is provided.
|
|
23409
|
+
*/
|
|
23410
|
+
function formatMetaLine(label, value) {
|
|
23411
|
+
if (!value) {
|
|
23412
|
+
return null;
|
|
23413
|
+
}
|
|
23414
|
+
return `META ${label} ${value}`;
|
|
23415
|
+
}
|
|
/**
 * Build a minimal agent source snapshot for remote agents.
 *
 * @param {object} profile - Remote agent profile (`agentName`, optional `personaDescription`).
 * @param {object} [meta] - Optional META values (`fullname`, `image`, `description`, `color`, `font`, `link`).
 * @returns Agent source assembled via the `book` template tag.
 */
function buildRemoteAgentSource(profile, meta) {
    // Normalize once so the lookups below work for a missing meta object as well
    const metaValues = meta || {};
    // One `META <LABEL> <value>` line per provided value; absent values are skipped
    const metaEntries = [
        ['FULLNAME', metaValues.fullname],
        ['IMAGE', metaValues.image],
        ['DESCRIPTION', metaValues.description],
        ['COLOR', metaValues.color],
        ['FONT', metaValues.font],
        ['LINK', metaValues.link],
    ];
    const metaLines = metaEntries
        .map(([label, value]) => formatMetaLine(label, value))
        .filter((line) => Boolean(line))
        .join('\n');
    // PERSONA block is only included when a persona description exists
    let personaBlock = '';
    if (profile.personaDescription) {
        personaBlock = spaceTrim$2((block) => `
            PERSONA
            ${block(profile.personaDescription || '')}
        `);
    }
    return book `
        ${profile.agentName}

        ${metaLines}

        ${personaBlock}
    `;
}
|
|
22152
23444
|
/**
|
|
22153
23445
|
* Represents one AI Agent
|
|
22154
23446
|
*
|
|
@@ -22156,13 +23448,15 @@ async function _Agent_selfLearnTeacher(prompt, result) {
|
|
|
22156
23448
|
* - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
|
|
22157
23449
|
* - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
|
|
22158
23450
|
* - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
|
|
22159
|
-
* - `OpenAiAssistantExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities
|
|
23451
|
+
* - `OpenAiAssistantExecutionTools` - (Deprecated) which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities
|
|
23452
|
+
* - `OpenAiAgentKitExecutionTools` - which is a specific implementation of `LlmExecutionTools` backed by OpenAI AgentKit
|
|
22160
23453
|
* - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
|
|
22161
23454
|
*
|
|
22162
23455
|
* @public exported from `@promptbook/core`
|
|
22163
23456
|
*/
|
|
22164
23457
|
class RemoteAgent extends Agent {
|
|
22165
23458
|
static async connect(options) {
|
|
23459
|
+
var _a, _b, _c;
|
|
22166
23460
|
const agentProfileUrl = `${options.agentUrl}/api/profile`;
|
|
22167
23461
|
const profileResponse = await fetch(agentProfileUrl);
|
|
22168
23462
|
// <- TODO: [🐱🚀] What about closed-source agents?
|
|
@@ -22182,14 +23476,14 @@ class RemoteAgent extends Agent {
|
|
|
22182
23476
|
|
|
22183
23477
|
`));
|
|
22184
23478
|
}
|
|
22185
|
-
const profile = await profileResponse.json();
|
|
23479
|
+
const profile = (await profileResponse.json());
|
|
23480
|
+
const resolvedMeta = {
|
|
23481
|
+
...(profile.meta || {}),
|
|
23482
|
+
image: resolveRemoteImageUrl((_a = profile.meta) === null || _a === void 0 ? void 0 : _a.image, options.agentUrl),
|
|
23483
|
+
};
|
|
22186
23484
|
// Note: We are creating dummy agent source because we don't have the source from the remote agent
|
|
22187
23485
|
// But we populate the metadata from the profile
|
|
22188
|
-
const agentSource = new BehaviorSubject(
|
|
22189
|
-
${profile.agentName}
|
|
22190
|
-
|
|
22191
|
-
${profile.personaDescription}
|
|
22192
|
-
`);
|
|
23486
|
+
const agentSource = new BehaviorSubject(buildRemoteAgentSource(profile, resolvedMeta));
|
|
22193
23487
|
// <- TODO: [🐱🚀] createBookFromProfile
|
|
22194
23488
|
// <- TODO: [🐱🚀] Support updating and self-updating
|
|
22195
23489
|
const remoteAgent = new RemoteAgent({
|
|
@@ -22212,10 +23506,10 @@ class RemoteAgent extends Agent {
|
|
|
22212
23506
|
});
|
|
22213
23507
|
remoteAgent._remoteAgentName = profile.agentName;
|
|
22214
23508
|
remoteAgent._remoteAgentHash = profile.agentHash;
|
|
22215
|
-
remoteAgent.personaDescription = profile.personaDescription;
|
|
22216
|
-
remoteAgent.initialMessage = profile.initialMessage;
|
|
22217
|
-
remoteAgent.links = profile.links;
|
|
22218
|
-
remoteAgent.meta =
|
|
23509
|
+
remoteAgent.personaDescription = (_b = profile.personaDescription) !== null && _b !== void 0 ? _b : null;
|
|
23510
|
+
remoteAgent.initialMessage = (_c = profile.initialMessage) !== null && _c !== void 0 ? _c : null;
|
|
23511
|
+
remoteAgent.links = profile.links || [];
|
|
23512
|
+
remoteAgent.meta = resolvedMeta;
|
|
22219
23513
|
remoteAgent.capabilities = profile.capabilities || [];
|
|
22220
23514
|
remoteAgent.samples = profile.samples || [];
|
|
22221
23515
|
remoteAgent.toolTitles = profile.toolTitles || {};
|
|
@@ -22319,26 +23613,7 @@ class RemoteAgent extends Agent {
|
|
|
22319
23613
|
};
|
|
22320
23614
|
};
|
|
22321
23615
|
const getToolCallKey = (toolCall) => {
|
|
22322
|
-
|
|
22323
|
-
const rawId = (_a = toolCall.rawToolCall) === null || _a === void 0 ? void 0 : _a.id;
|
|
22324
|
-
if (rawId) {
|
|
22325
|
-
return `id:${rawId}`;
|
|
22326
|
-
}
|
|
22327
|
-
const argsKey = (() => {
|
|
22328
|
-
if (typeof toolCall.arguments === 'string') {
|
|
22329
|
-
return toolCall.arguments;
|
|
22330
|
-
}
|
|
22331
|
-
if (!toolCall.arguments) {
|
|
22332
|
-
return '';
|
|
22333
|
-
}
|
|
22334
|
-
try {
|
|
22335
|
-
return JSON.stringify(toolCall.arguments);
|
|
22336
|
-
}
|
|
22337
|
-
catch (_a) {
|
|
22338
|
-
return '';
|
|
22339
|
-
}
|
|
22340
|
-
})();
|
|
22341
|
-
return `${toolCall.name}:${toolCall.createdAt || ''}:${argsKey}`;
|
|
23616
|
+
return getToolCallIdentity(toolCall);
|
|
22342
23617
|
};
|
|
22343
23618
|
const mergeToolCall = (existing, incoming) => {
|
|
22344
23619
|
const incomingResult = incoming.result;
|