@promptbook/node 0.110.0-1 → 0.110.0-10
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/esm/index.es.js +1785 -510
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/components.index.d.ts +4 -0
- package/esm/typings/src/_packages/core.index.d.ts +2 -2
- package/esm/typings/src/_packages/openai.index.d.ts +8 -4
- package/esm/typings/src/_packages/types.index.d.ts +12 -4
- package/esm/typings/src/book-2.0/agent-source/AgentModelRequirements.d.ts +22 -21
- package/esm/typings/src/book-2.0/agent-source/AgentReferenceResolver.d.ts +18 -0
- package/esm/typings/src/book-2.0/agent-source/CreateAgentModelRequirementsOptions.d.ts +12 -0
- package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirements.d.ts +8 -2
- package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.agentReferenceResolver.test.d.ts +1 -0
- package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.d.ts +4 -5
- package/esm/typings/src/book-components/Chat/AgentChip/AgentChip.d.ts +5 -1
- package/esm/typings/src/book-components/Chat/Chat/ChatActionsBar.d.ts +4 -2
- package/esm/typings/src/book-components/Chat/Chat/ChatInputArea.d.ts +1 -0
- package/esm/typings/src/book-components/Chat/Chat/ChatMessageItem.d.ts +4 -0
- package/esm/typings/src/book-components/Chat/Chat/ChatMessageList.d.ts +1 -0
- package/esm/typings/src/book-components/Chat/Chat/ChatProps.d.ts +15 -0
- package/esm/typings/src/book-components/Chat/Chat/ChatSoundToggle.d.ts +31 -0
- package/esm/typings/src/book-components/Chat/LlmChat/LlmChatProps.d.ts +10 -1
- package/esm/typings/src/book-components/Chat/SourceChip/SourceChip.d.ts +5 -1
- package/esm/typings/src/book-components/Chat/utils/collectTeamToolCallSummary.d.ts +69 -0
- package/esm/typings/src/book-components/Chat/utils/getToolCallChipletInfo.d.ts +13 -13
- package/esm/typings/src/book-components/Chat/utils/parseCitationsFromContent.d.ts +9 -0
- package/esm/typings/src/book-components/Chat/utils/toolCallParsing.d.ts +4 -0
- package/esm/typings/src/collection/agent-collection/constructors/agent-collection-in-supabase/AgentsDatabaseSchema.d.ts +0 -3
- package/esm/typings/src/commitments/_base/BaseCommitmentDefinition.d.ts +9 -0
- package/esm/typings/src/execution/LlmExecutionTools.d.ts +2 -1
- package/esm/typings/src/llm-providers/agent/Agent.d.ts +1 -1
- package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.d.ts +5 -1
- package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.test.d.ts +1 -0
- package/esm/typings/src/llm-providers/agent/AgentOptions.d.ts +10 -0
- package/esm/typings/src/llm-providers/agent/CreateAgentLlmExecutionToolsOptions.d.ts +13 -2
- package/esm/typings/src/llm-providers/agent/RemoteAgent.d.ts +2 -1
- package/esm/typings/src/llm-providers/openai/OpenAiAgentKitExecutionTools.d.ts +150 -0
- package/esm/typings/src/llm-providers/openai/OpenAiAgentKitExecutionToolsOptions.d.ts +15 -0
- package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionTools.d.ts +3 -3
- package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionToolsOptions.d.ts +3 -4
- package/esm/typings/src/llm-providers/openai/OpenAiVectorStoreHandler.d.ts +135 -0
- package/esm/typings/src/llm-providers/openai/utils/mapToolsToOpenAi.d.ts +1 -1
- package/esm/typings/src/types/LlmToolDefinition.d.ts +1 -0
- package/esm/typings/src/types/ModelRequirements.d.ts +9 -0
- package/esm/typings/src/utils/DEFAULT_THINKING_MESSAGES.d.ts +8 -0
- package/esm/typings/src/utils/agents/resolveAgentAvatarImageUrl.d.ts +29 -0
- package/esm/typings/src/utils/knowledge/inlineKnowledgeSource.d.ts +38 -0
- package/esm/typings/src/utils/knowledge/inlineKnowledgeSource.test.d.ts +1 -0
- package/esm/typings/src/utils/language/getBrowserPreferredSpeechRecognitionLanguage.d.ts +35 -0
- package/esm/typings/src/utils/toolCalls/getToolCallIdentity.d.ts +10 -0
- package/esm/typings/src/version.d.ts +1 -1
- package/package.json +7 -3
- package/umd/index.umd.js +1788 -514
- package/umd/index.umd.js.map +1 -1
- package/esm/typings/src/llm-providers/openai/OpenAiAgentExecutionTools.d.ts +0 -43
- package/esm/typings/src/llm-providers/openai/createOpenAiAgentExecutionTools.d.ts +0 -11
package/esm/index.es.js
CHANGED
|
@@ -17,6 +17,7 @@ import { Subject, BehaviorSubject } from 'rxjs';
|
|
|
17
17
|
import moment from 'moment';
|
|
18
18
|
import { lookup, extension } from 'mime-types';
|
|
19
19
|
import { parse, unparse } from 'papaparse';
|
|
20
|
+
import { Agent as Agent$1, setDefaultOpenAIClient, setDefaultOpenAIKey, fileSearchTool, tool, run } from '@openai/agents';
|
|
20
21
|
import Bottleneck from 'bottleneck';
|
|
21
22
|
import OpenAI from 'openai';
|
|
22
23
|
|
|
@@ -34,7 +35,7 @@ const BOOK_LANGUAGE_VERSION = '2.0.0';
|
|
|
34
35
|
* @generated
|
|
35
36
|
* @see https://github.com/webgptorg/promptbook
|
|
36
37
|
*/
|
|
37
|
-
const PROMPTBOOK_ENGINE_VERSION = '0.110.0-
|
|
38
|
+
const PROMPTBOOK_ENGINE_VERSION = '0.110.0-10';
|
|
38
39
|
/**
|
|
39
40
|
* TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
|
|
40
41
|
* Note: [💞] Ignore a discrepancy between file name and entity name
|
|
@@ -13709,6 +13710,28 @@ class BaseCommitmentDefinition {
|
|
|
13709
13710
|
return currentMessage + separator + content;
|
|
13710
13711
|
});
|
|
13711
13712
|
}
|
|
13713
|
+
/**
|
|
13714
|
+
* Helper method to create a new requirements object with updated prompt suffix
|
|
13715
|
+
*/
|
|
13716
|
+
updatePromptSuffix(requirements, contentUpdate) {
|
|
13717
|
+
const newSuffix = typeof contentUpdate === 'string' ? contentUpdate : contentUpdate(requirements.promptSuffix);
|
|
13718
|
+
return {
|
|
13719
|
+
...requirements,
|
|
13720
|
+
promptSuffix: newSuffix,
|
|
13721
|
+
};
|
|
13722
|
+
}
|
|
13723
|
+
/**
|
|
13724
|
+
* Helper method to append content to the prompt suffix
|
|
13725
|
+
* Default separator is a single newline for bullet lists.
|
|
13726
|
+
*/
|
|
13727
|
+
appendToPromptSuffix(requirements, content, separator = '\n') {
|
|
13728
|
+
return this.updatePromptSuffix(requirements, (currentSuffix) => {
|
|
13729
|
+
if (!currentSuffix.trim()) {
|
|
13730
|
+
return content;
|
|
13731
|
+
}
|
|
13732
|
+
return `${currentSuffix}${separator}${content}`;
|
|
13733
|
+
});
|
|
13734
|
+
}
|
|
13712
13735
|
/**
|
|
13713
13736
|
* Helper method to add a comment section to the system message
|
|
13714
13737
|
* Comments are lines starting with # that will be removed from the final system message
|
|
@@ -13886,13 +13909,9 @@ class ClosedCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
13886
13909
|
`);
|
|
13887
13910
|
}
|
|
13888
13911
|
applyToAgentModelRequirements(requirements, _content) {
|
|
13889
|
-
const updatedMetadata = {
|
|
13890
|
-
...requirements.metadata,
|
|
13891
|
-
isClosed: true,
|
|
13892
|
-
};
|
|
13893
13912
|
return {
|
|
13894
13913
|
...requirements,
|
|
13895
|
-
|
|
13914
|
+
isClosed: true,
|
|
13896
13915
|
};
|
|
13897
13916
|
}
|
|
13898
13917
|
}
|
|
@@ -14170,12 +14189,12 @@ class DictionaryCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
14170
14189
|
return requirements;
|
|
14171
14190
|
}
|
|
14172
14191
|
// Get existing dictionary entries from metadata
|
|
14173
|
-
const existingDictionary = ((_a = requirements.
|
|
14192
|
+
const existingDictionary = ((_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.DICTIONARY) || '';
|
|
14174
14193
|
// Merge the new dictionary entry with existing entries
|
|
14175
14194
|
const mergedDictionary = existingDictionary ? `${existingDictionary}\n${trimmedContent}` : trimmedContent;
|
|
14176
14195
|
// Store the merged dictionary in metadata for debugging and inspection
|
|
14177
14196
|
const updatedMetadata = {
|
|
14178
|
-
...requirements.
|
|
14197
|
+
...requirements._metadata,
|
|
14179
14198
|
DICTIONARY: mergedDictionary,
|
|
14180
14199
|
};
|
|
14181
14200
|
// Create the dictionary section for the system message
|
|
@@ -14183,7 +14202,7 @@ class DictionaryCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
14183
14202
|
const dictionarySection = `# DICTIONARY\n${mergedDictionary}`;
|
|
14184
14203
|
return {
|
|
14185
14204
|
...this.appendToSystemMessage(requirements, dictionarySection),
|
|
14186
|
-
|
|
14205
|
+
_metadata: updatedMetadata,
|
|
14187
14206
|
};
|
|
14188
14207
|
}
|
|
14189
14208
|
}
|
|
@@ -14323,10 +14342,7 @@ class FromCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
14323
14342
|
applyToAgentModelRequirements(requirements, content) {
|
|
14324
14343
|
const trimmedContent = content.trim();
|
|
14325
14344
|
if (!trimmedContent) {
|
|
14326
|
-
return
|
|
14327
|
-
...requirements,
|
|
14328
|
-
parentAgentUrl: undefined,
|
|
14329
|
-
};
|
|
14345
|
+
return requirements;
|
|
14330
14346
|
}
|
|
14331
14347
|
if (trimmedContent.toUpperCase() === 'VOID' ||
|
|
14332
14348
|
trimmedContent.toUpperCase() === 'NULL' ||
|
|
@@ -14540,6 +14556,136 @@ class ImportCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
14540
14556
|
* Note: [💞] Ignore a discrepancy between file name and entity name
|
|
14541
14557
|
*/
|
|
14542
14558
|
|
|
14559
|
+
/**
|
|
14560
|
+
* @@@
|
|
14561
|
+
*
|
|
14562
|
+
* @private thing of inline knowledge
|
|
14563
|
+
*/
|
|
14564
|
+
const INLINE_KNOWLEDGE_BASE_NAME = 'inline-knowledge';
|
|
14565
|
+
/**
|
|
14566
|
+
* @@@
|
|
14567
|
+
*
|
|
14568
|
+
* @private thing of inline knowledge
|
|
14569
|
+
*/
|
|
14570
|
+
const INLINE_KNOWLEDGE_EXTENSION = '.txt';
|
|
14571
|
+
/**
|
|
14572
|
+
* @@@
|
|
14573
|
+
*
|
|
14574
|
+
* @private thing of inline knowledge
|
|
14575
|
+
*/
|
|
14576
|
+
const DATA_URL_PREFIX = 'data:';
|
|
14577
|
+
/**
|
|
14578
|
+
* @@@
|
|
14579
|
+
*
|
|
14580
|
+
* @private thing of inline knowledge
|
|
14581
|
+
*/
|
|
14582
|
+
function getFirstNonEmptyLine(content) {
|
|
14583
|
+
const lines = content.split(/\r?\n/);
|
|
14584
|
+
for (const line of lines) {
|
|
14585
|
+
const trimmed = line.trim();
|
|
14586
|
+
if (trimmed) {
|
|
14587
|
+
return trimmed;
|
|
14588
|
+
}
|
|
14589
|
+
}
|
|
14590
|
+
return null;
|
|
14591
|
+
}
|
|
14592
|
+
/**
|
|
14593
|
+
* @@@
|
|
14594
|
+
*
|
|
14595
|
+
* @private thing of inline knowledge
|
|
14596
|
+
*/
|
|
14597
|
+
function deriveBaseFilename(content) {
|
|
14598
|
+
const firstLine = getFirstNonEmptyLine(content);
|
|
14599
|
+
if (!firstLine) {
|
|
14600
|
+
return INLINE_KNOWLEDGE_BASE_NAME;
|
|
14601
|
+
}
|
|
14602
|
+
const normalized = normalizeToKebabCase(firstLine);
|
|
14603
|
+
return normalized || INLINE_KNOWLEDGE_BASE_NAME;
|
|
14604
|
+
}
|
|
14605
|
+
/**
|
|
14606
|
+
* Creates a data URL that represents the inline knowledge content as a text file.
|
|
14607
|
+
*
|
|
14608
|
+
* @private thing of inline knowledge
|
|
14609
|
+
*/
|
|
14610
|
+
function createInlineKnowledgeSourceFile(content) {
|
|
14611
|
+
const trimmedContent = content.trim();
|
|
14612
|
+
const baseName = deriveBaseFilename(trimmedContent);
|
|
14613
|
+
const filename = `${baseName}${INLINE_KNOWLEDGE_EXTENSION}`;
|
|
14614
|
+
const mimeType = 'text/plain';
|
|
14615
|
+
const base64 = Buffer.from(trimmedContent, 'utf-8').toString('base64');
|
|
14616
|
+
const encodedFilename = encodeURIComponent(filename);
|
|
14617
|
+
const url = `${DATA_URL_PREFIX}${mimeType};name=${encodedFilename};charset=utf-8;base64,${base64}`;
|
|
14618
|
+
return {
|
|
14619
|
+
filename,
|
|
14620
|
+
mimeType,
|
|
14621
|
+
url,
|
|
14622
|
+
};
|
|
14623
|
+
}
|
|
14624
|
+
/**
|
|
14625
|
+
* Checks whether the provided source string is a data URL that can be decoded.
|
|
14626
|
+
*
|
|
14627
|
+
* @private thing of inline knowledge
|
|
14628
|
+
*/
|
|
14629
|
+
function isDataUrlKnowledgeSource(source) {
|
|
14630
|
+
return typeof source === 'string' && source.startsWith(DATA_URL_PREFIX);
|
|
14631
|
+
}
|
|
14632
|
+
/**
|
|
14633
|
+
* Parses a data URL-based knowledge source into its raw buffer, filename, and MIME type.
|
|
14634
|
+
*
|
|
14635
|
+
* @private thing of inline knowledge
|
|
14636
|
+
*/
|
|
14637
|
+
function parseDataUrlKnowledgeSource(source) {
|
|
14638
|
+
if (!isDataUrlKnowledgeSource(source)) {
|
|
14639
|
+
return null;
|
|
14640
|
+
}
|
|
14641
|
+
const commaIndex = source.indexOf(',');
|
|
14642
|
+
if (commaIndex === -1) {
|
|
14643
|
+
return null;
|
|
14644
|
+
}
|
|
14645
|
+
const header = source.slice(DATA_URL_PREFIX.length, commaIndex);
|
|
14646
|
+
const payload = source.slice(commaIndex + 1);
|
|
14647
|
+
const tokens = header.split(';');
|
|
14648
|
+
const mediaType = tokens[0] || 'text/plain';
|
|
14649
|
+
let filename = `${INLINE_KNOWLEDGE_BASE_NAME}${INLINE_KNOWLEDGE_EXTENSION}`;
|
|
14650
|
+
let isBase64 = false;
|
|
14651
|
+
for (let i = 1; i < tokens.length; i++) {
|
|
14652
|
+
const token = tokens[i];
|
|
14653
|
+
if (!token) {
|
|
14654
|
+
continue;
|
|
14655
|
+
}
|
|
14656
|
+
if (token.toLowerCase() === 'base64') {
|
|
14657
|
+
isBase64 = true;
|
|
14658
|
+
continue;
|
|
14659
|
+
}
|
|
14660
|
+
const [key, value] = token.split('=');
|
|
14661
|
+
if (key === 'name' && value !== undefined) {
|
|
14662
|
+
try {
|
|
14663
|
+
filename = decodeURIComponent(value);
|
|
14664
|
+
}
|
|
14665
|
+
catch (_a) {
|
|
14666
|
+
filename = value;
|
|
14667
|
+
}
|
|
14668
|
+
}
|
|
14669
|
+
}
|
|
14670
|
+
if (!isBase64) {
|
|
14671
|
+
return null;
|
|
14672
|
+
}
|
|
14673
|
+
try {
|
|
14674
|
+
const buffer = Buffer.from(payload, 'base64');
|
|
14675
|
+
return {
|
|
14676
|
+
buffer,
|
|
14677
|
+
filename,
|
|
14678
|
+
mimeType: mediaType,
|
|
14679
|
+
};
|
|
14680
|
+
}
|
|
14681
|
+
catch (_b) {
|
|
14682
|
+
return null;
|
|
14683
|
+
}
|
|
14684
|
+
}
|
|
14685
|
+
/**
|
|
14686
|
+
* Note: [💞] Ignore a discrepancy between file name and entity name
|
|
14687
|
+
*/
|
|
14688
|
+
|
|
14543
14689
|
/**
|
|
14544
14690
|
* KNOWLEDGE commitment definition
|
|
14545
14691
|
*
|
|
@@ -14638,9 +14784,13 @@ class KnowledgeCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
14638
14784
|
return this.appendToSystemMessage(updatedRequirements, knowledgeInfo, '\n\n');
|
|
14639
14785
|
}
|
|
14640
14786
|
else {
|
|
14641
|
-
|
|
14642
|
-
const
|
|
14643
|
-
|
|
14787
|
+
const inlineSource = createInlineKnowledgeSourceFile(trimmedContent);
|
|
14788
|
+
const updatedRequirements = {
|
|
14789
|
+
...requirements,
|
|
14790
|
+
knowledgeSources: [...(requirements.knowledgeSources || []), inlineSource.url],
|
|
14791
|
+
};
|
|
14792
|
+
const knowledgeInfo = `Knowledge Source Inline: ${inlineSource.filename} (derived from inline content and processed for retrieval during chat)`;
|
|
14793
|
+
return this.appendToSystemMessage(updatedRequirements, knowledgeInfo, '\n\n');
|
|
14644
14794
|
}
|
|
14645
14795
|
}
|
|
14646
14796
|
}
|
|
@@ -14887,16 +15037,16 @@ class AgentMessageCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
14887
15037
|
// and typically doesn't need to be added to the system prompt or model requirements directly.
|
|
14888
15038
|
// It is extracted separately for the chat interface.
|
|
14889
15039
|
var _a;
|
|
14890
|
-
const pendingUserMessage = (_a = requirements.
|
|
15040
|
+
const pendingUserMessage = (_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.pendingUserMessage;
|
|
14891
15041
|
if (pendingUserMessage) {
|
|
14892
15042
|
const newSample = { question: pendingUserMessage, answer: content };
|
|
14893
15043
|
const newSamples = [...(requirements.samples || []), newSample];
|
|
14894
|
-
const newMetadata = { ...requirements.
|
|
15044
|
+
const newMetadata = { ...requirements._metadata };
|
|
14895
15045
|
delete newMetadata.pendingUserMessage;
|
|
14896
15046
|
return {
|
|
14897
15047
|
...requirements,
|
|
14898
15048
|
samples: newSamples,
|
|
14899
|
-
|
|
15049
|
+
_metadata: newMetadata,
|
|
14900
15050
|
};
|
|
14901
15051
|
}
|
|
14902
15052
|
return requirements;
|
|
@@ -15144,8 +15294,8 @@ class UserMessageCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
15144
15294
|
applyToAgentModelRequirements(requirements, content) {
|
|
15145
15295
|
return {
|
|
15146
15296
|
...requirements,
|
|
15147
|
-
|
|
15148
|
-
...requirements.
|
|
15297
|
+
_metadata: {
|
|
15298
|
+
...requirements._metadata,
|
|
15149
15299
|
pendingUserMessage: content,
|
|
15150
15300
|
},
|
|
15151
15301
|
};
|
|
@@ -16003,11 +16153,7 @@ class NoteCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
16003
16153
|
if (trimmedContent === '') {
|
|
16004
16154
|
return requirements;
|
|
16005
16155
|
}
|
|
16006
|
-
|
|
16007
|
-
return {
|
|
16008
|
-
...requirements,
|
|
16009
|
-
notes: [...(requirements.notes || []), trimmedContent],
|
|
16010
|
-
};
|
|
16156
|
+
return requirements;
|
|
16011
16157
|
}
|
|
16012
16158
|
}
|
|
16013
16159
|
/**
|
|
@@ -16069,12 +16215,12 @@ class OpenCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
16069
16215
|
// Since OPEN is default, we can just ensure isClosed is false
|
|
16070
16216
|
// But to be explicit we can set it
|
|
16071
16217
|
const updatedMetadata = {
|
|
16072
|
-
...requirements.
|
|
16218
|
+
...requirements._metadata,
|
|
16073
16219
|
isClosed: false,
|
|
16074
16220
|
};
|
|
16075
16221
|
return {
|
|
16076
16222
|
...requirements,
|
|
16077
|
-
|
|
16223
|
+
_metadata: updatedMetadata,
|
|
16078
16224
|
};
|
|
16079
16225
|
}
|
|
16080
16226
|
}
|
|
@@ -16155,7 +16301,7 @@ class PersonaCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
16155
16301
|
return requirements;
|
|
16156
16302
|
}
|
|
16157
16303
|
// Get existing persona content from metadata
|
|
16158
|
-
const existingPersonaContent = ((_a = requirements.
|
|
16304
|
+
const existingPersonaContent = ((_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.PERSONA) || '';
|
|
16159
16305
|
// Merge the new content with existing persona content
|
|
16160
16306
|
// When multiple PERSONA commitments exist, they are merged into one
|
|
16161
16307
|
const mergedPersonaContent = existingPersonaContent
|
|
@@ -16163,12 +16309,12 @@ class PersonaCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
16163
16309
|
: trimmedContent;
|
|
16164
16310
|
// Store the merged persona content in metadata for debugging and inspection
|
|
16165
16311
|
const updatedMetadata = {
|
|
16166
|
-
...requirements.
|
|
16312
|
+
...requirements._metadata,
|
|
16167
16313
|
PERSONA: mergedPersonaContent,
|
|
16168
16314
|
};
|
|
16169
16315
|
// Get the agent name from metadata (which should contain the first line of agent source)
|
|
16170
16316
|
// If not available, extract from current system message as fallback
|
|
16171
|
-
let agentName = (_b = requirements.
|
|
16317
|
+
let agentName = (_b = requirements._metadata) === null || _b === void 0 ? void 0 : _b.agentName;
|
|
16172
16318
|
if (!agentName) {
|
|
16173
16319
|
// Fallback: extract from current system message
|
|
16174
16320
|
const currentMessage = requirements.systemMessage.trim();
|
|
@@ -16215,7 +16361,7 @@ class PersonaCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
16215
16361
|
return {
|
|
16216
16362
|
...requirements,
|
|
16217
16363
|
systemMessage: newSystemMessage,
|
|
16218
|
-
|
|
16364
|
+
_metadata: updatedMetadata,
|
|
16219
16365
|
};
|
|
16220
16366
|
}
|
|
16221
16367
|
}
|
|
@@ -16298,7 +16444,16 @@ class RuleCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
16298
16444
|
}
|
|
16299
16445
|
// Add rule to the system message
|
|
16300
16446
|
const ruleSection = `Rule: ${trimmedContent}`;
|
|
16301
|
-
|
|
16447
|
+
const requirementsWithRule = this.appendToSystemMessage(requirements, ruleSection, '\n\n');
|
|
16448
|
+
const ruleLines = trimmedContent
|
|
16449
|
+
.split(/\r?\n/)
|
|
16450
|
+
.map((line) => line.trim())
|
|
16451
|
+
.filter(Boolean)
|
|
16452
|
+
.map((line) => `- ${line}`);
|
|
16453
|
+
if (ruleLines.length === 0) {
|
|
16454
|
+
return requirementsWithRule;
|
|
16455
|
+
}
|
|
16456
|
+
return this.appendToPromptSuffix(requirementsWithRule, ruleLines.join('\n'));
|
|
16302
16457
|
}
|
|
16303
16458
|
}
|
|
16304
16459
|
/**
|
|
@@ -16804,7 +16959,7 @@ class TeamCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
16804
16959
|
if (teammates.length === 0) {
|
|
16805
16960
|
return requirements;
|
|
16806
16961
|
}
|
|
16807
|
-
const agentName = ((_a = requirements.
|
|
16962
|
+
const agentName = ((_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.agentName) || 'Agent';
|
|
16808
16963
|
const teamEntries = teammates.map((teammate) => ({
|
|
16809
16964
|
toolName: createTeamToolName(teammate.url),
|
|
16810
16965
|
teammate,
|
|
@@ -16844,7 +16999,7 @@ class TeamCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
16844
16999
|
},
|
|
16845
17000
|
});
|
|
16846
17001
|
}
|
|
16847
|
-
const existingTeammates = ((_b = requirements.
|
|
17002
|
+
const existingTeammates = ((_b = requirements._metadata) === null || _b === void 0 ? void 0 : _b.teammates) || [];
|
|
16848
17003
|
const updatedTeammates = [...existingTeammates];
|
|
16849
17004
|
for (const entry of teamEntries) {
|
|
16850
17005
|
if (updatedTeammates.some((existing) => existing.url === entry.teammate.url)) {
|
|
@@ -16873,8 +17028,8 @@ class TeamCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
16873
17028
|
return this.appendToSystemMessage({
|
|
16874
17029
|
...requirements,
|
|
16875
17030
|
tools: updatedTools,
|
|
16876
|
-
|
|
16877
|
-
...requirements.
|
|
17031
|
+
_metadata: {
|
|
17032
|
+
...requirements._metadata,
|
|
16878
17033
|
teammates: updatedTeammates,
|
|
16879
17034
|
},
|
|
16880
17035
|
}, teamSystemMessage);
|
|
@@ -16974,11 +17129,16 @@ function createTeamToolFunction(entry) {
|
|
|
16974
17129
|
const request = buildTeammateRequest(message, args.context);
|
|
16975
17130
|
let response = '';
|
|
16976
17131
|
let error = null;
|
|
17132
|
+
let toolCalls;
|
|
16977
17133
|
try {
|
|
16978
17134
|
const remoteAgent = await getRemoteTeammateAgent(entry.teammate.url);
|
|
16979
17135
|
const prompt = buildTeammatePrompt(request);
|
|
16980
17136
|
const teammateResult = await remoteAgent.callChatModel(prompt);
|
|
16981
17137
|
response = teammateResult.content || '';
|
|
17138
|
+
toolCalls =
|
|
17139
|
+
'toolCalls' in teammateResult && Array.isArray(teammateResult.toolCalls)
|
|
17140
|
+
? teammateResult.toolCalls
|
|
17141
|
+
: undefined;
|
|
16982
17142
|
}
|
|
16983
17143
|
catch (err) {
|
|
16984
17144
|
error = err instanceof Error ? err.message : String(err);
|
|
@@ -16988,6 +17148,7 @@ function createTeamToolFunction(entry) {
|
|
|
16988
17148
|
teammate: teammateMetadata,
|
|
16989
17149
|
request,
|
|
16990
17150
|
response: teammateReply,
|
|
17151
|
+
toolCalls: toolCalls && toolCalls.length > 0 ? toolCalls : undefined,
|
|
16991
17152
|
error,
|
|
16992
17153
|
conversation: [
|
|
16993
17154
|
{
|
|
@@ -17100,7 +17261,7 @@ class TemplateCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
17100
17261
|
if (!trimmedContent) {
|
|
17101
17262
|
// Store template mode flag in metadata
|
|
17102
17263
|
const updatedMetadata = {
|
|
17103
|
-
...requirements.
|
|
17264
|
+
...requirements._metadata,
|
|
17104
17265
|
templateMode: true,
|
|
17105
17266
|
};
|
|
17106
17267
|
// Add a general instruction about using structured templates
|
|
@@ -17110,21 +17271,21 @@ class TemplateCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
17110
17271
|
`);
|
|
17111
17272
|
return {
|
|
17112
17273
|
...this.appendToSystemMessage(requirements, templateModeInstruction, '\n\n'),
|
|
17113
|
-
|
|
17274
|
+
_metadata: updatedMetadata,
|
|
17114
17275
|
};
|
|
17115
17276
|
}
|
|
17116
17277
|
// If content is provided, add the specific template instructions
|
|
17117
17278
|
const templateSection = `Response Template: ${trimmedContent}`;
|
|
17118
17279
|
// Store the template in metadata for potential programmatic access
|
|
17119
|
-
const existingTemplates = ((_a = requirements.
|
|
17280
|
+
const existingTemplates = ((_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.templates) || [];
|
|
17120
17281
|
const updatedMetadata = {
|
|
17121
|
-
...requirements.
|
|
17282
|
+
...requirements._metadata,
|
|
17122
17283
|
templates: [...existingTemplates, trimmedContent],
|
|
17123
17284
|
templateMode: true,
|
|
17124
17285
|
};
|
|
17125
17286
|
return {
|
|
17126
17287
|
...this.appendToSystemMessage(requirements, templateSection, '\n\n'),
|
|
17127
|
-
|
|
17288
|
+
_metadata: updatedMetadata,
|
|
17128
17289
|
};
|
|
17129
17290
|
}
|
|
17130
17291
|
}
|
|
@@ -17461,8 +17622,8 @@ class UseBrowserCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
17461
17622
|
return this.appendToSystemMessage({
|
|
17462
17623
|
...requirements,
|
|
17463
17624
|
tools: updatedTools,
|
|
17464
|
-
|
|
17465
|
-
...requirements.
|
|
17625
|
+
_metadata: {
|
|
17626
|
+
...requirements._metadata,
|
|
17466
17627
|
useBrowser: true,
|
|
17467
17628
|
},
|
|
17468
17629
|
}, spaceTrim$1(`
|
|
@@ -17691,8 +17852,8 @@ class UseEmailCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
17691
17852
|
return this.appendToSystemMessage({
|
|
17692
17853
|
...requirements,
|
|
17693
17854
|
tools: updatedTools,
|
|
17694
|
-
|
|
17695
|
-
...requirements.
|
|
17855
|
+
_metadata: {
|
|
17856
|
+
...requirements._metadata,
|
|
17696
17857
|
useEmail: content || true,
|
|
17697
17858
|
},
|
|
17698
17859
|
}, spaceTrim$1((block) => `
|
|
@@ -17827,8 +17988,8 @@ class UseImageGeneratorCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
17827
17988
|
return this.appendToSystemMessage({
|
|
17828
17989
|
...requirements,
|
|
17829
17990
|
tools: updatedTools,
|
|
17830
|
-
|
|
17831
|
-
...requirements.
|
|
17991
|
+
_metadata: {
|
|
17992
|
+
...requirements._metadata,
|
|
17832
17993
|
useImageGenerator: content || true,
|
|
17833
17994
|
},
|
|
17834
17995
|
}, spaceTrim$1(`
|
|
@@ -18119,8 +18280,8 @@ class UseSearchEngineCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
18119
18280
|
return this.appendToSystemMessage({
|
|
18120
18281
|
...requirements,
|
|
18121
18282
|
tools: updatedTools,
|
|
18122
|
-
|
|
18123
|
-
...requirements.
|
|
18283
|
+
_metadata: {
|
|
18284
|
+
...requirements._metadata,
|
|
18124
18285
|
useSearchEngine: content || true,
|
|
18125
18286
|
},
|
|
18126
18287
|
}, spaceTrim$1((block) => `
|
|
@@ -18268,8 +18429,8 @@ class UseTimeCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
18268
18429
|
return this.appendToSystemMessage({
|
|
18269
18430
|
...requirements,
|
|
18270
18431
|
tools: updatedTools,
|
|
18271
|
-
|
|
18272
|
-
...requirements.
|
|
18432
|
+
_metadata: {
|
|
18433
|
+
...requirements._metadata,
|
|
18273
18434
|
},
|
|
18274
18435
|
}, spaceTrim$1((block) => `
|
|
18275
18436
|
Time and date context:
|
|
@@ -20303,6 +20464,40 @@ function isAssistantPreparationToolCall(toolCall) {
|
|
|
20303
20464
|
return toolCall.name === ASSISTANT_PREPARATION_TOOL_CALL_NAME;
|
|
20304
20465
|
}
|
|
20305
20466
|
|
|
20467
|
+
/**
|
|
20468
|
+
* Builds a stable identity string for tool calls across partial updates.
|
|
20469
|
+
*
|
|
20470
|
+
* @param toolCall - Tool call entry to identify.
|
|
20471
|
+
* @returns Stable identity string for deduplication.
|
|
20472
|
+
*
|
|
20473
|
+
* @private function of <Chat/>
|
|
20474
|
+
*/
|
|
20475
|
+
function getToolCallIdentity(toolCall) {
|
|
20476
|
+
const rawToolCall = toolCall.rawToolCall;
|
|
20477
|
+
const rawId = (rawToolCall === null || rawToolCall === void 0 ? void 0 : rawToolCall.id) || (rawToolCall === null || rawToolCall === void 0 ? void 0 : rawToolCall.callId) || (rawToolCall === null || rawToolCall === void 0 ? void 0 : rawToolCall.call_id);
|
|
20478
|
+
if (rawId) {
|
|
20479
|
+
return `id:${rawId}`;
|
|
20480
|
+
}
|
|
20481
|
+
if (toolCall.createdAt) {
|
|
20482
|
+
return `time:${toolCall.createdAt}:${toolCall.name}`;
|
|
20483
|
+
}
|
|
20484
|
+
const argsKey = (() => {
|
|
20485
|
+
if (typeof toolCall.arguments === 'string') {
|
|
20486
|
+
return toolCall.arguments;
|
|
20487
|
+
}
|
|
20488
|
+
if (!toolCall.arguments) {
|
|
20489
|
+
return '';
|
|
20490
|
+
}
|
|
20491
|
+
try {
|
|
20492
|
+
return JSON.stringify(toolCall.arguments);
|
|
20493
|
+
}
|
|
20494
|
+
catch (_a) {
|
|
20495
|
+
return '';
|
|
20496
|
+
}
|
|
20497
|
+
})();
|
|
20498
|
+
return `fallback:${toolCall.name}:${argsKey}`;
|
|
20499
|
+
}
|
|
20500
|
+
|
|
20306
20501
|
/*! *****************************************************************************
|
|
20307
20502
|
Copyright (c) Microsoft Corporation.
|
|
20308
20503
|
|
|
@@ -20941,11 +21136,14 @@ function asUpdatableSubject(value) {
|
|
|
20941
21136
|
function createEmptyAgentModelRequirements() {
|
|
20942
21137
|
return {
|
|
20943
21138
|
systemMessage: '',
|
|
21139
|
+
promptSuffix: '',
|
|
20944
21140
|
// modelName: 'gpt-5',
|
|
20945
21141
|
modelName: 'gemini-2.5-flash-lite',
|
|
20946
21142
|
temperature: 0.7,
|
|
20947
21143
|
topP: 0.9,
|
|
20948
21144
|
topK: 50,
|
|
21145
|
+
parentAgentUrl: null,
|
|
21146
|
+
isClosed: false,
|
|
20949
21147
|
};
|
|
20950
21148
|
}
|
|
20951
21149
|
/**
|
|
@@ -21091,14 +21289,26 @@ function removeCommentsFromSystemMessage(systemMessage) {
|
|
|
21091
21289
|
}
|
|
21092
21290
|
|
|
21093
21291
|
/**
|
|
21094
|
-
* Creates agent model requirements using the new commitment system
|
|
21292
|
+
* Creates agent model requirements using the new commitment system.
|
|
21293
|
+
*
|
|
21095
21294
|
* This function uses a reduce-like pattern where each commitment applies its changes
|
|
21096
|
-
* to build the final requirements starting from a basic empty model
|
|
21295
|
+
* to build the final requirements starting from a basic empty model.
|
|
21097
21296
|
*
|
|
21098
|
-
* @
|
|
21297
|
+
* @param agentSource - Agent source book to parse.
|
|
21298
|
+
* @param modelName - Optional override for the agent model name.
|
|
21299
|
+
* @param options - Additional options such as the agent reference resolver.
|
|
21300
|
+
*
|
|
21301
|
+
* @private @@@
|
|
21302
|
+
*/
|
|
21303
|
+
const COMMITMENTS_WITH_AGENT_REFERENCES = new Set(['FROM', 'IMPORT', 'IMPORTS', 'TEAM']);
|
|
21304
|
+
/**
|
|
21305
|
+
* @@@
|
|
21306
|
+
*
|
|
21307
|
+
* @private @@@
|
|
21099
21308
|
*/
|
|
21100
|
-
async function createAgentModelRequirementsWithCommitments(agentSource, modelName) {
|
|
21309
|
+
async function createAgentModelRequirementsWithCommitments(agentSource, modelName, options) {
|
|
21101
21310
|
var _a;
|
|
21311
|
+
const agentReferenceResolver = options === null || options === void 0 ? void 0 : options.agentReferenceResolver;
|
|
21102
21312
|
// Parse the agent source to extract commitments
|
|
21103
21313
|
const parseResult = parseAgentSourceWithCommitments(agentSource);
|
|
21104
21314
|
// Apply DELETE filtering: remove prior commitments tagged by parameters targeted by DELETE/CANCEL/DISCARD/REMOVE
|
|
@@ -21135,8 +21345,8 @@ async function createAgentModelRequirementsWithCommitments(agentSource, modelNam
|
|
|
21135
21345
|
// Store the agent name in metadata so commitments can access it
|
|
21136
21346
|
requirements = {
|
|
21137
21347
|
...requirements,
|
|
21138
|
-
|
|
21139
|
-
...requirements.
|
|
21348
|
+
_metadata: {
|
|
21349
|
+
...requirements._metadata,
|
|
21140
21350
|
agentName: parseResult.agentName,
|
|
21141
21351
|
},
|
|
21142
21352
|
};
|
|
@@ -21150,6 +21360,11 @@ async function createAgentModelRequirementsWithCommitments(agentSource, modelNam
|
|
|
21150
21360
|
// Apply each commitment in order using reduce-like pattern
|
|
21151
21361
|
for (let i = 0; i < filteredCommitments.length; i++) {
|
|
21152
21362
|
const commitment = filteredCommitments[i];
|
|
21363
|
+
const isReferenceCommitment = Boolean(agentReferenceResolver && COMMITMENTS_WITH_AGENT_REFERENCES.has(commitment.type));
|
|
21364
|
+
let commitmentContent = commitment.content;
|
|
21365
|
+
if (isReferenceCommitment && agentReferenceResolver) {
|
|
21366
|
+
commitmentContent = await agentReferenceResolver.resolveCommitmentContent(commitment.type, commitment.content);
|
|
21367
|
+
}
|
|
21153
21368
|
// CLOSED commitment should work only if its the last commitment in the book
|
|
21154
21369
|
if (commitment.type === 'CLOSED' && i !== filteredCommitments.length - 1) {
|
|
21155
21370
|
continue;
|
|
@@ -21157,7 +21372,7 @@ async function createAgentModelRequirementsWithCommitments(agentSource, modelNam
|
|
|
21157
21372
|
const definition = getCommitmentDefinition(commitment.type);
|
|
21158
21373
|
if (definition) {
|
|
21159
21374
|
try {
|
|
21160
|
-
requirements = definition.applyToAgentModelRequirements(requirements,
|
|
21375
|
+
requirements = definition.applyToAgentModelRequirements(requirements, commitmentContent);
|
|
21161
21376
|
}
|
|
21162
21377
|
catch (error) {
|
|
21163
21378
|
console.warn(`Failed to apply commitment ${commitment.type}:`, error);
|
|
@@ -21305,23 +21520,28 @@ function isBinaryMimeType(mimeType) {
|
|
|
21305
21520
|
}
|
|
21306
21521
|
|
|
21307
21522
|
/**
|
|
21308
|
-
* Creates model requirements for an agent based on its source
|
|
21523
|
+
* Creates model requirements for an agent based on its source.
|
|
21309
21524
|
*
|
|
21310
21525
|
* There are 2 similar functions:
|
|
21311
21526
|
* - `parseAgentSource` which is a lightweight parser for agent source, it parses basic information and its purpose is to be quick and synchronous. The commitments there are hardcoded.
|
|
21312
21527
|
* - `createAgentModelRequirements` which is an asynchronous function that creates model requirements it applies each commitment one by one and works asynchronous.
|
|
21313
21528
|
*
|
|
21529
|
+
* @param agentSource - Book describing the agent.
|
|
21530
|
+
* @param modelName - Optional override for the agent's model.
|
|
21531
|
+
* @param availableModels - Models that could fulfill the agent.
|
|
21532
|
+
* @param llmTools - Execution tools used when selecting a best model.
|
|
21533
|
+
* @param options - Optional hooks such as the agent reference resolver.
|
|
21314
21534
|
* @public exported from `@promptbook/core`
|
|
21315
21535
|
*/
|
|
21316
|
-
async function createAgentModelRequirements(agentSource, modelName, availableModels, llmTools) {
|
|
21536
|
+
async function createAgentModelRequirements(agentSource, modelName, availableModels, llmTools, options) {
|
|
21317
21537
|
// If availableModels are provided and no specific modelName is given,
|
|
21318
21538
|
// use preparePersona to select the best model
|
|
21319
21539
|
if (availableModels && !modelName && llmTools) {
|
|
21320
21540
|
const selectedModelName = await selectBestModelUsingPersona(agentSource, llmTools);
|
|
21321
|
-
return createAgentModelRequirementsWithCommitments(agentSource, selectedModelName);
|
|
21541
|
+
return createAgentModelRequirementsWithCommitments(agentSource, selectedModelName, options);
|
|
21322
21542
|
}
|
|
21323
21543
|
// Use the new commitment-based system with provided or default model
|
|
21324
|
-
return createAgentModelRequirementsWithCommitments(agentSource, modelName);
|
|
21544
|
+
return createAgentModelRequirementsWithCommitments(agentSource, modelName, options);
|
|
21325
21545
|
}
|
|
21326
21546
|
/**
|
|
21327
21547
|
* Selects the best model using the preparePersona function
|
|
@@ -21619,6 +21839,66 @@ const OPENAI_MODELS = exportJson({
|
|
|
21619
21839
|
},
|
|
21620
21840
|
/**/
|
|
21621
21841
|
/**/
|
|
21842
|
+
{
|
|
21843
|
+
modelVariant: 'CHAT',
|
|
21844
|
+
modelTitle: 'gpt-5.2-codex',
|
|
21845
|
+
modelName: 'gpt-5.2-codex',
|
|
21846
|
+
modelDescription: 'High-capability Codex variant tuned for agentic code generation with large contexts and reasoning effort controls. Ideal for long-horizon coding workflows and multi-step reasoning.',
|
|
21847
|
+
pricing: {
|
|
21848
|
+
prompt: pricing(`$1.75 / 1M tokens`),
|
|
21849
|
+
output: pricing(`$14.00 / 1M tokens`),
|
|
21850
|
+
},
|
|
21851
|
+
},
|
|
21852
|
+
/**/
|
|
21853
|
+
/**/
|
|
21854
|
+
{
|
|
21855
|
+
modelVariant: 'CHAT',
|
|
21856
|
+
modelTitle: 'gpt-5.1-codex-max',
|
|
21857
|
+
modelName: 'gpt-5.1-codex-max',
|
|
21858
|
+
modelDescription: 'Premium GPT-5.1 Codex flavor that mirrors gpt-5.1 in capability and pricing while adding Codex tooling optimizations.',
|
|
21859
|
+
pricing: {
|
|
21860
|
+
prompt: pricing(`$1.25 / 1M tokens`),
|
|
21861
|
+
output: pricing(`$10.00 / 1M tokens`),
|
|
21862
|
+
},
|
|
21863
|
+
},
|
|
21864
|
+
/**/
|
|
21865
|
+
/**/
|
|
21866
|
+
{
|
|
21867
|
+
modelVariant: 'CHAT',
|
|
21868
|
+
modelTitle: 'gpt-5.1-codex',
|
|
21869
|
+
modelName: 'gpt-5.1-codex',
|
|
21870
|
+
modelDescription: 'Core GPT-5.1 Codex model focused on agentic coding tasks with a balanced trade-off between reasoning and cost.',
|
|
21871
|
+
pricing: {
|
|
21872
|
+
prompt: pricing(`$1.25 / 1M tokens`),
|
|
21873
|
+
output: pricing(`$10.00 / 1M tokens`),
|
|
21874
|
+
},
|
|
21875
|
+
},
|
|
21876
|
+
/**/
|
|
21877
|
+
/**/
|
|
21878
|
+
{
|
|
21879
|
+
modelVariant: 'CHAT',
|
|
21880
|
+
modelTitle: 'gpt-5.1-codex-mini',
|
|
21881
|
+
modelName: 'gpt-5.1-codex-mini',
|
|
21882
|
+
modelDescription: 'Compact, cost-effective GPT-5.1 Codex variant with a smaller context window ideal for cheap assistant iterations that still require coding awareness.',
|
|
21883
|
+
pricing: {
|
|
21884
|
+
prompt: pricing(`$0.25 / 1M tokens`),
|
|
21885
|
+
output: pricing(`$2.00 / 1M tokens`),
|
|
21886
|
+
},
|
|
21887
|
+
},
|
|
21888
|
+
/**/
|
|
21889
|
+
/**/
|
|
21890
|
+
{
|
|
21891
|
+
modelVariant: 'CHAT',
|
|
21892
|
+
modelTitle: 'gpt-5-codex',
|
|
21893
|
+
modelName: 'gpt-5-codex',
|
|
21894
|
+
modelDescription: 'Legacy GPT-5 Codex model built for agentic coding workloads with the same pricing as GPT-5 and a focus on stability.',
|
|
21895
|
+
pricing: {
|
|
21896
|
+
prompt: pricing(`$1.25 / 1M tokens`),
|
|
21897
|
+
output: pricing(`$10.00 / 1M tokens`),
|
|
21898
|
+
},
|
|
21899
|
+
},
|
|
21900
|
+
/**/
|
|
21901
|
+
/**/
|
|
21622
21902
|
{
|
|
21623
21903
|
modelVariant: 'CHAT',
|
|
21624
21904
|
modelTitle: 'gpt-5-mini',
|
|
@@ -22323,6 +22603,32 @@ function isUnsupportedParameterError(error) {
|
|
|
22323
22603
|
errorMessage.includes('does not support'));
|
|
22324
22604
|
}
|
|
22325
22605
|
|
|
22606
|
+
/**
|
|
22607
|
+
* Provides access to the structured clone implementation when available.
|
|
22608
|
+
*/
|
|
22609
|
+
function getStructuredCloneFunction() {
|
|
22610
|
+
return globalThis.structuredClone;
|
|
22611
|
+
}
|
|
22612
|
+
/**
|
|
22613
|
+
* Checks whether the prompt is a chat prompt that carries file attachments.
|
|
22614
|
+
*/
|
|
22615
|
+
function hasChatPromptFiles(prompt) {
|
|
22616
|
+
return 'files' in prompt && Array.isArray(prompt.files);
|
|
22617
|
+
}
|
|
22618
|
+
/**
|
|
22619
|
+
* Creates a deep copy of the prompt while keeping attached files intact when structured clone is not available.
|
|
22620
|
+
*/
|
|
22621
|
+
function clonePromptPreservingFiles(prompt) {
|
|
22622
|
+
const structuredCloneFn = getStructuredCloneFunction();
|
|
22623
|
+
if (typeof structuredCloneFn === 'function') {
|
|
22624
|
+
return structuredCloneFn(prompt);
|
|
22625
|
+
}
|
|
22626
|
+
const clonedPrompt = JSON.parse(JSON.stringify(prompt));
|
|
22627
|
+
if (hasChatPromptFiles(prompt)) {
|
|
22628
|
+
clonedPrompt.files = prompt.files;
|
|
22629
|
+
}
|
|
22630
|
+
return clonedPrompt;
|
|
22631
|
+
}
|
|
22326
22632
|
/**
|
|
22327
22633
|
* Execution Tools for calling OpenAI API or other OpenAI compatible provider
|
|
22328
22634
|
*
|
|
@@ -22352,16 +22658,11 @@ class OpenAiCompatibleExecutionTools {
|
|
|
22352
22658
|
const openAiOptions = { ...this.options };
|
|
22353
22659
|
delete openAiOptions.isVerbose;
|
|
22354
22660
|
delete openAiOptions.userId;
|
|
22355
|
-
// Enhanced configuration
|
|
22661
|
+
// Enhanced configuration with retries and timeouts.
|
|
22356
22662
|
const enhancedOptions = {
|
|
22357
22663
|
...openAiOptions,
|
|
22358
22664
|
timeout: API_REQUEST_TIMEOUT,
|
|
22359
22665
|
maxRetries: CONNECTION_RETRIES_LIMIT,
|
|
22360
|
-
defaultHeaders: {
|
|
22361
|
-
Connection: 'keep-alive',
|
|
22362
|
-
'Keep-Alive': 'timeout=30, max=100',
|
|
22363
|
-
...openAiOptions.defaultHeaders,
|
|
22364
|
-
},
|
|
22365
22666
|
};
|
|
22366
22667
|
this.client = new OpenAI(enhancedOptions);
|
|
22367
22668
|
}
|
|
@@ -22412,7 +22713,7 @@ class OpenAiCompatibleExecutionTools {
|
|
|
22412
22713
|
*/
|
|
22413
22714
|
async callChatModelStream(prompt, onProgress) {
|
|
22414
22715
|
// Deep clone prompt and modelRequirements to avoid mutation across calls
|
|
22415
|
-
const clonedPrompt =
|
|
22716
|
+
const clonedPrompt = clonePromptPreservingFiles(prompt);
|
|
22416
22717
|
// Use local Set for retried parameters to ensure independence and thread safety
|
|
22417
22718
|
const retriedUnsupportedParameters = new Set();
|
|
22418
22719
|
return this.callChatModelWithRetry(clonedPrompt, clonedPrompt.modelRequirements, [], retriedUnsupportedParameters, onProgress);
|
|
@@ -22439,7 +22740,10 @@ class OpenAiCompatibleExecutionTools {
|
|
|
22439
22740
|
// <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
|
|
22440
22741
|
// <- Note: [🧆]
|
|
22441
22742
|
}; // <- TODO: [💩] Guard here types better
|
|
22442
|
-
if (
|
|
22743
|
+
if (currentModelRequirements.responseFormat !== undefined) {
|
|
22744
|
+
modelSettings.response_format = currentModelRequirements.responseFormat;
|
|
22745
|
+
}
|
|
22746
|
+
else if (format === 'JSON') {
|
|
22443
22747
|
modelSettings.response_format = {
|
|
22444
22748
|
type: 'json_object',
|
|
22445
22749
|
};
|
|
@@ -23250,18 +23554,6 @@ class OpenAiExecutionTools extends OpenAiCompatibleExecutionTools {
|
|
|
23250
23554
|
get profile() {
|
|
23251
23555
|
return OPENAI_PROVIDER_PROFILE;
|
|
23252
23556
|
}
|
|
23253
|
-
/*
|
|
23254
|
-
Note: Commenting this out to avoid circular dependency
|
|
23255
|
-
/**
|
|
23256
|
-
* Create (sub)tools for calling OpenAI API Assistants
|
|
23257
|
-
*
|
|
23258
|
-
* @param assistantId Which assistant to use
|
|
23259
|
-
* @returns Tools for calling OpenAI API Assistants with same token
|
|
23260
|
-
* /
|
|
23261
|
-
public createAssistantSubtools(assistantId: string_token): OpenAiAssistantExecutionTools {
|
|
23262
|
-
return new OpenAiAssistantExecutionTools({ ...this.options, assistantId });
|
|
23263
|
-
}
|
|
23264
|
-
*/
|
|
23265
23557
|
/**
|
|
23266
23558
|
* List all available models (non dynamically)
|
|
23267
23559
|
*
|
|
@@ -23296,206 +23588,1259 @@ class OpenAiExecutionTools extends OpenAiCompatibleExecutionTools {
|
|
|
23296
23588
|
}
|
|
23297
23589
|
}
|
|
23298
23590
|
|
|
23591
|
+
const DEFAULT_KNOWLEDGE_SOURCE_DOWNLOAD_TIMEOUT_MS = 30000;
|
|
23592
|
+
const DEFAULT_KNOWLEDGE_SOURCE_UPLOAD_TIMEOUT_MS = 900000;
|
|
23593
|
+
const VECTOR_STORE_PROGRESS_LOG_INTERVAL_MIN_MS = 15000;
|
|
23594
|
+
const VECTOR_STORE_STALL_LOG_THRESHOLD_MS = 30000;
|
|
23299
23595
|
/**
|
|
23300
|
-
*
|
|
23596
|
+
* Base class for OpenAI execution tools that need hosted vector stores.
|
|
23301
23597
|
*
|
|
23302
23598
|
* @public exported from `@promptbook/openai`
|
|
23303
23599
|
*/
|
|
23304
|
-
class
|
|
23305
|
-
|
|
23306
|
-
|
|
23307
|
-
|
|
23600
|
+
class OpenAiVectorStoreHandler extends OpenAiExecutionTools {
|
|
23601
|
+
/**
|
|
23602
|
+
* Returns the per-knowledge-source download timeout in milliseconds.
|
|
23603
|
+
*/
|
|
23604
|
+
getKnowledgeSourceDownloadTimeoutMs() {
|
|
23605
|
+
var _a;
|
|
23606
|
+
return (_a = this.vectorStoreOptions.knowledgeSourceDownloadTimeoutMs) !== null && _a !== void 0 ? _a : DEFAULT_KNOWLEDGE_SOURCE_DOWNLOAD_TIMEOUT_MS;
|
|
23308
23607
|
}
|
|
23309
|
-
|
|
23310
|
-
|
|
23608
|
+
/**
|
|
23609
|
+
* Returns the max concurrency for knowledge source uploads.
|
|
23610
|
+
*/
|
|
23611
|
+
getKnowledgeSourceUploadMaxConcurrency() {
|
|
23612
|
+
var _a;
|
|
23613
|
+
return (_a = this.vectorStoreOptions.knowledgeSourceUploadMaxConcurrency) !== null && _a !== void 0 ? _a : 5;
|
|
23311
23614
|
}
|
|
23312
|
-
|
|
23313
|
-
|
|
23615
|
+
/**
|
|
23616
|
+
* Returns the polling interval in milliseconds for vector store uploads.
|
|
23617
|
+
*/
|
|
23618
|
+
getKnowledgeSourceUploadPollIntervalMs() {
|
|
23619
|
+
var _a;
|
|
23620
|
+
return (_a = this.vectorStoreOptions.knowledgeSourceUploadPollIntervalMs) !== null && _a !== void 0 ? _a : 5000;
|
|
23314
23621
|
}
|
|
23315
23622
|
/**
|
|
23316
|
-
*
|
|
23623
|
+
* Returns the overall upload timeout in milliseconds for vector store uploads.
|
|
23317
23624
|
*/
|
|
23318
|
-
|
|
23625
|
+
getKnowledgeSourceUploadTimeoutMs() {
|
|
23626
|
+
var _a;
|
|
23627
|
+
return (_a = this.vectorStoreOptions.knowledgeSourceUploadTimeoutMs) !== null && _a !== void 0 ? _a : DEFAULT_KNOWLEDGE_SOURCE_UPLOAD_TIMEOUT_MS;
|
|
23628
|
+
}
|
|
23629
|
+
/**
|
|
23630
|
+
* Returns true if we should continue even if vector store ingestion stalls.
|
|
23631
|
+
*/
|
|
23632
|
+
shouldContinueOnVectorStoreStall() {
|
|
23633
|
+
var _a;
|
|
23634
|
+
return (_a = this.vectorStoreOptions.shouldContinueOnVectorStoreStall) !== null && _a !== void 0 ? _a : true;
|
|
23635
|
+
}
|
|
23636
|
+
/**
|
|
23637
|
+
* Returns vector-store-specific options with extended settings.
|
|
23638
|
+
*/
|
|
23639
|
+
get vectorStoreOptions() {
|
|
23640
|
+
return this.options;
|
|
23641
|
+
}
|
|
23642
|
+
/**
|
|
23643
|
+
* Returns the OpenAI vector stores API surface, supporting stable and beta SDKs.
|
|
23644
|
+
*/
|
|
23645
|
+
getVectorStoresApi(client) {
|
|
23646
|
+
var _a, _b;
|
|
23647
|
+
const vectorStores = (_a = client.vectorStores) !== null && _a !== void 0 ? _a : (_b = client.beta) === null || _b === void 0 ? void 0 : _b.vectorStores;
|
|
23648
|
+
if (!vectorStores) {
|
|
23649
|
+
throw new Error('OpenAI client does not support vector stores. Please ensure you are using a compatible version of the OpenAI SDK with vector store support.');
|
|
23650
|
+
}
|
|
23651
|
+
return vectorStores;
|
|
23652
|
+
}
|
|
23653
|
+
/**
|
|
23654
|
+
* Downloads a knowledge source URL into a File for vector store upload.
|
|
23655
|
+
*/
|
|
23656
|
+
async downloadKnowledgeSourceFile(options) {
|
|
23657
|
+
var _a;
|
|
23658
|
+
const { source, timeoutMs, logLabel } = options;
|
|
23659
|
+
const startedAtMs = Date.now();
|
|
23660
|
+
const controller = new AbortController();
|
|
23661
|
+
const timeoutId = setTimeout(() => controller.abort(), timeoutMs);
|
|
23319
23662
|
if (this.options.isVerbose) {
|
|
23320
|
-
console.info('
|
|
23663
|
+
console.info('[🤰]', 'Downloading knowledge source', {
|
|
23664
|
+
source,
|
|
23665
|
+
timeoutMs,
|
|
23666
|
+
logLabel,
|
|
23667
|
+
});
|
|
23321
23668
|
}
|
|
23322
|
-
|
|
23323
|
-
|
|
23324
|
-
|
|
23325
|
-
|
|
23669
|
+
try {
|
|
23670
|
+
const response = await fetch(source, { signal: controller.signal });
|
|
23671
|
+
const contentType = (_a = response.headers.get('content-type')) !== null && _a !== void 0 ? _a : undefined;
|
|
23672
|
+
if (!response.ok) {
|
|
23673
|
+
console.error('[🤰]', 'Failed to download knowledge source', {
|
|
23674
|
+
source,
|
|
23675
|
+
status: response.status,
|
|
23676
|
+
statusText: response.statusText,
|
|
23677
|
+
contentType,
|
|
23678
|
+
elapsedMs: Date.now() - startedAtMs,
|
|
23679
|
+
logLabel,
|
|
23680
|
+
});
|
|
23681
|
+
return null;
|
|
23682
|
+
}
|
|
23683
|
+
const buffer = await response.arrayBuffer();
|
|
23684
|
+
let filename = source.split('/').pop() || 'downloaded-file';
|
|
23685
|
+
try {
|
|
23686
|
+
const url = new URL(source);
|
|
23687
|
+
filename = url.pathname.split('/').pop() || filename;
|
|
23688
|
+
}
|
|
23689
|
+
catch (error) {
|
|
23690
|
+
// Keep default filename
|
|
23691
|
+
}
|
|
23692
|
+
const file = new File([buffer], filename, contentType ? { type: contentType } : undefined);
|
|
23693
|
+
const elapsedMs = Date.now() - startedAtMs;
|
|
23694
|
+
const sizeBytes = buffer.byteLength;
|
|
23695
|
+
if (this.options.isVerbose) {
|
|
23696
|
+
console.info('[🤰]', 'Downloaded knowledge source', {
|
|
23697
|
+
source,
|
|
23698
|
+
filename,
|
|
23699
|
+
sizeBytes,
|
|
23700
|
+
contentType,
|
|
23701
|
+
elapsedMs,
|
|
23702
|
+
logLabel,
|
|
23703
|
+
});
|
|
23704
|
+
}
|
|
23705
|
+
return { file, sizeBytes, filename, elapsedMs };
|
|
23326
23706
|
}
|
|
23327
|
-
|
|
23328
|
-
|
|
23329
|
-
|
|
23330
|
-
|
|
23331
|
-
|
|
23332
|
-
|
|
23333
|
-
|
|
23334
|
-
|
|
23335
|
-
|
|
23336
|
-
role: msg.sender === 'assistant' ? 'assistant' : 'user',
|
|
23337
|
-
content: msg.content,
|
|
23338
|
-
}));
|
|
23339
|
-
input.push(...previousMessages);
|
|
23707
|
+
catch (error) {
|
|
23708
|
+
assertsError(error);
|
|
23709
|
+
console.error('[🤰]', 'Error downloading knowledge source', {
|
|
23710
|
+
source,
|
|
23711
|
+
elapsedMs: Date.now() - startedAtMs,
|
|
23712
|
+
logLabel,
|
|
23713
|
+
error: serializeError(error),
|
|
23714
|
+
});
|
|
23715
|
+
return null;
|
|
23340
23716
|
}
|
|
23341
|
-
|
|
23342
|
-
|
|
23343
|
-
|
|
23344
|
-
|
|
23345
|
-
|
|
23346
|
-
|
|
23347
|
-
|
|
23348
|
-
|
|
23349
|
-
|
|
23350
|
-
|
|
23351
|
-
if (this.
|
|
23352
|
-
|
|
23353
|
-
|
|
23354
|
-
|
|
23355
|
-
|
|
23356
|
-
|
|
23717
|
+
finally {
|
|
23718
|
+
clearTimeout(timeoutId);
|
|
23719
|
+
}
|
|
23720
|
+
}
|
|
23721
|
+
/**
|
|
23722
|
+
* Logs vector store file batch diagnostics to help trace ingestion stalls or failures.
|
|
23723
|
+
*/
|
|
23724
|
+
async logVectorStoreFileBatchDiagnostics(options) {
|
|
23725
|
+
var _a, _b, _c, _d, _e;
|
|
23726
|
+
const { client, vectorStoreId, batchId, uploadedFiles, logLabel, reason } = options;
|
|
23727
|
+
if (reason === 'stalled' && !this.options.isVerbose) {
|
|
23728
|
+
return;
|
|
23729
|
+
}
|
|
23730
|
+
if (!batchId.startsWith('vsfb_')) {
|
|
23731
|
+
console.error('[🤰]', 'Vector store file batch diagnostics skipped (invalid batch id)', {
|
|
23732
|
+
vectorStoreId,
|
|
23733
|
+
batchId,
|
|
23734
|
+
reason,
|
|
23735
|
+
logLabel,
|
|
23736
|
+
});
|
|
23737
|
+
return;
|
|
23738
|
+
}
|
|
23739
|
+
const fileIdToMetadata = new Map();
|
|
23740
|
+
for (const file of uploadedFiles) {
|
|
23741
|
+
fileIdToMetadata.set(file.fileId, file);
|
|
23742
|
+
}
|
|
23743
|
+
try {
|
|
23744
|
+
const vectorStores = this.getVectorStoresApi(client);
|
|
23745
|
+
const limit = Math.min(100, Math.max(10, uploadedFiles.length));
|
|
23746
|
+
const batchFilesPage = await vectorStores.fileBatches.listFiles(batchId, {
|
|
23747
|
+
vector_store_id: vectorStoreId,
|
|
23748
|
+
limit,
|
|
23749
|
+
});
|
|
23750
|
+
const batchFiles = (_a = batchFilesPage.data) !== null && _a !== void 0 ? _a : [];
|
|
23751
|
+
const statusCounts = {
|
|
23752
|
+
in_progress: 0,
|
|
23753
|
+
completed: 0,
|
|
23754
|
+
failed: 0,
|
|
23755
|
+
cancelled: 0,
|
|
23357
23756
|
};
|
|
23757
|
+
const errorSamples = [];
|
|
23758
|
+
const inProgressSamples = [];
|
|
23759
|
+
const batchFileIds = new Set();
|
|
23760
|
+
for (const file of batchFiles) {
|
|
23761
|
+
const status = (_b = file.status) !== null && _b !== void 0 ? _b : 'unknown';
|
|
23762
|
+
statusCounts[status] = ((_c = statusCounts[status]) !== null && _c !== void 0 ? _c : 0) + 1;
|
|
23763
|
+
const vectorStoreFileId = file.id;
|
|
23764
|
+
const uploadedFileId = (_d = file.file_id) !== null && _d !== void 0 ? _d : file.fileId;
|
|
23765
|
+
const fileId = uploadedFileId !== null && uploadedFileId !== void 0 ? uploadedFileId : vectorStoreFileId;
|
|
23766
|
+
batchFileIds.add(fileId);
|
|
23767
|
+
const metadata = fileIdToMetadata.get(fileId);
|
|
23768
|
+
if (status === 'failed') {
|
|
23769
|
+
errorSamples.push({
|
|
23770
|
+
fileId,
|
|
23771
|
+
status,
|
|
23772
|
+
error: (_e = file.last_error) === null || _e === void 0 ? void 0 : _e.message,
|
|
23773
|
+
filename: metadata === null || metadata === void 0 ? void 0 : metadata.filename,
|
|
23774
|
+
vectorStoreFileId: uploadedFileId ? vectorStoreFileId : undefined,
|
|
23775
|
+
});
|
|
23776
|
+
}
|
|
23777
|
+
if (status === 'in_progress') {
|
|
23778
|
+
inProgressSamples.push({
|
|
23779
|
+
fileId,
|
|
23780
|
+
filename: metadata === null || metadata === void 0 ? void 0 : metadata.filename,
|
|
23781
|
+
vectorStoreFileId: uploadedFileId ? vectorStoreFileId : undefined,
|
|
23782
|
+
});
|
|
23783
|
+
}
|
|
23784
|
+
}
|
|
23785
|
+
const missingSamples = uploadedFiles
|
|
23786
|
+
.filter((file) => !batchFileIds.has(file.fileId))
|
|
23787
|
+
.slice(0, 5)
|
|
23788
|
+
.map((file) => ({
|
|
23789
|
+
fileId: file.fileId,
|
|
23790
|
+
filename: file.filename,
|
|
23791
|
+
sizeBytes: file.sizeBytes,
|
|
23792
|
+
}));
|
|
23793
|
+
const vectorStore = await vectorStores.retrieve(vectorStoreId);
|
|
23794
|
+
const logPayload = {
|
|
23795
|
+
vectorStoreId,
|
|
23796
|
+
batchId,
|
|
23797
|
+
reason,
|
|
23798
|
+
vectorStoreStatus: vectorStore.status,
|
|
23799
|
+
vectorStoreFileCounts: vectorStore.file_counts,
|
|
23800
|
+
vectorStoreUsageBytes: vectorStore.usage_bytes,
|
|
23801
|
+
batchFileCount: batchFiles.length,
|
|
23802
|
+
statusCounts,
|
|
23803
|
+
errorSamples: errorSamples.slice(0, 5),
|
|
23804
|
+
inProgressSamples,
|
|
23805
|
+
missingFileCount: uploadedFiles.length - batchFileIds.size,
|
|
23806
|
+
missingSamples,
|
|
23807
|
+
logLabel,
|
|
23808
|
+
};
|
|
23809
|
+
const logFunction = reason === 'stalled' ? console.info : console.error;
|
|
23810
|
+
logFunction('[🤰]', 'Vector store file batch diagnostics', logPayload);
|
|
23811
|
+
}
|
|
23812
|
+
catch (error) {
|
|
23813
|
+
assertsError(error);
|
|
23814
|
+
console.error('[🤰]', 'Vector store file batch diagnostics failed', {
|
|
23815
|
+
vectorStoreId,
|
|
23816
|
+
batchId,
|
|
23817
|
+
reason,
|
|
23818
|
+
logLabel,
|
|
23819
|
+
error: serializeError(error),
|
|
23820
|
+
});
|
|
23821
|
+
}
|
|
23822
|
+
}
|
|
23823
|
+
/**
|
|
23824
|
+
* Uploads knowledge source files to the vector store and polls until processing completes.
|
|
23825
|
+
*/
|
|
23826
|
+
async uploadKnowledgeSourceFilesToVectorStore(options) {
|
|
23827
|
+
var _a, _b, _c, _d, _e, _f;
|
|
23828
|
+
const { client, vectorStoreId, files, totalBytes, logLabel } = options;
|
|
23829
|
+
const vectorStores = this.getVectorStoresApi(client);
|
|
23830
|
+
const uploadStartedAtMs = Date.now();
|
|
23831
|
+
const maxConcurrency = Math.max(1, this.getKnowledgeSourceUploadMaxConcurrency());
|
|
23832
|
+
const pollIntervalMs = Math.max(1000, this.getKnowledgeSourceUploadPollIntervalMs());
|
|
23833
|
+
const uploadTimeoutMs = Math.max(1000, this.getKnowledgeSourceUploadTimeoutMs());
|
|
23834
|
+
if (this.options.isVerbose) {
|
|
23835
|
+
console.info('[🤰]', 'Uploading knowledge source files to OpenAI', {
|
|
23836
|
+
vectorStoreId,
|
|
23837
|
+
fileCount: files.length,
|
|
23838
|
+
totalBytes,
|
|
23839
|
+
maxConcurrency,
|
|
23840
|
+
pollIntervalMs,
|
|
23841
|
+
uploadTimeoutMs,
|
|
23842
|
+
logLabel,
|
|
23843
|
+
});
|
|
23844
|
+
}
|
|
23845
|
+
const fileTypeSummary = {};
|
|
23846
|
+
for (const file of files) {
|
|
23847
|
+
const filename = (_a = file.name) !== null && _a !== void 0 ? _a : '';
|
|
23848
|
+
const extension = filename.includes('.')
|
|
23849
|
+
? (_c = (_b = filename.split('.').pop()) === null || _b === void 0 ? void 0 : _b.toLowerCase()) !== null && _c !== void 0 ? _c : 'unknown'
|
|
23850
|
+
: 'unknown';
|
|
23851
|
+
const sizeBytes = typeof file.size === 'number' ? file.size : 0;
|
|
23852
|
+
const summary = (_d = fileTypeSummary[extension]) !== null && _d !== void 0 ? _d : { count: 0, totalBytes: 0 };
|
|
23853
|
+
summary.count += 1;
|
|
23854
|
+
summary.totalBytes += sizeBytes;
|
|
23855
|
+
fileTypeSummary[extension] = summary;
|
|
23856
|
+
}
|
|
23857
|
+
if (this.options.isVerbose) {
|
|
23858
|
+
console.info('[🤰]', 'Knowledge source file summary', {
|
|
23859
|
+
vectorStoreId,
|
|
23860
|
+
fileCount: files.length,
|
|
23861
|
+
totalBytes,
|
|
23862
|
+
fileTypeSummary,
|
|
23863
|
+
logLabel,
|
|
23864
|
+
});
|
|
23865
|
+
}
|
|
23866
|
+
const fileEntries = files.map((file, index) => ({ file, index }));
|
|
23867
|
+
const fileIterator = fileEntries.values();
|
|
23868
|
+
const fileIds = [];
|
|
23869
|
+
const uploadedFiles = [];
|
|
23870
|
+
const failedUploads = [];
|
|
23871
|
+
let uploadedCount = 0;
|
|
23872
|
+
const processFiles = async (iterator) => {
|
|
23873
|
+
var _a, _b;
|
|
23874
|
+
for (const { file, index } of iterator) {
|
|
23875
|
+
const uploadIndex = index + 1;
|
|
23876
|
+
const filename = file.name || `knowledge-source-${uploadIndex}`;
|
|
23877
|
+
const extension = filename.includes('.')
|
|
23878
|
+
? (_b = (_a = filename.split('.').pop()) === null || _a === void 0 ? void 0 : _a.toLowerCase()) !== null && _b !== void 0 ? _b : 'unknown'
|
|
23879
|
+
: 'unknown';
|
|
23880
|
+
const sizeBytes = typeof file.size === 'number' ? file.size : undefined;
|
|
23881
|
+
const fileUploadStartedAtMs = Date.now();
|
|
23882
|
+
if (this.options.isVerbose) {
|
|
23883
|
+
console.info('[🤰]', 'Uploading knowledge source file', {
|
|
23884
|
+
index: uploadIndex,
|
|
23885
|
+
total: files.length,
|
|
23886
|
+
filename,
|
|
23887
|
+
extension,
|
|
23888
|
+
sizeBytes,
|
|
23889
|
+
logLabel,
|
|
23890
|
+
});
|
|
23891
|
+
}
|
|
23892
|
+
try {
|
|
23893
|
+
const uploaded = await client.files.create({ file, purpose: 'assistants' });
|
|
23894
|
+
fileIds.push(uploaded.id);
|
|
23895
|
+
uploadedFiles.push({ fileId: uploaded.id, filename, sizeBytes });
|
|
23896
|
+
uploadedCount += 1;
|
|
23897
|
+
if (this.options.isVerbose) {
|
|
23898
|
+
console.info('[🤰]', 'Uploaded knowledge source file', {
|
|
23899
|
+
index: uploadIndex,
|
|
23900
|
+
total: files.length,
|
|
23901
|
+
filename,
|
|
23902
|
+
sizeBytes,
|
|
23903
|
+
fileId: uploaded.id,
|
|
23904
|
+
elapsedMs: Date.now() - fileUploadStartedAtMs,
|
|
23905
|
+
logLabel,
|
|
23906
|
+
});
|
|
23907
|
+
}
|
|
23908
|
+
}
|
|
23909
|
+
catch (error) {
|
|
23910
|
+
assertsError(error);
|
|
23911
|
+
const serializedError = serializeError(error);
|
|
23912
|
+
failedUploads.push({ index: uploadIndex, filename, error: serializedError });
|
|
23913
|
+
console.error('[🤰]', 'Failed to upload knowledge source file', {
|
|
23914
|
+
index: uploadIndex,
|
|
23915
|
+
total: files.length,
|
|
23916
|
+
filename,
|
|
23917
|
+
sizeBytes,
|
|
23918
|
+
elapsedMs: Date.now() - fileUploadStartedAtMs,
|
|
23919
|
+
logLabel,
|
|
23920
|
+
error: serializedError,
|
|
23921
|
+
});
|
|
23922
|
+
}
|
|
23923
|
+
}
|
|
23924
|
+
};
|
|
23925
|
+
const workerCount = Math.min(maxConcurrency, files.length);
|
|
23926
|
+
const workers = Array.from({ length: workerCount }, () => processFiles(fileIterator));
|
|
23927
|
+
await Promise.all(workers);
|
|
23928
|
+
if (this.options.isVerbose) {
|
|
23929
|
+
console.info('[🤰]', 'Finished uploading knowledge source files', {
|
|
23930
|
+
vectorStoreId,
|
|
23931
|
+
fileCount: files.length,
|
|
23932
|
+
uploadedCount,
|
|
23933
|
+
failedCount: failedUploads.length,
|
|
23934
|
+
elapsedMs: Date.now() - uploadStartedAtMs,
|
|
23935
|
+
failedSamples: failedUploads.slice(0, 3),
|
|
23936
|
+
logLabel,
|
|
23937
|
+
});
|
|
23938
|
+
}
|
|
23939
|
+
if (fileIds.length === 0) {
|
|
23940
|
+
console.error('[🤰]', 'No knowledge source files were uploaded', {
|
|
23941
|
+
vectorStoreId,
|
|
23942
|
+
fileCount: files.length,
|
|
23943
|
+
failedCount: failedUploads.length,
|
|
23944
|
+
logLabel,
|
|
23945
|
+
});
|
|
23946
|
+
return null;
|
|
23947
|
+
}
|
|
23948
|
+
const batch = await vectorStores.fileBatches.create(vectorStoreId, {
|
|
23949
|
+
file_ids: fileIds,
|
|
23950
|
+
});
|
|
23951
|
+
const expectedBatchId = batch.id;
|
|
23952
|
+
const expectedBatchIdValid = expectedBatchId.startsWith('vsfb_');
|
|
23953
|
+
if (!expectedBatchIdValid) {
|
|
23954
|
+
console.error('[🤰]', 'Vector store file batch id looks invalid', {
|
|
23955
|
+
vectorStoreId,
|
|
23956
|
+
batchId: expectedBatchId,
|
|
23957
|
+
batchVectorStoreId: batch.vector_store_id,
|
|
23958
|
+
logLabel,
|
|
23959
|
+
});
|
|
23960
|
+
}
|
|
23961
|
+
else if (batch.vector_store_id !== vectorStoreId) {
|
|
23962
|
+
console.error('[🤰]', 'Vector store file batch vector store id mismatch', {
|
|
23963
|
+
vectorStoreId,
|
|
23964
|
+
batchId: expectedBatchId,
|
|
23965
|
+
batchVectorStoreId: batch.vector_store_id,
|
|
23966
|
+
logLabel,
|
|
23967
|
+
});
|
|
23968
|
+
}
|
|
23969
|
+
if (this.options.isVerbose) {
|
|
23970
|
+
console.info('[🤰]', 'Created vector store file batch', {
|
|
23971
|
+
vectorStoreId,
|
|
23972
|
+
batchId: expectedBatchId,
|
|
23973
|
+
fileCount: fileIds.length,
|
|
23974
|
+
logLabel,
|
|
23975
|
+
});
|
|
23976
|
+
}
|
|
23977
|
+
const pollStartedAtMs = Date.now();
|
|
23978
|
+
const progressLogIntervalMs = Math.max(VECTOR_STORE_PROGRESS_LOG_INTERVAL_MIN_MS, pollIntervalMs);
|
|
23979
|
+
const diagnosticsIntervalMs = Math.max(60000, pollIntervalMs * 5);
|
|
23980
|
+
// let lastStatus: string | undefined;
|
|
23981
|
+
let lastCountsKey = '';
|
|
23982
|
+
let lastProgressKey = '';
|
|
23983
|
+
let lastLogAtMs = 0;
|
|
23984
|
+
let lastProgressAtMs = pollStartedAtMs;
|
|
23985
|
+
let lastDiagnosticsAtMs = pollStartedAtMs;
|
|
23986
|
+
let latestBatch = batch;
|
|
23987
|
+
let loggedBatchIdMismatch = false;
|
|
23988
|
+
let loggedBatchIdFallback = false;
|
|
23989
|
+
let loggedBatchIdInvalid = false;
|
|
23990
|
+
let shouldPoll = true;
|
|
23991
|
+
while (shouldPoll) {
|
|
23992
|
+
const nowMs = Date.now();
|
|
23993
|
+
// [🤰] Note: Sometimes OpenAI returns Vector Store object instead of Batch object, or IDs get swapped.
|
|
23994
|
+
const rawBatchId = typeof latestBatch.id === 'string' ? latestBatch.id : '';
|
|
23995
|
+
const rawVectorStoreId = latestBatch.vector_store_id;
|
|
23996
|
+
let returnedBatchId = rawBatchId;
|
|
23997
|
+
let returnedBatchIdValid = typeof returnedBatchId === 'string' && returnedBatchId.startsWith('vsfb_');
|
|
23998
|
+
if (!returnedBatchIdValid && expectedBatchIdValid) {
|
|
23999
|
+
if (!loggedBatchIdFallback) {
|
|
24000
|
+
console.error('[🤰]', 'Vector store file batch id missing from response; falling back to expected', {
|
|
24001
|
+
vectorStoreId,
|
|
24002
|
+
expectedBatchId,
|
|
24003
|
+
returnedBatchId,
|
|
24004
|
+
rawVectorStoreId,
|
|
24005
|
+
logLabel,
|
|
24006
|
+
});
|
|
24007
|
+
loggedBatchIdFallback = true;
|
|
24008
|
+
}
|
|
24009
|
+
returnedBatchId = expectedBatchId;
|
|
24010
|
+
returnedBatchIdValid = true;
|
|
24011
|
+
}
|
|
24012
|
+
if (!returnedBatchIdValid && !loggedBatchIdInvalid) {
|
|
24013
|
+
console.error('[🤰]', 'Vector store file batch id is invalid; stopping polling', {
|
|
24014
|
+
vectorStoreId,
|
|
24015
|
+
expectedBatchId,
|
|
24016
|
+
returnedBatchId,
|
|
24017
|
+
rawVectorStoreId,
|
|
24018
|
+
logLabel,
|
|
24019
|
+
});
|
|
24020
|
+
loggedBatchIdInvalid = true;
|
|
24021
|
+
}
|
|
24022
|
+
const batchIdMismatch = expectedBatchIdValid && returnedBatchIdValid && returnedBatchId !== expectedBatchId;
|
|
24023
|
+
if (batchIdMismatch && !loggedBatchIdMismatch) {
|
|
24024
|
+
console.error('[🤰]', 'Vector store file batch id mismatch', {
|
|
24025
|
+
vectorStoreId,
|
|
24026
|
+
expectedBatchId,
|
|
24027
|
+
returnedBatchId,
|
|
24028
|
+
logLabel,
|
|
24029
|
+
});
|
|
24030
|
+
loggedBatchIdMismatch = true;
|
|
24031
|
+
}
|
|
24032
|
+
if (returnedBatchIdValid) {
|
|
24033
|
+
latestBatch = await vectorStores.fileBatches.retrieve(returnedBatchId, {
|
|
24034
|
+
vector_store_id: vectorStoreId,
|
|
24035
|
+
});
|
|
24036
|
+
}
|
|
24037
|
+
else {
|
|
24038
|
+
shouldPoll = false;
|
|
24039
|
+
continue;
|
|
24040
|
+
}
|
|
24041
|
+
const status = (_e = latestBatch.status) !== null && _e !== void 0 ? _e : 'unknown';
|
|
24042
|
+
const fileCounts = (_f = latestBatch.file_counts) !== null && _f !== void 0 ? _f : {};
|
|
24043
|
+
const progressKey = JSON.stringify(fileCounts);
|
|
24044
|
+
const statusCountsKey = `${status}-${progressKey}`;
|
|
24045
|
+
const isProgressing = progressKey !== lastProgressKey;
|
|
24046
|
+
if (isProgressing) {
|
|
24047
|
+
lastProgressAtMs = nowMs;
|
|
24048
|
+
lastProgressKey = progressKey;
|
|
24049
|
+
}
|
|
24050
|
+
if (this.options.isVerbose &&
|
|
24051
|
+
(statusCountsKey !== lastCountsKey || nowMs - lastLogAtMs >= progressLogIntervalMs)) {
|
|
24052
|
+
console.info('[🤰]', 'Vector store file batch status', {
|
|
24053
|
+
vectorStoreId,
|
|
24054
|
+
batchId: returnedBatchId,
|
|
24055
|
+
status,
|
|
24056
|
+
fileCounts,
|
|
24057
|
+
elapsedMs: nowMs - pollStartedAtMs,
|
|
24058
|
+
logLabel,
|
|
24059
|
+
});
|
|
24060
|
+
lastCountsKey = statusCountsKey;
|
|
24061
|
+
lastLogAtMs = nowMs;
|
|
24062
|
+
}
|
|
24063
|
+
if (status === 'in_progress' &&
|
|
24064
|
+
nowMs - lastProgressAtMs >= VECTOR_STORE_STALL_LOG_THRESHOLD_MS &&
|
|
24065
|
+
nowMs - lastDiagnosticsAtMs >= diagnosticsIntervalMs) {
|
|
24066
|
+
lastDiagnosticsAtMs = nowMs;
|
|
24067
|
+
await this.logVectorStoreFileBatchDiagnostics({
|
|
24068
|
+
client,
|
|
24069
|
+
vectorStoreId,
|
|
24070
|
+
batchId: returnedBatchId,
|
|
24071
|
+
uploadedFiles,
|
|
24072
|
+
logLabel,
|
|
24073
|
+
reason: 'stalled',
|
|
24074
|
+
});
|
|
24075
|
+
}
|
|
24076
|
+
if (status === 'completed') {
|
|
24077
|
+
if (this.options.isVerbose) {
|
|
24078
|
+
console.info('[🤰]', 'Vector store file batch completed', {
|
|
24079
|
+
vectorStoreId,
|
|
24080
|
+
batchId: returnedBatchId,
|
|
24081
|
+
fileCounts,
|
|
24082
|
+
elapsedMs: nowMs - pollStartedAtMs,
|
|
24083
|
+
logLabel,
|
|
24084
|
+
});
|
|
24085
|
+
}
|
|
24086
|
+
shouldPoll = false;
|
|
24087
|
+
continue;
|
|
24088
|
+
}
|
|
24089
|
+
if (status === 'failed') {
|
|
24090
|
+
console.error('[🤰]', 'Vector store file batch completed with failures', {
|
|
24091
|
+
vectorStoreId,
|
|
24092
|
+
batchId: returnedBatchId,
|
|
24093
|
+
fileCounts,
|
|
24094
|
+
elapsedMs: nowMs - pollStartedAtMs,
|
|
24095
|
+
logLabel,
|
|
24096
|
+
});
|
|
24097
|
+
await this.logVectorStoreFileBatchDiagnostics({
|
|
24098
|
+
client,
|
|
24099
|
+
vectorStoreId,
|
|
24100
|
+
batchId: returnedBatchId,
|
|
24101
|
+
uploadedFiles,
|
|
24102
|
+
logLabel,
|
|
24103
|
+
reason: 'failed',
|
|
24104
|
+
});
|
|
24105
|
+
shouldPoll = false;
|
|
24106
|
+
continue;
|
|
24107
|
+
}
|
|
24108
|
+
if (status === 'cancelled') {
|
|
24109
|
+
console.error('[🤰]', 'Vector store file batch did not complete', {
|
|
24110
|
+
vectorStoreId,
|
|
24111
|
+
batchId: returnedBatchId,
|
|
24112
|
+
status,
|
|
24113
|
+
fileCounts,
|
|
24114
|
+
elapsedMs: nowMs - pollStartedAtMs,
|
|
24115
|
+
logLabel,
|
|
24116
|
+
});
|
|
24117
|
+
await this.logVectorStoreFileBatchDiagnostics({
|
|
24118
|
+
client,
|
|
24119
|
+
vectorStoreId,
|
|
24120
|
+
batchId: returnedBatchId,
|
|
24121
|
+
uploadedFiles,
|
|
24122
|
+
logLabel,
|
|
24123
|
+
reason: 'failed',
|
|
24124
|
+
});
|
|
24125
|
+
shouldPoll = false;
|
|
24126
|
+
continue;
|
|
24127
|
+
}
|
|
24128
|
+
if (nowMs - pollStartedAtMs >= uploadTimeoutMs) {
|
|
24129
|
+
console.error('[🤰]', 'Timed out waiting for vector store file batch', {
|
|
24130
|
+
vectorStoreId,
|
|
24131
|
+
batchId: returnedBatchId,
|
|
24132
|
+
fileCounts,
|
|
24133
|
+
elapsedMs: nowMs - pollStartedAtMs,
|
|
24134
|
+
uploadTimeoutMs,
|
|
24135
|
+
logLabel,
|
|
24136
|
+
});
|
|
24137
|
+
await this.logVectorStoreFileBatchDiagnostics({
|
|
24138
|
+
client,
|
|
24139
|
+
vectorStoreId,
|
|
24140
|
+
batchId: returnedBatchId,
|
|
24141
|
+
uploadedFiles,
|
|
24142
|
+
logLabel,
|
|
24143
|
+
reason: 'timeout',
|
|
24144
|
+
});
|
|
24145
|
+
if (this.shouldContinueOnVectorStoreStall()) {
|
|
24146
|
+
console.warn('[🤰]', 'Continuing despite vector store timeout as requested', {
|
|
24147
|
+
vectorStoreId,
|
|
24148
|
+
logLabel,
|
|
24149
|
+
});
|
|
24150
|
+
shouldPoll = false;
|
|
24151
|
+
continue;
|
|
24152
|
+
}
|
|
24153
|
+
try {
|
|
24154
|
+
const cancelBatchId = batchIdMismatch && returnedBatchId.startsWith('vsfb_') ? returnedBatchId : expectedBatchId;
|
|
24155
|
+
if (!cancelBatchId.startsWith('vsfb_')) {
|
|
24156
|
+
console.error('[🤰]', 'Skipping vector store file batch cancel (invalid batch id)', {
|
|
24157
|
+
vectorStoreId,
|
|
24158
|
+
batchId: cancelBatchId,
|
|
24159
|
+
logLabel,
|
|
24160
|
+
});
|
|
24161
|
+
}
|
|
24162
|
+
else {
|
|
24163
|
+
await vectorStores.fileBatches.cancel(cancelBatchId, {
|
|
24164
|
+
vector_store_id: vectorStoreId,
|
|
24165
|
+
});
|
|
24166
|
+
}
|
|
24167
|
+
if (this.options.isVerbose) {
|
|
24168
|
+
console.info('[🤰]', 'Cancelled vector store file batch after timeout', {
|
|
24169
|
+
vectorStoreId,
|
|
24170
|
+
batchId: batchIdMismatch && returnedBatchId.startsWith('vsfb_')
|
|
24171
|
+
? returnedBatchId
|
|
24172
|
+
: expectedBatchId,
|
|
24173
|
+
...(batchIdMismatch ? { returnedBatchId } : {}),
|
|
24174
|
+
logLabel,
|
|
24175
|
+
});
|
|
24176
|
+
}
|
|
24177
|
+
}
|
|
24178
|
+
catch (error) {
|
|
24179
|
+
assertsError(error);
|
|
24180
|
+
console.error('[🤰]', 'Failed to cancel vector store file batch after timeout', {
|
|
24181
|
+
vectorStoreId,
|
|
24182
|
+
batchId: expectedBatchId,
|
|
24183
|
+
...(batchIdMismatch ? { returnedBatchId } : {}),
|
|
24184
|
+
logLabel,
|
|
24185
|
+
error: serializeError(error),
|
|
24186
|
+
});
|
|
24187
|
+
}
|
|
24188
|
+
shouldPoll = false;
|
|
24189
|
+
continue;
|
|
24190
|
+
}
|
|
24191
|
+
await new Promise((resolve) => setTimeout(resolve, pollIntervalMs));
|
|
24192
|
+
}
|
|
24193
|
+
return latestBatch;
|
|
24194
|
+
}
|
|
24195
|
+
/**
|
|
24196
|
+
* Creates a vector store and uploads knowledge sources, returning its ID.
|
|
24197
|
+
*/
|
|
24198
|
+
async createVectorStoreWithKnowledgeSources(options) {
|
|
24199
|
+
const { client, name, knowledgeSources, logLabel } = options;
|
|
24200
|
+
const vectorStores = this.getVectorStoresApi(client);
|
|
24201
|
+
const knowledgeSourcesCount = knowledgeSources.length;
|
|
24202
|
+
const downloadTimeoutMs = this.getKnowledgeSourceDownloadTimeoutMs();
|
|
24203
|
+
if (this.options.isVerbose) {
|
|
24204
|
+
console.info('[🤰]', 'Creating vector store with knowledge sources', {
|
|
24205
|
+
name,
|
|
24206
|
+
knowledgeSourcesCount,
|
|
24207
|
+
downloadTimeoutMs,
|
|
24208
|
+
logLabel,
|
|
24209
|
+
});
|
|
24210
|
+
}
|
|
24211
|
+
const vectorStore = await vectorStores.create({
|
|
24212
|
+
name: `${name} Knowledge Base`,
|
|
24213
|
+
});
|
|
24214
|
+
const vectorStoreId = vectorStore.id;
|
|
24215
|
+
if (this.options.isVerbose) {
|
|
24216
|
+
console.info('[🤰]', 'Vector store created', {
|
|
24217
|
+
vectorStoreId,
|
|
24218
|
+
logLabel,
|
|
24219
|
+
});
|
|
24220
|
+
}
|
|
24221
|
+
const fileStreams = [];
|
|
24222
|
+
const skippedSources = [];
|
|
24223
|
+
let totalBytes = 0;
|
|
24224
|
+
const processingStartedAtMs = Date.now();
|
|
24225
|
+
for (const [index, source] of knowledgeSources.entries()) {
|
|
24226
|
+
try {
|
|
24227
|
+
const isDataUrl = isDataUrlKnowledgeSource(source);
|
|
24228
|
+
const isHttp = source.startsWith('http://') || source.startsWith('https://');
|
|
24229
|
+
const sourceType = isDataUrl ? 'data_url' : isHttp ? 'url' : 'file';
|
|
24230
|
+
if (this.options.isVerbose) {
|
|
24231
|
+
console.info('[🤰]', 'Processing knowledge source', {
|
|
24232
|
+
index: index + 1,
|
|
24233
|
+
total: knowledgeSourcesCount,
|
|
24234
|
+
source,
|
|
24235
|
+
sourceType,
|
|
24236
|
+
logLabel,
|
|
24237
|
+
});
|
|
24238
|
+
}
|
|
24239
|
+
if (isDataUrl) {
|
|
24240
|
+
const parsed = parseDataUrlKnowledgeSource(source);
|
|
24241
|
+
if (!parsed) {
|
|
24242
|
+
skippedSources.push({ source, reason: 'invalid_data_url' });
|
|
24243
|
+
if (this.options.isVerbose) {
|
|
24244
|
+
console.info('[🤰]', 'Skipping knowledge source (invalid data URL)', {
|
|
24245
|
+
source,
|
|
24246
|
+
sourceType,
|
|
24247
|
+
logLabel,
|
|
24248
|
+
});
|
|
24249
|
+
}
|
|
24250
|
+
continue;
|
|
24251
|
+
}
|
|
24252
|
+
const dataUrlFile = new File([parsed.buffer], parsed.filename, {
|
|
24253
|
+
type: parsed.mimeType,
|
|
24254
|
+
});
|
|
24255
|
+
fileStreams.push(dataUrlFile);
|
|
24256
|
+
totalBytes += parsed.buffer.length;
|
|
24257
|
+
continue;
|
|
24258
|
+
}
|
|
24259
|
+
if (isHttp) {
|
|
24260
|
+
const downloadResult = await this.downloadKnowledgeSourceFile({
|
|
24261
|
+
source,
|
|
24262
|
+
timeoutMs: downloadTimeoutMs,
|
|
24263
|
+
logLabel,
|
|
24264
|
+
});
|
|
24265
|
+
if (downloadResult) {
|
|
24266
|
+
fileStreams.push(downloadResult.file);
|
|
24267
|
+
totalBytes += downloadResult.sizeBytes;
|
|
24268
|
+
}
|
|
24269
|
+
else {
|
|
24270
|
+
skippedSources.push({ source, reason: 'download_failed' });
|
|
24271
|
+
}
|
|
24272
|
+
}
|
|
24273
|
+
else {
|
|
24274
|
+
skippedSources.push({ source, reason: 'unsupported_source_type' });
|
|
24275
|
+
if (this.options.isVerbose) {
|
|
24276
|
+
console.info('[🤰]', 'Skipping knowledge source (unsupported type)', {
|
|
24277
|
+
source,
|
|
24278
|
+
sourceType,
|
|
24279
|
+
logLabel,
|
|
24280
|
+
});
|
|
24281
|
+
}
|
|
24282
|
+
/*
|
|
24283
|
+
TODO: [🤰] Resolve problem with browser environment
|
|
24284
|
+
// Assume it's a local file path
|
|
24285
|
+
// Note: This will work in Node.js environment
|
|
24286
|
+
// For browser environments, this would need different handling
|
|
24287
|
+
const fs = await import('fs');
|
|
24288
|
+
const fileStream = fs.createReadStream(source);
|
|
24289
|
+
fileStreams.push(fileStream);
|
|
24290
|
+
*/
|
|
24291
|
+
}
|
|
24292
|
+
}
|
|
24293
|
+
catch (error) {
|
|
24294
|
+
assertsError(error);
|
|
24295
|
+
skippedSources.push({ source, reason: 'processing_error' });
|
|
24296
|
+
console.error('[🤰]', 'Error processing knowledge source', {
|
|
24297
|
+
source,
|
|
24298
|
+
logLabel,
|
|
24299
|
+
error: serializeError(error),
|
|
24300
|
+
});
|
|
24301
|
+
}
|
|
24302
|
+
}
|
|
24303
|
+
if (this.options.isVerbose) {
|
|
24304
|
+
console.info('[🤰]', 'Finished processing knowledge sources', {
|
|
24305
|
+
total: knowledgeSourcesCount,
|
|
24306
|
+
downloadedCount: fileStreams.length,
|
|
24307
|
+
skippedCount: skippedSources.length,
|
|
24308
|
+
totalBytes,
|
|
24309
|
+
elapsedMs: Date.now() - processingStartedAtMs,
|
|
24310
|
+
skippedSamples: skippedSources.slice(0, 3),
|
|
24311
|
+
logLabel,
|
|
24312
|
+
});
|
|
24313
|
+
}
|
|
24314
|
+
if (fileStreams.length > 0) {
|
|
24315
|
+
if (this.options.isVerbose) {
|
|
24316
|
+
console.info('[🤰]', 'Uploading files to vector store', {
|
|
24317
|
+
vectorStoreId,
|
|
24318
|
+
fileCount: fileStreams.length,
|
|
24319
|
+
totalBytes,
|
|
24320
|
+
maxConcurrency: this.getKnowledgeSourceUploadMaxConcurrency(),
|
|
24321
|
+
pollIntervalMs: this.getKnowledgeSourceUploadPollIntervalMs(),
|
|
24322
|
+
uploadTimeoutMs: this.getKnowledgeSourceUploadTimeoutMs(),
|
|
24323
|
+
logLabel,
|
|
24324
|
+
});
|
|
24325
|
+
}
|
|
24326
|
+
try {
|
|
24327
|
+
await this.uploadKnowledgeSourceFilesToVectorStore({
|
|
24328
|
+
client,
|
|
24329
|
+
vectorStoreId,
|
|
24330
|
+
files: fileStreams,
|
|
24331
|
+
totalBytes,
|
|
24332
|
+
logLabel,
|
|
24333
|
+
});
|
|
24334
|
+
}
|
|
24335
|
+
catch (error) {
|
|
24336
|
+
assertsError(error);
|
|
24337
|
+
console.error('[🤰]', 'Error uploading files to vector store', {
|
|
24338
|
+
vectorStoreId,
|
|
24339
|
+
logLabel,
|
|
24340
|
+
error: serializeError(error),
|
|
24341
|
+
});
|
|
24342
|
+
}
|
|
24343
|
+
}
|
|
24344
|
+
else if (this.options.isVerbose) {
|
|
24345
|
+
console.info('[🤰]', 'No knowledge source files to upload', {
|
|
24346
|
+
vectorStoreId,
|
|
24347
|
+
skippedCount: skippedSources.length,
|
|
24348
|
+
logLabel,
|
|
24349
|
+
});
|
|
24350
|
+
}
|
|
24351
|
+
return {
|
|
24352
|
+
vectorStoreId,
|
|
24353
|
+
uploadedFileCount: fileStreams.length,
|
|
24354
|
+
skippedCount: skippedSources.length,
|
|
24355
|
+
totalBytes,
|
|
24356
|
+
};
|
|
24357
|
+
}
|
|
24358
|
+
}
|
|
24359
|
+
|
|
24360
|
+
const DEFAULT_AGENT_KIT_MODEL_NAME = 'gpt-5.2';
|
|
24361
|
+
const DEFAULT_JSON_SCHEMA_NAME = 'StructuredOutput';
|
|
24362
|
+
/*
|
|
24363
|
+
TODO: Use or remove
|
|
24364
|
+
const EMPTY_JSON_SCHEMA: JsonSchemaDefinition['schema'] = {
|
|
24365
|
+
type: 'object',
|
|
24366
|
+
properties: {},
|
|
24367
|
+
required: [],
|
|
24368
|
+
additionalProperties: true,
|
|
24369
|
+
};
|
|
24370
|
+
*/
|
|
24371
|
+
function buildJsonSchemaDefinition(jsonSchema) {
|
|
24372
|
+
var _a, _b, _c;
|
|
24373
|
+
const schema = (_a = jsonSchema === null || jsonSchema === void 0 ? void 0 : jsonSchema.schema) !== null && _a !== void 0 ? _a : {};
|
|
24374
|
+
return {
|
|
24375
|
+
type: 'json_schema',
|
|
24376
|
+
name: (_b = jsonSchema === null || jsonSchema === void 0 ? void 0 : jsonSchema.name) !== null && _b !== void 0 ? _b : DEFAULT_JSON_SCHEMA_NAME,
|
|
24377
|
+
strict: Boolean(jsonSchema === null || jsonSchema === void 0 ? void 0 : jsonSchema.strict),
|
|
24378
|
+
schema: {
|
|
24379
|
+
type: 'object',
|
|
24380
|
+
properties: ((_c = schema.properties) !== null && _c !== void 0 ? _c : {}),
|
|
24381
|
+
required: Array.isArray(schema.required) ? schema.required : [],
|
|
24382
|
+
additionalProperties: schema.additionalProperties === undefined ? true : Boolean(schema.additionalProperties),
|
|
24383
|
+
description: schema.description,
|
|
24384
|
+
},
|
|
24385
|
+
};
|
|
24386
|
+
}
|
|
24387
|
+
/**
|
|
24388
|
+
* Maps OpenAI `response_format` payloads to AgentKit output types so the runner can forward
|
|
24389
|
+
* structured-output preferences to OpenAI while still reusing the same AgentKit agent instance.
|
|
24390
|
+
*
|
|
24391
|
+
* @param responseFormat - The OpenAI `response_format` payload from the user request.
|
|
24392
|
+
* @returns An Agent output type compatible with the requested schema or `undefined` when no impact is required.
|
|
24393
|
+
* @private utility of Open AI
|
|
24394
|
+
*/
|
|
24395
|
+
function mapResponseFormatToAgentOutputType(responseFormat) {
|
|
24396
|
+
if (!responseFormat) {
|
|
24397
|
+
return undefined;
|
|
24398
|
+
}
|
|
24399
|
+
if (typeof responseFormat === 'string') {
|
|
24400
|
+
if (responseFormat === 'text') {
|
|
24401
|
+
return 'text';
|
|
24402
|
+
}
|
|
24403
|
+
if (responseFormat === 'json_schema' || responseFormat === 'json_object') {
|
|
24404
|
+
return buildJsonSchemaDefinition();
|
|
24405
|
+
}
|
|
24406
|
+
return 'text';
|
|
24407
|
+
}
|
|
24408
|
+
switch (responseFormat.type) {
|
|
24409
|
+
case 'text':
|
|
24410
|
+
return 'text';
|
|
24411
|
+
case 'json_schema':
|
|
24412
|
+
return buildJsonSchemaDefinition(responseFormat.json_schema);
|
|
24413
|
+
case 'json_object':
|
|
24414
|
+
return buildJsonSchemaDefinition();
|
|
24415
|
+
default:
|
|
24416
|
+
return undefined;
|
|
24417
|
+
}
|
|
24418
|
+
}
|
|
24419
|
+
/**
|
|
24420
|
+
* Execution tools for OpenAI AgentKit (Agents SDK).
|
|
24421
|
+
*
|
|
24422
|
+
* @public exported from `@promptbook/openai`
|
|
24423
|
+
*/
|
|
24424
|
+
class OpenAiAgentKitExecutionTools extends OpenAiVectorStoreHandler {
|
|
24425
|
+
/**
|
|
24426
|
+
* Creates OpenAI AgentKit execution tools.
|
|
24427
|
+
*/
|
|
24428
|
+
constructor(options) {
|
|
24429
|
+
var _a;
|
|
24430
|
+
if (options.isProxied) {
|
|
24431
|
+
throw new NotYetImplementedError(`Proxy mode is not yet implemented for OpenAI AgentKit`);
|
|
24432
|
+
}
|
|
24433
|
+
super(options);
|
|
24434
|
+
this.preparedAgentKitAgent = null;
|
|
24435
|
+
this.agentKitModelName = (_a = options.agentKitModelName) !== null && _a !== void 0 ? _a : DEFAULT_AGENT_KIT_MODEL_NAME;
|
|
24436
|
+
}
|
|
24437
|
+
get title() {
|
|
24438
|
+
return 'OpenAI AgentKit';
|
|
24439
|
+
}
|
|
24440
|
+
get description() {
|
|
24441
|
+
return 'Use OpenAI AgentKit for agent-style chat with tools and knowledge';
|
|
24442
|
+
}
|
|
24443
|
+
/**
|
|
24444
|
+
* Calls OpenAI AgentKit with a chat prompt (non-streaming).
|
|
24445
|
+
*/
|
|
24446
|
+
async callChatModel(prompt) {
|
|
24447
|
+
return this.callChatModelStream(prompt, () => { });
|
|
24448
|
+
}
|
|
24449
|
+
/**
|
|
24450
|
+
* Calls OpenAI AgentKit with a chat prompt (streaming).
|
|
24451
|
+
*/
|
|
24452
|
+
async callChatModelStream(prompt, onProgress) {
|
|
24453
|
+
const { content, parameters, modelRequirements } = prompt;
|
|
24454
|
+
if (modelRequirements.modelVariant !== 'CHAT') {
|
|
24455
|
+
throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
|
|
24456
|
+
}
|
|
24457
|
+
for (const key of ['maxTokens', 'modelName', 'seed', 'temperature']) {
|
|
24458
|
+
if (modelRequirements[key] !== undefined) {
|
|
24459
|
+
throw new NotYetImplementedError(`In \`OpenAiAgentKitExecutionTools\` you cannot specify \`${key}\``);
|
|
24460
|
+
}
|
|
24461
|
+
}
|
|
24462
|
+
const rawPromptContent = templateParameters(content, {
|
|
24463
|
+
...parameters,
|
|
24464
|
+
modelName: this.agentKitModelName,
|
|
24465
|
+
});
|
|
24466
|
+
const responseFormatOutputType = mapResponseFormatToAgentOutputType(modelRequirements.responseFormat);
|
|
24467
|
+
const preparedAgentKitAgent = await this.prepareAgentKitAgent({
|
|
24468
|
+
name: (prompt.title || 'Agent'),
|
|
24469
|
+
instructions: modelRequirements.systemMessage || '',
|
|
24470
|
+
knowledgeSources: modelRequirements.knowledgeSources,
|
|
24471
|
+
tools: 'tools' in prompt && Array.isArray(prompt.tools) ? prompt.tools : modelRequirements.tools,
|
|
24472
|
+
});
|
|
24473
|
+
return this.callChatModelStreamWithPreparedAgent({
|
|
24474
|
+
openAiAgentKitAgent: preparedAgentKitAgent.agent,
|
|
24475
|
+
prompt,
|
|
24476
|
+
rawPromptContent,
|
|
24477
|
+
onProgress,
|
|
24478
|
+
responseFormatOutputType,
|
|
24479
|
+
});
|
|
24480
|
+
}
|
|
24481
|
+
/**
|
|
24482
|
+
* Returns a prepared AgentKit agent when the server wants to manage caching externally.
|
|
24483
|
+
*/
|
|
24484
|
+
getPreparedAgentKitAgent() {
|
|
24485
|
+
return this.preparedAgentKitAgent;
|
|
24486
|
+
}
|
|
24487
|
+
/**
|
|
24488
|
+
* Stores a prepared AgentKit agent for later reuse by external cache managers.
|
|
24489
|
+
*/
|
|
24490
|
+
setPreparedAgentKitAgent(preparedAgent) {
|
|
24491
|
+
this.preparedAgentKitAgent = preparedAgent;
|
|
24492
|
+
}
|
|
24493
|
+
/**
|
|
24494
|
+
* Creates a new tools instance bound to a prepared AgentKit agent.
|
|
24495
|
+
*/
|
|
24496
|
+
getPreparedAgentTools(preparedAgent) {
|
|
24497
|
+
const tools = new OpenAiAgentKitExecutionTools(this.agentKitOptions);
|
|
24498
|
+
tools.setPreparedAgentKitAgent(preparedAgent);
|
|
24499
|
+
return tools;
|
|
24500
|
+
}
|
|
24501
|
+
/**
|
|
24502
|
+
* Prepares an AgentKit agent with optional knowledge sources and tool definitions.
|
|
24503
|
+
*/
|
|
24504
|
+
async prepareAgentKitAgent(options) {
|
|
24505
|
+
var _a, _b;
|
|
24506
|
+
const { name, instructions, knowledgeSources, tools, vectorStoreId: cachedVectorStoreId, storeAsPrepared, } = options;
|
|
24507
|
+
await this.ensureAgentKitDefaults();
|
|
24508
|
+
if (this.options.isVerbose) {
|
|
24509
|
+
console.info('[🤰]', 'Preparing OpenAI AgentKit agent', {
|
|
24510
|
+
name,
|
|
24511
|
+
instructionsLength: instructions.length,
|
|
24512
|
+
knowledgeSourcesCount: (_a = knowledgeSources === null || knowledgeSources === void 0 ? void 0 : knowledgeSources.length) !== null && _a !== void 0 ? _a : 0,
|
|
24513
|
+
toolsCount: (_b = tools === null || tools === void 0 ? void 0 : tools.length) !== null && _b !== void 0 ? _b : 0,
|
|
24514
|
+
});
|
|
24515
|
+
}
|
|
24516
|
+
let vectorStoreId = cachedVectorStoreId;
|
|
24517
|
+
if (!vectorStoreId && knowledgeSources && knowledgeSources.length > 0) {
|
|
24518
|
+
const vectorStoreResult = await this.createVectorStoreWithKnowledgeSources({
|
|
24519
|
+
client: await this.getClient(),
|
|
24520
|
+
name,
|
|
24521
|
+
knowledgeSources,
|
|
24522
|
+
logLabel: 'agentkit preparation',
|
|
24523
|
+
});
|
|
24524
|
+
vectorStoreId = vectorStoreResult.vectorStoreId;
|
|
24525
|
+
}
|
|
24526
|
+
else if (vectorStoreId && this.options.isVerbose) {
|
|
24527
|
+
console.info('[🤰]', 'Using cached vector store for AgentKit agent', {
|
|
24528
|
+
name,
|
|
24529
|
+
vectorStoreId,
|
|
24530
|
+
});
|
|
24531
|
+
}
|
|
24532
|
+
const agentKitTools = this.buildAgentKitTools({ tools, vectorStoreId });
|
|
24533
|
+
const openAiAgentKitAgent = new Agent$1({
|
|
24534
|
+
name,
|
|
24535
|
+
model: this.agentKitModelName,
|
|
24536
|
+
instructions: instructions || 'You are a helpful assistant.',
|
|
24537
|
+
tools: agentKitTools,
|
|
24538
|
+
});
|
|
24539
|
+
const preparedAgent = {
|
|
24540
|
+
agent: openAiAgentKitAgent,
|
|
24541
|
+
vectorStoreId,
|
|
24542
|
+
};
|
|
24543
|
+
if (storeAsPrepared) {
|
|
24544
|
+
this.setPreparedAgentKitAgent(preparedAgent);
|
|
24545
|
+
}
|
|
24546
|
+
if (this.options.isVerbose) {
|
|
24547
|
+
console.info('[🤰]', 'OpenAI AgentKit agent ready', {
|
|
24548
|
+
name,
|
|
24549
|
+
model: this.agentKitModelName,
|
|
24550
|
+
toolCount: agentKitTools.length,
|
|
24551
|
+
hasVectorStore: Boolean(vectorStoreId),
|
|
24552
|
+
});
|
|
24553
|
+
}
|
|
24554
|
+
return preparedAgent;
|
|
24555
|
+
}
|
|
24556
|
+
/**
|
|
24557
|
+
* Ensures the AgentKit SDK is wired to the OpenAI client and API key.
|
|
24558
|
+
*/
|
|
24559
|
+
async ensureAgentKitDefaults() {
|
|
24560
|
+
const client = await this.getClient();
|
|
24561
|
+
setDefaultOpenAIClient(client);
|
|
24562
|
+
const apiKey = this.agentKitOptions.apiKey;
|
|
24563
|
+
if (apiKey && typeof apiKey === 'string') {
|
|
24564
|
+
setDefaultOpenAIKey(apiKey);
|
|
24565
|
+
}
|
|
24566
|
+
}
|
|
24567
|
+
/**
|
|
24568
|
+
* Builds the tool list for AgentKit, including hosted file search when applicable.
|
|
24569
|
+
*/
|
|
24570
|
+
buildAgentKitTools(options) {
|
|
24571
|
+
var _a;
|
|
24572
|
+
const { tools, vectorStoreId } = options;
|
|
24573
|
+
const agentKitTools = [];
|
|
24574
|
+
if (vectorStoreId) {
|
|
24575
|
+
agentKitTools.push(fileSearchTool(vectorStoreId));
|
|
24576
|
+
}
|
|
24577
|
+
if (tools && tools.length > 0) {
|
|
24578
|
+
const scriptTools = this.resolveScriptTools();
|
|
24579
|
+
for (const toolDefinition of tools) {
|
|
24580
|
+
agentKitTools.push(tool({
|
|
24581
|
+
name: toolDefinition.name,
|
|
24582
|
+
description: toolDefinition.description,
|
|
24583
|
+
parameters: toolDefinition.parameters
|
|
24584
|
+
? {
|
|
24585
|
+
...toolDefinition.parameters,
|
|
24586
|
+
additionalProperties: false,
|
|
24587
|
+
required: (_a = toolDefinition.parameters.required) !== null && _a !== void 0 ? _a : [],
|
|
24588
|
+
}
|
|
24589
|
+
: undefined,
|
|
24590
|
+
strict: false,
|
|
24591
|
+
execute: async (input, runContext, details) => {
|
|
24592
|
+
var _a, _b, _c;
|
|
24593
|
+
const scriptTool = scriptTools[0];
|
|
24594
|
+
const functionName = toolDefinition.name;
|
|
24595
|
+
const calledAt = $getCurrentDate();
|
|
24596
|
+
const callId = (_a = details === null || details === void 0 ? void 0 : details.toolCall) === null || _a === void 0 ? void 0 : _a.callId;
|
|
24597
|
+
const functionArgs = input !== null && input !== void 0 ? input : {};
|
|
24598
|
+
if (this.options.isVerbose) {
|
|
24599
|
+
console.info('[🤰]', 'Executing AgentKit tool', {
|
|
24600
|
+
functionName,
|
|
24601
|
+
callId,
|
|
24602
|
+
calledAt,
|
|
24603
|
+
});
|
|
24604
|
+
}
|
|
24605
|
+
try {
|
|
24606
|
+
return await scriptTool.execute({
|
|
24607
|
+
scriptLanguage: 'javascript',
|
|
24608
|
+
script: `
|
|
24609
|
+
const args = ${JSON.stringify(functionArgs)};
|
|
24610
|
+
return await ${functionName}(args);
|
|
24611
|
+
`,
|
|
24612
|
+
parameters: (_c = (_b = runContext === null || runContext === void 0 ? void 0 : runContext.context) === null || _b === void 0 ? void 0 : _b.parameters) !== null && _c !== void 0 ? _c : {},
|
|
24613
|
+
});
|
|
24614
|
+
}
|
|
24615
|
+
catch (error) {
|
|
24616
|
+
assertsError(error);
|
|
24617
|
+
const serializedError = serializeError(error);
|
|
24618
|
+
const errorMessage = spaceTrim$2((block) => `
|
|
24619
|
+
|
|
24620
|
+
The invoked tool \`${functionName}\` failed with error:
|
|
24621
|
+
|
|
24622
|
+
\`\`\`json
|
|
24623
|
+
${block(JSON.stringify(serializedError, null, 4))}
|
|
24624
|
+
\`\`\`
|
|
24625
|
+
|
|
24626
|
+
`);
|
|
24627
|
+
console.error('[🤰]', 'AgentKit tool execution failed', {
|
|
24628
|
+
functionName,
|
|
24629
|
+
callId,
|
|
24630
|
+
error: serializedError,
|
|
24631
|
+
});
|
|
24632
|
+
return errorMessage;
|
|
24633
|
+
}
|
|
24634
|
+
},
|
|
24635
|
+
}));
|
|
24636
|
+
}
|
|
23358
24637
|
}
|
|
23359
|
-
|
|
23360
|
-
|
|
23361
|
-
|
|
23362
|
-
|
|
23363
|
-
|
|
23364
|
-
|
|
23365
|
-
|
|
23366
|
-
|
|
24638
|
+
return agentKitTools;
|
|
24639
|
+
}
|
|
24640
|
+
/**
|
|
24641
|
+
* Resolves the configured script tools for tool execution.
|
|
24642
|
+
*/
|
|
24643
|
+
resolveScriptTools() {
|
|
24644
|
+
const executionTools = this.options.executionTools;
|
|
24645
|
+
if (!executionTools || !executionTools.script) {
|
|
24646
|
+
throw new PipelineExecutionError(`Model requested tools but no executionTools.script were provided in OpenAiAgentKitExecutionTools options`);
|
|
23367
24647
|
}
|
|
24648
|
+
return Array.isArray(executionTools.script) ? executionTools.script : [executionTools.script];
|
|
24649
|
+
}
|
|
24650
|
+
/**
|
|
24651
|
+
* Runs a prepared AgentKit agent and streams results back to the caller.
|
|
24652
|
+
*/
|
|
24653
|
+
async callChatModelStreamWithPreparedAgent(options) {
|
|
24654
|
+
var _a, _b, _c, _d;
|
|
24655
|
+
const { openAiAgentKitAgent, prompt, onProgress } = options;
|
|
24656
|
+
const rawPromptContent = (_a = options.rawPromptContent) !== null && _a !== void 0 ? _a : templateParameters(prompt.content, {
|
|
24657
|
+
...prompt.parameters,
|
|
24658
|
+
modelName: this.agentKitModelName,
|
|
24659
|
+
});
|
|
24660
|
+
const agentForRun = options.responseFormatOutputType !== undefined
|
|
24661
|
+
? openAiAgentKitAgent.clone({
|
|
24662
|
+
outputType: options.responseFormatOutputType,
|
|
24663
|
+
})
|
|
24664
|
+
: openAiAgentKitAgent;
|
|
23368
24665
|
const start = $getCurrentDate();
|
|
23369
|
-
|
|
24666
|
+
let latestContent = '';
|
|
24667
|
+
const toolCalls = [];
|
|
24668
|
+
const toolCallIndexById = new Map();
|
|
24669
|
+
const inputItems = await this.buildAgentKitInputItems(prompt, rawPromptContent);
|
|
23370
24670
|
const rawRequest = {
|
|
23371
|
-
|
|
23372
|
-
|
|
23373
|
-
input,
|
|
23374
|
-
instructions: modelRequirements.systemMessage,
|
|
23375
|
-
tools: agentTools.length > 0 ? agentTools : undefined,
|
|
23376
|
-
tool_resources: toolResources,
|
|
23377
|
-
store: false, // Stateless by default as we pass full history
|
|
24671
|
+
agentName: agentForRun.name,
|
|
24672
|
+
input: inputItems,
|
|
23378
24673
|
};
|
|
23379
|
-
|
|
23380
|
-
|
|
23381
|
-
|
|
23382
|
-
|
|
23383
|
-
|
|
23384
|
-
|
|
23385
|
-
|
|
23386
|
-
|
|
23387
|
-
|
|
23388
|
-
|
|
23389
|
-
|
|
23390
|
-
|
|
23391
|
-
|
|
23392
|
-
|
|
23393
|
-
|
|
23394
|
-
|
|
23395
|
-
|
|
23396
|
-
|
|
23397
|
-
|
|
23398
|
-
|
|
23399
|
-
|
|
23400
|
-
|
|
23401
|
-
|
|
23402
|
-
|
|
24674
|
+
const streamResult = await run(agentForRun, inputItems, {
|
|
24675
|
+
stream: true,
|
|
24676
|
+
context: { parameters: prompt.parameters },
|
|
24677
|
+
});
|
|
24678
|
+
for await (const event of streamResult) {
|
|
24679
|
+
if (event.type === 'raw_model_stream_event' && ((_b = event.data) === null || _b === void 0 ? void 0 : _b.type) === 'output_text_delta') {
|
|
24680
|
+
latestContent += event.data.delta;
|
|
24681
|
+
onProgress({
|
|
24682
|
+
content: latestContent,
|
|
24683
|
+
modelName: this.agentKitModelName,
|
|
24684
|
+
timing: { start, complete: $getCurrentDate() },
|
|
24685
|
+
usage: UNCERTAIN_USAGE,
|
|
24686
|
+
rawPromptContent: rawPromptContent,
|
|
24687
|
+
rawRequest: null,
|
|
24688
|
+
rawResponse: {},
|
|
24689
|
+
});
|
|
24690
|
+
continue;
|
|
24691
|
+
}
|
|
24692
|
+
if (event.type === 'run_item_stream_event') {
|
|
24693
|
+
const rawItem = (_c = event.item) === null || _c === void 0 ? void 0 : _c.rawItem;
|
|
24694
|
+
if (event.name === 'tool_called' && (rawItem === null || rawItem === void 0 ? void 0 : rawItem.type) === 'function_call') {
|
|
24695
|
+
const toolCall = {
|
|
24696
|
+
name: rawItem.name,
|
|
24697
|
+
arguments: rawItem.arguments,
|
|
24698
|
+
rawToolCall: rawItem,
|
|
24699
|
+
createdAt: $getCurrentDate(),
|
|
24700
|
+
};
|
|
24701
|
+
toolCallIndexById.set(rawItem.callId, toolCalls.length);
|
|
24702
|
+
toolCalls.push(toolCall);
|
|
24703
|
+
onProgress({
|
|
24704
|
+
content: latestContent,
|
|
24705
|
+
modelName: this.agentKitModelName,
|
|
24706
|
+
timing: { start, complete: $getCurrentDate() },
|
|
24707
|
+
usage: UNCERTAIN_USAGE,
|
|
24708
|
+
rawPromptContent: rawPromptContent,
|
|
24709
|
+
rawRequest: null,
|
|
24710
|
+
rawResponse: {},
|
|
24711
|
+
toolCalls: [toolCall],
|
|
24712
|
+
});
|
|
24713
|
+
}
|
|
24714
|
+
if (event.name === 'tool_output' && (rawItem === null || rawItem === void 0 ? void 0 : rawItem.type) === 'function_call_result') {
|
|
24715
|
+
const index = toolCallIndexById.get(rawItem.callId);
|
|
24716
|
+
const result = this.formatAgentKitToolOutput(rawItem.output);
|
|
24717
|
+
if (index !== undefined) {
|
|
24718
|
+
const existingToolCall = toolCalls[index];
|
|
24719
|
+
const completedToolCall = {
|
|
24720
|
+
...existingToolCall,
|
|
24721
|
+
result,
|
|
24722
|
+
rawToolCall: rawItem,
|
|
24723
|
+
};
|
|
24724
|
+
toolCalls[index] = completedToolCall;
|
|
24725
|
+
onProgress({
|
|
24726
|
+
content: latestContent,
|
|
24727
|
+
modelName: this.agentKitModelName,
|
|
24728
|
+
timing: { start, complete: $getCurrentDate() },
|
|
24729
|
+
usage: UNCERTAIN_USAGE,
|
|
24730
|
+
rawPromptContent: rawPromptContent,
|
|
24731
|
+
rawRequest: null,
|
|
24732
|
+
rawResponse: {},
|
|
24733
|
+
toolCalls: [completedToolCall],
|
|
24734
|
+
});
|
|
23403
24735
|
}
|
|
23404
24736
|
}
|
|
23405
|
-
else if (item.type === 'function_call') ;
|
|
23406
24737
|
}
|
|
23407
24738
|
}
|
|
23408
|
-
|
|
23409
|
-
|
|
23410
|
-
|
|
23411
|
-
|
|
23412
|
-
|
|
23413
|
-
|
|
23414
|
-
content: resultContent,
|
|
23415
|
-
modelName: response.model || 'agent',
|
|
24739
|
+
await streamResult.completed;
|
|
24740
|
+
const complete = $getCurrentDate();
|
|
24741
|
+
const finalContent = ((_d = streamResult.finalOutput) !== null && _d !== void 0 ? _d : latestContent);
|
|
24742
|
+
const finalResult = {
|
|
24743
|
+
content: finalContent,
|
|
24744
|
+
modelName: this.agentKitModelName,
|
|
23416
24745
|
timing: { start, complete },
|
|
23417
24746
|
usage: UNCERTAIN_USAGE,
|
|
23418
|
-
rawPromptContent,
|
|
24747
|
+
rawPromptContent: rawPromptContent,
|
|
23419
24748
|
rawRequest,
|
|
23420
|
-
rawResponse:
|
|
23421
|
-
|
|
23422
|
-
|
|
23423
|
-
|
|
23424
|
-
|
|
23425
|
-
order: [],
|
|
23426
|
-
value: {
|
|
23427
|
-
content: resultContent,
|
|
23428
|
-
modelName: response.model || 'agent',
|
|
23429
|
-
timing: { start, complete },
|
|
23430
|
-
usage: UNCERTAIN_USAGE,
|
|
23431
|
-
rawPromptContent,
|
|
23432
|
-
rawRequest,
|
|
23433
|
-
rawResponse: response,
|
|
23434
|
-
toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
|
|
23435
|
-
},
|
|
23436
|
-
});
|
|
24749
|
+
rawResponse: { runResult: streamResult },
|
|
24750
|
+
toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
|
|
24751
|
+
};
|
|
24752
|
+
onProgress(finalResult);
|
|
24753
|
+
return finalResult;
|
|
23437
24754
|
}
|
|
23438
24755
|
/**
|
|
23439
|
-
*
|
|
24756
|
+
* Builds AgentKit input items from the prompt and optional thread.
|
|
23440
24757
|
*/
|
|
23441
|
-
|
|
23442
|
-
|
|
23443
|
-
const
|
|
23444
|
-
|
|
23445
|
-
|
|
23446
|
-
|
|
23447
|
-
|
|
23448
|
-
|
|
23449
|
-
|
|
23450
|
-
|
|
23451
|
-
|
|
23452
|
-
|
|
23453
|
-
|
|
23454
|
-
const response = await fetch(source);
|
|
23455
|
-
if (!response.ok) {
|
|
23456
|
-
console.error(`Failed to download ${source}: ${response.statusText}`);
|
|
23457
|
-
continue;
|
|
23458
|
-
}
|
|
23459
|
-
const buffer = await response.arrayBuffer();
|
|
23460
|
-
const filename = source.split('/').pop() || 'downloaded-file';
|
|
23461
|
-
const blob = new Blob([buffer]);
|
|
23462
|
-
const file = new File([blob], filename);
|
|
23463
|
-
fileStreams.push(file);
|
|
24758
|
+
async buildAgentKitInputItems(prompt, rawPromptContent) {
|
|
24759
|
+
var _a;
|
|
24760
|
+
const inputItems = [];
|
|
24761
|
+
if ('thread' in prompt && Array.isArray(prompt.thread)) {
|
|
24762
|
+
for (const message of prompt.thread) {
|
|
24763
|
+
const sender = message.sender;
|
|
24764
|
+
const content = (_a = message.content) !== null && _a !== void 0 ? _a : '';
|
|
24765
|
+
if (sender === 'assistant' || sender === 'agent') {
|
|
24766
|
+
inputItems.push({
|
|
24767
|
+
role: 'assistant',
|
|
24768
|
+
status: 'completed',
|
|
24769
|
+
content: [{ type: 'output_text', text: content }],
|
|
24770
|
+
});
|
|
23464
24771
|
}
|
|
23465
24772
|
else {
|
|
23466
|
-
|
|
24773
|
+
inputItems.push({
|
|
24774
|
+
role: 'user',
|
|
24775
|
+
content,
|
|
24776
|
+
});
|
|
23467
24777
|
}
|
|
23468
24778
|
}
|
|
23469
|
-
catch (error) {
|
|
23470
|
-
console.error(`Error processing knowledge source ${source}:`, error);
|
|
23471
|
-
}
|
|
23472
24779
|
}
|
|
23473
|
-
|
|
23474
|
-
|
|
23475
|
-
|
|
23476
|
-
|
|
23477
|
-
|
|
23478
|
-
|
|
23479
|
-
|
|
23480
|
-
|
|
23481
|
-
|
|
24780
|
+
const userContent = await this.buildAgentKitUserContent(prompt, rawPromptContent);
|
|
24781
|
+
inputItems.push({
|
|
24782
|
+
role: 'user',
|
|
24783
|
+
content: userContent,
|
|
24784
|
+
});
|
|
24785
|
+
return inputItems;
|
|
24786
|
+
}
|
|
24787
|
+
/**
|
|
24788
|
+
* Builds the user message content for AgentKit runs, including file inputs when provided.
|
|
24789
|
+
*/
|
|
24790
|
+
async buildAgentKitUserContent(prompt, rawPromptContent) {
|
|
24791
|
+
if ('files' in prompt && Array.isArray(prompt.files) && prompt.files.length > 0) {
|
|
24792
|
+
const fileItems = await Promise.all(prompt.files.map(async (file) => {
|
|
24793
|
+
const arrayBuffer = await file.arrayBuffer();
|
|
24794
|
+
const base64 = Buffer.from(arrayBuffer).toString('base64');
|
|
24795
|
+
return {
|
|
24796
|
+
type: 'input_image',
|
|
24797
|
+
image: `data:${file.type};base64,${base64}`,
|
|
24798
|
+
};
|
|
24799
|
+
}));
|
|
24800
|
+
return [{ type: 'input_text', text: rawPromptContent }, ...fileItems];
|
|
24801
|
+
}
|
|
24802
|
+
return rawPromptContent;
|
|
24803
|
+
}
|
|
24804
|
+
/**
|
|
24805
|
+
* Normalizes AgentKit tool outputs into a string for Promptbook tool call results.
|
|
24806
|
+
*/
|
|
24807
|
+
formatAgentKitToolOutput(output) {
|
|
24808
|
+
if (typeof output === 'string') {
|
|
24809
|
+
return output;
|
|
24810
|
+
}
|
|
24811
|
+
if (output && typeof output === 'object') {
|
|
24812
|
+
const textOutput = output;
|
|
24813
|
+
if (textOutput.type === 'text' && typeof textOutput.text === 'string') {
|
|
24814
|
+
return textOutput.text;
|
|
23482
24815
|
}
|
|
23483
24816
|
}
|
|
23484
|
-
return
|
|
24817
|
+
return JSON.stringify(output !== null && output !== void 0 ? output : null);
|
|
23485
24818
|
}
|
|
23486
24819
|
/**
|
|
23487
|
-
*
|
|
24820
|
+
* Returns AgentKit-specific options.
|
|
24821
|
+
*/
|
|
24822
|
+
get agentKitOptions() {
|
|
24823
|
+
return this.options;
|
|
24824
|
+
}
|
|
24825
|
+
/**
|
|
24826
|
+
* Discriminant for type guards.
|
|
23488
24827
|
*/
|
|
23489
24828
|
get discriminant() {
|
|
23490
|
-
return
|
|
24829
|
+
return DISCRIMINANT$1;
|
|
23491
24830
|
}
|
|
23492
24831
|
/**
|
|
23493
|
-
* Type guard to check if given `LlmExecutionTools` are instanceof `
|
|
24832
|
+
* Type guard to check if given `LlmExecutionTools` are instanceof `OpenAiAgentKitExecutionTools`.
|
|
23494
24833
|
*/
|
|
23495
|
-
static
|
|
23496
|
-
return llmExecutionTools.discriminant ===
|
|
24834
|
+
static isOpenAiAgentKitExecutionTools(llmExecutionTools) {
|
|
24835
|
+
return llmExecutionTools.discriminant === DISCRIMINANT$1;
|
|
23497
24836
|
}
|
|
23498
24837
|
}
|
|
24838
|
+
/**
|
|
24839
|
+
* Discriminant for type guards.
|
|
24840
|
+
*
|
|
24841
|
+
* @private const of `OpenAiAgentKitExecutionTools`
|
|
24842
|
+
*/
|
|
24843
|
+
const DISCRIMINANT$1 = 'OPEN_AI_AGENT_KIT_V1';
|
|
23499
24844
|
|
|
23500
24845
|
/**
|
|
23501
24846
|
* Uploads files to OpenAI and returns their IDs
|
|
@@ -23530,10 +24875,10 @@ async function uploadFilesToOpenAi(client, files) {
|
|
|
23530
24875
|
* - `OpenAiAssistantExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities, recommended for usage in `Agent` or `AgentLlmExecutionTools`
|
|
23531
24876
|
* - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
|
|
23532
24877
|
*
|
|
24878
|
+
* @deprecated Use `OpenAiAgentKitExecutionTools` instead.
|
|
23533
24879
|
* @public exported from `@promptbook/openai`
|
|
23534
|
-
* @deprecated Use `OpenAiAgentExecutionTools` instead which uses the new OpenAI Responses API
|
|
23535
24880
|
*/
|
|
23536
|
-
class OpenAiAssistantExecutionTools extends
|
|
24881
|
+
class OpenAiAssistantExecutionTools extends OpenAiVectorStoreHandler {
|
|
23537
24882
|
/**
|
|
23538
24883
|
* Creates OpenAI Execution Tools.
|
|
23539
24884
|
*
|
|
@@ -23662,8 +25007,7 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
|
|
|
23662
25007
|
console.info(colors.bgWhite('rawRequest (non-streaming with tools)'), JSON.stringify(rawRequest, null, 4));
|
|
23663
25008
|
}
|
|
23664
25009
|
// Create thread and run
|
|
23665
|
-
|
|
23666
|
-
let run = threadAndRun;
|
|
25010
|
+
let run = (await client.beta.threads.createAndRun(rawRequest));
|
|
23667
25011
|
const completedToolCalls = [];
|
|
23668
25012
|
const toolCallStartedAt = new Map();
|
|
23669
25013
|
// Poll until run completes or requires action
|
|
@@ -23758,14 +25102,14 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
|
|
|
23758
25102
|
}
|
|
23759
25103
|
}
|
|
23760
25104
|
// Submit tool outputs
|
|
23761
|
-
run = await client.beta.threads.runs.submitToolOutputs(run.thread_id, run.id, {
|
|
25105
|
+
run = (await client.beta.threads.runs.submitToolOutputs(run.thread_id, run.id, {
|
|
23762
25106
|
tool_outputs: toolOutputs,
|
|
23763
|
-
});
|
|
25107
|
+
}));
|
|
23764
25108
|
}
|
|
23765
25109
|
else {
|
|
23766
25110
|
// Wait a bit before polling again
|
|
23767
25111
|
await new Promise((resolve) => setTimeout(resolve, 500));
|
|
23768
|
-
run = await client.beta.threads.runs.retrieve(run.thread_id, run.id);
|
|
25112
|
+
run = (await client.beta.threads.runs.retrieve(run.thread_id, run.id));
|
|
23769
25113
|
}
|
|
23770
25114
|
}
|
|
23771
25115
|
if (run.status !== 'completed') {
|
|
@@ -23964,6 +25308,7 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
|
|
|
23964
25308
|
getAssistant(assistantId) {
|
|
23965
25309
|
return new OpenAiAssistantExecutionTools({
|
|
23966
25310
|
...this.options,
|
|
25311
|
+
isCreatingNewAssistantsAllowed: this.isCreatingNewAssistantsAllowed,
|
|
23967
25312
|
assistantId,
|
|
23968
25313
|
});
|
|
23969
25314
|
}
|
|
@@ -23989,88 +25334,13 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
|
|
|
23989
25334
|
let vectorStoreId;
|
|
23990
25335
|
// If knowledge sources are provided, create a vector store with them
|
|
23991
25336
|
if (knowledgeSources && knowledgeSources.length > 0) {
|
|
23992
|
-
|
|
23993
|
-
|
|
23994
|
-
|
|
23995
|
-
|
|
23996
|
-
|
|
23997
|
-
}
|
|
23998
|
-
// Create a vector store
|
|
23999
|
-
const vectorStore = await client.beta.vectorStores.create({
|
|
24000
|
-
name: `${name} Knowledge Base`,
|
|
25337
|
+
const vectorStoreResult = await this.createVectorStoreWithKnowledgeSources({
|
|
25338
|
+
client,
|
|
25339
|
+
name,
|
|
25340
|
+
knowledgeSources,
|
|
25341
|
+
logLabel: 'assistant creation',
|
|
24001
25342
|
});
|
|
24002
|
-
vectorStoreId =
|
|
24003
|
-
if (this.options.isVerbose) {
|
|
24004
|
-
console.info('[🤰]', 'Vector store created', {
|
|
24005
|
-
vectorStoreId,
|
|
24006
|
-
});
|
|
24007
|
-
}
|
|
24008
|
-
// Upload files from knowledge sources to the vector store
|
|
24009
|
-
const fileStreams = [];
|
|
24010
|
-
for (const [index, source] of knowledgeSources.entries()) {
|
|
24011
|
-
try {
|
|
24012
|
-
if (this.options.isVerbose) {
|
|
24013
|
-
console.info('[🤰]', 'Processing knowledge source', {
|
|
24014
|
-
index: index + 1,
|
|
24015
|
-
total: knowledgeSources.length,
|
|
24016
|
-
source,
|
|
24017
|
-
sourceType: source.startsWith('http') || source.startsWith('https') ? 'url' : 'file',
|
|
24018
|
-
});
|
|
24019
|
-
}
|
|
24020
|
-
// Check if it's a URL
|
|
24021
|
-
if (source.startsWith('http://') || source.startsWith('https://')) {
|
|
24022
|
-
// Download the file
|
|
24023
|
-
const response = await fetch(source);
|
|
24024
|
-
if (!response.ok) {
|
|
24025
|
-
console.error(`Failed to download ${source}: ${response.statusText}`);
|
|
24026
|
-
continue;
|
|
24027
|
-
}
|
|
24028
|
-
const buffer = await response.arrayBuffer();
|
|
24029
|
-
let filename = source.split('/').pop() || 'downloaded-file';
|
|
24030
|
-
try {
|
|
24031
|
-
const url = new URL(source);
|
|
24032
|
-
filename = url.pathname.split('/').pop() || filename;
|
|
24033
|
-
}
|
|
24034
|
-
catch (error) {
|
|
24035
|
-
// Keep default filename
|
|
24036
|
-
}
|
|
24037
|
-
const blob = new Blob([buffer]);
|
|
24038
|
-
const file = new File([blob], filename);
|
|
24039
|
-
fileStreams.push(file);
|
|
24040
|
-
}
|
|
24041
|
-
else {
|
|
24042
|
-
/*
|
|
24043
|
-
TODO: [🐱🚀] Resolve problem with browser environment
|
|
24044
|
-
// Assume it's a local file path
|
|
24045
|
-
// Note: This will work in Node.js environment
|
|
24046
|
-
// For browser environments, this would need different handling
|
|
24047
|
-
const fs = await import('fs');
|
|
24048
|
-
const fileStream = fs.createReadStream(source);
|
|
24049
|
-
fileStreams.push(fileStream);
|
|
24050
|
-
*/
|
|
24051
|
-
}
|
|
24052
|
-
}
|
|
24053
|
-
catch (error) {
|
|
24054
|
-
console.error(`Error processing knowledge source ${source}:`, error);
|
|
24055
|
-
}
|
|
24056
|
-
}
|
|
24057
|
-
// Batch upload files to the vector store
|
|
24058
|
-
if (fileStreams.length > 0) {
|
|
24059
|
-
try {
|
|
24060
|
-
await client.beta.vectorStores.fileBatches.uploadAndPoll(vectorStoreId, {
|
|
24061
|
-
files: fileStreams,
|
|
24062
|
-
});
|
|
24063
|
-
if (this.options.isVerbose) {
|
|
24064
|
-
console.info('[🤰]', 'Uploaded files to vector store', {
|
|
24065
|
-
vectorStoreId,
|
|
24066
|
-
fileCount: fileStreams.length,
|
|
24067
|
-
});
|
|
24068
|
-
}
|
|
24069
|
-
}
|
|
24070
|
-
catch (error) {
|
|
24071
|
-
console.error('Error uploading files to vector store:', error);
|
|
24072
|
-
}
|
|
24073
|
-
}
|
|
25343
|
+
vectorStoreId = vectorStoreResult.vectorStoreId;
|
|
24074
25344
|
}
|
|
24075
25345
|
// Create assistant with vector store attached
|
|
24076
25346
|
const assistantConfig = {
|
|
@@ -24137,91 +25407,14 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
|
|
|
24137
25407
|
const client = await this.getClient();
|
|
24138
25408
|
let vectorStoreId;
|
|
24139
25409
|
// If knowledge sources are provided, create a vector store with them
|
|
24140
|
-
// TODO: [🧠] Reuse vector store creation logic from createNewAssistant
|
|
24141
25410
|
if (knowledgeSources && knowledgeSources.length > 0) {
|
|
24142
|
-
|
|
24143
|
-
|
|
24144
|
-
|
|
24145
|
-
|
|
24146
|
-
|
|
24147
|
-
});
|
|
24148
|
-
}
|
|
24149
|
-
// Create a vector store
|
|
24150
|
-
const vectorStore = await client.beta.vectorStores.create({
|
|
24151
|
-
name: `${name} Knowledge Base`,
|
|
25411
|
+
const vectorStoreResult = await this.createVectorStoreWithKnowledgeSources({
|
|
25412
|
+
client,
|
|
25413
|
+
name: name !== null && name !== void 0 ? name : assistantId,
|
|
25414
|
+
knowledgeSources,
|
|
25415
|
+
logLabel: 'assistant update',
|
|
24152
25416
|
});
|
|
24153
|
-
vectorStoreId =
|
|
24154
|
-
if (this.options.isVerbose) {
|
|
24155
|
-
console.info('[🤰]', 'Vector store created for assistant update', {
|
|
24156
|
-
vectorStoreId,
|
|
24157
|
-
});
|
|
24158
|
-
}
|
|
24159
|
-
// Upload files from knowledge sources to the vector store
|
|
24160
|
-
const fileStreams = [];
|
|
24161
|
-
for (const [index, source] of knowledgeSources.entries()) {
|
|
24162
|
-
try {
|
|
24163
|
-
if (this.options.isVerbose) {
|
|
24164
|
-
console.info('[🤰]', 'Processing knowledge source for update', {
|
|
24165
|
-
index: index + 1,
|
|
24166
|
-
total: knowledgeSources.length,
|
|
24167
|
-
source,
|
|
24168
|
-
sourceType: source.startsWith('http') || source.startsWith('https') ? 'url' : 'file',
|
|
24169
|
-
});
|
|
24170
|
-
}
|
|
24171
|
-
// Check if it's a URL
|
|
24172
|
-
if (source.startsWith('http://') || source.startsWith('https://')) {
|
|
24173
|
-
// Download the file
|
|
24174
|
-
const response = await fetch(source);
|
|
24175
|
-
if (!response.ok) {
|
|
24176
|
-
console.error(`Failed to download ${source}: ${response.statusText}`);
|
|
24177
|
-
continue;
|
|
24178
|
-
}
|
|
24179
|
-
const buffer = await response.arrayBuffer();
|
|
24180
|
-
let filename = source.split('/').pop() || 'downloaded-file';
|
|
24181
|
-
try {
|
|
24182
|
-
const url = new URL(source);
|
|
24183
|
-
filename = url.pathname.split('/').pop() || filename;
|
|
24184
|
-
}
|
|
24185
|
-
catch (error) {
|
|
24186
|
-
// Keep default filename
|
|
24187
|
-
}
|
|
24188
|
-
const blob = new Blob([buffer]);
|
|
24189
|
-
const file = new File([blob], filename);
|
|
24190
|
-
fileStreams.push(file);
|
|
24191
|
-
}
|
|
24192
|
-
else {
|
|
24193
|
-
/*
|
|
24194
|
-
TODO: [🐱🚀] Resolve problem with browser environment
|
|
24195
|
-
// Assume it's a local file path
|
|
24196
|
-
// Note: This will work in Node.js environment
|
|
24197
|
-
// For browser environments, this would need different handling
|
|
24198
|
-
const fs = await import('fs');
|
|
24199
|
-
const fileStream = fs.createReadStream(source);
|
|
24200
|
-
fileStreams.push(fileStream);
|
|
24201
|
-
*/
|
|
24202
|
-
}
|
|
24203
|
-
}
|
|
24204
|
-
catch (error) {
|
|
24205
|
-
console.error(`Error processing knowledge source ${source}:`, error);
|
|
24206
|
-
}
|
|
24207
|
-
}
|
|
24208
|
-
// Batch upload files to the vector store
|
|
24209
|
-
if (fileStreams.length > 0) {
|
|
24210
|
-
try {
|
|
24211
|
-
await client.beta.vectorStores.fileBatches.uploadAndPoll(vectorStoreId, {
|
|
24212
|
-
files: fileStreams,
|
|
24213
|
-
});
|
|
24214
|
-
if (this.options.isVerbose) {
|
|
24215
|
-
console.info('[🤰]', 'Uploaded files to vector store for update', {
|
|
24216
|
-
vectorStoreId,
|
|
24217
|
-
fileCount: fileStreams.length,
|
|
24218
|
-
});
|
|
24219
|
-
}
|
|
24220
|
-
}
|
|
24221
|
-
catch (error) {
|
|
24222
|
-
console.error('Error uploading files to vector store:', error);
|
|
24223
|
-
}
|
|
24224
|
-
}
|
|
25417
|
+
vectorStoreId = vectorStoreResult.vectorStoreId;
|
|
24225
25418
|
}
|
|
24226
25419
|
const assistantUpdate = {
|
|
24227
25420
|
name,
|
|
@@ -24325,8 +25518,8 @@ function emitAssistantPreparationProgress(options) {
|
|
|
24325
25518
|
* - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
|
|
24326
25519
|
* - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
|
|
24327
25520
|
* - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
|
|
24328
|
-
* - `OpenAiAgentExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with agent capabilities (using Responses API), recommended for usage in `Agent` or `AgentLlmExecutionTools`
|
|
24329
25521
|
* - `OpenAiAssistantExecutionTools` - (Deprecated) which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities
|
|
25522
|
+
* - `OpenAiAgentKitExecutionTools` - which is a specific implementation of `LlmExecutionTools` backed by OpenAI AgentKit
|
|
24330
25523
|
* - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
|
|
24331
25524
|
*
|
|
24332
25525
|
* @public exported from `@promptbook/core`
|
|
@@ -24461,97 +25654,129 @@ class AgentLlmExecutionTools {
|
|
|
24461
25654
|
* Calls the chat model with agent-specific system prompt and requirements with streaming
|
|
24462
25655
|
*/
|
|
24463
25656
|
async callChatModelStream(prompt, onProgress) {
|
|
25657
|
+
var _a, _b;
|
|
24464
25658
|
// Ensure we're working with a chat prompt
|
|
24465
25659
|
if (prompt.modelRequirements.modelVariant !== 'CHAT') {
|
|
24466
25660
|
throw new Error('AgentLlmExecutionTools only supports chat prompts');
|
|
24467
25661
|
}
|
|
24468
25662
|
const modelRequirements = await this.getModelRequirements();
|
|
25663
|
+
const { _metadata, promptSuffix, ...sanitizedRequirements } = modelRequirements;
|
|
24469
25664
|
const chatPrompt = prompt;
|
|
24470
25665
|
let underlyingLlmResult;
|
|
24471
|
-
|
|
25666
|
+
const chatPromptContentWithSuffix = promptSuffix
|
|
25667
|
+
? `${chatPrompt.content}\n\n${promptSuffix}`
|
|
25668
|
+
: chatPrompt.content;
|
|
24472
25669
|
const promptWithAgentModelRequirements = {
|
|
24473
25670
|
...chatPrompt,
|
|
25671
|
+
content: chatPromptContentWithSuffix,
|
|
24474
25672
|
modelRequirements: {
|
|
24475
25673
|
...chatPrompt.modelRequirements,
|
|
24476
|
-
...
|
|
25674
|
+
...sanitizedRequirements,
|
|
24477
25675
|
// Spread tools to convert readonly array to mutable
|
|
24478
|
-
tools:
|
|
25676
|
+
tools: sanitizedRequirements.tools
|
|
25677
|
+
? [...sanitizedRequirements.tools]
|
|
25678
|
+
: chatPrompt.modelRequirements.tools,
|
|
24479
25679
|
// Spread knowledgeSources to convert readonly array to mutable
|
|
24480
|
-
knowledgeSources:
|
|
24481
|
-
? [...
|
|
25680
|
+
knowledgeSources: sanitizedRequirements.knowledgeSources
|
|
25681
|
+
? [...sanitizedRequirements.knowledgeSources]
|
|
24482
25682
|
: undefined,
|
|
24483
25683
|
// Prepend agent system message to existing system message
|
|
24484
|
-
systemMessage:
|
|
25684
|
+
systemMessage: sanitizedRequirements.systemMessage +
|
|
24485
25685
|
(chatPrompt.modelRequirements.systemMessage
|
|
24486
25686
|
? `\n\n${chatPrompt.modelRequirements.systemMessage}`
|
|
24487
25687
|
: ''),
|
|
24488
25688
|
}, // Cast to avoid readonly mismatch from spread
|
|
24489
25689
|
};
|
|
24490
25690
|
console.log('!!!! promptWithAgentModelRequirements:', promptWithAgentModelRequirements);
|
|
24491
|
-
if (
|
|
24492
|
-
const requirementsHash = SHA256(JSON.stringify(
|
|
24493
|
-
const
|
|
24494
|
-
|
|
24495
|
-
|
|
25691
|
+
if (OpenAiAgentKitExecutionTools.isOpenAiAgentKitExecutionTools(this.options.llmTools)) {
|
|
25692
|
+
const requirementsHash = SHA256(JSON.stringify(sanitizedRequirements)).toString();
|
|
25693
|
+
const vectorStoreHash = SHA256(JSON.stringify((_a = sanitizedRequirements.knowledgeSources) !== null && _a !== void 0 ? _a : [])).toString();
|
|
25694
|
+
const cachedVectorStore = AgentLlmExecutionTools.vectorStoreCache.get(this.title);
|
|
25695
|
+
const cachedAgentKit = AgentLlmExecutionTools.agentKitAgentCache.get(this.title);
|
|
25696
|
+
let preparedAgentKit = this.options.assistantPreparationMode === 'external'
|
|
25697
|
+
? this.options.llmTools.getPreparedAgentKitAgent()
|
|
25698
|
+
: null;
|
|
25699
|
+
const vectorStoreId = (preparedAgentKit === null || preparedAgentKit === void 0 ? void 0 : preparedAgentKit.vectorStoreId) ||
|
|
25700
|
+
(cachedVectorStore && cachedVectorStore.requirementsHash === vectorStoreHash
|
|
25701
|
+
? cachedVectorStore.vectorStoreId
|
|
25702
|
+
: undefined);
|
|
25703
|
+
if (!preparedAgentKit && cachedAgentKit && cachedAgentKit.requirementsHash === requirementsHash) {
|
|
24496
25704
|
if (this.options.isVerbose) {
|
|
24497
|
-
console.
|
|
25705
|
+
console.info('[🤰]', 'Using cached OpenAI AgentKit agent', {
|
|
25706
|
+
agent: this.title,
|
|
25707
|
+
});
|
|
24498
25708
|
}
|
|
24499
|
-
|
|
24500
|
-
|
|
24501
|
-
|
|
24502
|
-
|
|
24503
|
-
// We can cast to access options if they were public, or use a method to clone.
|
|
24504
|
-
// OpenAiAgentExecutionTools doesn't have a clone method.
|
|
24505
|
-
// However, we can just assume the passed tool *might* not have the vector store yet, or we are replacing it.
|
|
24506
|
-
// Actually, if the passed tool IS OpenAiAgentExecutionTools, we should use it as a base.
|
|
24507
|
-
// TODO: [🧠] This is a bit hacky, accessing protected options or recreating tools.
|
|
24508
|
-
// Ideally OpenAiAgentExecutionTools should have a method `withVectorStoreId`.
|
|
24509
|
-
agentTools = new OpenAiAgentExecutionTools({
|
|
24510
|
-
...this.options.llmTools.options,
|
|
24511
|
-
vectorStoreId: cached.vectorStoreId,
|
|
24512
|
-
});
|
|
25709
|
+
preparedAgentKit = {
|
|
25710
|
+
agent: cachedAgentKit.agent,
|
|
25711
|
+
vectorStoreId: cachedAgentKit.vectorStoreId,
|
|
25712
|
+
};
|
|
24513
25713
|
}
|
|
24514
|
-
|
|
25714
|
+
if (!preparedAgentKit) {
|
|
24515
25715
|
if (this.options.isVerbose) {
|
|
24516
|
-
console.
|
|
24517
|
-
|
|
24518
|
-
|
|
24519
|
-
if (modelRequirements.knowledgeSources && modelRequirements.knowledgeSources.length > 0) {
|
|
24520
|
-
const client = await this.options.llmTools.getClient();
|
|
24521
|
-
vectorStoreId = await OpenAiAgentExecutionTools.createVectorStore(client, this.title, modelRequirements.knowledgeSources);
|
|
25716
|
+
console.info('[🤰]', 'Preparing OpenAI AgentKit agent', {
|
|
25717
|
+
agent: this.title,
|
|
25718
|
+
});
|
|
24522
25719
|
}
|
|
24523
|
-
if (vectorStoreId) {
|
|
24524
|
-
|
|
24525
|
-
|
|
24526
|
-
|
|
25720
|
+
if (!vectorStoreId && ((_b = sanitizedRequirements.knowledgeSources) === null || _b === void 0 ? void 0 : _b.length)) {
|
|
25721
|
+
emitAssistantPreparationProgress({
|
|
25722
|
+
onProgress,
|
|
25723
|
+
prompt,
|
|
25724
|
+
modelName: this.modelName,
|
|
25725
|
+
phase: 'Creating knowledge base',
|
|
24527
25726
|
});
|
|
24528
25727
|
}
|
|
24529
|
-
|
|
24530
|
-
|
|
25728
|
+
emitAssistantPreparationProgress({
|
|
25729
|
+
onProgress,
|
|
25730
|
+
prompt,
|
|
25731
|
+
modelName: this.modelName,
|
|
25732
|
+
phase: 'Preparing AgentKit agent',
|
|
25733
|
+
});
|
|
25734
|
+
preparedAgentKit = await this.options.llmTools.prepareAgentKitAgent({
|
|
25735
|
+
name: this.title,
|
|
25736
|
+
instructions: sanitizedRequirements.systemMessage || '',
|
|
25737
|
+
knowledgeSources: sanitizedRequirements.knowledgeSources,
|
|
25738
|
+
tools: sanitizedRequirements.tools ? [...sanitizedRequirements.tools] : undefined,
|
|
24531
25739
|
vectorStoreId,
|
|
24532
25740
|
});
|
|
24533
25741
|
}
|
|
24534
|
-
|
|
24535
|
-
|
|
24536
|
-
|
|
24537
|
-
|
|
24538
|
-
|
|
24539
|
-
|
|
24540
|
-
|
|
24541
|
-
|
|
24542
|
-
|
|
24543
|
-
|
|
24544
|
-
|
|
24545
|
-
|
|
24546
|
-
|
|
24547
|
-
|
|
25742
|
+
if (preparedAgentKit.vectorStoreId) {
|
|
25743
|
+
AgentLlmExecutionTools.vectorStoreCache.set(this.title, {
|
|
25744
|
+
vectorStoreId: preparedAgentKit.vectorStoreId,
|
|
25745
|
+
requirementsHash: vectorStoreHash,
|
|
25746
|
+
});
|
|
25747
|
+
}
|
|
25748
|
+
AgentLlmExecutionTools.agentKitAgentCache.set(this.title, {
|
|
25749
|
+
agent: preparedAgentKit.agent,
|
|
25750
|
+
requirementsHash,
|
|
25751
|
+
vectorStoreId: preparedAgentKit.vectorStoreId,
|
|
25752
|
+
});
|
|
25753
|
+
const responseFormatOutputType = mapResponseFormatToAgentOutputType(promptWithAgentModelRequirements.modelRequirements.responseFormat);
|
|
25754
|
+
underlyingLlmResult = await this.options.llmTools.callChatModelStreamWithPreparedAgent({
|
|
25755
|
+
openAiAgentKitAgent: preparedAgentKit.agent,
|
|
25756
|
+
prompt: promptWithAgentModelRequirements,
|
|
25757
|
+
onProgress,
|
|
25758
|
+
responseFormatOutputType,
|
|
25759
|
+
});
|
|
24548
25760
|
}
|
|
24549
25761
|
else if (OpenAiAssistantExecutionTools.isOpenAiAssistantExecutionTools(this.options.llmTools)) {
|
|
24550
25762
|
// ... deprecated path ...
|
|
24551
|
-
const requirementsHash = SHA256(JSON.stringify(
|
|
25763
|
+
const requirementsHash = SHA256(JSON.stringify(sanitizedRequirements)).toString();
|
|
24552
25764
|
const cached = AgentLlmExecutionTools.assistantCache.get(this.title);
|
|
24553
25765
|
let assistant;
|
|
24554
|
-
if (
|
|
25766
|
+
if (this.options.assistantPreparationMode === 'external') {
|
|
25767
|
+
assistant = this.options.llmTools;
|
|
25768
|
+
if (this.options.isVerbose) {
|
|
25769
|
+
console.info('[🤰]', 'Using externally managed OpenAI Assistant', {
|
|
25770
|
+
agent: this.title,
|
|
25771
|
+
assistantId: assistant.assistantId,
|
|
25772
|
+
});
|
|
25773
|
+
}
|
|
25774
|
+
AgentLlmExecutionTools.assistantCache.set(this.title, {
|
|
25775
|
+
assistantId: assistant.assistantId,
|
|
25776
|
+
requirementsHash,
|
|
25777
|
+
});
|
|
25778
|
+
}
|
|
25779
|
+
else if (cached) {
|
|
24555
25780
|
if (cached.requirementsHash === requirementsHash) {
|
|
24556
25781
|
if (this.options.isVerbose) {
|
|
24557
25782
|
console.info('[🤰]', 'Using cached OpenAI Assistant', {
|
|
@@ -24577,9 +25802,9 @@ class AgentLlmExecutionTools {
|
|
|
24577
25802
|
assistant = await this.options.llmTools.updateAssistant({
|
|
24578
25803
|
assistantId: cached.assistantId,
|
|
24579
25804
|
name: this.title,
|
|
24580
|
-
instructions:
|
|
24581
|
-
knowledgeSources:
|
|
24582
|
-
tools:
|
|
25805
|
+
instructions: sanitizedRequirements.systemMessage,
|
|
25806
|
+
knowledgeSources: sanitizedRequirements.knowledgeSources,
|
|
25807
|
+
tools: sanitizedRequirements.tools ? [...sanitizedRequirements.tools] : undefined,
|
|
24583
25808
|
});
|
|
24584
25809
|
AgentLlmExecutionTools.assistantCache.set(this.title, {
|
|
24585
25810
|
assistantId: assistant.assistantId,
|
|
@@ -24602,9 +25827,9 @@ class AgentLlmExecutionTools {
|
|
|
24602
25827
|
});
|
|
24603
25828
|
assistant = await this.options.llmTools.createNewAssistant({
|
|
24604
25829
|
name: this.title,
|
|
24605
|
-
instructions:
|
|
24606
|
-
knowledgeSources:
|
|
24607
|
-
tools:
|
|
25830
|
+
instructions: sanitizedRequirements.systemMessage,
|
|
25831
|
+
knowledgeSources: sanitizedRequirements.knowledgeSources,
|
|
25832
|
+
tools: sanitizedRequirements.tools ? [...sanitizedRequirements.tools] : undefined,
|
|
24608
25833
|
/*
|
|
24609
25834
|
!!!
|
|
24610
25835
|
metadata: {
|
|
@@ -24646,18 +25871,28 @@ class AgentLlmExecutionTools {
|
|
|
24646
25871
|
}
|
|
24647
25872
|
}
|
|
24648
25873
|
let content = underlyingLlmResult.content;
|
|
24649
|
-
|
|
24650
|
-
|
|
24651
|
-
|
|
24652
|
-
|
|
25874
|
+
if (typeof content === 'string') {
|
|
25875
|
+
// Note: Cleanup the AI artifacts from the content
|
|
25876
|
+
content = humanizeAiText(content);
|
|
25877
|
+
// Note: Make sure the content is Promptbook-like
|
|
25878
|
+
content = promptbookifyAiText(content);
|
|
25879
|
+
}
|
|
25880
|
+
else {
|
|
25881
|
+
// TODO: Maybe deep `humanizeAiText` + `promptbookifyAiText` inside of the object
|
|
25882
|
+
content = JSON.stringify(content);
|
|
25883
|
+
}
|
|
24653
25884
|
const agentResult = {
|
|
24654
25885
|
...underlyingLlmResult,
|
|
24655
|
-
content,
|
|
25886
|
+
content: content,
|
|
24656
25887
|
modelName: this.modelName,
|
|
24657
25888
|
};
|
|
24658
25889
|
return agentResult;
|
|
24659
25890
|
}
|
|
24660
25891
|
}
|
|
25892
|
+
/**
|
|
25893
|
+
* Cached AgentKit agents to avoid rebuilding identical instances.
|
|
25894
|
+
*/
|
|
25895
|
+
AgentLlmExecutionTools.agentKitAgentCache = new Map();
|
|
24661
25896
|
/**
|
|
24662
25897
|
* Cache of OpenAI assistants to avoid creating duplicates
|
|
24663
25898
|
*/
|
|
@@ -24738,8 +25973,8 @@ function buildTeacherSummary(commitments, used) {
|
|
|
24738
25973
|
* - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
|
|
24739
25974
|
* - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
|
|
24740
25975
|
* - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
|
|
24741
|
-
* - `OpenAiAgentExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with agent capabilities (using Responses API), recommended for usage in `Agent` or `AgentLlmExecutionTools`
|
|
24742
25976
|
* - `OpenAiAssistantExecutionTools` - (Deprecated) which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities
|
|
25977
|
+
* - `OpenAiAgentKitExecutionTools` - which is a specific implementation of `LlmExecutionTools` backed by OpenAI AgentKit
|
|
24743
25978
|
* - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
|
|
24744
25979
|
*
|
|
24745
25980
|
* @public exported from `@promptbook/core`
|
|
@@ -24770,6 +26005,7 @@ class Agent extends AgentLlmExecutionTools {
|
|
|
24770
26005
|
super({
|
|
24771
26006
|
isVerbose: options.isVerbose,
|
|
24772
26007
|
llmTools: getSingleLlmExecutionTools(options.executionTools.llm),
|
|
26008
|
+
assistantPreparationMode: options.assistantPreparationMode,
|
|
24773
26009
|
agentSource: agentSource.value, // <- TODO: [🐱🚀] Allow to pass BehaviorSubject<string_book> OR refresh llmExecutionTools.callChat on agentSource change
|
|
24774
26010
|
});
|
|
24775
26011
|
_Agent_instances.add(this);
|
|
@@ -24836,7 +26072,6 @@ class Agent extends AgentLlmExecutionTools {
|
|
|
24836
26072
|
* Note: This method also implements the learning mechanism
|
|
24837
26073
|
*/
|
|
24838
26074
|
async callChatModelStream(prompt, onProgress) {
|
|
24839
|
-
var _a;
|
|
24840
26075
|
// [1] Check if the user is asking the same thing as in the samples
|
|
24841
26076
|
const modelRequirements = await this.getModelRequirements();
|
|
24842
26077
|
if (modelRequirements.samples) {
|
|
@@ -24884,7 +26119,7 @@ class Agent extends AgentLlmExecutionTools {
|
|
|
24884
26119
|
if (result.rawResponse && 'sample' in result.rawResponse) {
|
|
24885
26120
|
return result;
|
|
24886
26121
|
}
|
|
24887
|
-
if (
|
|
26122
|
+
if (modelRequirements.isClosed) {
|
|
24888
26123
|
return result;
|
|
24889
26124
|
}
|
|
24890
26125
|
// Note: [0] Notify start of self-learning
|
|
@@ -25045,6 +26280,63 @@ async function _Agent_selfLearnTeacher(prompt, result) {
|
|
|
25045
26280
|
* TODO: [🧠][😰]Agent is not working with the parameters, should it be?
|
|
25046
26281
|
*/
|
|
25047
26282
|
|
|
26283
|
+
/**
|
|
26284
|
+
* Resolve a remote META IMAGE value into an absolute URL when possible.
|
|
26285
|
+
*/
|
|
26286
|
+
function resolveRemoteImageUrl(imageUrl, agentUrl) {
|
|
26287
|
+
if (!imageUrl) {
|
|
26288
|
+
return undefined;
|
|
26289
|
+
}
|
|
26290
|
+
if (imageUrl.startsWith('http://') ||
|
|
26291
|
+
imageUrl.startsWith('https://') ||
|
|
26292
|
+
imageUrl.startsWith('data:') ||
|
|
26293
|
+
imageUrl.startsWith('blob:')) {
|
|
26294
|
+
return imageUrl;
|
|
26295
|
+
}
|
|
26296
|
+
try {
|
|
26297
|
+
return new URL(imageUrl, agentUrl).href;
|
|
26298
|
+
}
|
|
26299
|
+
catch (_a) {
|
|
26300
|
+
return imageUrl;
|
|
26301
|
+
}
|
|
26302
|
+
}
|
|
26303
|
+
/**
|
|
26304
|
+
* Format a META commitment line when the value is provided.
|
|
26305
|
+
*/
|
|
26306
|
+
function formatMetaLine(label, value) {
|
|
26307
|
+
if (!value) {
|
|
26308
|
+
return null;
|
|
26309
|
+
}
|
|
26310
|
+
return `META ${label} ${value}`;
|
|
26311
|
+
}
|
|
26312
|
+
/**
 * Build a minimal agent source snapshot for remote agents.
 *
 * A remote agents server exposes only a profile (name, persona, META
 * values), not the full book source, so this reconstructs a minimal
 * source text from that profile.
 *
 * @param profile - remote agent profile; reads `agentName` and
 *                  (optionally) `personaDescription`
 * @param meta - optional META values (`fullname`, `image`, `description`,
 *               `color`, `font`, `link`); missing values are omitted
 * @returns agent source text produced by the `book` template tag
 *          (presumably normalizes indentation/blank lines — TODO confirm)
 */
function buildRemoteAgentSource(profile, meta) {
    // Compose one `META <LABEL> <value>` line per provided value,
    // dropping the `null`s that formatMetaLine returns for absent ones.
    const metaLines = [
        formatMetaLine('FULLNAME', meta === null || meta === void 0 ? void 0 : meta.fullname),
        formatMetaLine('IMAGE', meta === null || meta === void 0 ? void 0 : meta.image),
        formatMetaLine('DESCRIPTION', meta === null || meta === void 0 ? void 0 : meta.description),
        formatMetaLine('COLOR', meta === null || meta === void 0 ? void 0 : meta.color),
        formatMetaLine('FONT', meta === null || meta === void 0 ? void 0 : meta.font),
        formatMetaLine('LINK', meta === null || meta === void 0 ? void 0 : meta.link),
    ]
        .filter((line) => Boolean(line))
        .join('\n');
    // Optional PERSONA section; empty string keeps the section out entirely
    // when the profile has no persona description.
    const personaBlock = profile.personaDescription
        ? spaceTrim$2((block) => `
            PERSONA
            ${block(profile.personaDescription || '')}
        `)
        : '';
    // Assemble the source: agent name first, then META lines, then persona.
    return book `
        ${profile.agentName}

        ${metaLines}

        ${personaBlock}
    `;
}
|
|
25048
26340
|
/**
|
|
25049
26341
|
* Represents one AI Agent
|
|
25050
26342
|
*
|
|
@@ -25052,13 +26344,15 @@ async function _Agent_selfLearnTeacher(prompt, result) {
|
|
|
25052
26344
|
* - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
|
|
25053
26345
|
* - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
|
|
25054
26346
|
* - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
|
|
25055
|
-
* - `OpenAiAssistantExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities
|
|
26347
|
+
* - `OpenAiAssistantExecutionTools` - (Deprecated) which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities
|
|
26348
|
+
* - `OpenAiAgentKitExecutionTools` - which is a specific implementation of `LlmExecutionTools` backed by OpenAI AgentKit
|
|
25056
26349
|
* - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
|
|
25057
26350
|
*
|
|
25058
26351
|
* @public exported from `@promptbook/core`
|
|
25059
26352
|
*/
|
|
25060
26353
|
class RemoteAgent extends Agent {
|
|
25061
26354
|
static async connect(options) {
|
|
26355
|
+
var _a, _b, _c;
|
|
25062
26356
|
const agentProfileUrl = `${options.agentUrl}/api/profile`;
|
|
25063
26357
|
const profileResponse = await fetch(agentProfileUrl);
|
|
25064
26358
|
// <- TODO: [🐱🚀] What about closed-source agents?
|
|
@@ -25078,14 +26372,14 @@ class RemoteAgent extends Agent {
|
|
|
25078
26372
|
|
|
25079
26373
|
`));
|
|
25080
26374
|
}
|
|
25081
|
-
const profile = await profileResponse.json();
|
|
26375
|
+
const profile = (await profileResponse.json());
|
|
26376
|
+
const resolvedMeta = {
|
|
26377
|
+
...(profile.meta || {}),
|
|
26378
|
+
image: resolveRemoteImageUrl((_a = profile.meta) === null || _a === void 0 ? void 0 : _a.image, options.agentUrl),
|
|
26379
|
+
};
|
|
25082
26380
|
// Note: We are creating dummy agent source because we don't have the source from the remote agent
|
|
25083
26381
|
// But we populate the metadata from the profile
|
|
25084
|
-
const agentSource = new BehaviorSubject(
|
|
25085
|
-
${profile.agentName}
|
|
25086
|
-
|
|
25087
|
-
${profile.personaDescription}
|
|
25088
|
-
`);
|
|
26382
|
+
const agentSource = new BehaviorSubject(buildRemoteAgentSource(profile, resolvedMeta));
|
|
25089
26383
|
// <- TODO: [🐱🚀] createBookFromProfile
|
|
25090
26384
|
// <- TODO: [🐱🚀] Support updating and self-updating
|
|
25091
26385
|
const remoteAgent = new RemoteAgent({
|
|
@@ -25108,10 +26402,10 @@ class RemoteAgent extends Agent {
|
|
|
25108
26402
|
});
|
|
25109
26403
|
remoteAgent._remoteAgentName = profile.agentName;
|
|
25110
26404
|
remoteAgent._remoteAgentHash = profile.agentHash;
|
|
25111
|
-
remoteAgent.personaDescription = profile.personaDescription;
|
|
25112
|
-
remoteAgent.initialMessage = profile.initialMessage;
|
|
25113
|
-
remoteAgent.links = profile.links;
|
|
25114
|
-
remoteAgent.meta =
|
|
26405
|
+
remoteAgent.personaDescription = (_b = profile.personaDescription) !== null && _b !== void 0 ? _b : null;
|
|
26406
|
+
remoteAgent.initialMessage = (_c = profile.initialMessage) !== null && _c !== void 0 ? _c : null;
|
|
26407
|
+
remoteAgent.links = profile.links || [];
|
|
26408
|
+
remoteAgent.meta = resolvedMeta;
|
|
25115
26409
|
remoteAgent.capabilities = profile.capabilities || [];
|
|
25116
26410
|
remoteAgent.samples = profile.samples || [];
|
|
25117
26411
|
remoteAgent.toolTitles = profile.toolTitles || {};
|
|
@@ -25215,26 +26509,7 @@ class RemoteAgent extends Agent {
|
|
|
25215
26509
|
};
|
|
25216
26510
|
};
|
|
25217
26511
|
const getToolCallKey = (toolCall) => {
|
|
25218
|
-
|
|
25219
|
-
const rawId = (_a = toolCall.rawToolCall) === null || _a === void 0 ? void 0 : _a.id;
|
|
25220
|
-
if (rawId) {
|
|
25221
|
-
return `id:${rawId}`;
|
|
25222
|
-
}
|
|
25223
|
-
const argsKey = (() => {
|
|
25224
|
-
if (typeof toolCall.arguments === 'string') {
|
|
25225
|
-
return toolCall.arguments;
|
|
25226
|
-
}
|
|
25227
|
-
if (!toolCall.arguments) {
|
|
25228
|
-
return '';
|
|
25229
|
-
}
|
|
25230
|
-
try {
|
|
25231
|
-
return JSON.stringify(toolCall.arguments);
|
|
25232
|
-
}
|
|
25233
|
-
catch (_a) {
|
|
25234
|
-
return '';
|
|
25235
|
-
}
|
|
25236
|
-
})();
|
|
25237
|
-
return `${toolCall.name}:${toolCall.createdAt || ''}:${argsKey}`;
|
|
26512
|
+
return getToolCallIdentity(toolCall);
|
|
25238
26513
|
};
|
|
25239
26514
|
const mergeToolCall = (existing, incoming) => {
|
|
25240
26515
|
const incomingResult = incoming.result;
|