@promptbook/core 0.110.0-0 → 0.110.0-10
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/esm/index.es.js +1787 -511
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/components.index.d.ts +4 -0
- package/esm/typings/src/_packages/core.index.d.ts +2 -2
- package/esm/typings/src/_packages/openai.index.d.ts +8 -4
- package/esm/typings/src/_packages/types.index.d.ts +12 -4
- package/esm/typings/src/book-2.0/agent-source/AgentModelRequirements.d.ts +22 -21
- package/esm/typings/src/book-2.0/agent-source/AgentReferenceResolver.d.ts +18 -0
- package/esm/typings/src/book-2.0/agent-source/CreateAgentModelRequirementsOptions.d.ts +12 -0
- package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirements.d.ts +8 -2
- package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.agentReferenceResolver.test.d.ts +1 -0
- package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.d.ts +4 -5
- package/esm/typings/src/book-components/Chat/AgentChip/AgentChip.d.ts +5 -1
- package/esm/typings/src/book-components/Chat/Chat/ChatActionsBar.d.ts +4 -2
- package/esm/typings/src/book-components/Chat/Chat/ChatInputArea.d.ts +1 -0
- package/esm/typings/src/book-components/Chat/Chat/ChatMessageItem.d.ts +4 -0
- package/esm/typings/src/book-components/Chat/Chat/ChatMessageList.d.ts +1 -0
- package/esm/typings/src/book-components/Chat/Chat/ChatProps.d.ts +15 -0
- package/esm/typings/src/book-components/Chat/Chat/ChatSoundToggle.d.ts +31 -0
- package/esm/typings/src/book-components/Chat/LlmChat/LlmChatProps.d.ts +10 -1
- package/esm/typings/src/book-components/Chat/SourceChip/SourceChip.d.ts +5 -1
- package/esm/typings/src/book-components/Chat/utils/collectTeamToolCallSummary.d.ts +69 -0
- package/esm/typings/src/book-components/Chat/utils/getToolCallChipletInfo.d.ts +13 -13
- package/esm/typings/src/book-components/Chat/utils/parseCitationsFromContent.d.ts +9 -0
- package/esm/typings/src/book-components/Chat/utils/toolCallParsing.d.ts +4 -0
- package/esm/typings/src/collection/agent-collection/constructors/agent-collection-in-supabase/AgentsDatabaseSchema.d.ts +0 -3
- package/esm/typings/src/commitments/_base/BaseCommitmentDefinition.d.ts +9 -0
- package/esm/typings/src/execution/LlmExecutionTools.d.ts +2 -1
- package/esm/typings/src/llm-providers/agent/Agent.d.ts +1 -1
- package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.d.ts +5 -1
- package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.test.d.ts +1 -0
- package/esm/typings/src/llm-providers/agent/AgentOptions.d.ts +10 -0
- package/esm/typings/src/llm-providers/agent/CreateAgentLlmExecutionToolsOptions.d.ts +13 -2
- package/esm/typings/src/llm-providers/agent/RemoteAgent.d.ts +2 -1
- package/esm/typings/src/llm-providers/openai/OpenAiAgentKitExecutionTools.d.ts +150 -0
- package/esm/typings/src/llm-providers/openai/OpenAiAgentKitExecutionToolsOptions.d.ts +15 -0
- package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionTools.d.ts +3 -3
- package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionToolsOptions.d.ts +3 -4
- package/esm/typings/src/llm-providers/openai/OpenAiVectorStoreHandler.d.ts +135 -0
- package/esm/typings/src/llm-providers/openai/utils/mapToolsToOpenAi.d.ts +1 -1
- package/esm/typings/src/types/LlmToolDefinition.d.ts +1 -0
- package/esm/typings/src/types/ModelRequirements.d.ts +9 -0
- package/esm/typings/src/utils/DEFAULT_THINKING_MESSAGES.d.ts +8 -0
- package/esm/typings/src/utils/agents/resolveAgentAvatarImageUrl.d.ts +29 -0
- package/esm/typings/src/utils/knowledge/inlineKnowledgeSource.d.ts +38 -0
- package/esm/typings/src/utils/knowledge/inlineKnowledgeSource.test.d.ts +1 -0
- package/esm/typings/src/utils/language/getBrowserPreferredSpeechRecognitionLanguage.d.ts +35 -0
- package/esm/typings/src/utils/toolCalls/getToolCallIdentity.d.ts +10 -0
- package/esm/typings/src/version.d.ts +1 -1
- package/package.json +6 -2
- package/umd/index.umd.js +1790 -516
- package/umd/index.umd.js.map +1 -1
- package/esm/typings/src/llm-providers/openai/OpenAiAgentExecutionTools.d.ts +0 -43
- package/esm/typings/src/llm-providers/openai/createOpenAiAgentExecutionTools.d.ts +0 -11
package/esm/index.es.js
CHANGED
|
@@ -10,6 +10,7 @@ import { lookup, extension } from 'mime-types';
|
|
|
10
10
|
import { parse, unparse } from 'papaparse';
|
|
11
11
|
import moment from 'moment';
|
|
12
12
|
import colors from 'colors';
|
|
13
|
+
import { Agent as Agent$1, setDefaultOpenAIClient, setDefaultOpenAIKey, fileSearchTool, tool, run } from '@openai/agents';
|
|
13
14
|
import Bottleneck from 'bottleneck';
|
|
14
15
|
import OpenAI from 'openai';
|
|
15
16
|
|
|
@@ -27,7 +28,7 @@ const BOOK_LANGUAGE_VERSION = '2.0.0';
|
|
|
27
28
|
* @generated
|
|
28
29
|
* @see https://github.com/webgptorg/promptbook
|
|
29
30
|
*/
|
|
30
|
-
const PROMPTBOOK_ENGINE_VERSION = '0.110.0-
|
|
31
|
+
const PROMPTBOOK_ENGINE_VERSION = '0.110.0-10';
|
|
31
32
|
/**
|
|
32
33
|
* TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
|
|
33
34
|
* Note: [💞] Ignore a discrepancy between file name and entity name
|
|
@@ -7656,11 +7657,14 @@ async function preparePersona(personaDescription, tools, options) {
|
|
|
7656
7657
|
function createEmptyAgentModelRequirements() {
|
|
7657
7658
|
return {
|
|
7658
7659
|
systemMessage: '',
|
|
7660
|
+
promptSuffix: '',
|
|
7659
7661
|
// modelName: 'gpt-5',
|
|
7660
7662
|
modelName: 'gemini-2.5-flash-lite',
|
|
7661
7663
|
temperature: 0.7,
|
|
7662
7664
|
topP: 0.9,
|
|
7663
7665
|
topK: 50,
|
|
7666
|
+
parentAgentUrl: null,
|
|
7667
|
+
isClosed: false,
|
|
7664
7668
|
};
|
|
7665
7669
|
}
|
|
7666
7670
|
/**
|
|
@@ -7777,6 +7781,28 @@ class BaseCommitmentDefinition {
|
|
|
7777
7781
|
return currentMessage + separator + content;
|
|
7778
7782
|
});
|
|
7779
7783
|
}
|
|
7784
|
+
/**
|
|
7785
|
+
* Helper method to create a new requirements object with updated prompt suffix
|
|
7786
|
+
*/
|
|
7787
|
+
updatePromptSuffix(requirements, contentUpdate) {
|
|
7788
|
+
const newSuffix = typeof contentUpdate === 'string' ? contentUpdate : contentUpdate(requirements.promptSuffix);
|
|
7789
|
+
return {
|
|
7790
|
+
...requirements,
|
|
7791
|
+
promptSuffix: newSuffix,
|
|
7792
|
+
};
|
|
7793
|
+
}
|
|
7794
|
+
/**
|
|
7795
|
+
* Helper method to append content to the prompt suffix
|
|
7796
|
+
* Default separator is a single newline for bullet lists.
|
|
7797
|
+
*/
|
|
7798
|
+
appendToPromptSuffix(requirements, content, separator = '\n') {
|
|
7799
|
+
return this.updatePromptSuffix(requirements, (currentSuffix) => {
|
|
7800
|
+
if (!currentSuffix.trim()) {
|
|
7801
|
+
return content;
|
|
7802
|
+
}
|
|
7803
|
+
return `${currentSuffix}${separator}${content}`;
|
|
7804
|
+
});
|
|
7805
|
+
}
|
|
7780
7806
|
/**
|
|
7781
7807
|
* Helper method to add a comment section to the system message
|
|
7782
7808
|
* Comments are lines starting with # that will be removed from the final system message
|
|
@@ -7954,13 +7980,9 @@ class ClosedCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
7954
7980
|
`);
|
|
7955
7981
|
}
|
|
7956
7982
|
applyToAgentModelRequirements(requirements, _content) {
|
|
7957
|
-
const updatedMetadata = {
|
|
7958
|
-
...requirements.metadata,
|
|
7959
|
-
isClosed: true,
|
|
7960
|
-
};
|
|
7961
7983
|
return {
|
|
7962
7984
|
...requirements,
|
|
7963
|
-
|
|
7985
|
+
isClosed: true,
|
|
7964
7986
|
};
|
|
7965
7987
|
}
|
|
7966
7988
|
}
|
|
@@ -8238,12 +8260,12 @@ class DictionaryCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
8238
8260
|
return requirements;
|
|
8239
8261
|
}
|
|
8240
8262
|
// Get existing dictionary entries from metadata
|
|
8241
|
-
const existingDictionary = ((_a = requirements.
|
|
8263
|
+
const existingDictionary = ((_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.DICTIONARY) || '';
|
|
8242
8264
|
// Merge the new dictionary entry with existing entries
|
|
8243
8265
|
const mergedDictionary = existingDictionary ? `${existingDictionary}\n${trimmedContent}` : trimmedContent;
|
|
8244
8266
|
// Store the merged dictionary in metadata for debugging and inspection
|
|
8245
8267
|
const updatedMetadata = {
|
|
8246
|
-
...requirements.
|
|
8268
|
+
...requirements._metadata,
|
|
8247
8269
|
DICTIONARY: mergedDictionary,
|
|
8248
8270
|
};
|
|
8249
8271
|
// Create the dictionary section for the system message
|
|
@@ -8251,7 +8273,7 @@ class DictionaryCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
8251
8273
|
const dictionarySection = `# DICTIONARY\n${mergedDictionary}`;
|
|
8252
8274
|
return {
|
|
8253
8275
|
...this.appendToSystemMessage(requirements, dictionarySection),
|
|
8254
|
-
|
|
8276
|
+
_metadata: updatedMetadata,
|
|
8255
8277
|
};
|
|
8256
8278
|
}
|
|
8257
8279
|
}
|
|
@@ -10696,10 +10718,7 @@ class FromCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
10696
10718
|
applyToAgentModelRequirements(requirements, content) {
|
|
10697
10719
|
const trimmedContent = content.trim();
|
|
10698
10720
|
if (!trimmedContent) {
|
|
10699
|
-
return
|
|
10700
|
-
...requirements,
|
|
10701
|
-
parentAgentUrl: undefined,
|
|
10702
|
-
};
|
|
10721
|
+
return requirements;
|
|
10703
10722
|
}
|
|
10704
10723
|
if (trimmedContent.toUpperCase() === 'VOID' ||
|
|
10705
10724
|
trimmedContent.toUpperCase() === 'NULL' ||
|
|
@@ -10913,6 +10932,136 @@ class ImportCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
10913
10932
|
* Note: [💞] Ignore a discrepancy between file name and entity name
|
|
10914
10933
|
*/
|
|
10915
10934
|
|
|
10935
|
+
/**
|
|
10936
|
+
* @@@
|
|
10937
|
+
*
|
|
10938
|
+
* @private thing of inline knowledge
|
|
10939
|
+
*/
|
|
10940
|
+
const INLINE_KNOWLEDGE_BASE_NAME = 'inline-knowledge';
|
|
10941
|
+
/**
|
|
10942
|
+
* @@@
|
|
10943
|
+
*
|
|
10944
|
+
* @private thing of inline knowledge
|
|
10945
|
+
*/
|
|
10946
|
+
const INLINE_KNOWLEDGE_EXTENSION = '.txt';
|
|
10947
|
+
/**
|
|
10948
|
+
* @@@
|
|
10949
|
+
*
|
|
10950
|
+
* @private thing of inline knowledge
|
|
10951
|
+
*/
|
|
10952
|
+
const DATA_URL_PREFIX = 'data:';
|
|
10953
|
+
/**
|
|
10954
|
+
* @@@
|
|
10955
|
+
*
|
|
10956
|
+
* @private thing of inline knowledge
|
|
10957
|
+
*/
|
|
10958
|
+
function getFirstNonEmptyLine(content) {
|
|
10959
|
+
const lines = content.split(/\r?\n/);
|
|
10960
|
+
for (const line of lines) {
|
|
10961
|
+
const trimmed = line.trim();
|
|
10962
|
+
if (trimmed) {
|
|
10963
|
+
return trimmed;
|
|
10964
|
+
}
|
|
10965
|
+
}
|
|
10966
|
+
return null;
|
|
10967
|
+
}
|
|
10968
|
+
/**
|
|
10969
|
+
* @@@
|
|
10970
|
+
*
|
|
10971
|
+
* @private thing of inline knowledge
|
|
10972
|
+
*/
|
|
10973
|
+
function deriveBaseFilename(content) {
|
|
10974
|
+
const firstLine = getFirstNonEmptyLine(content);
|
|
10975
|
+
if (!firstLine) {
|
|
10976
|
+
return INLINE_KNOWLEDGE_BASE_NAME;
|
|
10977
|
+
}
|
|
10978
|
+
const normalized = normalizeToKebabCase(firstLine);
|
|
10979
|
+
return normalized || INLINE_KNOWLEDGE_BASE_NAME;
|
|
10980
|
+
}
|
|
10981
|
+
/**
|
|
10982
|
+
* Creates a data URL that represents the inline knowledge content as a text file.
|
|
10983
|
+
*
|
|
10984
|
+
* @private thing of inline knowledge
|
|
10985
|
+
*/
|
|
10986
|
+
function createInlineKnowledgeSourceFile(content) {
|
|
10987
|
+
const trimmedContent = content.trim();
|
|
10988
|
+
const baseName = deriveBaseFilename(trimmedContent);
|
|
10989
|
+
const filename = `${baseName}${INLINE_KNOWLEDGE_EXTENSION}`;
|
|
10990
|
+
const mimeType = 'text/plain';
|
|
10991
|
+
const base64 = Buffer.from(trimmedContent, 'utf-8').toString('base64');
|
|
10992
|
+
const encodedFilename = encodeURIComponent(filename);
|
|
10993
|
+
const url = `${DATA_URL_PREFIX}${mimeType};name=${encodedFilename};charset=utf-8;base64,${base64}`;
|
|
10994
|
+
return {
|
|
10995
|
+
filename,
|
|
10996
|
+
mimeType,
|
|
10997
|
+
url,
|
|
10998
|
+
};
|
|
10999
|
+
}
|
|
11000
|
+
/**
|
|
11001
|
+
* Checks whether the provided source string is a data URL that can be decoded.
|
|
11002
|
+
*
|
|
11003
|
+
* @private thing of inline knowledge
|
|
11004
|
+
*/
|
|
11005
|
+
function isDataUrlKnowledgeSource(source) {
|
|
11006
|
+
return typeof source === 'string' && source.startsWith(DATA_URL_PREFIX);
|
|
11007
|
+
}
|
|
11008
|
+
/**
|
|
11009
|
+
* Parses a data URL-based knowledge source into its raw buffer, filename, and MIME type.
|
|
11010
|
+
*
|
|
11011
|
+
* @private thing of inline knowledge
|
|
11012
|
+
*/
|
|
11013
|
+
function parseDataUrlKnowledgeSource(source) {
|
|
11014
|
+
if (!isDataUrlKnowledgeSource(source)) {
|
|
11015
|
+
return null;
|
|
11016
|
+
}
|
|
11017
|
+
const commaIndex = source.indexOf(',');
|
|
11018
|
+
if (commaIndex === -1) {
|
|
11019
|
+
return null;
|
|
11020
|
+
}
|
|
11021
|
+
const header = source.slice(DATA_URL_PREFIX.length, commaIndex);
|
|
11022
|
+
const payload = source.slice(commaIndex + 1);
|
|
11023
|
+
const tokens = header.split(';');
|
|
11024
|
+
const mediaType = tokens[0] || 'text/plain';
|
|
11025
|
+
let filename = `${INLINE_KNOWLEDGE_BASE_NAME}${INLINE_KNOWLEDGE_EXTENSION}`;
|
|
11026
|
+
let isBase64 = false;
|
|
11027
|
+
for (let i = 1; i < tokens.length; i++) {
|
|
11028
|
+
const token = tokens[i];
|
|
11029
|
+
if (!token) {
|
|
11030
|
+
continue;
|
|
11031
|
+
}
|
|
11032
|
+
if (token.toLowerCase() === 'base64') {
|
|
11033
|
+
isBase64 = true;
|
|
11034
|
+
continue;
|
|
11035
|
+
}
|
|
11036
|
+
const [key, value] = token.split('=');
|
|
11037
|
+
if (key === 'name' && value !== undefined) {
|
|
11038
|
+
try {
|
|
11039
|
+
filename = decodeURIComponent(value);
|
|
11040
|
+
}
|
|
11041
|
+
catch (_a) {
|
|
11042
|
+
filename = value;
|
|
11043
|
+
}
|
|
11044
|
+
}
|
|
11045
|
+
}
|
|
11046
|
+
if (!isBase64) {
|
|
11047
|
+
return null;
|
|
11048
|
+
}
|
|
11049
|
+
try {
|
|
11050
|
+
const buffer = Buffer.from(payload, 'base64');
|
|
11051
|
+
return {
|
|
11052
|
+
buffer,
|
|
11053
|
+
filename,
|
|
11054
|
+
mimeType: mediaType,
|
|
11055
|
+
};
|
|
11056
|
+
}
|
|
11057
|
+
catch (_b) {
|
|
11058
|
+
return null;
|
|
11059
|
+
}
|
|
11060
|
+
}
|
|
11061
|
+
/**
|
|
11062
|
+
* Note: [💞] Ignore a discrepancy between file name and entity name
|
|
11063
|
+
*/
|
|
11064
|
+
|
|
10916
11065
|
/**
|
|
10917
11066
|
* KNOWLEDGE commitment definition
|
|
10918
11067
|
*
|
|
@@ -11011,9 +11160,13 @@ class KnowledgeCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
11011
11160
|
return this.appendToSystemMessage(updatedRequirements, knowledgeInfo, '\n\n');
|
|
11012
11161
|
}
|
|
11013
11162
|
else {
|
|
11014
|
-
|
|
11015
|
-
const
|
|
11016
|
-
|
|
11163
|
+
const inlineSource = createInlineKnowledgeSourceFile(trimmedContent);
|
|
11164
|
+
const updatedRequirements = {
|
|
11165
|
+
...requirements,
|
|
11166
|
+
knowledgeSources: [...(requirements.knowledgeSources || []), inlineSource.url],
|
|
11167
|
+
};
|
|
11168
|
+
const knowledgeInfo = `Knowledge Source Inline: ${inlineSource.filename} (derived from inline content and processed for retrieval during chat)`;
|
|
11169
|
+
return this.appendToSystemMessage(updatedRequirements, knowledgeInfo, '\n\n');
|
|
11017
11170
|
}
|
|
11018
11171
|
}
|
|
11019
11172
|
}
|
|
@@ -11260,16 +11413,16 @@ class AgentMessageCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
11260
11413
|
// and typically doesn't need to be added to the system prompt or model requirements directly.
|
|
11261
11414
|
// It is extracted separately for the chat interface.
|
|
11262
11415
|
var _a;
|
|
11263
|
-
const pendingUserMessage = (_a = requirements.
|
|
11416
|
+
const pendingUserMessage = (_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.pendingUserMessage;
|
|
11264
11417
|
if (pendingUserMessage) {
|
|
11265
11418
|
const newSample = { question: pendingUserMessage, answer: content };
|
|
11266
11419
|
const newSamples = [...(requirements.samples || []), newSample];
|
|
11267
|
-
const newMetadata = { ...requirements.
|
|
11420
|
+
const newMetadata = { ...requirements._metadata };
|
|
11268
11421
|
delete newMetadata.pendingUserMessage;
|
|
11269
11422
|
return {
|
|
11270
11423
|
...requirements,
|
|
11271
11424
|
samples: newSamples,
|
|
11272
|
-
|
|
11425
|
+
_metadata: newMetadata,
|
|
11273
11426
|
};
|
|
11274
11427
|
}
|
|
11275
11428
|
return requirements;
|
|
@@ -11517,8 +11670,8 @@ class UserMessageCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
11517
11670
|
applyToAgentModelRequirements(requirements, content) {
|
|
11518
11671
|
return {
|
|
11519
11672
|
...requirements,
|
|
11520
|
-
|
|
11521
|
-
...requirements.
|
|
11673
|
+
_metadata: {
|
|
11674
|
+
...requirements._metadata,
|
|
11522
11675
|
pendingUserMessage: content,
|
|
11523
11676
|
},
|
|
11524
11677
|
};
|
|
@@ -12376,11 +12529,7 @@ class NoteCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
12376
12529
|
if (trimmedContent === '') {
|
|
12377
12530
|
return requirements;
|
|
12378
12531
|
}
|
|
12379
|
-
|
|
12380
|
-
return {
|
|
12381
|
-
...requirements,
|
|
12382
|
-
notes: [...(requirements.notes || []), trimmedContent],
|
|
12383
|
-
};
|
|
12532
|
+
return requirements;
|
|
12384
12533
|
}
|
|
12385
12534
|
}
|
|
12386
12535
|
/**
|
|
@@ -12442,12 +12591,12 @@ class OpenCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
12442
12591
|
// Since OPEN is default, we can just ensure isClosed is false
|
|
12443
12592
|
// But to be explicit we can set it
|
|
12444
12593
|
const updatedMetadata = {
|
|
12445
|
-
...requirements.
|
|
12594
|
+
...requirements._metadata,
|
|
12446
12595
|
isClosed: false,
|
|
12447
12596
|
};
|
|
12448
12597
|
return {
|
|
12449
12598
|
...requirements,
|
|
12450
|
-
|
|
12599
|
+
_metadata: updatedMetadata,
|
|
12451
12600
|
};
|
|
12452
12601
|
}
|
|
12453
12602
|
}
|
|
@@ -12528,7 +12677,7 @@ class PersonaCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
12528
12677
|
return requirements;
|
|
12529
12678
|
}
|
|
12530
12679
|
// Get existing persona content from metadata
|
|
12531
|
-
const existingPersonaContent = ((_a = requirements.
|
|
12680
|
+
const existingPersonaContent = ((_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.PERSONA) || '';
|
|
12532
12681
|
// Merge the new content with existing persona content
|
|
12533
12682
|
// When multiple PERSONA commitments exist, they are merged into one
|
|
12534
12683
|
const mergedPersonaContent = existingPersonaContent
|
|
@@ -12536,12 +12685,12 @@ class PersonaCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
12536
12685
|
: trimmedContent;
|
|
12537
12686
|
// Store the merged persona content in metadata for debugging and inspection
|
|
12538
12687
|
const updatedMetadata = {
|
|
12539
|
-
...requirements.
|
|
12688
|
+
...requirements._metadata,
|
|
12540
12689
|
PERSONA: mergedPersonaContent,
|
|
12541
12690
|
};
|
|
12542
12691
|
// Get the agent name from metadata (which should contain the first line of agent source)
|
|
12543
12692
|
// If not available, extract from current system message as fallback
|
|
12544
|
-
let agentName = (_b = requirements.
|
|
12693
|
+
let agentName = (_b = requirements._metadata) === null || _b === void 0 ? void 0 : _b.agentName;
|
|
12545
12694
|
if (!agentName) {
|
|
12546
12695
|
// Fallback: extract from current system message
|
|
12547
12696
|
const currentMessage = requirements.systemMessage.trim();
|
|
@@ -12588,7 +12737,7 @@ class PersonaCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
12588
12737
|
return {
|
|
12589
12738
|
...requirements,
|
|
12590
12739
|
systemMessage: newSystemMessage,
|
|
12591
|
-
|
|
12740
|
+
_metadata: updatedMetadata,
|
|
12592
12741
|
};
|
|
12593
12742
|
}
|
|
12594
12743
|
}
|
|
@@ -12671,7 +12820,16 @@ class RuleCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
12671
12820
|
}
|
|
12672
12821
|
// Add rule to the system message
|
|
12673
12822
|
const ruleSection = `Rule: ${trimmedContent}`;
|
|
12674
|
-
|
|
12823
|
+
const requirementsWithRule = this.appendToSystemMessage(requirements, ruleSection, '\n\n');
|
|
12824
|
+
const ruleLines = trimmedContent
|
|
12825
|
+
.split(/\r?\n/)
|
|
12826
|
+
.map((line) => line.trim())
|
|
12827
|
+
.filter(Boolean)
|
|
12828
|
+
.map((line) => `- ${line}`);
|
|
12829
|
+
if (ruleLines.length === 0) {
|
|
12830
|
+
return requirementsWithRule;
|
|
12831
|
+
}
|
|
12832
|
+
return this.appendToPromptSuffix(requirementsWithRule, ruleLines.join('\n'));
|
|
12675
12833
|
}
|
|
12676
12834
|
}
|
|
12677
12835
|
/**
|
|
@@ -13177,7 +13335,7 @@ class TeamCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
13177
13335
|
if (teammates.length === 0) {
|
|
13178
13336
|
return requirements;
|
|
13179
13337
|
}
|
|
13180
|
-
const agentName = ((_a = requirements.
|
|
13338
|
+
const agentName = ((_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.agentName) || 'Agent';
|
|
13181
13339
|
const teamEntries = teammates.map((teammate) => ({
|
|
13182
13340
|
toolName: createTeamToolName(teammate.url),
|
|
13183
13341
|
teammate,
|
|
@@ -13217,7 +13375,7 @@ class TeamCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
13217
13375
|
},
|
|
13218
13376
|
});
|
|
13219
13377
|
}
|
|
13220
|
-
const existingTeammates = ((_b = requirements.
|
|
13378
|
+
const existingTeammates = ((_b = requirements._metadata) === null || _b === void 0 ? void 0 : _b.teammates) || [];
|
|
13221
13379
|
const updatedTeammates = [...existingTeammates];
|
|
13222
13380
|
for (const entry of teamEntries) {
|
|
13223
13381
|
if (updatedTeammates.some((existing) => existing.url === entry.teammate.url)) {
|
|
@@ -13246,8 +13404,8 @@ class TeamCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
13246
13404
|
return this.appendToSystemMessage({
|
|
13247
13405
|
...requirements,
|
|
13248
13406
|
tools: updatedTools,
|
|
13249
|
-
|
|
13250
|
-
...requirements.
|
|
13407
|
+
_metadata: {
|
|
13408
|
+
...requirements._metadata,
|
|
13251
13409
|
teammates: updatedTeammates,
|
|
13252
13410
|
},
|
|
13253
13411
|
}, teamSystemMessage);
|
|
@@ -13347,11 +13505,16 @@ function createTeamToolFunction(entry) {
|
|
|
13347
13505
|
const request = buildTeammateRequest(message, args.context);
|
|
13348
13506
|
let response = '';
|
|
13349
13507
|
let error = null;
|
|
13508
|
+
let toolCalls;
|
|
13350
13509
|
try {
|
|
13351
13510
|
const remoteAgent = await getRemoteTeammateAgent(entry.teammate.url);
|
|
13352
13511
|
const prompt = buildTeammatePrompt(request);
|
|
13353
13512
|
const teammateResult = await remoteAgent.callChatModel(prompt);
|
|
13354
13513
|
response = teammateResult.content || '';
|
|
13514
|
+
toolCalls =
|
|
13515
|
+
'toolCalls' in teammateResult && Array.isArray(teammateResult.toolCalls)
|
|
13516
|
+
? teammateResult.toolCalls
|
|
13517
|
+
: undefined;
|
|
13355
13518
|
}
|
|
13356
13519
|
catch (err) {
|
|
13357
13520
|
error = err instanceof Error ? err.message : String(err);
|
|
@@ -13361,6 +13524,7 @@ function createTeamToolFunction(entry) {
|
|
|
13361
13524
|
teammate: teammateMetadata,
|
|
13362
13525
|
request,
|
|
13363
13526
|
response: teammateReply,
|
|
13527
|
+
toolCalls: toolCalls && toolCalls.length > 0 ? toolCalls : undefined,
|
|
13364
13528
|
error,
|
|
13365
13529
|
conversation: [
|
|
13366
13530
|
{
|
|
@@ -13473,7 +13637,7 @@ class TemplateCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
13473
13637
|
if (!trimmedContent) {
|
|
13474
13638
|
// Store template mode flag in metadata
|
|
13475
13639
|
const updatedMetadata = {
|
|
13476
|
-
...requirements.
|
|
13640
|
+
...requirements._metadata,
|
|
13477
13641
|
templateMode: true,
|
|
13478
13642
|
};
|
|
13479
13643
|
// Add a general instruction about using structured templates
|
|
@@ -13483,21 +13647,21 @@ class TemplateCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
13483
13647
|
`);
|
|
13484
13648
|
return {
|
|
13485
13649
|
...this.appendToSystemMessage(requirements, templateModeInstruction, '\n\n'),
|
|
13486
|
-
|
|
13650
|
+
_metadata: updatedMetadata,
|
|
13487
13651
|
};
|
|
13488
13652
|
}
|
|
13489
13653
|
// If content is provided, add the specific template instructions
|
|
13490
13654
|
const templateSection = `Response Template: ${trimmedContent}`;
|
|
13491
13655
|
// Store the template in metadata for potential programmatic access
|
|
13492
|
-
const existingTemplates = ((_a = requirements.
|
|
13656
|
+
const existingTemplates = ((_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.templates) || [];
|
|
13493
13657
|
const updatedMetadata = {
|
|
13494
|
-
...requirements.
|
|
13658
|
+
...requirements._metadata,
|
|
13495
13659
|
templates: [...existingTemplates, trimmedContent],
|
|
13496
13660
|
templateMode: true,
|
|
13497
13661
|
};
|
|
13498
13662
|
return {
|
|
13499
13663
|
...this.appendToSystemMessage(requirements, templateSection, '\n\n'),
|
|
13500
|
-
|
|
13664
|
+
_metadata: updatedMetadata,
|
|
13501
13665
|
};
|
|
13502
13666
|
}
|
|
13503
13667
|
}
|
|
@@ -13834,8 +13998,8 @@ class UseBrowserCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
13834
13998
|
return this.appendToSystemMessage({
|
|
13835
13999
|
...requirements,
|
|
13836
14000
|
tools: updatedTools,
|
|
13837
|
-
|
|
13838
|
-
...requirements.
|
|
14001
|
+
_metadata: {
|
|
14002
|
+
...requirements._metadata,
|
|
13839
14003
|
useBrowser: true,
|
|
13840
14004
|
},
|
|
13841
14005
|
}, spaceTrim$1(`
|
|
@@ -14064,8 +14228,8 @@ class UseEmailCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
14064
14228
|
return this.appendToSystemMessage({
|
|
14065
14229
|
...requirements,
|
|
14066
14230
|
tools: updatedTools,
|
|
14067
|
-
|
|
14068
|
-
...requirements.
|
|
14231
|
+
_metadata: {
|
|
14232
|
+
...requirements._metadata,
|
|
14069
14233
|
useEmail: content || true,
|
|
14070
14234
|
},
|
|
14071
14235
|
}, spaceTrim$1((block) => `
|
|
@@ -14200,8 +14364,8 @@ class UseImageGeneratorCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
14200
14364
|
return this.appendToSystemMessage({
|
|
14201
14365
|
...requirements,
|
|
14202
14366
|
tools: updatedTools,
|
|
14203
|
-
|
|
14204
|
-
...requirements.
|
|
14367
|
+
_metadata: {
|
|
14368
|
+
...requirements._metadata,
|
|
14205
14369
|
useImageGenerator: content || true,
|
|
14206
14370
|
},
|
|
14207
14371
|
}, spaceTrim$1(`
|
|
@@ -14492,8 +14656,8 @@ class UseSearchEngineCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
14492
14656
|
return this.appendToSystemMessage({
|
|
14493
14657
|
...requirements,
|
|
14494
14658
|
tools: updatedTools,
|
|
14495
|
-
|
|
14496
|
-
...requirements.
|
|
14659
|
+
_metadata: {
|
|
14660
|
+
...requirements._metadata,
|
|
14497
14661
|
useSearchEngine: content || true,
|
|
14498
14662
|
},
|
|
14499
14663
|
}, spaceTrim$1((block) => `
|
|
@@ -14641,8 +14805,8 @@ class UseTimeCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
14641
14805
|
return this.appendToSystemMessage({
|
|
14642
14806
|
...requirements,
|
|
14643
14807
|
tools: updatedTools,
|
|
14644
|
-
|
|
14645
|
-
...requirements.
|
|
14808
|
+
_metadata: {
|
|
14809
|
+
...requirements._metadata,
|
|
14646
14810
|
},
|
|
14647
14811
|
}, spaceTrim$1((block) => `
|
|
14648
14812
|
Time and date context:
|
|
@@ -15226,14 +15390,26 @@ function removeCommentsFromSystemMessage(systemMessage) {
|
|
|
15226
15390
|
}
|
|
15227
15391
|
|
|
15228
15392
|
/**
|
|
15229
|
-
* Creates agent model requirements using the new commitment system
|
|
15393
|
+
* Creates agent model requirements using the new commitment system.
|
|
15394
|
+
*
|
|
15230
15395
|
* This function uses a reduce-like pattern where each commitment applies its changes
|
|
15231
|
-
* to build the final requirements starting from a basic empty model
|
|
15396
|
+
* to build the final requirements starting from a basic empty model.
|
|
15232
15397
|
*
|
|
15233
|
-
* @
|
|
15398
|
+
* @param agentSource - Agent source book to parse.
|
|
15399
|
+
* @param modelName - Optional override for the agent model name.
|
|
15400
|
+
* @param options - Additional options such as the agent reference resolver.
|
|
15401
|
+
*
|
|
15402
|
+
* @private @@@
|
|
15403
|
+
*/
|
|
15404
|
+
const COMMITMENTS_WITH_AGENT_REFERENCES = new Set(['FROM', 'IMPORT', 'IMPORTS', 'TEAM']);
|
|
15405
|
+
/**
|
|
15406
|
+
* @@@
|
|
15407
|
+
*
|
|
15408
|
+
* @private @@@
|
|
15234
15409
|
*/
|
|
15235
|
-
async function createAgentModelRequirementsWithCommitments(agentSource, modelName) {
|
|
15410
|
+
async function createAgentModelRequirementsWithCommitments(agentSource, modelName, options) {
|
|
15236
15411
|
var _a;
|
|
15412
|
+
const agentReferenceResolver = options === null || options === void 0 ? void 0 : options.agentReferenceResolver;
|
|
15237
15413
|
// Parse the agent source to extract commitments
|
|
15238
15414
|
const parseResult = parseAgentSourceWithCommitments(agentSource);
|
|
15239
15415
|
// Apply DELETE filtering: remove prior commitments tagged by parameters targeted by DELETE/CANCEL/DISCARD/REMOVE
|
|
@@ -15270,8 +15446,8 @@ async function createAgentModelRequirementsWithCommitments(agentSource, modelNam
|
|
|
15270
15446
|
// Store the agent name in metadata so commitments can access it
|
|
15271
15447
|
requirements = {
|
|
15272
15448
|
...requirements,
|
|
15273
|
-
|
|
15274
|
-
...requirements.
|
|
15449
|
+
_metadata: {
|
|
15450
|
+
...requirements._metadata,
|
|
15275
15451
|
agentName: parseResult.agentName,
|
|
15276
15452
|
},
|
|
15277
15453
|
};
|
|
@@ -15285,6 +15461,11 @@ async function createAgentModelRequirementsWithCommitments(agentSource, modelNam
|
|
|
15285
15461
|
// Apply each commitment in order using reduce-like pattern
|
|
15286
15462
|
for (let i = 0; i < filteredCommitments.length; i++) {
|
|
15287
15463
|
const commitment = filteredCommitments[i];
|
|
15464
|
+
const isReferenceCommitment = Boolean(agentReferenceResolver && COMMITMENTS_WITH_AGENT_REFERENCES.has(commitment.type));
|
|
15465
|
+
let commitmentContent = commitment.content;
|
|
15466
|
+
if (isReferenceCommitment && agentReferenceResolver) {
|
|
15467
|
+
commitmentContent = await agentReferenceResolver.resolveCommitmentContent(commitment.type, commitment.content);
|
|
15468
|
+
}
|
|
15288
15469
|
// CLOSED commitment should work only if its the last commitment in the book
|
|
15289
15470
|
if (commitment.type === 'CLOSED' && i !== filteredCommitments.length - 1) {
|
|
15290
15471
|
continue;
|
|
@@ -15292,7 +15473,7 @@ async function createAgentModelRequirementsWithCommitments(agentSource, modelNam
|
|
|
15292
15473
|
const definition = getCommitmentDefinition(commitment.type);
|
|
15293
15474
|
if (definition) {
|
|
15294
15475
|
try {
|
|
15295
|
-
requirements = definition.applyToAgentModelRequirements(requirements,
|
|
15476
|
+
requirements = definition.applyToAgentModelRequirements(requirements, commitmentContent);
|
|
15296
15477
|
}
|
|
15297
15478
|
catch (error) {
|
|
15298
15479
|
console.warn(`Failed to apply commitment ${commitment.type}:`, error);
|
|
@@ -15742,23 +15923,28 @@ function normalizeSeparator(content) {
|
|
|
15742
15923
|
*/
|
|
15743
15924
|
|
|
15744
15925
|
/**
|
|
15745
|
-
* Creates model requirements for an agent based on its source
|
|
15926
|
+
* Creates model requirements for an agent based on its source.
|
|
15746
15927
|
*
|
|
15747
15928
|
* There are 2 similar functions:
|
|
15748
15929
|
* - `parseAgentSource` which is a lightweight parser for agent source, it parses basic information and its purpose is to be quick and synchronous. The commitments there are hardcoded.
|
|
15749
15930
|
* - `createAgentModelRequirements` which is an asynchronous function that creates model requirements it applies each commitment one by one and works asynchronous.
|
|
15750
15931
|
*
|
|
15932
|
+
* @param agentSource - Book describing the agent.
|
|
15933
|
+
* @param modelName - Optional override for the agent's model.
|
|
15934
|
+
* @param availableModels - Models that could fulfill the agent.
|
|
15935
|
+
* @param llmTools - Execution tools used when selecting a best model.
|
|
15936
|
+
* @param options - Optional hooks such as the agent reference resolver.
|
|
15751
15937
|
* @public exported from `@promptbook/core`
|
|
15752
15938
|
*/
|
|
15753
|
-
async function createAgentModelRequirements(agentSource, modelName, availableModels, llmTools) {
|
|
15939
|
+
async function createAgentModelRequirements(agentSource, modelName, availableModels, llmTools, options) {
|
|
15754
15940
|
// If availableModels are provided and no specific modelName is given,
|
|
15755
15941
|
// use preparePersona to select the best model
|
|
15756
15942
|
if (availableModels && !modelName && llmTools) {
|
|
15757
15943
|
const selectedModelName = await selectBestModelUsingPersona(agentSource, llmTools);
|
|
15758
|
-
return createAgentModelRequirementsWithCommitments(agentSource, selectedModelName);
|
|
15944
|
+
return createAgentModelRequirementsWithCommitments(agentSource, selectedModelName, options);
|
|
15759
15945
|
}
|
|
15760
15946
|
// Use the new commitment-based system with provided or default model
|
|
15761
|
-
return createAgentModelRequirementsWithCommitments(agentSource, modelName);
|
|
15947
|
+
return createAgentModelRequirementsWithCommitments(agentSource, modelName, options);
|
|
15762
15948
|
}
|
|
15763
15949
|
/**
|
|
15764
15950
|
* Selects the best model using the preparePersona function
|
|
@@ -21300,6 +21486,66 @@ const OPENAI_MODELS = exportJson({
|
|
|
21300
21486
|
},
|
|
21301
21487
|
/**/
|
|
21302
21488
|
/**/
|
|
21489
|
+
{
|
|
21490
|
+
modelVariant: 'CHAT',
|
|
21491
|
+
modelTitle: 'gpt-5.2-codex',
|
|
21492
|
+
modelName: 'gpt-5.2-codex',
|
|
21493
|
+
modelDescription: 'High-capability Codex variant tuned for agentic code generation with large contexts and reasoning effort controls. Ideal for long-horizon coding workflows and multi-step reasoning.',
|
|
21494
|
+
pricing: {
|
|
21495
|
+
prompt: pricing(`$1.75 / 1M tokens`),
|
|
21496
|
+
output: pricing(`$14.00 / 1M tokens`),
|
|
21497
|
+
},
|
|
21498
|
+
},
|
|
21499
|
+
/**/
|
|
21500
|
+
/**/
|
|
21501
|
+
{
|
|
21502
|
+
modelVariant: 'CHAT',
|
|
21503
|
+
modelTitle: 'gpt-5.1-codex-max',
|
|
21504
|
+
modelName: 'gpt-5.1-codex-max',
|
|
21505
|
+
modelDescription: 'Premium GPT-5.1 Codex flavor that mirrors gpt-5.1 in capability and pricing while adding Codex tooling optimizations.',
|
|
21506
|
+
pricing: {
|
|
21507
|
+
prompt: pricing(`$1.25 / 1M tokens`),
|
|
21508
|
+
output: pricing(`$10.00 / 1M tokens`),
|
|
21509
|
+
},
|
|
21510
|
+
},
|
|
21511
|
+
/**/
|
|
21512
|
+
/**/
|
|
21513
|
+
{
|
|
21514
|
+
modelVariant: 'CHAT',
|
|
21515
|
+
modelTitle: 'gpt-5.1-codex',
|
|
21516
|
+
modelName: 'gpt-5.1-codex',
|
|
21517
|
+
modelDescription: 'Core GPT-5.1 Codex model focused on agentic coding tasks with a balanced trade-off between reasoning and cost.',
|
|
21518
|
+
pricing: {
|
|
21519
|
+
prompt: pricing(`$1.25 / 1M tokens`),
|
|
21520
|
+
output: pricing(`$10.00 / 1M tokens`),
|
|
21521
|
+
},
|
|
21522
|
+
},
|
|
21523
|
+
/**/
|
|
21524
|
+
/**/
|
|
21525
|
+
{
|
|
21526
|
+
modelVariant: 'CHAT',
|
|
21527
|
+
modelTitle: 'gpt-5.1-codex-mini',
|
|
21528
|
+
modelName: 'gpt-5.1-codex-mini',
|
|
21529
|
+
modelDescription: 'Compact, cost-effective GPT-5.1 Codex variant with a smaller context window ideal for cheap assistant iterations that still require coding awareness.',
|
|
21530
|
+
pricing: {
|
|
21531
|
+
prompt: pricing(`$0.25 / 1M tokens`),
|
|
21532
|
+
output: pricing(`$2.00 / 1M tokens`),
|
|
21533
|
+
},
|
|
21534
|
+
},
|
|
21535
|
+
/**/
|
|
21536
|
+
/**/
|
|
21537
|
+
{
|
|
21538
|
+
modelVariant: 'CHAT',
|
|
21539
|
+
modelTitle: 'gpt-5-codex',
|
|
21540
|
+
modelName: 'gpt-5-codex',
|
|
21541
|
+
modelDescription: 'Legacy GPT-5 Codex model built for agentic coding workloads with the same pricing as GPT-5 and a focus on stability.',
|
|
21542
|
+
pricing: {
|
|
21543
|
+
prompt: pricing(`$1.25 / 1M tokens`),
|
|
21544
|
+
output: pricing(`$10.00 / 1M tokens`),
|
|
21545
|
+
},
|
|
21546
|
+
},
|
|
21547
|
+
/**/
|
|
21548
|
+
/**/
|
|
21303
21549
|
{
|
|
21304
21550
|
modelVariant: 'CHAT',
|
|
21305
21551
|
modelTitle: 'gpt-5-mini',
|
|
@@ -22004,6 +22250,32 @@ function isUnsupportedParameterError(error) {
|
|
|
22004
22250
|
errorMessage.includes('does not support'));
|
|
22005
22251
|
}
|
|
22006
22252
|
|
|
22253
|
+
/**
|
|
22254
|
+
* Provides access to the structured clone implementation when available.
|
|
22255
|
+
*/
|
|
22256
|
+
function getStructuredCloneFunction() {
|
|
22257
|
+
return globalThis.structuredClone;
|
|
22258
|
+
}
|
|
22259
|
+
/**
|
|
22260
|
+
* Checks whether the prompt is a chat prompt that carries file attachments.
|
|
22261
|
+
*/
|
|
22262
|
+
function hasChatPromptFiles(prompt) {
|
|
22263
|
+
return 'files' in prompt && Array.isArray(prompt.files);
|
|
22264
|
+
}
|
|
22265
|
+
/**
|
|
22266
|
+
* Creates a deep copy of the prompt while keeping attached files intact when structured clone is not available.
|
|
22267
|
+
*/
|
|
22268
|
+
function clonePromptPreservingFiles(prompt) {
|
|
22269
|
+
const structuredCloneFn = getStructuredCloneFunction();
|
|
22270
|
+
if (typeof structuredCloneFn === 'function') {
|
|
22271
|
+
return structuredCloneFn(prompt);
|
|
22272
|
+
}
|
|
22273
|
+
const clonedPrompt = JSON.parse(JSON.stringify(prompt));
|
|
22274
|
+
if (hasChatPromptFiles(prompt)) {
|
|
22275
|
+
clonedPrompt.files = prompt.files;
|
|
22276
|
+
}
|
|
22277
|
+
return clonedPrompt;
|
|
22278
|
+
}
|
|
22007
22279
|
/**
|
|
22008
22280
|
* Execution Tools for calling OpenAI API or other OpenAI compatible provider
|
|
22009
22281
|
*
|
|
@@ -22033,16 +22305,11 @@ class OpenAiCompatibleExecutionTools {
|
|
|
22033
22305
|
const openAiOptions = { ...this.options };
|
|
22034
22306
|
delete openAiOptions.isVerbose;
|
|
22035
22307
|
delete openAiOptions.userId;
|
|
22036
|
-
// Enhanced configuration
|
|
22308
|
+
// Enhanced configuration with retries and timeouts.
|
|
22037
22309
|
const enhancedOptions = {
|
|
22038
22310
|
...openAiOptions,
|
|
22039
22311
|
timeout: API_REQUEST_TIMEOUT,
|
|
22040
22312
|
maxRetries: CONNECTION_RETRIES_LIMIT,
|
|
22041
|
-
defaultHeaders: {
|
|
22042
|
-
Connection: 'keep-alive',
|
|
22043
|
-
'Keep-Alive': 'timeout=30, max=100',
|
|
22044
|
-
...openAiOptions.defaultHeaders,
|
|
22045
|
-
},
|
|
22046
22313
|
};
|
|
22047
22314
|
this.client = new OpenAI(enhancedOptions);
|
|
22048
22315
|
}
|
|
@@ -22093,7 +22360,7 @@ class OpenAiCompatibleExecutionTools {
|
|
|
22093
22360
|
*/
|
|
22094
22361
|
async callChatModelStream(prompt, onProgress) {
|
|
22095
22362
|
// Deep clone prompt and modelRequirements to avoid mutation across calls
|
|
22096
|
-
const clonedPrompt =
|
|
22363
|
+
const clonedPrompt = clonePromptPreservingFiles(prompt);
|
|
22097
22364
|
// Use local Set for retried parameters to ensure independence and thread safety
|
|
22098
22365
|
const retriedUnsupportedParameters = new Set();
|
|
22099
22366
|
return this.callChatModelWithRetry(clonedPrompt, clonedPrompt.modelRequirements, [], retriedUnsupportedParameters, onProgress);
|
|
@@ -22120,7 +22387,10 @@ class OpenAiCompatibleExecutionTools {
|
|
|
22120
22387
|
// <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
|
|
22121
22388
|
// <- Note: [🧆]
|
|
22122
22389
|
}; // <- TODO: [💩] Guard here types better
|
|
22123
|
-
if (
|
|
22390
|
+
if (currentModelRequirements.responseFormat !== undefined) {
|
|
22391
|
+
modelSettings.response_format = currentModelRequirements.responseFormat;
|
|
22392
|
+
}
|
|
22393
|
+
else if (format === 'JSON') {
|
|
22124
22394
|
modelSettings.response_format = {
|
|
22125
22395
|
type: 'json_object',
|
|
22126
22396
|
};
|
|
@@ -22931,18 +23201,6 @@ class OpenAiExecutionTools extends OpenAiCompatibleExecutionTools {
|
|
|
22931
23201
|
get profile() {
|
|
22932
23202
|
return OPENAI_PROVIDER_PROFILE;
|
|
22933
23203
|
}
|
|
22934
|
-
/*
|
|
22935
|
-
Note: Commenting this out to avoid circular dependency
|
|
22936
|
-
/**
|
|
22937
|
-
* Create (sub)tools for calling OpenAI API Assistants
|
|
22938
|
-
*
|
|
22939
|
-
* @param assistantId Which assistant to use
|
|
22940
|
-
* @returns Tools for calling OpenAI API Assistants with same token
|
|
22941
|
-
* /
|
|
22942
|
-
public createAssistantSubtools(assistantId: string_token): OpenAiAssistantExecutionTools {
|
|
22943
|
-
return new OpenAiAssistantExecutionTools({ ...this.options, assistantId });
|
|
22944
|
-
}
|
|
22945
|
-
*/
|
|
22946
23204
|
/**
|
|
22947
23205
|
* List all available models (non dynamically)
|
|
22948
23206
|
*
|
|
@@ -22977,206 +23235,1259 @@ class OpenAiExecutionTools extends OpenAiCompatibleExecutionTools {
|
|
|
22977
23235
|
}
|
|
22978
23236
|
}
|
|
22979
23237
|
|
|
23238
|
+
const DEFAULT_KNOWLEDGE_SOURCE_DOWNLOAD_TIMEOUT_MS = 30000;
|
|
23239
|
+
const DEFAULT_KNOWLEDGE_SOURCE_UPLOAD_TIMEOUT_MS = 900000;
|
|
23240
|
+
const VECTOR_STORE_PROGRESS_LOG_INTERVAL_MIN_MS = 15000;
|
|
23241
|
+
const VECTOR_STORE_STALL_LOG_THRESHOLD_MS = 30000;
|
|
22980
23242
|
/**
|
|
22981
|
-
*
|
|
23243
|
+
* Base class for OpenAI execution tools that need hosted vector stores.
|
|
22982
23244
|
*
|
|
22983
23245
|
* @public exported from `@promptbook/openai`
|
|
22984
23246
|
*/
|
|
22985
|
-
class
|
|
22986
|
-
|
|
22987
|
-
|
|
22988
|
-
|
|
23247
|
+
class OpenAiVectorStoreHandler extends OpenAiExecutionTools {
|
|
23248
|
+
/**
|
|
23249
|
+
* Returns the per-knowledge-source download timeout in milliseconds.
|
|
23250
|
+
*/
|
|
23251
|
+
getKnowledgeSourceDownloadTimeoutMs() {
|
|
23252
|
+
var _a;
|
|
23253
|
+
return (_a = this.vectorStoreOptions.knowledgeSourceDownloadTimeoutMs) !== null && _a !== void 0 ? _a : DEFAULT_KNOWLEDGE_SOURCE_DOWNLOAD_TIMEOUT_MS;
|
|
22989
23254
|
}
|
|
22990
|
-
|
|
22991
|
-
|
|
23255
|
+
/**
|
|
23256
|
+
* Returns the max concurrency for knowledge source uploads.
|
|
23257
|
+
*/
|
|
23258
|
+
getKnowledgeSourceUploadMaxConcurrency() {
|
|
23259
|
+
var _a;
|
|
23260
|
+
return (_a = this.vectorStoreOptions.knowledgeSourceUploadMaxConcurrency) !== null && _a !== void 0 ? _a : 5;
|
|
22992
23261
|
}
|
|
22993
|
-
|
|
22994
|
-
|
|
23262
|
+
/**
|
|
23263
|
+
* Returns the polling interval in milliseconds for vector store uploads.
|
|
23264
|
+
*/
|
|
23265
|
+
getKnowledgeSourceUploadPollIntervalMs() {
|
|
23266
|
+
var _a;
|
|
23267
|
+
return (_a = this.vectorStoreOptions.knowledgeSourceUploadPollIntervalMs) !== null && _a !== void 0 ? _a : 5000;
|
|
22995
23268
|
}
|
|
22996
23269
|
/**
|
|
22997
|
-
*
|
|
23270
|
+
* Returns the overall upload timeout in milliseconds for vector store uploads.
|
|
22998
23271
|
*/
|
|
22999
|
-
|
|
23272
|
+
getKnowledgeSourceUploadTimeoutMs() {
|
|
23273
|
+
var _a;
|
|
23274
|
+
return (_a = this.vectorStoreOptions.knowledgeSourceUploadTimeoutMs) !== null && _a !== void 0 ? _a : DEFAULT_KNOWLEDGE_SOURCE_UPLOAD_TIMEOUT_MS;
|
|
23275
|
+
}
|
|
23276
|
+
/**
|
|
23277
|
+
* Returns true if we should continue even if vector store ingestion stalls.
|
|
23278
|
+
*/
|
|
23279
|
+
shouldContinueOnVectorStoreStall() {
|
|
23280
|
+
var _a;
|
|
23281
|
+
return (_a = this.vectorStoreOptions.shouldContinueOnVectorStoreStall) !== null && _a !== void 0 ? _a : true;
|
|
23282
|
+
}
|
|
23283
|
+
/**
|
|
23284
|
+
* Returns vector-store-specific options with extended settings.
|
|
23285
|
+
*/
|
|
23286
|
+
get vectorStoreOptions() {
|
|
23287
|
+
return this.options;
|
|
23288
|
+
}
|
|
23289
|
+
/**
|
|
23290
|
+
* Returns the OpenAI vector stores API surface, supporting stable and beta SDKs.
|
|
23291
|
+
*/
|
|
23292
|
+
getVectorStoresApi(client) {
|
|
23293
|
+
var _a, _b;
|
|
23294
|
+
const vectorStores = (_a = client.vectorStores) !== null && _a !== void 0 ? _a : (_b = client.beta) === null || _b === void 0 ? void 0 : _b.vectorStores;
|
|
23295
|
+
if (!vectorStores) {
|
|
23296
|
+
throw new Error('OpenAI client does not support vector stores. Please ensure you are using a compatible version of the OpenAI SDK with vector store support.');
|
|
23297
|
+
}
|
|
23298
|
+
return vectorStores;
|
|
23299
|
+
}
|
|
23300
|
+
/**
|
|
23301
|
+
* Downloads a knowledge source URL into a File for vector store upload.
|
|
23302
|
+
*/
|
|
23303
|
+
async downloadKnowledgeSourceFile(options) {
|
|
23304
|
+
var _a;
|
|
23305
|
+
const { source, timeoutMs, logLabel } = options;
|
|
23306
|
+
const startedAtMs = Date.now();
|
|
23307
|
+
const controller = new AbortController();
|
|
23308
|
+
const timeoutId = setTimeout(() => controller.abort(), timeoutMs);
|
|
23000
23309
|
if (this.options.isVerbose) {
|
|
23001
|
-
console.info('
|
|
23310
|
+
console.info('[🤰]', 'Downloading knowledge source', {
|
|
23311
|
+
source,
|
|
23312
|
+
timeoutMs,
|
|
23313
|
+
logLabel,
|
|
23314
|
+
});
|
|
23002
23315
|
}
|
|
23003
|
-
|
|
23004
|
-
|
|
23005
|
-
|
|
23006
|
-
|
|
23316
|
+
try {
|
|
23317
|
+
const response = await fetch(source, { signal: controller.signal });
|
|
23318
|
+
const contentType = (_a = response.headers.get('content-type')) !== null && _a !== void 0 ? _a : undefined;
|
|
23319
|
+
if (!response.ok) {
|
|
23320
|
+
console.error('[🤰]', 'Failed to download knowledge source', {
|
|
23321
|
+
source,
|
|
23322
|
+
status: response.status,
|
|
23323
|
+
statusText: response.statusText,
|
|
23324
|
+
contentType,
|
|
23325
|
+
elapsedMs: Date.now() - startedAtMs,
|
|
23326
|
+
logLabel,
|
|
23327
|
+
});
|
|
23328
|
+
return null;
|
|
23329
|
+
}
|
|
23330
|
+
const buffer = await response.arrayBuffer();
|
|
23331
|
+
let filename = source.split('/').pop() || 'downloaded-file';
|
|
23332
|
+
try {
|
|
23333
|
+
const url = new URL(source);
|
|
23334
|
+
filename = url.pathname.split('/').pop() || filename;
|
|
23335
|
+
}
|
|
23336
|
+
catch (error) {
|
|
23337
|
+
// Keep default filename
|
|
23338
|
+
}
|
|
23339
|
+
const file = new File([buffer], filename, contentType ? { type: contentType } : undefined);
|
|
23340
|
+
const elapsedMs = Date.now() - startedAtMs;
|
|
23341
|
+
const sizeBytes = buffer.byteLength;
|
|
23342
|
+
if (this.options.isVerbose) {
|
|
23343
|
+
console.info('[🤰]', 'Downloaded knowledge source', {
|
|
23344
|
+
source,
|
|
23345
|
+
filename,
|
|
23346
|
+
sizeBytes,
|
|
23347
|
+
contentType,
|
|
23348
|
+
elapsedMs,
|
|
23349
|
+
logLabel,
|
|
23350
|
+
});
|
|
23351
|
+
}
|
|
23352
|
+
return { file, sizeBytes, filename, elapsedMs };
|
|
23007
23353
|
}
|
|
23008
|
-
|
|
23009
|
-
|
|
23010
|
-
|
|
23011
|
-
|
|
23012
|
-
|
|
23013
|
-
|
|
23014
|
-
|
|
23015
|
-
|
|
23016
|
-
|
|
23017
|
-
role: msg.sender === 'assistant' ? 'assistant' : 'user',
|
|
23018
|
-
content: msg.content,
|
|
23019
|
-
}));
|
|
23020
|
-
input.push(...previousMessages);
|
|
23354
|
+
catch (error) {
|
|
23355
|
+
assertsError(error);
|
|
23356
|
+
console.error('[🤰]', 'Error downloading knowledge source', {
|
|
23357
|
+
source,
|
|
23358
|
+
elapsedMs: Date.now() - startedAtMs,
|
|
23359
|
+
logLabel,
|
|
23360
|
+
error: serializeError(error),
|
|
23361
|
+
});
|
|
23362
|
+
return null;
|
|
23021
23363
|
}
|
|
23022
|
-
|
|
23023
|
-
|
|
23024
|
-
role: 'user',
|
|
23025
|
-
content: rawPromptContent,
|
|
23026
|
-
});
|
|
23027
|
-
// Prepare tools
|
|
23028
|
-
const tools = modelRequirements.tools ? mapToolsToOpenAi(modelRequirements.tools) : undefined;
|
|
23029
|
-
// Add file_search if vector store is present
|
|
23030
|
-
const agentTools = tools ? [...tools] : [];
|
|
23031
|
-
let toolResources = undefined;
|
|
23032
|
-
if (this.vectorStoreId) {
|
|
23033
|
-
agentTools.push({ type: 'file_search' });
|
|
23034
|
-
toolResources = {
|
|
23035
|
-
file_search: {
|
|
23036
|
-
vector_store_ids: [this.vectorStoreId],
|
|
23037
|
-
},
|
|
23038
|
-
};
|
|
23364
|
+
finally {
|
|
23365
|
+
clearTimeout(timeoutId);
|
|
23039
23366
|
}
|
|
23040
|
-
|
|
23041
|
-
|
|
23042
|
-
|
|
23043
|
-
|
|
23044
|
-
|
|
23045
|
-
|
|
23046
|
-
|
|
23047
|
-
|
|
23367
|
+
}
|
|
23368
|
+
/**
|
|
23369
|
+
* Logs vector store file batch diagnostics to help trace ingestion stalls or failures.
|
|
23370
|
+
*/
|
|
23371
|
+
async logVectorStoreFileBatchDiagnostics(options) {
|
|
23372
|
+
var _a, _b, _c, _d, _e;
|
|
23373
|
+
const { client, vectorStoreId, batchId, uploadedFiles, logLabel, reason } = options;
|
|
23374
|
+
if (reason === 'stalled' && !this.options.isVerbose) {
|
|
23375
|
+
return;
|
|
23048
23376
|
}
|
|
23049
|
-
|
|
23050
|
-
|
|
23051
|
-
|
|
23052
|
-
|
|
23053
|
-
|
|
23054
|
-
|
|
23055
|
-
|
|
23056
|
-
|
|
23057
|
-
|
|
23058
|
-
|
|
23059
|
-
|
|
23060
|
-
|
|
23061
|
-
|
|
23377
|
+
if (!batchId.startsWith('vsfb_')) {
|
|
23378
|
+
console.error('[🤰]', 'Vector store file batch diagnostics skipped (invalid batch id)', {
|
|
23379
|
+
vectorStoreId,
|
|
23380
|
+
batchId,
|
|
23381
|
+
reason,
|
|
23382
|
+
logLabel,
|
|
23383
|
+
});
|
|
23384
|
+
return;
|
|
23385
|
+
}
|
|
23386
|
+
const fileIdToMetadata = new Map();
|
|
23387
|
+
for (const file of uploadedFiles) {
|
|
23388
|
+
fileIdToMetadata.set(file.fileId, file);
|
|
23389
|
+
}
|
|
23390
|
+
try {
|
|
23391
|
+
const vectorStores = this.getVectorStoresApi(client);
|
|
23392
|
+
const limit = Math.min(100, Math.max(10, uploadedFiles.length));
|
|
23393
|
+
const batchFilesPage = await vectorStores.fileBatches.listFiles(batchId, {
|
|
23394
|
+
vector_store_id: vectorStoreId,
|
|
23395
|
+
limit,
|
|
23396
|
+
});
|
|
23397
|
+
const batchFiles = (_a = batchFilesPage.data) !== null && _a !== void 0 ? _a : [];
|
|
23398
|
+
const statusCounts = {
|
|
23399
|
+
in_progress: 0,
|
|
23400
|
+
completed: 0,
|
|
23401
|
+
failed: 0,
|
|
23402
|
+
cancelled: 0,
|
|
23403
|
+
};
|
|
23404
|
+
const errorSamples = [];
|
|
23405
|
+
const inProgressSamples = [];
|
|
23406
|
+
const batchFileIds = new Set();
|
|
23407
|
+
for (const file of batchFiles) {
|
|
23408
|
+
const status = (_b = file.status) !== null && _b !== void 0 ? _b : 'unknown';
|
|
23409
|
+
statusCounts[status] = ((_c = statusCounts[status]) !== null && _c !== void 0 ? _c : 0) + 1;
|
|
23410
|
+
const vectorStoreFileId = file.id;
|
|
23411
|
+
const uploadedFileId = (_d = file.file_id) !== null && _d !== void 0 ? _d : file.fileId;
|
|
23412
|
+
const fileId = uploadedFileId !== null && uploadedFileId !== void 0 ? uploadedFileId : vectorStoreFileId;
|
|
23413
|
+
batchFileIds.add(fileId);
|
|
23414
|
+
const metadata = fileIdToMetadata.get(fileId);
|
|
23415
|
+
if (status === 'failed') {
|
|
23416
|
+
errorSamples.push({
|
|
23417
|
+
fileId,
|
|
23418
|
+
status,
|
|
23419
|
+
error: (_e = file.last_error) === null || _e === void 0 ? void 0 : _e.message,
|
|
23420
|
+
filename: metadata === null || metadata === void 0 ? void 0 : metadata.filename,
|
|
23421
|
+
vectorStoreFileId: uploadedFileId ? vectorStoreFileId : undefined,
|
|
23422
|
+
});
|
|
23423
|
+
}
|
|
23424
|
+
if (status === 'in_progress') {
|
|
23425
|
+
inProgressSamples.push({
|
|
23426
|
+
fileId,
|
|
23427
|
+
filename: metadata === null || metadata === void 0 ? void 0 : metadata.filename,
|
|
23428
|
+
vectorStoreFileId: uploadedFileId ? vectorStoreFileId : undefined,
|
|
23429
|
+
});
|
|
23430
|
+
}
|
|
23431
|
+
}
|
|
23432
|
+
const missingSamples = uploadedFiles
|
|
23433
|
+
.filter((file) => !batchFileIds.has(file.fileId))
|
|
23434
|
+
.slice(0, 5)
|
|
23435
|
+
.map((file) => ({
|
|
23436
|
+
fileId: file.fileId,
|
|
23437
|
+
filename: file.filename,
|
|
23438
|
+
sizeBytes: file.sizeBytes,
|
|
23439
|
+
}));
|
|
23440
|
+
const vectorStore = await vectorStores.retrieve(vectorStoreId);
|
|
23441
|
+
const logPayload = {
|
|
23442
|
+
vectorStoreId,
|
|
23443
|
+
batchId,
|
|
23444
|
+
reason,
|
|
23445
|
+
vectorStoreStatus: vectorStore.status,
|
|
23446
|
+
vectorStoreFileCounts: vectorStore.file_counts,
|
|
23447
|
+
vectorStoreUsageBytes: vectorStore.usage_bytes,
|
|
23448
|
+
batchFileCount: batchFiles.length,
|
|
23449
|
+
statusCounts,
|
|
23450
|
+
errorSamples: errorSamples.slice(0, 5),
|
|
23451
|
+
inProgressSamples,
|
|
23452
|
+
missingFileCount: uploadedFiles.length - batchFileIds.size,
|
|
23453
|
+
missingSamples,
|
|
23454
|
+
logLabel,
|
|
23455
|
+
};
|
|
23456
|
+
const logFunction = reason === 'stalled' ? console.info : console.error;
|
|
23457
|
+
logFunction('[🤰]', 'Vector store file batch diagnostics', logPayload);
|
|
23458
|
+
}
|
|
23459
|
+
catch (error) {
|
|
23460
|
+
assertsError(error);
|
|
23461
|
+
console.error('[🤰]', 'Vector store file batch diagnostics failed', {
|
|
23462
|
+
vectorStoreId,
|
|
23463
|
+
batchId,
|
|
23464
|
+
reason,
|
|
23465
|
+
logLabel,
|
|
23466
|
+
error: serializeError(error),
|
|
23467
|
+
});
|
|
23468
|
+
}
|
|
23469
|
+
}
|
|
23470
|
+
/**
|
|
23471
|
+
* Uploads knowledge source files to the vector store and polls until processing completes.
|
|
23472
|
+
*/
|
|
23473
|
+
async uploadKnowledgeSourceFilesToVectorStore(options) {
|
|
23474
|
+
var _a, _b, _c, _d, _e, _f;
|
|
23475
|
+
const { client, vectorStoreId, files, totalBytes, logLabel } = options;
|
|
23476
|
+
const vectorStores = this.getVectorStoresApi(client);
|
|
23477
|
+
const uploadStartedAtMs = Date.now();
|
|
23478
|
+
const maxConcurrency = Math.max(1, this.getKnowledgeSourceUploadMaxConcurrency());
|
|
23479
|
+
const pollIntervalMs = Math.max(1000, this.getKnowledgeSourceUploadPollIntervalMs());
|
|
23480
|
+
const uploadTimeoutMs = Math.max(1000, this.getKnowledgeSourceUploadTimeoutMs());
|
|
23481
|
+
if (this.options.isVerbose) {
|
|
23482
|
+
console.info('[🤰]', 'Uploading knowledge source files to OpenAI', {
|
|
23483
|
+
vectorStoreId,
|
|
23484
|
+
fileCount: files.length,
|
|
23485
|
+
totalBytes,
|
|
23486
|
+
maxConcurrency,
|
|
23487
|
+
pollIntervalMs,
|
|
23488
|
+
uploadTimeoutMs,
|
|
23489
|
+
logLabel,
|
|
23490
|
+
});
|
|
23491
|
+
}
|
|
23492
|
+
const fileTypeSummary = {};
|
|
23493
|
+
for (const file of files) {
|
|
23494
|
+
const filename = (_a = file.name) !== null && _a !== void 0 ? _a : '';
|
|
23495
|
+
const extension = filename.includes('.')
|
|
23496
|
+
? (_c = (_b = filename.split('.').pop()) === null || _b === void 0 ? void 0 : _b.toLowerCase()) !== null && _c !== void 0 ? _c : 'unknown'
|
|
23497
|
+
: 'unknown';
|
|
23498
|
+
const sizeBytes = typeof file.size === 'number' ? file.size : 0;
|
|
23499
|
+
const summary = (_d = fileTypeSummary[extension]) !== null && _d !== void 0 ? _d : { count: 0, totalBytes: 0 };
|
|
23500
|
+
summary.count += 1;
|
|
23501
|
+
summary.totalBytes += sizeBytes;
|
|
23502
|
+
fileTypeSummary[extension] = summary;
|
|
23062
23503
|
}
|
|
23063
|
-
// Call Responses API
|
|
23064
|
-
// Note: Using any cast because types might not be updated yet
|
|
23065
|
-
const response = await client.responses.create(rawRequest);
|
|
23066
23504
|
if (this.options.isVerbose) {
|
|
23067
|
-
console.info(
|
|
23505
|
+
console.info('[🤰]', 'Knowledge source file summary', {
|
|
23506
|
+
vectorStoreId,
|
|
23507
|
+
fileCount: files.length,
|
|
23508
|
+
totalBytes,
|
|
23509
|
+
fileTypeSummary,
|
|
23510
|
+
logLabel,
|
|
23511
|
+
});
|
|
23068
23512
|
}
|
|
23069
|
-
const
|
|
23070
|
-
|
|
23513
|
+
const fileEntries = files.map((file, index) => ({ file, index }));
|
|
23514
|
+
const fileIterator = fileEntries.values();
|
|
23515
|
+
const fileIds = [];
|
|
23516
|
+
const uploadedFiles = [];
|
|
23517
|
+
const failedUploads = [];
|
|
23518
|
+
let uploadedCount = 0;
|
|
23519
|
+
const processFiles = async (iterator) => {
|
|
23520
|
+
var _a, _b;
|
|
23521
|
+
for (const { file, index } of iterator) {
|
|
23522
|
+
const uploadIndex = index + 1;
|
|
23523
|
+
const filename = file.name || `knowledge-source-${uploadIndex}`;
|
|
23524
|
+
const extension = filename.includes('.')
|
|
23525
|
+
? (_b = (_a = filename.split('.').pop()) === null || _a === void 0 ? void 0 : _a.toLowerCase()) !== null && _b !== void 0 ? _b : 'unknown'
|
|
23526
|
+
: 'unknown';
|
|
23527
|
+
const sizeBytes = typeof file.size === 'number' ? file.size : undefined;
|
|
23528
|
+
const fileUploadStartedAtMs = Date.now();
|
|
23529
|
+
if (this.options.isVerbose) {
|
|
23530
|
+
console.info('[🤰]', 'Uploading knowledge source file', {
|
|
23531
|
+
index: uploadIndex,
|
|
23532
|
+
total: files.length,
|
|
23533
|
+
filename,
|
|
23534
|
+
extension,
|
|
23535
|
+
sizeBytes,
|
|
23536
|
+
logLabel,
|
|
23537
|
+
});
|
|
23538
|
+
}
|
|
23539
|
+
try {
|
|
23540
|
+
const uploaded = await client.files.create({ file, purpose: 'assistants' });
|
|
23541
|
+
fileIds.push(uploaded.id);
|
|
23542
|
+
uploadedFiles.push({ fileId: uploaded.id, filename, sizeBytes });
|
|
23543
|
+
uploadedCount += 1;
|
|
23544
|
+
if (this.options.isVerbose) {
|
|
23545
|
+
console.info('[🤰]', 'Uploaded knowledge source file', {
|
|
23546
|
+
index: uploadIndex,
|
|
23547
|
+
total: files.length,
|
|
23548
|
+
filename,
|
|
23549
|
+
sizeBytes,
|
|
23550
|
+
fileId: uploaded.id,
|
|
23551
|
+
elapsedMs: Date.now() - fileUploadStartedAtMs,
|
|
23552
|
+
logLabel,
|
|
23553
|
+
});
|
|
23554
|
+
}
|
|
23555
|
+
}
|
|
23556
|
+
catch (error) {
|
|
23557
|
+
assertsError(error);
|
|
23558
|
+
const serializedError = serializeError(error);
|
|
23559
|
+
failedUploads.push({ index: uploadIndex, filename, error: serializedError });
|
|
23560
|
+
console.error('[🤰]', 'Failed to upload knowledge source file', {
|
|
23561
|
+
index: uploadIndex,
|
|
23562
|
+
total: files.length,
|
|
23563
|
+
filename,
|
|
23564
|
+
sizeBytes,
|
|
23565
|
+
elapsedMs: Date.now() - fileUploadStartedAtMs,
|
|
23566
|
+
logLabel,
|
|
23567
|
+
error: serializedError,
|
|
23568
|
+
});
|
|
23569
|
+
}
|
|
23570
|
+
}
|
|
23571
|
+
};
|
|
23572
|
+
const workerCount = Math.min(maxConcurrency, files.length);
|
|
23573
|
+
const workers = Array.from({ length: workerCount }, () => processFiles(fileIterator));
|
|
23574
|
+
await Promise.all(workers);
|
|
23575
|
+
if (this.options.isVerbose) {
|
|
23576
|
+
console.info('[🤰]', 'Finished uploading knowledge source files', {
|
|
23577
|
+
vectorStoreId,
|
|
23578
|
+
fileCount: files.length,
|
|
23579
|
+
uploadedCount,
|
|
23580
|
+
failedCount: failedUploads.length,
|
|
23581
|
+
elapsedMs: Date.now() - uploadStartedAtMs,
|
|
23582
|
+
failedSamples: failedUploads.slice(0, 3),
|
|
23583
|
+
logLabel,
|
|
23584
|
+
});
|
|
23585
|
+
}
|
|
23586
|
+
if (fileIds.length === 0) {
|
|
23587
|
+
console.error('[🤰]', 'No knowledge source files were uploaded', {
|
|
23588
|
+
vectorStoreId,
|
|
23589
|
+
fileCount: files.length,
|
|
23590
|
+
failedCount: failedUploads.length,
|
|
23591
|
+
logLabel,
|
|
23592
|
+
});
|
|
23593
|
+
return null;
|
|
23594
|
+
}
|
|
23595
|
+
const batch = await vectorStores.fileBatches.create(vectorStoreId, {
|
|
23596
|
+
file_ids: fileIds,
|
|
23597
|
+
});
|
|
23598
|
+
const expectedBatchId = batch.id;
|
|
23599
|
+
const expectedBatchIdValid = expectedBatchId.startsWith('vsfb_');
|
|
23600
|
+
if (!expectedBatchIdValid) {
|
|
23601
|
+
console.error('[🤰]', 'Vector store file batch id looks invalid', {
|
|
23602
|
+
vectorStoreId,
|
|
23603
|
+
batchId: expectedBatchId,
|
|
23604
|
+
batchVectorStoreId: batch.vector_store_id,
|
|
23605
|
+
logLabel,
|
|
23606
|
+
});
|
|
23607
|
+
}
|
|
23608
|
+
else if (batch.vector_store_id !== vectorStoreId) {
|
|
23609
|
+
console.error('[🤰]', 'Vector store file batch vector store id mismatch', {
|
|
23610
|
+
vectorStoreId,
|
|
23611
|
+
batchId: expectedBatchId,
|
|
23612
|
+
batchVectorStoreId: batch.vector_store_id,
|
|
23613
|
+
logLabel,
|
|
23614
|
+
});
|
|
23615
|
+
}
|
|
23616
|
+
if (this.options.isVerbose) {
|
|
23617
|
+
console.info('[🤰]', 'Created vector store file batch', {
|
|
23618
|
+
vectorStoreId,
|
|
23619
|
+
batchId: expectedBatchId,
|
|
23620
|
+
fileCount: fileIds.length,
|
|
23621
|
+
logLabel,
|
|
23622
|
+
});
|
|
23623
|
+
}
|
|
23624
|
+
const pollStartedAtMs = Date.now();
|
|
23625
|
+
const progressLogIntervalMs = Math.max(VECTOR_STORE_PROGRESS_LOG_INTERVAL_MIN_MS, pollIntervalMs);
|
|
23626
|
+
const diagnosticsIntervalMs = Math.max(60000, pollIntervalMs * 5);
|
|
23627
|
+
// let lastStatus: string | undefined;
|
|
23628
|
+
let lastCountsKey = '';
|
|
23629
|
+
let lastProgressKey = '';
|
|
23630
|
+
let lastLogAtMs = 0;
|
|
23631
|
+
let lastProgressAtMs = pollStartedAtMs;
|
|
23632
|
+
let lastDiagnosticsAtMs = pollStartedAtMs;
|
|
23633
|
+
let latestBatch = batch;
|
|
23634
|
+
let loggedBatchIdMismatch = false;
|
|
23635
|
+
let loggedBatchIdFallback = false;
|
|
23636
|
+
let loggedBatchIdInvalid = false;
|
|
23637
|
+
let shouldPoll = true;
|
|
23638
|
+
while (shouldPoll) {
|
|
23639
|
+
const nowMs = Date.now();
|
|
23640
|
+
// [🤰] Note: Sometimes OpenAI returns Vector Store object instead of Batch object, or IDs get swapped.
|
|
23641
|
+
const rawBatchId = typeof latestBatch.id === 'string' ? latestBatch.id : '';
|
|
23642
|
+
const rawVectorStoreId = latestBatch.vector_store_id;
|
|
23643
|
+
let returnedBatchId = rawBatchId;
|
|
23644
|
+
let returnedBatchIdValid = typeof returnedBatchId === 'string' && returnedBatchId.startsWith('vsfb_');
|
|
23645
|
+
if (!returnedBatchIdValid && expectedBatchIdValid) {
|
|
23646
|
+
if (!loggedBatchIdFallback) {
|
|
23647
|
+
console.error('[🤰]', 'Vector store file batch id missing from response; falling back to expected', {
|
|
23648
|
+
vectorStoreId,
|
|
23649
|
+
expectedBatchId,
|
|
23650
|
+
returnedBatchId,
|
|
23651
|
+
rawVectorStoreId,
|
|
23652
|
+
logLabel,
|
|
23653
|
+
});
|
|
23654
|
+
loggedBatchIdFallback = true;
|
|
23655
|
+
}
|
|
23656
|
+
returnedBatchId = expectedBatchId;
|
|
23657
|
+
returnedBatchIdValid = true;
|
|
23658
|
+
}
|
|
23659
|
+
if (!returnedBatchIdValid && !loggedBatchIdInvalid) {
|
|
23660
|
+
console.error('[🤰]', 'Vector store file batch id is invalid; stopping polling', {
|
|
23661
|
+
vectorStoreId,
|
|
23662
|
+
expectedBatchId,
|
|
23663
|
+
returnedBatchId,
|
|
23664
|
+
rawVectorStoreId,
|
|
23665
|
+
logLabel,
|
|
23666
|
+
});
|
|
23667
|
+
loggedBatchIdInvalid = true;
|
|
23668
|
+
}
|
|
23669
|
+
const batchIdMismatch = expectedBatchIdValid && returnedBatchIdValid && returnedBatchId !== expectedBatchId;
|
|
23670
|
+
if (batchIdMismatch && !loggedBatchIdMismatch) {
|
|
23671
|
+
console.error('[🤰]', 'Vector store file batch id mismatch', {
|
|
23672
|
+
vectorStoreId,
|
|
23673
|
+
expectedBatchId,
|
|
23674
|
+
returnedBatchId,
|
|
23675
|
+
logLabel,
|
|
23676
|
+
});
|
|
23677
|
+
loggedBatchIdMismatch = true;
|
|
23678
|
+
}
|
|
23679
|
+
if (returnedBatchIdValid) {
|
|
23680
|
+
latestBatch = await vectorStores.fileBatches.retrieve(returnedBatchId, {
|
|
23681
|
+
vector_store_id: vectorStoreId,
|
|
23682
|
+
});
|
|
23683
|
+
}
|
|
23684
|
+
else {
|
|
23685
|
+
shouldPoll = false;
|
|
23686
|
+
continue;
|
|
23687
|
+
}
|
|
23688
|
+
const status = (_e = latestBatch.status) !== null && _e !== void 0 ? _e : 'unknown';
|
|
23689
|
+
const fileCounts = (_f = latestBatch.file_counts) !== null && _f !== void 0 ? _f : {};
|
|
23690
|
+
const progressKey = JSON.stringify(fileCounts);
|
|
23691
|
+
const statusCountsKey = `${status}-${progressKey}`;
|
|
23692
|
+
const isProgressing = progressKey !== lastProgressKey;
|
|
23693
|
+
if (isProgressing) {
|
|
23694
|
+
lastProgressAtMs = nowMs;
|
|
23695
|
+
lastProgressKey = progressKey;
|
|
23696
|
+
}
|
|
23697
|
+
if (this.options.isVerbose &&
|
|
23698
|
+
(statusCountsKey !== lastCountsKey || nowMs - lastLogAtMs >= progressLogIntervalMs)) {
|
|
23699
|
+
console.info('[🤰]', 'Vector store file batch status', {
|
|
23700
|
+
vectorStoreId,
|
|
23701
|
+
batchId: returnedBatchId,
|
|
23702
|
+
status,
|
|
23703
|
+
fileCounts,
|
|
23704
|
+
elapsedMs: nowMs - pollStartedAtMs,
|
|
23705
|
+
logLabel,
|
|
23706
|
+
});
|
|
23707
|
+
lastCountsKey = statusCountsKey;
|
|
23708
|
+
lastLogAtMs = nowMs;
|
|
23709
|
+
}
|
|
23710
|
+
if (status === 'in_progress' &&
|
|
23711
|
+
nowMs - lastProgressAtMs >= VECTOR_STORE_STALL_LOG_THRESHOLD_MS &&
|
|
23712
|
+
nowMs - lastDiagnosticsAtMs >= diagnosticsIntervalMs) {
|
|
23713
|
+
lastDiagnosticsAtMs = nowMs;
|
|
23714
|
+
await this.logVectorStoreFileBatchDiagnostics({
|
|
23715
|
+
client,
|
|
23716
|
+
vectorStoreId,
|
|
23717
|
+
batchId: returnedBatchId,
|
|
23718
|
+
uploadedFiles,
|
|
23719
|
+
logLabel,
|
|
23720
|
+
reason: 'stalled',
|
|
23721
|
+
});
|
|
23722
|
+
}
|
|
23723
|
+
if (status === 'completed') {
|
|
23724
|
+
if (this.options.isVerbose) {
|
|
23725
|
+
console.info('[🤰]', 'Vector store file batch completed', {
|
|
23726
|
+
vectorStoreId,
|
|
23727
|
+
batchId: returnedBatchId,
|
|
23728
|
+
fileCounts,
|
|
23729
|
+
elapsedMs: nowMs - pollStartedAtMs,
|
|
23730
|
+
logLabel,
|
|
23731
|
+
});
|
|
23732
|
+
}
|
|
23733
|
+
shouldPoll = false;
|
|
23734
|
+
continue;
|
|
23735
|
+
}
|
|
23736
|
+
if (status === 'failed') {
|
|
23737
|
+
console.error('[🤰]', 'Vector store file batch completed with failures', {
|
|
23738
|
+
vectorStoreId,
|
|
23739
|
+
batchId: returnedBatchId,
|
|
23740
|
+
fileCounts,
|
|
23741
|
+
elapsedMs: nowMs - pollStartedAtMs,
|
|
23742
|
+
logLabel,
|
|
23743
|
+
});
|
|
23744
|
+
await this.logVectorStoreFileBatchDiagnostics({
|
|
23745
|
+
client,
|
|
23746
|
+
vectorStoreId,
|
|
23747
|
+
batchId: returnedBatchId,
|
|
23748
|
+
uploadedFiles,
|
|
23749
|
+
logLabel,
|
|
23750
|
+
reason: 'failed',
|
|
23751
|
+
});
|
|
23752
|
+
shouldPoll = false;
|
|
23753
|
+
continue;
|
|
23754
|
+
}
|
|
23755
|
+
if (status === 'cancelled') {
|
|
23756
|
+
console.error('[🤰]', 'Vector store file batch did not complete', {
|
|
23757
|
+
vectorStoreId,
|
|
23758
|
+
batchId: returnedBatchId,
|
|
23759
|
+
status,
|
|
23760
|
+
fileCounts,
|
|
23761
|
+
elapsedMs: nowMs - pollStartedAtMs,
|
|
23762
|
+
logLabel,
|
|
23763
|
+
});
|
|
23764
|
+
await this.logVectorStoreFileBatchDiagnostics({
|
|
23765
|
+
client,
|
|
23766
|
+
vectorStoreId,
|
|
23767
|
+
batchId: returnedBatchId,
|
|
23768
|
+
uploadedFiles,
|
|
23769
|
+
logLabel,
|
|
23770
|
+
reason: 'failed',
|
|
23771
|
+
});
|
|
23772
|
+
shouldPoll = false;
|
|
23773
|
+
continue;
|
|
23774
|
+
}
|
|
23775
|
+
if (nowMs - pollStartedAtMs >= uploadTimeoutMs) {
|
|
23776
|
+
console.error('[🤰]', 'Timed out waiting for vector store file batch', {
|
|
23777
|
+
vectorStoreId,
|
|
23778
|
+
batchId: returnedBatchId,
|
|
23779
|
+
fileCounts,
|
|
23780
|
+
elapsedMs: nowMs - pollStartedAtMs,
|
|
23781
|
+
uploadTimeoutMs,
|
|
23782
|
+
logLabel,
|
|
23783
|
+
});
|
|
23784
|
+
await this.logVectorStoreFileBatchDiagnostics({
|
|
23785
|
+
client,
|
|
23786
|
+
vectorStoreId,
|
|
23787
|
+
batchId: returnedBatchId,
|
|
23788
|
+
uploadedFiles,
|
|
23789
|
+
logLabel,
|
|
23790
|
+
reason: 'timeout',
|
|
23791
|
+
});
|
|
23792
|
+
if (this.shouldContinueOnVectorStoreStall()) {
|
|
23793
|
+
console.warn('[🤰]', 'Continuing despite vector store timeout as requested', {
|
|
23794
|
+
vectorStoreId,
|
|
23795
|
+
logLabel,
|
|
23796
|
+
});
|
|
23797
|
+
shouldPoll = false;
|
|
23798
|
+
continue;
|
|
23799
|
+
}
|
|
23800
|
+
try {
|
|
23801
|
+
const cancelBatchId = batchIdMismatch && returnedBatchId.startsWith('vsfb_') ? returnedBatchId : expectedBatchId;
|
|
23802
|
+
if (!cancelBatchId.startsWith('vsfb_')) {
|
|
23803
|
+
console.error('[🤰]', 'Skipping vector store file batch cancel (invalid batch id)', {
|
|
23804
|
+
vectorStoreId,
|
|
23805
|
+
batchId: cancelBatchId,
|
|
23806
|
+
logLabel,
|
|
23807
|
+
});
|
|
23808
|
+
}
|
|
23809
|
+
else {
|
|
23810
|
+
await vectorStores.fileBatches.cancel(cancelBatchId, {
|
|
23811
|
+
vector_store_id: vectorStoreId,
|
|
23812
|
+
});
|
|
23813
|
+
}
|
|
23814
|
+
if (this.options.isVerbose) {
|
|
23815
|
+
console.info('[🤰]', 'Cancelled vector store file batch after timeout', {
|
|
23816
|
+
vectorStoreId,
|
|
23817
|
+
batchId: batchIdMismatch && returnedBatchId.startsWith('vsfb_')
|
|
23818
|
+
? returnedBatchId
|
|
23819
|
+
: expectedBatchId,
|
|
23820
|
+
...(batchIdMismatch ? { returnedBatchId } : {}),
|
|
23821
|
+
logLabel,
|
|
23822
|
+
});
|
|
23823
|
+
}
|
|
23824
|
+
}
|
|
23825
|
+
catch (error) {
|
|
23826
|
+
assertsError(error);
|
|
23827
|
+
console.error('[🤰]', 'Failed to cancel vector store file batch after timeout', {
|
|
23828
|
+
vectorStoreId,
|
|
23829
|
+
batchId: expectedBatchId,
|
|
23830
|
+
...(batchIdMismatch ? { returnedBatchId } : {}),
|
|
23831
|
+
logLabel,
|
|
23832
|
+
error: serializeError(error),
|
|
23833
|
+
});
|
|
23834
|
+
}
|
|
23835
|
+
shouldPoll = false;
|
|
23836
|
+
continue;
|
|
23837
|
+
}
|
|
23838
|
+
await new Promise((resolve) => setTimeout(resolve, pollIntervalMs));
|
|
23839
|
+
}
|
|
23840
|
+
return latestBatch;
|
|
23841
|
+
}
|
|
23842
|
+
/**
 * Creates a vector store and uploads knowledge sources, returning its ID.
 *
 * Knowledge sources may be data URLs (decoded in-process) or http(s) URLs
 * (downloaded via `downloadKnowledgeSourceFile`); anything else is skipped.
 * Skipped sources are collected with a reason and reported in the result —
 * they never abort the overall preparation.
 *
 * @param options - `{ client, name, knowledgeSources, logLabel }`
 * @returns `{ vectorStoreId, uploadedFileCount, skippedCount, totalBytes }`
 */
async createVectorStoreWithKnowledgeSources(options) {
    const { client, name, knowledgeSources, logLabel } = options;
    const vectorStores = this.getVectorStoresApi(client);
    const knowledgeSourcesCount = knowledgeSources.length;
    const downloadTimeoutMs = this.getKnowledgeSourceDownloadTimeoutMs();
    if (this.options.isVerbose) {
        console.info('[🤰]', 'Creating vector store with knowledge sources', {
            name,
            knowledgeSourcesCount,
            downloadTimeoutMs,
            logLabel,
        });
    }
    // The vector store is created first, even if every source later fails —
    // callers receive its id together with upload/skip counts.
    const vectorStore = await vectorStores.create({
        name: `${name} Knowledge Base`,
    });
    const vectorStoreId = vectorStore.id;
    if (this.options.isVerbose) {
        console.info('[🤰]', 'Vector store created', {
            vectorStoreId,
            logLabel,
        });
    }
    const fileStreams = [];
    const skippedSources = [];
    let totalBytes = 0;
    const processingStartedAtMs = Date.now();
    for (const [index, source] of knowledgeSources.entries()) {
        try {
            const isDataUrl = isDataUrlKnowledgeSource(source);
            const isHttp = source.startsWith('http://') || source.startsWith('https://');
            const sourceType = isDataUrl ? 'data_url' : isHttp ? 'url' : 'file';
            if (this.options.isVerbose) {
                console.info('[🤰]', 'Processing knowledge source', {
                    index: index + 1,
                    total: knowledgeSourcesCount,
                    source,
                    sourceType,
                    logLabel,
                });
            }
            if (isDataUrl) {
                // Data URLs are decoded locally; an unparseable one is skipped, not fatal.
                const parsed = parseDataUrlKnowledgeSource(source);
                if (!parsed) {
                    skippedSources.push({ source, reason: 'invalid_data_url' });
                    if (this.options.isVerbose) {
                        console.info('[🤰]', 'Skipping knowledge source (invalid data URL)', {
                            source,
                            sourceType,
                            logLabel,
                        });
                    }
                    continue;
                }
                const dataUrlFile = new File([parsed.buffer], parsed.filename, {
                    type: parsed.mimeType,
                });
                fileStreams.push(dataUrlFile);
                totalBytes += parsed.buffer.length;
                continue;
            }
            if (isHttp) {
                // Downloads are delegated to a helper that applies the configured timeout;
                // a null result means the download failed and the source is skipped.
                const downloadResult = await this.downloadKnowledgeSourceFile({
                    source,
                    timeoutMs: downloadTimeoutMs,
                    logLabel,
                });
                if (downloadResult) {
                    fileStreams.push(downloadResult.file);
                    totalBytes += downloadResult.sizeBytes;
                }
                else {
                    skippedSources.push({ source, reason: 'download_failed' });
                }
            }
            else {
                // Local file paths are intentionally unsupported here (browser-safety), see TODO below.
                skippedSources.push({ source, reason: 'unsupported_source_type' });
                if (this.options.isVerbose) {
                    console.info('[🤰]', 'Skipping knowledge source (unsupported type)', {
                        source,
                        sourceType,
                        logLabel,
                    });
                }
                /*
                TODO: [🤰] Resolve problem with browser environment
                // Assume it's a local file path
                // Note: This will work in Node.js environment
                // For browser environments, this would need different handling
                const fs = await import('fs');
                const fileStream = fs.createReadStream(source);
                fileStreams.push(fileStream);
                */
            }
        }
        catch (error) {
            assertsError(error);
            skippedSources.push({ source, reason: 'processing_error' });
            console.error('[🤰]', 'Error processing knowledge source', {
                source,
                logLabel,
                error: serializeError(error),
            });
        }
    }
    if (this.options.isVerbose) {
        console.info('[🤰]', 'Finished processing knowledge sources', {
            total: knowledgeSourcesCount,
            downloadedCount: fileStreams.length,
            skippedCount: skippedSources.length,
            totalBytes,
            elapsedMs: Date.now() - processingStartedAtMs,
            skippedSamples: skippedSources.slice(0, 3),
            logLabel,
        });
    }
    if (fileStreams.length > 0) {
        if (this.options.isVerbose) {
            console.info('[🤰]', 'Uploading files to vector store', {
                vectorStoreId,
                fileCount: fileStreams.length,
                totalBytes,
                maxConcurrency: this.getKnowledgeSourceUploadMaxConcurrency(),
                pollIntervalMs: this.getKnowledgeSourceUploadPollIntervalMs(),
                uploadTimeoutMs: this.getKnowledgeSourceUploadTimeoutMs(),
                logLabel,
            });
        }
        try {
            await this.uploadKnowledgeSourceFilesToVectorStore({
                client,
                vectorStoreId,
                files: fileStreams,
                totalBytes,
                logLabel,
            });
        }
        catch (error) {
            // Upload failures are logged but deliberately not rethrown — the
            // vector store id is still returned so callers can proceed/retry.
            assertsError(error);
            console.error('[🤰]', 'Error uploading files to vector store', {
                vectorStoreId,
                logLabel,
                error: serializeError(error),
            });
        }
    }
    else if (this.options.isVerbose) {
        console.info('[🤰]', 'No knowledge source files to upload', {
            vectorStoreId,
            skippedCount: skippedSources.length,
            logLabel,
        });
    }
    return {
        vectorStoreId,
        uploadedFileCount: fileStreams.length,
        skippedCount: skippedSources.length,
        totalBytes,
    };
}
|
|
24005
|
+
}
|
|
24006
|
+
|
|
24007
|
+
const DEFAULT_AGENT_KIT_MODEL_NAME = 'gpt-5.2';
const DEFAULT_JSON_SCHEMA_NAME = 'StructuredOutput';
/*
TODO: Use or remove
const EMPTY_JSON_SCHEMA: JsonSchemaDefinition['schema'] = {
    type: 'object',
    properties: {},
    required: [],
    additionalProperties: true,
};
*/
/**
 * Normalizes an optional OpenAI `json_schema` payload into a complete
 * AgentKit json-schema output definition, filling in safe defaults for
 * every missing piece (name, properties, required, additionalProperties).
 *
 * @param jsonSchema - optional `{ name?, strict?, schema? }` payload; may be omitted entirely
 * @returns a `{ type: 'json_schema', ... }` definition with an object schema
 */
function buildJsonSchemaDefinition(jsonSchema) {
    // `== null` deliberately matches both null and undefined, mirroring `??`.
    const innerSchema = jsonSchema == null || jsonSchema.schema == null ? {} : jsonSchema.schema;
    const schemaName = jsonSchema == null || jsonSchema.name == null ? DEFAULT_JSON_SCHEMA_NAME : jsonSchema.name;
    const properties = innerSchema.properties == null ? {} : innerSchema.properties;
    const required = Array.isArray(innerSchema.required) ? innerSchema.required : [];
    // `additionalProperties` defaults to true (permissive) only when absent.
    const additionalProperties =
        innerSchema.additionalProperties === undefined ? true : Boolean(innerSchema.additionalProperties);
    return {
        type: 'json_schema',
        name: schemaName,
        strict: Boolean(jsonSchema && jsonSchema.strict),
        schema: {
            type: 'object',
            properties,
            required,
            additionalProperties,
            description: innerSchema.description,
        },
    };
}
|
|
24034
|
+
/**
 * Maps OpenAI `response_format` payloads to AgentKit output types so the runner can forward
 * structured-output preferences to OpenAI while still reusing the same AgentKit agent instance.
 *
 * Accepts both the shorthand string form (`'text'`, `'json_schema'`, `'json_object'`)
 * and the object form (`{ type, json_schema? }`).
 *
 * @param responseFormat - The OpenAI `response_format` payload from the user request.
 * @returns An Agent output type compatible with the requested schema or `undefined` when no impact is required.
 * @private utility of Open AI
 */
function mapResponseFormatToAgentOutputType(responseFormat) {
    if (!responseFormat) {
        return undefined;
    }
    if (typeof responseFormat === 'string') {
        if (responseFormat === 'json_schema' || responseFormat === 'json_object') {
            return buildJsonSchemaDefinition();
        }
        // 'text' — and any unrecognized string label — falls back to plain text output.
        return 'text';
    }
    const formatType = responseFormat.type;
    if (formatType === 'text') {
        return 'text';
    }
    if (formatType === 'json_schema') {
        return buildJsonSchemaDefinition(responseFormat.json_schema);
    }
    if (formatType === 'json_object') {
        return buildJsonSchemaDefinition();
    }
    // Unknown object types impose no output constraint.
    return undefined;
}
|
|
24066
|
+
/**
|
|
24067
|
+
* Execution tools for OpenAI AgentKit (Agents SDK).
|
|
24068
|
+
*
|
|
24069
|
+
* @public exported from `@promptbook/openai`
|
|
24070
|
+
*/
|
|
24071
|
+
class OpenAiAgentKitExecutionTools extends OpenAiVectorStoreHandler {
|
|
24072
|
+
/**
|
|
24073
|
+
* Creates OpenAI AgentKit execution tools.
|
|
24074
|
+
*/
|
|
24075
|
+
constructor(options) {
|
|
24076
|
+
var _a;
|
|
24077
|
+
if (options.isProxied) {
|
|
24078
|
+
throw new NotYetImplementedError(`Proxy mode is not yet implemented for OpenAI AgentKit`);
|
|
24079
|
+
}
|
|
24080
|
+
super(options);
|
|
24081
|
+
this.preparedAgentKitAgent = null;
|
|
24082
|
+
this.agentKitModelName = (_a = options.agentKitModelName) !== null && _a !== void 0 ? _a : DEFAULT_AGENT_KIT_MODEL_NAME;
|
|
24083
|
+
}
|
|
24084
|
+
get title() {
|
|
24085
|
+
return 'OpenAI AgentKit';
|
|
24086
|
+
}
|
|
24087
|
+
get description() {
|
|
24088
|
+
return 'Use OpenAI AgentKit for agent-style chat with tools and knowledge';
|
|
24089
|
+
}
|
|
24090
|
+
/**
|
|
24091
|
+
* Calls OpenAI AgentKit with a chat prompt (non-streaming).
|
|
24092
|
+
*/
|
|
24093
|
+
async callChatModel(prompt) {
|
|
24094
|
+
return this.callChatModelStream(prompt, () => { });
|
|
24095
|
+
}
|
|
24096
|
+
/**
 * Calls OpenAI AgentKit with a chat prompt (streaming).
 *
 * Validates the prompt (CHAT variant only, no fine-grained model knobs),
 * prepares an AgentKit agent from the model requirements, then delegates
 * the actual run to `callChatModelStreamWithPreparedAgent`.
 *
 * @param prompt - the chat prompt with `content`, `parameters` and `modelRequirements`
 * @param onProgress - callback invoked with partial results during streaming
 */
async callChatModelStream(prompt, onProgress) {
    const { content, parameters, modelRequirements } = prompt;
    if (modelRequirements.modelVariant !== 'CHAT') {
        throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
    }
    // AgentKit manages these knobs itself; reject prompts that try to set them.
    for (const key of ['maxTokens', 'modelName', 'seed', 'temperature']) {
        if (modelRequirements[key] !== undefined) {
            throw new NotYetImplementedError(`In \`OpenAiAgentKitExecutionTools\` you cannot specify \`${key}\``);
        }
    }
    // Template the prompt content; `modelName` is also exposed as a template parameter.
    const rawPromptContent = templateParameters(content, {
        ...parameters,
        modelName: this.agentKitModelName,
    });
    const responseFormatOutputType = mapResponseFormatToAgentOutputType(modelRequirements.responseFormat);
    // Tools attached directly on the prompt take precedence over modelRequirements.tools.
    const preparedAgentKitAgent = await this.prepareAgentKitAgent({
        name: (prompt.title || 'Agent'),
        instructions: modelRequirements.systemMessage || '',
        knowledgeSources: modelRequirements.knowledgeSources,
        tools: 'tools' in prompt && Array.isArray(prompt.tools) ? prompt.tools : modelRequirements.tools,
    });
    return this.callChatModelStreamWithPreparedAgent({
        openAiAgentKitAgent: preparedAgentKitAgent.agent,
        prompt,
        rawPromptContent,
        onProgress,
        responseFormatOutputType,
    });
}
|
|
24128
|
+
/**
|
|
24129
|
+
* Returns a prepared AgentKit agent when the server wants to manage caching externally.
|
|
24130
|
+
*/
|
|
24131
|
+
getPreparedAgentKitAgent() {
|
|
24132
|
+
return this.preparedAgentKitAgent;
|
|
24133
|
+
}
|
|
24134
|
+
/**
|
|
24135
|
+
* Stores a prepared AgentKit agent for later reuse by external cache managers.
|
|
24136
|
+
*/
|
|
24137
|
+
setPreparedAgentKitAgent(preparedAgent) {
|
|
24138
|
+
this.preparedAgentKitAgent = preparedAgent;
|
|
24139
|
+
}
|
|
24140
|
+
/**
|
|
24141
|
+
* Creates a new tools instance bound to a prepared AgentKit agent.
|
|
24142
|
+
*/
|
|
24143
|
+
getPreparedAgentTools(preparedAgent) {
|
|
24144
|
+
const tools = new OpenAiAgentKitExecutionTools(this.agentKitOptions);
|
|
24145
|
+
tools.setPreparedAgentKitAgent(preparedAgent);
|
|
24146
|
+
return tools;
|
|
24147
|
+
}
|
|
24148
|
+
/**
 * Prepares an AgentKit agent with optional knowledge sources and tool definitions.
 *
 * When no cached `vectorStoreId` is supplied and knowledge sources exist, a new
 * vector store is created and populated first. The resulting `{ agent, vectorStoreId }`
 * pair is optionally stored on this instance (`storeAsPrepared`) for later reuse.
 *
 * @param options - `{ name, instructions, knowledgeSources?, tools?, vectorStoreId?, storeAsPrepared? }`
 * @returns the prepared `{ agent, vectorStoreId }` pair
 */
async prepareAgentKitAgent(options) {
    var _a, _b;
    const { name, instructions, knowledgeSources, tools, vectorStoreId: cachedVectorStoreId, storeAsPrepared, } = options;
    // Wire the AgentKit SDK to this instance's OpenAI client/key before any SDK call.
    await this.ensureAgentKitDefaults();
    if (this.options.isVerbose) {
        console.info('[🤰]', 'Preparing OpenAI AgentKit agent', {
            name,
            instructionsLength: instructions.length,
            knowledgeSourcesCount: (_a = knowledgeSources === null || knowledgeSources === void 0 ? void 0 : knowledgeSources.length) !== null && _a !== void 0 ? _a : 0,
            toolsCount: (_b = tools === null || tools === void 0 ? void 0 : tools.length) !== null && _b !== void 0 ? _b : 0,
        });
    }
    let vectorStoreId = cachedVectorStoreId;
    if (!vectorStoreId && knowledgeSources && knowledgeSources.length > 0) {
        // No cached store — build one from the knowledge sources now.
        const vectorStoreResult = await this.createVectorStoreWithKnowledgeSources({
            client: await this.getClient(),
            name,
            knowledgeSources,
            logLabel: 'agentkit preparation',
        });
        vectorStoreId = vectorStoreResult.vectorStoreId;
    }
    else if (vectorStoreId && this.options.isVerbose) {
        console.info('[🤰]', 'Using cached vector store for AgentKit agent', {
            name,
            vectorStoreId,
        });
    }
    const agentKitTools = this.buildAgentKitTools({ tools, vectorStoreId });
    const openAiAgentKitAgent = new Agent$1({
        name,
        model: this.agentKitModelName,
        instructions: instructions || 'You are a helpful assistant.',
        tools: agentKitTools,
    });
    const preparedAgent = {
        agent: openAiAgentKitAgent,
        vectorStoreId,
    };
    if (storeAsPrepared) {
        this.setPreparedAgentKitAgent(preparedAgent);
    }
    if (this.options.isVerbose) {
        console.info('[🤰]', 'OpenAI AgentKit agent ready', {
            name,
            model: this.agentKitModelName,
            toolCount: agentKitTools.length,
            hasVectorStore: Boolean(vectorStoreId),
        });
    }
    return preparedAgent;
}
|
|
24203
|
+
/**
|
|
24204
|
+
* Ensures the AgentKit SDK is wired to the OpenAI client and API key.
|
|
24205
|
+
*/
|
|
24206
|
+
async ensureAgentKitDefaults() {
|
|
24207
|
+
const client = await this.getClient();
|
|
24208
|
+
setDefaultOpenAIClient(client);
|
|
24209
|
+
const apiKey = this.agentKitOptions.apiKey;
|
|
24210
|
+
if (apiKey && typeof apiKey === 'string') {
|
|
24211
|
+
setDefaultOpenAIKey(apiKey);
|
|
24212
|
+
}
|
|
24213
|
+
}
|
|
24214
|
+
/**
 * Builds the tool list for AgentKit, including hosted file search when applicable.
 *
 * When a `vectorStoreId` is present, a hosted file-search tool is added first.
 * Each user-supplied tool definition is wrapped in an AgentKit `tool()` whose
 * `execute` runs the named function through the first configured script tool.
 * Tool failures are caught and returned to the model as a formatted error
 * message rather than thrown, so a bad tool call does not abort the run.
 *
 * @param options - `{ tools?, vectorStoreId? }`
 * @returns the array of AgentKit tool instances (possibly empty)
 */
buildAgentKitTools(options) {
    var _a;
    const { tools, vectorStoreId } = options;
    const agentKitTools = [];
    if (vectorStoreId) {
        agentKitTools.push(fileSearchTool(vectorStoreId));
    }
    if (tools && tools.length > 0) {
        // Throws when no executionTools.script was configured — tools cannot run without it.
        const scriptTools = this.resolveScriptTools();
        for (const toolDefinition of tools) {
            agentKitTools.push(tool({
                name: toolDefinition.name,
                description: toolDefinition.description,
                parameters: toolDefinition.parameters
                    ? {
                        ...toolDefinition.parameters,
                        // AgentKit requires a closed schema; `required` defaults to [].
                        additionalProperties: false,
                        required: (_a = toolDefinition.parameters.required) !== null && _a !== void 0 ? _a : [],
                    }
                    : undefined,
                strict: false,
                execute: async (input, runContext, details) => {
                    var _a, _b, _c;
                    // NOTE(review): only the FIRST configured script tool is used — confirm intended.
                    const scriptTool = scriptTools[0];
                    const functionName = toolDefinition.name;
                    const calledAt = $getCurrentDate();
                    const callId = (_a = details === null || details === void 0 ? void 0 : details.toolCall) === null || _a === void 0 ? void 0 : _a.callId;
                    const functionArgs = input !== null && input !== void 0 ? input : {};
                    if (this.options.isVerbose) {
                        console.info('[🤰]', 'Executing AgentKit tool', {
                            functionName,
                            callId,
                            calledAt,
                        });
                    }
                    try {
                        // The tool call is translated into a small JS script that invokes
                        // the named function with the JSON-serialized arguments.
                        return await scriptTool.execute({
                            scriptLanguage: 'javascript',
                            script: `
                                const args = ${JSON.stringify(functionArgs)};
                                return await ${functionName}(args);
                            `,
                            parameters: (_c = (_b = runContext === null || runContext === void 0 ? void 0 : runContext.context) === null || _b === void 0 ? void 0 : _b.parameters) !== null && _c !== void 0 ? _c : {},
                        });
                    }
                    catch (error) {
                        assertsError(error);
                        const serializedError = serializeError(error);
                        // Return (not throw) the error so the model can see what failed.
                        const errorMessage = spaceTrim$2((block) => `

                            The invoked tool \`${functionName}\` failed with error:

                            \`\`\`json
                            ${block(JSON.stringify(serializedError, null, 4))}
                            \`\`\`

                        `);
                        console.error('[🤰]', 'AgentKit tool execution failed', {
                            functionName,
                            callId,
                            error: serializedError,
                        });
                        return errorMessage;
                    }
                },
            }));
        }
    }
    return agentKitTools;
}
|
|
24287
|
+
/**
|
|
24288
|
+
* Resolves the configured script tools for tool execution.
|
|
24289
|
+
*/
|
|
24290
|
+
resolveScriptTools() {
|
|
24291
|
+
const executionTools = this.options.executionTools;
|
|
24292
|
+
if (!executionTools || !executionTools.script) {
|
|
24293
|
+
throw new PipelineExecutionError(`Model requested tools but no executionTools.script were provided in OpenAiAgentKitExecutionTools options`);
|
|
24294
|
+
}
|
|
24295
|
+
return Array.isArray(executionTools.script) ? executionTools.script : [executionTools.script];
|
|
24296
|
+
}
|
|
24297
|
+
/**
 * Runs a prepared AgentKit agent and streams results back to the caller.
 *
 * Emits `onProgress` for every text delta, every tool call, and every tool
 * result; finally emits (and returns) the complete result once the run has
 * finished. Tool calls are tracked by `callId` so their outputs can be
 * merged back onto the originating call record.
 *
 * @param options - `{ openAiAgentKitAgent, prompt, onProgress, rawPromptContent?, responseFormatOutputType? }`
 * @returns the final chat result including timing, tool calls and raw run data
 */
async callChatModelStreamWithPreparedAgent(options) {
    var _a, _b, _c, _d;
    const { openAiAgentKitAgent, prompt, onProgress } = options;
    // Recompute templated content unless the caller already did it.
    const rawPromptContent = (_a = options.rawPromptContent) !== null && _a !== void 0 ? _a : templateParameters(prompt.content, {
        ...prompt.parameters,
        modelName: this.agentKitModelName,
    });
    // Clone the agent only when a structured-output type must be applied,
    // so the cached agent instance itself stays untouched.
    const agentForRun = options.responseFormatOutputType !== undefined
        ? openAiAgentKitAgent.clone({
            outputType: options.responseFormatOutputType,
        })
        : openAiAgentKitAgent;
    const start = $getCurrentDate();
    let latestContent = '';
    const toolCalls = [];
    // Maps AgentKit callId -> index in `toolCalls`, to attach outputs later.
    const toolCallIndexById = new Map();
    const inputItems = await this.buildAgentKitInputItems(prompt, rawPromptContent);
    const rawRequest = {
        agentName: agentForRun.name,
        input: inputItems,
    };
    const streamResult = await run(agentForRun, inputItems, {
        stream: true,
        context: { parameters: prompt.parameters },
    });
    for await (const event of streamResult) {
        // Plain text delta: accumulate and report the full text so far.
        if (event.type === 'raw_model_stream_event' && ((_b = event.data) === null || _b === void 0 ? void 0 : _b.type) === 'output_text_delta') {
            latestContent += event.data.delta;
            onProgress({
                content: latestContent,
                modelName: this.agentKitModelName,
                timing: { start, complete: $getCurrentDate() },
                usage: UNCERTAIN_USAGE,
                rawPromptContent: rawPromptContent,
                rawRequest: null,
                rawResponse: {},
            });
            continue;
        }
        if (event.type === 'run_item_stream_event') {
            const rawItem = (_c = event.item) === null || _c === void 0 ? void 0 : _c.rawItem;
            // A tool was invoked: record the call and report it immediately.
            if (event.name === 'tool_called' && (rawItem === null || rawItem === void 0 ? void 0 : rawItem.type) === 'function_call') {
                const toolCall = {
                    name: rawItem.name,
                    arguments: rawItem.arguments,
                    rawToolCall: rawItem,
                    createdAt: $getCurrentDate(),
                };
                toolCallIndexById.set(rawItem.callId, toolCalls.length);
                toolCalls.push(toolCall);
                onProgress({
                    content: latestContent,
                    modelName: this.agentKitModelName,
                    timing: { start, complete: $getCurrentDate() },
                    usage: UNCERTAIN_USAGE,
                    rawPromptContent: rawPromptContent,
                    rawRequest: null,
                    rawResponse: {},
                    toolCalls: [toolCall],
                });
            }
            // A tool finished: merge its result onto the previously recorded call.
            if (event.name === 'tool_output' && (rawItem === null || rawItem === void 0 ? void 0 : rawItem.type) === 'function_call_result') {
                const index = toolCallIndexById.get(rawItem.callId);
                const result = this.formatAgentKitToolOutput(rawItem.output);
                if (index !== undefined) {
                    const existingToolCall = toolCalls[index];
                    const completedToolCall = {
                        ...existingToolCall,
                        result,
                        rawToolCall: rawItem,
                    };
                    toolCalls[index] = completedToolCall;
                    onProgress({
                        content: latestContent,
                        modelName: this.agentKitModelName,
                        timing: { start, complete: $getCurrentDate() },
                        usage: UNCERTAIN_USAGE,
                        rawPromptContent: rawPromptContent,
                        rawRequest: null,
                        rawResponse: {},
                        toolCalls: [completedToolCall],
                    });
                }
            }
        }
    }
    // Wait for the run to fully settle before reading the final output.
    await streamResult.completed;
    const complete = $getCurrentDate();
    // Prefer the SDK's final output; fall back to the accumulated deltas.
    const finalContent = ((_d = streamResult.finalOutput) !== null && _d !== void 0 ? _d : latestContent);
    const finalResult = {
        content: finalContent,
        modelName: this.agentKitModelName,
        timing: { start, complete },
        usage: UNCERTAIN_USAGE,
        rawPromptContent: rawPromptContent,
        rawRequest,
        rawResponse: { runResult: streamResult },
        toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
    };
    onProgress(finalResult);
    return finalResult;
}
|
|
23119
24402
|
/**
|
|
23120
|
-
*
|
|
24403
|
+
* Builds AgentKit input items from the prompt and optional thread.
|
|
23121
24404
|
*/
|
|
23122
|
-
|
|
23123
|
-
|
|
23124
|
-
const
|
|
23125
|
-
|
|
23126
|
-
|
|
23127
|
-
|
|
23128
|
-
|
|
23129
|
-
|
|
23130
|
-
|
|
23131
|
-
|
|
23132
|
-
|
|
23133
|
-
|
|
23134
|
-
|
|
23135
|
-
const response = await fetch(source);
|
|
23136
|
-
if (!response.ok) {
|
|
23137
|
-
console.error(`Failed to download ${source}: ${response.statusText}`);
|
|
23138
|
-
continue;
|
|
23139
|
-
}
|
|
23140
|
-
const buffer = await response.arrayBuffer();
|
|
23141
|
-
const filename = source.split('/').pop() || 'downloaded-file';
|
|
23142
|
-
const blob = new Blob([buffer]);
|
|
23143
|
-
const file = new File([blob], filename);
|
|
23144
|
-
fileStreams.push(file);
|
|
24405
|
+
async buildAgentKitInputItems(prompt, rawPromptContent) {
|
|
24406
|
+
var _a;
|
|
24407
|
+
const inputItems = [];
|
|
24408
|
+
if ('thread' in prompt && Array.isArray(prompt.thread)) {
|
|
24409
|
+
for (const message of prompt.thread) {
|
|
24410
|
+
const sender = message.sender;
|
|
24411
|
+
const content = (_a = message.content) !== null && _a !== void 0 ? _a : '';
|
|
24412
|
+
if (sender === 'assistant' || sender === 'agent') {
|
|
24413
|
+
inputItems.push({
|
|
24414
|
+
role: 'assistant',
|
|
24415
|
+
status: 'completed',
|
|
24416
|
+
content: [{ type: 'output_text', text: content }],
|
|
24417
|
+
});
|
|
23145
24418
|
}
|
|
23146
24419
|
else {
|
|
23147
|
-
|
|
24420
|
+
inputItems.push({
|
|
24421
|
+
role: 'user',
|
|
24422
|
+
content,
|
|
24423
|
+
});
|
|
23148
24424
|
}
|
|
23149
24425
|
}
|
|
23150
|
-
catch (error) {
|
|
23151
|
-
console.error(`Error processing knowledge source ${source}:`, error);
|
|
23152
|
-
}
|
|
23153
24426
|
}
|
|
23154
|
-
|
|
23155
|
-
|
|
23156
|
-
|
|
23157
|
-
|
|
23158
|
-
|
|
23159
|
-
|
|
23160
|
-
|
|
23161
|
-
|
|
23162
|
-
|
|
24427
|
+
const userContent = await this.buildAgentKitUserContent(prompt, rawPromptContent);
|
|
24428
|
+
inputItems.push({
|
|
24429
|
+
role: 'user',
|
|
24430
|
+
content: userContent,
|
|
24431
|
+
});
|
|
24432
|
+
return inputItems;
|
|
24433
|
+
}
|
|
24434
|
+
/**
|
|
24435
|
+
* Builds the user message content for AgentKit runs, including file inputs when provided.
|
|
24436
|
+
*/
|
|
24437
|
+
async buildAgentKitUserContent(prompt, rawPromptContent) {
|
|
24438
|
+
if ('files' in prompt && Array.isArray(prompt.files) && prompt.files.length > 0) {
|
|
24439
|
+
const fileItems = await Promise.all(prompt.files.map(async (file) => {
|
|
24440
|
+
const arrayBuffer = await file.arrayBuffer();
|
|
24441
|
+
const base64 = Buffer.from(arrayBuffer).toString('base64');
|
|
24442
|
+
return {
|
|
24443
|
+
type: 'input_image',
|
|
24444
|
+
image: `data:${file.type};base64,${base64}`,
|
|
24445
|
+
};
|
|
24446
|
+
}));
|
|
24447
|
+
return [{ type: 'input_text', text: rawPromptContent }, ...fileItems];
|
|
24448
|
+
}
|
|
24449
|
+
return rawPromptContent;
|
|
24450
|
+
}
|
|
24451
|
+
/**
|
|
24452
|
+
* Normalizes AgentKit tool outputs into a string for Promptbook tool call results.
|
|
24453
|
+
*/
|
|
24454
|
+
formatAgentKitToolOutput(output) {
|
|
24455
|
+
if (typeof output === 'string') {
|
|
24456
|
+
return output;
|
|
24457
|
+
}
|
|
24458
|
+
if (output && typeof output === 'object') {
|
|
24459
|
+
const textOutput = output;
|
|
24460
|
+
if (textOutput.type === 'text' && typeof textOutput.text === 'string') {
|
|
24461
|
+
return textOutput.text;
|
|
23163
24462
|
}
|
|
23164
24463
|
}
|
|
23165
|
-
return
|
|
24464
|
+
return JSON.stringify(output !== null && output !== void 0 ? output : null);
|
|
23166
24465
|
}
|
|
23167
24466
|
/**
|
|
23168
|
-
*
|
|
24467
|
+
* Returns AgentKit-specific options.
|
|
24468
|
+
*/
|
|
24469
|
+
get agentKitOptions() {
|
|
24470
|
+
return this.options;
|
|
24471
|
+
}
|
|
24472
|
+
/**
|
|
24473
|
+
* Discriminant for type guards.
|
|
23169
24474
|
*/
|
|
23170
24475
|
get discriminant() {
|
|
23171
|
-
return
|
|
24476
|
+
return DISCRIMINANT$1;
|
|
23172
24477
|
}
|
|
23173
24478
|
/**
|
|
23174
|
-
* Type guard to check if given `LlmExecutionTools` are instanceof `
|
|
24479
|
+
* Type guard to check if given `LlmExecutionTools` are instanceof `OpenAiAgentKitExecutionTools`.
|
|
23175
24480
|
*/
|
|
23176
|
-
static
|
|
23177
|
-
return llmExecutionTools.discriminant ===
|
|
24481
|
+
static isOpenAiAgentKitExecutionTools(llmExecutionTools) {
|
|
24482
|
+
return llmExecutionTools.discriminant === DISCRIMINANT$1;
|
|
23178
24483
|
}
|
|
23179
24484
|
}
|
|
24485
|
+
/**
|
|
24486
|
+
* Discriminant for type guards.
|
|
24487
|
+
*
|
|
24488
|
+
* @private const of `OpenAiAgentKitExecutionTools`
|
|
24489
|
+
*/
|
|
24490
|
+
const DISCRIMINANT$1 = 'OPEN_AI_AGENT_KIT_V1';
|
|
23180
24491
|
|
|
23181
24492
|
/**
|
|
23182
24493
|
* Uploads files to OpenAI and returns their IDs
|
|
@@ -23211,10 +24522,10 @@ async function uploadFilesToOpenAi(client, files) {
|
|
|
23211
24522
|
* - `OpenAiAssistantExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities, recommended for usage in `Agent` or `AgentLlmExecutionTools`
|
|
23212
24523
|
* - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
|
|
23213
24524
|
*
|
|
24525
|
+
* @deprecated Use `OpenAiAgentKitExecutionTools` instead.
|
|
23214
24526
|
* @public exported from `@promptbook/openai`
|
|
23215
|
-
* @deprecated Use `OpenAiAgentExecutionTools` instead which uses the new OpenAI Responses API
|
|
23216
24527
|
*/
|
|
23217
|
-
class OpenAiAssistantExecutionTools extends
|
|
24528
|
+
class OpenAiAssistantExecutionTools extends OpenAiVectorStoreHandler {
|
|
23218
24529
|
/**
|
|
23219
24530
|
* Creates OpenAI Execution Tools.
|
|
23220
24531
|
*
|
|
@@ -23343,8 +24654,7 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
|
|
|
23343
24654
|
console.info(colors.bgWhite('rawRequest (non-streaming with tools)'), JSON.stringify(rawRequest, null, 4));
|
|
23344
24655
|
}
|
|
23345
24656
|
// Create thread and run
|
|
23346
|
-
|
|
23347
|
-
let run = threadAndRun;
|
|
24657
|
+
let run = (await client.beta.threads.createAndRun(rawRequest));
|
|
23348
24658
|
const completedToolCalls = [];
|
|
23349
24659
|
const toolCallStartedAt = new Map();
|
|
23350
24660
|
// Poll until run completes or requires action
|
|
@@ -23439,14 +24749,14 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
|
|
|
23439
24749
|
}
|
|
23440
24750
|
}
|
|
23441
24751
|
// Submit tool outputs
|
|
23442
|
-
run = await client.beta.threads.runs.submitToolOutputs(run.thread_id, run.id, {
|
|
24752
|
+
run = (await client.beta.threads.runs.submitToolOutputs(run.thread_id, run.id, {
|
|
23443
24753
|
tool_outputs: toolOutputs,
|
|
23444
|
-
});
|
|
24754
|
+
}));
|
|
23445
24755
|
}
|
|
23446
24756
|
else {
|
|
23447
24757
|
// Wait a bit before polling again
|
|
23448
24758
|
await new Promise((resolve) => setTimeout(resolve, 500));
|
|
23449
|
-
run = await client.beta.threads.runs.retrieve(run.thread_id, run.id);
|
|
24759
|
+
run = (await client.beta.threads.runs.retrieve(run.thread_id, run.id));
|
|
23450
24760
|
}
|
|
23451
24761
|
}
|
|
23452
24762
|
if (run.status !== 'completed') {
|
|
@@ -23645,6 +24955,7 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
|
|
|
23645
24955
|
getAssistant(assistantId) {
|
|
23646
24956
|
return new OpenAiAssistantExecutionTools({
|
|
23647
24957
|
...this.options,
|
|
24958
|
+
isCreatingNewAssistantsAllowed: this.isCreatingNewAssistantsAllowed,
|
|
23648
24959
|
assistantId,
|
|
23649
24960
|
});
|
|
23650
24961
|
}
|
|
@@ -23670,88 +24981,13 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
|
|
|
23670
24981
|
let vectorStoreId;
|
|
23671
24982
|
// If knowledge sources are provided, create a vector store with them
|
|
23672
24983
|
if (knowledgeSources && knowledgeSources.length > 0) {
|
|
23673
|
-
|
|
23674
|
-
|
|
23675
|
-
|
|
23676
|
-
|
|
23677
|
-
|
|
23678
|
-
}
|
|
23679
|
-
// Create a vector store
|
|
23680
|
-
const vectorStore = await client.beta.vectorStores.create({
|
|
23681
|
-
name: `${name} Knowledge Base`,
|
|
24984
|
+
const vectorStoreResult = await this.createVectorStoreWithKnowledgeSources({
|
|
24985
|
+
client,
|
|
24986
|
+
name,
|
|
24987
|
+
knowledgeSources,
|
|
24988
|
+
logLabel: 'assistant creation',
|
|
23682
24989
|
});
|
|
23683
|
-
vectorStoreId =
|
|
23684
|
-
if (this.options.isVerbose) {
|
|
23685
|
-
console.info('[🤰]', 'Vector store created', {
|
|
23686
|
-
vectorStoreId,
|
|
23687
|
-
});
|
|
23688
|
-
}
|
|
23689
|
-
// Upload files from knowledge sources to the vector store
|
|
23690
|
-
const fileStreams = [];
|
|
23691
|
-
for (const [index, source] of knowledgeSources.entries()) {
|
|
23692
|
-
try {
|
|
23693
|
-
if (this.options.isVerbose) {
|
|
23694
|
-
console.info('[🤰]', 'Processing knowledge source', {
|
|
23695
|
-
index: index + 1,
|
|
23696
|
-
total: knowledgeSources.length,
|
|
23697
|
-
source,
|
|
23698
|
-
sourceType: source.startsWith('http') || source.startsWith('https') ? 'url' : 'file',
|
|
23699
|
-
});
|
|
23700
|
-
}
|
|
23701
|
-
// Check if it's a URL
|
|
23702
|
-
if (source.startsWith('http://') || source.startsWith('https://')) {
|
|
23703
|
-
// Download the file
|
|
23704
|
-
const response = await fetch(source);
|
|
23705
|
-
if (!response.ok) {
|
|
23706
|
-
console.error(`Failed to download ${source}: ${response.statusText}`);
|
|
23707
|
-
continue;
|
|
23708
|
-
}
|
|
23709
|
-
const buffer = await response.arrayBuffer();
|
|
23710
|
-
let filename = source.split('/').pop() || 'downloaded-file';
|
|
23711
|
-
try {
|
|
23712
|
-
const url = new URL(source);
|
|
23713
|
-
filename = url.pathname.split('/').pop() || filename;
|
|
23714
|
-
}
|
|
23715
|
-
catch (error) {
|
|
23716
|
-
// Keep default filename
|
|
23717
|
-
}
|
|
23718
|
-
const blob = new Blob([buffer]);
|
|
23719
|
-
const file = new File([blob], filename);
|
|
23720
|
-
fileStreams.push(file);
|
|
23721
|
-
}
|
|
23722
|
-
else {
|
|
23723
|
-
/*
|
|
23724
|
-
TODO: [🐱🚀] Resolve problem with browser environment
|
|
23725
|
-
// Assume it's a local file path
|
|
23726
|
-
// Note: This will work in Node.js environment
|
|
23727
|
-
// For browser environments, this would need different handling
|
|
23728
|
-
const fs = await import('fs');
|
|
23729
|
-
const fileStream = fs.createReadStream(source);
|
|
23730
|
-
fileStreams.push(fileStream);
|
|
23731
|
-
*/
|
|
23732
|
-
}
|
|
23733
|
-
}
|
|
23734
|
-
catch (error) {
|
|
23735
|
-
console.error(`Error processing knowledge source ${source}:`, error);
|
|
23736
|
-
}
|
|
23737
|
-
}
|
|
23738
|
-
// Batch upload files to the vector store
|
|
23739
|
-
if (fileStreams.length > 0) {
|
|
23740
|
-
try {
|
|
23741
|
-
await client.beta.vectorStores.fileBatches.uploadAndPoll(vectorStoreId, {
|
|
23742
|
-
files: fileStreams,
|
|
23743
|
-
});
|
|
23744
|
-
if (this.options.isVerbose) {
|
|
23745
|
-
console.info('[🤰]', 'Uploaded files to vector store', {
|
|
23746
|
-
vectorStoreId,
|
|
23747
|
-
fileCount: fileStreams.length,
|
|
23748
|
-
});
|
|
23749
|
-
}
|
|
23750
|
-
}
|
|
23751
|
-
catch (error) {
|
|
23752
|
-
console.error('Error uploading files to vector store:', error);
|
|
23753
|
-
}
|
|
23754
|
-
}
|
|
24990
|
+
vectorStoreId = vectorStoreResult.vectorStoreId;
|
|
23755
24991
|
}
|
|
23756
24992
|
// Create assistant with vector store attached
|
|
23757
24993
|
const assistantConfig = {
|
|
@@ -23818,91 +25054,14 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
|
|
|
23818
25054
|
const client = await this.getClient();
|
|
23819
25055
|
let vectorStoreId;
|
|
23820
25056
|
// If knowledge sources are provided, create a vector store with them
|
|
23821
|
-
// TODO: [🧠] Reuse vector store creation logic from createNewAssistant
|
|
23822
25057
|
if (knowledgeSources && knowledgeSources.length > 0) {
|
|
23823
|
-
|
|
23824
|
-
|
|
23825
|
-
|
|
23826
|
-
|
|
23827
|
-
|
|
23828
|
-
});
|
|
23829
|
-
}
|
|
23830
|
-
// Create a vector store
|
|
23831
|
-
const vectorStore = await client.beta.vectorStores.create({
|
|
23832
|
-
name: `${name} Knowledge Base`,
|
|
25058
|
+
const vectorStoreResult = await this.createVectorStoreWithKnowledgeSources({
|
|
25059
|
+
client,
|
|
25060
|
+
name: name !== null && name !== void 0 ? name : assistantId,
|
|
25061
|
+
knowledgeSources,
|
|
25062
|
+
logLabel: 'assistant update',
|
|
23833
25063
|
});
|
|
23834
|
-
vectorStoreId =
|
|
23835
|
-
if (this.options.isVerbose) {
|
|
23836
|
-
console.info('[🤰]', 'Vector store created for assistant update', {
|
|
23837
|
-
vectorStoreId,
|
|
23838
|
-
});
|
|
23839
|
-
}
|
|
23840
|
-
// Upload files from knowledge sources to the vector store
|
|
23841
|
-
const fileStreams = [];
|
|
23842
|
-
for (const [index, source] of knowledgeSources.entries()) {
|
|
23843
|
-
try {
|
|
23844
|
-
if (this.options.isVerbose) {
|
|
23845
|
-
console.info('[🤰]', 'Processing knowledge source for update', {
|
|
23846
|
-
index: index + 1,
|
|
23847
|
-
total: knowledgeSources.length,
|
|
23848
|
-
source,
|
|
23849
|
-
sourceType: source.startsWith('http') || source.startsWith('https') ? 'url' : 'file',
|
|
23850
|
-
});
|
|
23851
|
-
}
|
|
23852
|
-
// Check if it's a URL
|
|
23853
|
-
if (source.startsWith('http://') || source.startsWith('https://')) {
|
|
23854
|
-
// Download the file
|
|
23855
|
-
const response = await fetch(source);
|
|
23856
|
-
if (!response.ok) {
|
|
23857
|
-
console.error(`Failed to download ${source}: ${response.statusText}`);
|
|
23858
|
-
continue;
|
|
23859
|
-
}
|
|
23860
|
-
const buffer = await response.arrayBuffer();
|
|
23861
|
-
let filename = source.split('/').pop() || 'downloaded-file';
|
|
23862
|
-
try {
|
|
23863
|
-
const url = new URL(source);
|
|
23864
|
-
filename = url.pathname.split('/').pop() || filename;
|
|
23865
|
-
}
|
|
23866
|
-
catch (error) {
|
|
23867
|
-
// Keep default filename
|
|
23868
|
-
}
|
|
23869
|
-
const blob = new Blob([buffer]);
|
|
23870
|
-
const file = new File([blob], filename);
|
|
23871
|
-
fileStreams.push(file);
|
|
23872
|
-
}
|
|
23873
|
-
else {
|
|
23874
|
-
/*
|
|
23875
|
-
TODO: [🐱🚀] Resolve problem with browser environment
|
|
23876
|
-
// Assume it's a local file path
|
|
23877
|
-
// Note: This will work in Node.js environment
|
|
23878
|
-
// For browser environments, this would need different handling
|
|
23879
|
-
const fs = await import('fs');
|
|
23880
|
-
const fileStream = fs.createReadStream(source);
|
|
23881
|
-
fileStreams.push(fileStream);
|
|
23882
|
-
*/
|
|
23883
|
-
}
|
|
23884
|
-
}
|
|
23885
|
-
catch (error) {
|
|
23886
|
-
console.error(`Error processing knowledge source ${source}:`, error);
|
|
23887
|
-
}
|
|
23888
|
-
}
|
|
23889
|
-
// Batch upload files to the vector store
|
|
23890
|
-
if (fileStreams.length > 0) {
|
|
23891
|
-
try {
|
|
23892
|
-
await client.beta.vectorStores.fileBatches.uploadAndPoll(vectorStoreId, {
|
|
23893
|
-
files: fileStreams,
|
|
23894
|
-
});
|
|
23895
|
-
if (this.options.isVerbose) {
|
|
23896
|
-
console.info('[🤰]', 'Uploaded files to vector store for update', {
|
|
23897
|
-
vectorStoreId,
|
|
23898
|
-
fileCount: fileStreams.length,
|
|
23899
|
-
});
|
|
23900
|
-
}
|
|
23901
|
-
}
|
|
23902
|
-
catch (error) {
|
|
23903
|
-
console.error('Error uploading files to vector store:', error);
|
|
23904
|
-
}
|
|
23905
|
-
}
|
|
25064
|
+
vectorStoreId = vectorStoreResult.vectorStoreId;
|
|
23906
25065
|
}
|
|
23907
25066
|
const assistantUpdate = {
|
|
23908
25067
|
name,
|
|
@@ -24006,8 +25165,8 @@ function emitAssistantPreparationProgress(options) {
|
|
|
24006
25165
|
* - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
|
|
24007
25166
|
* - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
|
|
24008
25167
|
* - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
|
|
24009
|
-
* - `OpenAiAgentExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with agent capabilities (using Responses API), recommended for usage in `Agent` or `AgentLlmExecutionTools`
|
|
24010
25168
|
* - `OpenAiAssistantExecutionTools` - (Deprecated) which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities
|
|
25169
|
+
* - `OpenAiAgentKitExecutionTools` - which is a specific implementation of `LlmExecutionTools` backed by OpenAI AgentKit
|
|
24011
25170
|
* - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
|
|
24012
25171
|
*
|
|
24013
25172
|
* @public exported from `@promptbook/core`
|
|
@@ -24142,97 +25301,129 @@ class AgentLlmExecutionTools {
|
|
|
24142
25301
|
* Calls the chat model with agent-specific system prompt and requirements with streaming
|
|
24143
25302
|
*/
|
|
24144
25303
|
async callChatModelStream(prompt, onProgress) {
|
|
25304
|
+
var _a, _b;
|
|
24145
25305
|
// Ensure we're working with a chat prompt
|
|
24146
25306
|
if (prompt.modelRequirements.modelVariant !== 'CHAT') {
|
|
24147
25307
|
throw new Error('AgentLlmExecutionTools only supports chat prompts');
|
|
24148
25308
|
}
|
|
24149
25309
|
const modelRequirements = await this.getModelRequirements();
|
|
25310
|
+
const { _metadata, promptSuffix, ...sanitizedRequirements } = modelRequirements;
|
|
24150
25311
|
const chatPrompt = prompt;
|
|
24151
25312
|
let underlyingLlmResult;
|
|
24152
|
-
|
|
25313
|
+
const chatPromptContentWithSuffix = promptSuffix
|
|
25314
|
+
? `${chatPrompt.content}\n\n${promptSuffix}`
|
|
25315
|
+
: chatPrompt.content;
|
|
24153
25316
|
const promptWithAgentModelRequirements = {
|
|
24154
25317
|
...chatPrompt,
|
|
25318
|
+
content: chatPromptContentWithSuffix,
|
|
24155
25319
|
modelRequirements: {
|
|
24156
25320
|
...chatPrompt.modelRequirements,
|
|
24157
|
-
...
|
|
25321
|
+
...sanitizedRequirements,
|
|
24158
25322
|
// Spread tools to convert readonly array to mutable
|
|
24159
|
-
tools:
|
|
25323
|
+
tools: sanitizedRequirements.tools
|
|
25324
|
+
? [...sanitizedRequirements.tools]
|
|
25325
|
+
: chatPrompt.modelRequirements.tools,
|
|
24160
25326
|
// Spread knowledgeSources to convert readonly array to mutable
|
|
24161
|
-
knowledgeSources:
|
|
24162
|
-
? [...
|
|
25327
|
+
knowledgeSources: sanitizedRequirements.knowledgeSources
|
|
25328
|
+
? [...sanitizedRequirements.knowledgeSources]
|
|
24163
25329
|
: undefined,
|
|
24164
25330
|
// Prepend agent system message to existing system message
|
|
24165
|
-
systemMessage:
|
|
25331
|
+
systemMessage: sanitizedRequirements.systemMessage +
|
|
24166
25332
|
(chatPrompt.modelRequirements.systemMessage
|
|
24167
25333
|
? `\n\n${chatPrompt.modelRequirements.systemMessage}`
|
|
24168
25334
|
: ''),
|
|
24169
25335
|
}, // Cast to avoid readonly mismatch from spread
|
|
24170
25336
|
};
|
|
24171
25337
|
console.log('!!!! promptWithAgentModelRequirements:', promptWithAgentModelRequirements);
|
|
24172
|
-
if (
|
|
24173
|
-
const requirementsHash = SHA256(JSON.stringify(
|
|
24174
|
-
const
|
|
24175
|
-
|
|
24176
|
-
|
|
25338
|
+
if (OpenAiAgentKitExecutionTools.isOpenAiAgentKitExecutionTools(this.options.llmTools)) {
|
|
25339
|
+
const requirementsHash = SHA256(JSON.stringify(sanitizedRequirements)).toString();
|
|
25340
|
+
const vectorStoreHash = SHA256(JSON.stringify((_a = sanitizedRequirements.knowledgeSources) !== null && _a !== void 0 ? _a : [])).toString();
|
|
25341
|
+
const cachedVectorStore = AgentLlmExecutionTools.vectorStoreCache.get(this.title);
|
|
25342
|
+
const cachedAgentKit = AgentLlmExecutionTools.agentKitAgentCache.get(this.title);
|
|
25343
|
+
let preparedAgentKit = this.options.assistantPreparationMode === 'external'
|
|
25344
|
+
? this.options.llmTools.getPreparedAgentKitAgent()
|
|
25345
|
+
: null;
|
|
25346
|
+
const vectorStoreId = (preparedAgentKit === null || preparedAgentKit === void 0 ? void 0 : preparedAgentKit.vectorStoreId) ||
|
|
25347
|
+
(cachedVectorStore && cachedVectorStore.requirementsHash === vectorStoreHash
|
|
25348
|
+
? cachedVectorStore.vectorStoreId
|
|
25349
|
+
: undefined);
|
|
25350
|
+
if (!preparedAgentKit && cachedAgentKit && cachedAgentKit.requirementsHash === requirementsHash) {
|
|
24177
25351
|
if (this.options.isVerbose) {
|
|
24178
|
-
console.
|
|
25352
|
+
console.info('[🤰]', 'Using cached OpenAI AgentKit agent', {
|
|
25353
|
+
agent: this.title,
|
|
25354
|
+
});
|
|
24179
25355
|
}
|
|
24180
|
-
|
|
24181
|
-
|
|
24182
|
-
|
|
24183
|
-
|
|
24184
|
-
// We can cast to access options if they were public, or use a method to clone.
|
|
24185
|
-
// OpenAiAgentExecutionTools doesn't have a clone method.
|
|
24186
|
-
// However, we can just assume the passed tool *might* not have the vector store yet, or we are replacing it.
|
|
24187
|
-
// Actually, if the passed tool IS OpenAiAgentExecutionTools, we should use it as a base.
|
|
24188
|
-
// TODO: [🧠] This is a bit hacky, accessing protected options or recreating tools.
|
|
24189
|
-
// Ideally OpenAiAgentExecutionTools should have a method `withVectorStoreId`.
|
|
24190
|
-
agentTools = new OpenAiAgentExecutionTools({
|
|
24191
|
-
...this.options.llmTools.options,
|
|
24192
|
-
vectorStoreId: cached.vectorStoreId,
|
|
24193
|
-
});
|
|
25356
|
+
preparedAgentKit = {
|
|
25357
|
+
agent: cachedAgentKit.agent,
|
|
25358
|
+
vectorStoreId: cachedAgentKit.vectorStoreId,
|
|
25359
|
+
};
|
|
24194
25360
|
}
|
|
24195
|
-
|
|
25361
|
+
if (!preparedAgentKit) {
|
|
24196
25362
|
if (this.options.isVerbose) {
|
|
24197
|
-
console.
|
|
24198
|
-
|
|
24199
|
-
|
|
24200
|
-
if (modelRequirements.knowledgeSources && modelRequirements.knowledgeSources.length > 0) {
|
|
24201
|
-
const client = await this.options.llmTools.getClient();
|
|
24202
|
-
vectorStoreId = await OpenAiAgentExecutionTools.createVectorStore(client, this.title, modelRequirements.knowledgeSources);
|
|
25363
|
+
console.info('[🤰]', 'Preparing OpenAI AgentKit agent', {
|
|
25364
|
+
agent: this.title,
|
|
25365
|
+
});
|
|
24203
25366
|
}
|
|
24204
|
-
if (vectorStoreId) {
|
|
24205
|
-
|
|
24206
|
-
|
|
24207
|
-
|
|
25367
|
+
if (!vectorStoreId && ((_b = sanitizedRequirements.knowledgeSources) === null || _b === void 0 ? void 0 : _b.length)) {
|
|
25368
|
+
emitAssistantPreparationProgress({
|
|
25369
|
+
onProgress,
|
|
25370
|
+
prompt,
|
|
25371
|
+
modelName: this.modelName,
|
|
25372
|
+
phase: 'Creating knowledge base',
|
|
24208
25373
|
});
|
|
24209
25374
|
}
|
|
24210
|
-
|
|
24211
|
-
|
|
25375
|
+
emitAssistantPreparationProgress({
|
|
25376
|
+
onProgress,
|
|
25377
|
+
prompt,
|
|
25378
|
+
modelName: this.modelName,
|
|
25379
|
+
phase: 'Preparing AgentKit agent',
|
|
25380
|
+
});
|
|
25381
|
+
preparedAgentKit = await this.options.llmTools.prepareAgentKitAgent({
|
|
25382
|
+
name: this.title,
|
|
25383
|
+
instructions: sanitizedRequirements.systemMessage || '',
|
|
25384
|
+
knowledgeSources: sanitizedRequirements.knowledgeSources,
|
|
25385
|
+
tools: sanitizedRequirements.tools ? [...sanitizedRequirements.tools] : undefined,
|
|
24212
25386
|
vectorStoreId,
|
|
24213
25387
|
});
|
|
24214
25388
|
}
|
|
24215
|
-
|
|
24216
|
-
|
|
24217
|
-
|
|
24218
|
-
|
|
24219
|
-
|
|
24220
|
-
|
|
24221
|
-
|
|
24222
|
-
|
|
24223
|
-
|
|
24224
|
-
|
|
24225
|
-
|
|
24226
|
-
|
|
24227
|
-
|
|
24228
|
-
|
|
25389
|
+
if (preparedAgentKit.vectorStoreId) {
|
|
25390
|
+
AgentLlmExecutionTools.vectorStoreCache.set(this.title, {
|
|
25391
|
+
vectorStoreId: preparedAgentKit.vectorStoreId,
|
|
25392
|
+
requirementsHash: vectorStoreHash,
|
|
25393
|
+
});
|
|
25394
|
+
}
|
|
25395
|
+
AgentLlmExecutionTools.agentKitAgentCache.set(this.title, {
|
|
25396
|
+
agent: preparedAgentKit.agent,
|
|
25397
|
+
requirementsHash,
|
|
25398
|
+
vectorStoreId: preparedAgentKit.vectorStoreId,
|
|
25399
|
+
});
|
|
25400
|
+
const responseFormatOutputType = mapResponseFormatToAgentOutputType(promptWithAgentModelRequirements.modelRequirements.responseFormat);
|
|
25401
|
+
underlyingLlmResult = await this.options.llmTools.callChatModelStreamWithPreparedAgent({
|
|
25402
|
+
openAiAgentKitAgent: preparedAgentKit.agent,
|
|
25403
|
+
prompt: promptWithAgentModelRequirements,
|
|
25404
|
+
onProgress,
|
|
25405
|
+
responseFormatOutputType,
|
|
25406
|
+
});
|
|
24229
25407
|
}
|
|
24230
25408
|
else if (OpenAiAssistantExecutionTools.isOpenAiAssistantExecutionTools(this.options.llmTools)) {
|
|
24231
25409
|
// ... deprecated path ...
|
|
24232
|
-
const requirementsHash = SHA256(JSON.stringify(
|
|
25410
|
+
const requirementsHash = SHA256(JSON.stringify(sanitizedRequirements)).toString();
|
|
24233
25411
|
const cached = AgentLlmExecutionTools.assistantCache.get(this.title);
|
|
24234
25412
|
let assistant;
|
|
24235
|
-
if (
|
|
25413
|
+
if (this.options.assistantPreparationMode === 'external') {
|
|
25414
|
+
assistant = this.options.llmTools;
|
|
25415
|
+
if (this.options.isVerbose) {
|
|
25416
|
+
console.info('[🤰]', 'Using externally managed OpenAI Assistant', {
|
|
25417
|
+
agent: this.title,
|
|
25418
|
+
assistantId: assistant.assistantId,
|
|
25419
|
+
});
|
|
25420
|
+
}
|
|
25421
|
+
AgentLlmExecutionTools.assistantCache.set(this.title, {
|
|
25422
|
+
assistantId: assistant.assistantId,
|
|
25423
|
+
requirementsHash,
|
|
25424
|
+
});
|
|
25425
|
+
}
|
|
25426
|
+
else if (cached) {
|
|
24236
25427
|
if (cached.requirementsHash === requirementsHash) {
|
|
24237
25428
|
if (this.options.isVerbose) {
|
|
24238
25429
|
console.info('[🤰]', 'Using cached OpenAI Assistant', {
|
|
@@ -24258,9 +25449,9 @@ class AgentLlmExecutionTools {
|
|
|
24258
25449
|
assistant = await this.options.llmTools.updateAssistant({
|
|
24259
25450
|
assistantId: cached.assistantId,
|
|
24260
25451
|
name: this.title,
|
|
24261
|
-
instructions:
|
|
24262
|
-
knowledgeSources:
|
|
24263
|
-
tools:
|
|
25452
|
+
instructions: sanitizedRequirements.systemMessage,
|
|
25453
|
+
knowledgeSources: sanitizedRequirements.knowledgeSources,
|
|
25454
|
+
tools: sanitizedRequirements.tools ? [...sanitizedRequirements.tools] : undefined,
|
|
24264
25455
|
});
|
|
24265
25456
|
AgentLlmExecutionTools.assistantCache.set(this.title, {
|
|
24266
25457
|
assistantId: assistant.assistantId,
|
|
@@ -24283,9 +25474,9 @@ class AgentLlmExecutionTools {
|
|
|
24283
25474
|
});
|
|
24284
25475
|
assistant = await this.options.llmTools.createNewAssistant({
|
|
24285
25476
|
name: this.title,
|
|
24286
|
-
instructions:
|
|
24287
|
-
knowledgeSources:
|
|
24288
|
-
tools:
|
|
25477
|
+
instructions: sanitizedRequirements.systemMessage,
|
|
25478
|
+
knowledgeSources: sanitizedRequirements.knowledgeSources,
|
|
25479
|
+
tools: sanitizedRequirements.tools ? [...sanitizedRequirements.tools] : undefined,
|
|
24289
25480
|
/*
|
|
24290
25481
|
!!!
|
|
24291
25482
|
metadata: {
|
|
@@ -24327,18 +25518,28 @@ class AgentLlmExecutionTools {
|
|
|
24327
25518
|
}
|
|
24328
25519
|
}
|
|
24329
25520
|
let content = underlyingLlmResult.content;
|
|
24330
|
-
|
|
24331
|
-
|
|
24332
|
-
|
|
24333
|
-
|
|
25521
|
+
if (typeof content === 'string') {
|
|
25522
|
+
// Note: Cleanup the AI artifacts from the content
|
|
25523
|
+
content = humanizeAiText(content);
|
|
25524
|
+
// Note: Make sure the content is Promptbook-like
|
|
25525
|
+
content = promptbookifyAiText(content);
|
|
25526
|
+
}
|
|
25527
|
+
else {
|
|
25528
|
+
// TODO: Maybe deep `humanizeAiText` + `promptbookifyAiText` inside of the object
|
|
25529
|
+
content = JSON.stringify(content);
|
|
25530
|
+
}
|
|
24334
25531
|
const agentResult = {
|
|
24335
25532
|
...underlyingLlmResult,
|
|
24336
|
-
content,
|
|
25533
|
+
content: content,
|
|
24337
25534
|
modelName: this.modelName,
|
|
24338
25535
|
};
|
|
24339
25536
|
return agentResult;
|
|
24340
25537
|
}
|
|
24341
25538
|
}
|
|
25539
|
+
/**
|
|
25540
|
+
* Cached AgentKit agents to avoid rebuilding identical instances.
|
|
25541
|
+
*/
|
|
25542
|
+
AgentLlmExecutionTools.agentKitAgentCache = new Map();
|
|
24342
25543
|
/**
|
|
24343
25544
|
* Cache of OpenAI assistants to avoid creating duplicates
|
|
24344
25545
|
*/
|
|
@@ -24419,8 +25620,8 @@ function buildTeacherSummary(commitments, used) {
|
|
|
24419
25620
|
* - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
|
|
24420
25621
|
* - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
|
|
24421
25622
|
* - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
|
|
24422
|
-
* - `OpenAiAgentExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with agent capabilities (using Responses API), recommended for usage in `Agent` or `AgentLlmExecutionTools`
|
|
24423
25623
|
* - `OpenAiAssistantExecutionTools` - (Deprecated) which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities
|
|
25624
|
+
* - `OpenAiAgentKitExecutionTools` - which is a specific implementation of `LlmExecutionTools` backed by OpenAI AgentKit
|
|
24424
25625
|
* - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
|
|
24425
25626
|
*
|
|
24426
25627
|
* @public exported from `@promptbook/core`
|
|
@@ -24451,6 +25652,7 @@ class Agent extends AgentLlmExecutionTools {
|
|
|
24451
25652
|
super({
|
|
24452
25653
|
isVerbose: options.isVerbose,
|
|
24453
25654
|
llmTools: getSingleLlmExecutionTools(options.executionTools.llm),
|
|
25655
|
+
assistantPreparationMode: options.assistantPreparationMode,
|
|
24454
25656
|
agentSource: agentSource.value, // <- TODO: [🐱🚀] Allow to pass BehaviorSubject<string_book> OR refresh llmExecutionTools.callChat on agentSource change
|
|
24455
25657
|
});
|
|
24456
25658
|
_Agent_instances.add(this);
|
|
@@ -24517,7 +25719,6 @@ class Agent extends AgentLlmExecutionTools {
|
|
|
24517
25719
|
* Note: This method also implements the learning mechanism
|
|
24518
25720
|
*/
|
|
24519
25721
|
async callChatModelStream(prompt, onProgress) {
|
|
24520
|
-
var _a;
|
|
24521
25722
|
// [1] Check if the user is asking the same thing as in the samples
|
|
24522
25723
|
const modelRequirements = await this.getModelRequirements();
|
|
24523
25724
|
if (modelRequirements.samples) {
|
|
@@ -24565,7 +25766,7 @@ class Agent extends AgentLlmExecutionTools {
|
|
|
24565
25766
|
if (result.rawResponse && 'sample' in result.rawResponse) {
|
|
24566
25767
|
return result;
|
|
24567
25768
|
}
|
|
24568
|
-
if (
|
|
25769
|
+
if (modelRequirements.isClosed) {
|
|
24569
25770
|
return result;
|
|
24570
25771
|
}
|
|
24571
25772
|
// Note: [0] Notify start of self-learning
|
|
@@ -24850,6 +26051,97 @@ function book(strings, ...values) {
|
|
|
24850
26051
|
* Note: [💞] Ignore a discrepancy between file name and entity name
|
|
24851
26052
|
*/
|
|
24852
26053
|
|
|
26054
|
+
/**
|
|
26055
|
+
* Builds a stable identity string for tool calls across partial updates.
|
|
26056
|
+
*
|
|
26057
|
+
* @param toolCall - Tool call entry to identify.
|
|
26058
|
+
* @returns Stable identity string for deduplication.
|
|
26059
|
+
*
|
|
26060
|
+
* @private function of <Chat/>
|
|
26061
|
+
*/
|
|
26062
|
+
function getToolCallIdentity(toolCall) {
|
|
26063
|
+
const rawToolCall = toolCall.rawToolCall;
|
|
26064
|
+
const rawId = (rawToolCall === null || rawToolCall === void 0 ? void 0 : rawToolCall.id) || (rawToolCall === null || rawToolCall === void 0 ? void 0 : rawToolCall.callId) || (rawToolCall === null || rawToolCall === void 0 ? void 0 : rawToolCall.call_id);
|
|
26065
|
+
if (rawId) {
|
|
26066
|
+
return `id:${rawId}`;
|
|
26067
|
+
}
|
|
26068
|
+
if (toolCall.createdAt) {
|
|
26069
|
+
return `time:${toolCall.createdAt}:${toolCall.name}`;
|
|
26070
|
+
}
|
|
26071
|
+
const argsKey = (() => {
|
|
26072
|
+
if (typeof toolCall.arguments === 'string') {
|
|
26073
|
+
return toolCall.arguments;
|
|
26074
|
+
}
|
|
26075
|
+
if (!toolCall.arguments) {
|
|
26076
|
+
return '';
|
|
26077
|
+
}
|
|
26078
|
+
try {
|
|
26079
|
+
return JSON.stringify(toolCall.arguments);
|
|
26080
|
+
}
|
|
26081
|
+
catch (_a) {
|
|
26082
|
+
return '';
|
|
26083
|
+
}
|
|
26084
|
+
})();
|
|
26085
|
+
return `fallback:${toolCall.name}:${argsKey}`;
|
|
26086
|
+
}
|
|
26087
|
+
|
|
26088
|
+
/**
 * Resolve a remote META IMAGE value into an absolute URL when possible.
 *
 * @param imageUrl - Raw META IMAGE value (absolute URL, data/blob URL, or path relative to the agent).
 * @param agentUrl - Base URL of the remote agent, used to resolve relative paths.
 * @returns Absolute URL when resolvable, the raw value when not, or `undefined` for empty input.
 */
function resolveRemoteImageUrl(imageUrl, agentUrl) {
    if (!imageUrl) {
        return undefined;
    }
    // Already-absolute values (including inline data/blob URLs) pass through untouched.
    const passthroughPrefixes = ['http://', 'https://', 'data:', 'blob:'];
    if (passthroughPrefixes.some((prefix) => imageUrl.startsWith(prefix))) {
        return imageUrl;
    }
    // Relative value: resolve against the agent URL; on an invalid base keep the raw value.
    try {
        return new URL(imageUrl, agentUrl).href;
    } catch (_error) {
        return imageUrl;
    }
}
|
|
26108
|
+
/**
 * Format a META commitment line when the value is provided.
 *
 * @param label - META label (e.g. `FULLNAME`, `IMAGE`).
 * @param value - Value for the label; falsy values produce no line.
 * @returns `META <label> <value>` line, or `null` when the value is absent.
 */
function formatMetaLine(label, value) {
    return value ? `META ${label} ${value}` : null;
}
|
|
26117
|
+
/**
 * Build a minimal agent source snapshot for remote agents.
 *
 * @param profile - Remote agent profile; provides `agentName` and optional `personaDescription`.
 * @param meta - Optional META values (fullname, image, description, color, font, link).
 * @returns Agent source produced by the `book` template tag.
 */
function buildRemoteAgentSource(profile, meta) {
    // One `META <LABEL> <value>` line per provided value; absent values yield null and are filtered out.
    const metaLines = [
        formatMetaLine('FULLNAME', meta === null || meta === void 0 ? void 0 : meta.fullname),
        formatMetaLine('IMAGE', meta === null || meta === void 0 ? void 0 : meta.image),
        formatMetaLine('DESCRIPTION', meta === null || meta === void 0 ? void 0 : meta.description),
        formatMetaLine('COLOR', meta === null || meta === void 0 ? void 0 : meta.color),
        formatMetaLine('FONT', meta === null || meta === void 0 ? void 0 : meta.font),
        formatMetaLine('LINK', meta === null || meta === void 0 ? void 0 : meta.link),
    ]
        .filter((line) => Boolean(line))
        .join('\n');
    // Only include a PERSONA block when a persona description exists.
    // NOTE(review): `spaceTrim$2` presumably normalizes the template's leading indentation - confirm before reformatting.
    const personaBlock = profile.personaDescription
        ? spaceTrim$2((block) => `
            PERSONA
            ${block(profile.personaDescription || '')}
        `)
        : '';
    return book `
        ${profile.agentName}

        ${metaLines}

        ${personaBlock}
    `;
}
|
|
24853
26145
|
/**
|
|
24854
26146
|
* Represents one AI Agent
|
|
24855
26147
|
*
|
|
@@ -24857,13 +26149,15 @@ function book(strings, ...values) {
|
|
|
24857
26149
|
* - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
|
|
24858
26150
|
* - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
|
|
24859
26151
|
* - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
|
|
24860
|
-
* - `OpenAiAssistantExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities
|
|
26152
|
+
* - `OpenAiAssistantExecutionTools` - (Deprecated) which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities
|
|
26153
|
+
* - `OpenAiAgentKitExecutionTools` - which is a specific implementation of `LlmExecutionTools` backed by OpenAI AgentKit
|
|
24861
26154
|
* - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
|
|
24862
26155
|
*
|
|
24863
26156
|
* @public exported from `@promptbook/core`
|
|
24864
26157
|
*/
|
|
24865
26158
|
class RemoteAgent extends Agent {
|
|
24866
26159
|
static async connect(options) {
|
|
26160
|
+
var _a, _b, _c;
|
|
24867
26161
|
const agentProfileUrl = `${options.agentUrl}/api/profile`;
|
|
24868
26162
|
const profileResponse = await fetch(agentProfileUrl);
|
|
24869
26163
|
// <- TODO: [🐱🚀] What about closed-source agents?
|
|
@@ -24883,14 +26177,14 @@ class RemoteAgent extends Agent {
|
|
|
24883
26177
|
|
|
24884
26178
|
`));
|
|
24885
26179
|
}
|
|
24886
|
-
const profile = await profileResponse.json();
|
|
26180
|
+
const profile = (await profileResponse.json());
|
|
26181
|
+
const resolvedMeta = {
|
|
26182
|
+
...(profile.meta || {}),
|
|
26183
|
+
image: resolveRemoteImageUrl((_a = profile.meta) === null || _a === void 0 ? void 0 : _a.image, options.agentUrl),
|
|
26184
|
+
};
|
|
24887
26185
|
// Note: We are creating dummy agent source because we don't have the source from the remote agent
|
|
24888
26186
|
// But we populate the metadata from the profile
|
|
24889
|
-
const agentSource = new BehaviorSubject(
|
|
24890
|
-
${profile.agentName}
|
|
24891
|
-
|
|
24892
|
-
${profile.personaDescription}
|
|
24893
|
-
`);
|
|
26187
|
+
const agentSource = new BehaviorSubject(buildRemoteAgentSource(profile, resolvedMeta));
|
|
24894
26188
|
// <- TODO: [🐱🚀] createBookFromProfile
|
|
24895
26189
|
// <- TODO: [🐱🚀] Support updating and self-updating
|
|
24896
26190
|
const remoteAgent = new RemoteAgent({
|
|
@@ -24913,10 +26207,10 @@ class RemoteAgent extends Agent {
|
|
|
24913
26207
|
});
|
|
24914
26208
|
remoteAgent._remoteAgentName = profile.agentName;
|
|
24915
26209
|
remoteAgent._remoteAgentHash = profile.agentHash;
|
|
24916
|
-
remoteAgent.personaDescription = profile.personaDescription;
|
|
24917
|
-
remoteAgent.initialMessage = profile.initialMessage;
|
|
24918
|
-
remoteAgent.links = profile.links;
|
|
24919
|
-
remoteAgent.meta =
|
|
26210
|
+
remoteAgent.personaDescription = (_b = profile.personaDescription) !== null && _b !== void 0 ? _b : null;
|
|
26211
|
+
remoteAgent.initialMessage = (_c = profile.initialMessage) !== null && _c !== void 0 ? _c : null;
|
|
26212
|
+
remoteAgent.links = profile.links || [];
|
|
26213
|
+
remoteAgent.meta = resolvedMeta;
|
|
24920
26214
|
remoteAgent.capabilities = profile.capabilities || [];
|
|
24921
26215
|
remoteAgent.samples = profile.samples || [];
|
|
24922
26216
|
remoteAgent.toolTitles = profile.toolTitles || {};
|
|
@@ -25020,26 +26314,7 @@ class RemoteAgent extends Agent {
|
|
|
25020
26314
|
};
|
|
25021
26315
|
};
|
|
25022
26316
|
const getToolCallKey = (toolCall) => {
|
|
25023
|
-
|
|
25024
|
-
const rawId = (_a = toolCall.rawToolCall) === null || _a === void 0 ? void 0 : _a.id;
|
|
25025
|
-
if (rawId) {
|
|
25026
|
-
return `id:${rawId}`;
|
|
25027
|
-
}
|
|
25028
|
-
const argsKey = (() => {
|
|
25029
|
-
if (typeof toolCall.arguments === 'string') {
|
|
25030
|
-
return toolCall.arguments;
|
|
25031
|
-
}
|
|
25032
|
-
if (!toolCall.arguments) {
|
|
25033
|
-
return '';
|
|
25034
|
-
}
|
|
25035
|
-
try {
|
|
25036
|
-
return JSON.stringify(toolCall.arguments);
|
|
25037
|
-
}
|
|
25038
|
-
catch (_a) {
|
|
25039
|
-
return '';
|
|
25040
|
-
}
|
|
25041
|
-
})();
|
|
25042
|
-
return `${toolCall.name}:${toolCall.createdAt || ''}:${argsKey}`;
|
|
26317
|
+
return getToolCallIdentity(toolCall);
|
|
25043
26318
|
};
|
|
25044
26319
|
const mergeToolCall = (existing, incoming) => {
|
|
25045
26320
|
const incomingResult = incoming.result;
|
|
@@ -25553,6 +26828,7 @@ const _OpenAiAssistantMetadataRegistration = $llmToolsMetadataRegister.register(
|
|
|
25553
26828
|
apiKey: 'sk-',
|
|
25554
26829
|
assistantId: 'asst_',
|
|
25555
26830
|
maxRequestsPerMinute: DEFAULT_MAX_REQUESTS_PER_MINUTE,
|
|
26831
|
+
isCreatingNewAssistantsAllowed: false,
|
|
25556
26832
|
},
|
|
25557
26833
|
};
|
|
25558
26834
|
},
|
|
@@ -26878,5 +28154,5 @@ function $generateBookBoilerplate(options) {
|
|
|
26878
28154
|
* TODO: [🤶] Maybe export through `@promptbook/utils` or `@promptbook/random` package
|
|
26879
28155
|
*/
|
|
26880
28156
|
|
|
26881
|
-
export { $bookTranspilersRegister, $generateBookBoilerplate, $llmToolsMetadataRegister, $llmToolsRegister, $scrapersMetadataRegister, $scrapersRegister, ADMIN_EMAIL, ADMIN_GITHUB_NAME, API_REQUEST_TIMEOUT, AbstractFormatError, Agent, AgentCollectionInSupabase, AgentLlmExecutionTools, AuthenticationError, BIG_DATASET_TRESHOLD, BOOK_LANGUAGE_VERSION, BlackholeStorage, BoilerplateError, BoilerplateFormfactorDefinition, CLAIM, CLI_APP_ID, COLOR_CONSTANTS, CORE_AGENTS_SERVER, CORE_AGENTS_SERVER_WELL_KNOWN_AGENT_NAMES, CallbackInterfaceTools, ChatbotFormfactorDefinition, CollectionError, CompletionFormfactorDefinition, CsvFormatError, CsvFormatParser, DEFAULT_AGENTS_DIRNAME, DEFAULT_BOOK, DEFAULT_BOOKS_DIRNAME, DEFAULT_BOOK_OUTPUT_PARAMETER_NAME, DEFAULT_BOOK_TITLE, DEFAULT_CSV_SETTINGS, DEFAULT_DOWNLOAD_CACHE_DIRNAME, DEFAULT_EXECUTION_CACHE_DIRNAME, DEFAULT_GET_PIPELINE_COLLECTION_FUNCTION_NAME, DEFAULT_INTERMEDIATE_FILES_STRATEGY, DEFAULT_IS_AUTO_INSTALLED, DEFAULT_IS_VERBOSE, DEFAULT_MAX_CONCURRENT_UPLOADS, DEFAULT_MAX_EXECUTION_ATTEMPTS, DEFAULT_MAX_FILE_SIZE, DEFAULT_MAX_KNOWLEDGE_SOURCES_SCRAPING_DEPTH, DEFAULT_MAX_KNOWLEDGE_SOURCES_SCRAPING_TOTAL, DEFAULT_MAX_PARALLEL_COUNT, DEFAULT_MAX_RECURSION, DEFAULT_MAX_REQUESTS_PER_MINUTE, DEFAULT_PIPELINE_COLLECTION_BASE_FILENAME, DEFAULT_PROMPT_TASK_TITLE, DEFAULT_REMOTE_SERVER_URL, DEFAULT_SCRAPE_CACHE_DIRNAME, DEFAULT_TASK_SIMULATED_DURATION_MS, DEFAULT_TASK_TITLE, DatabaseError, EXPECTATION_UNITS, EnvironmentMismatchError, ExecutionReportStringOptionsDefaults, ExpectError, FAILED_VALUE_PLACEHOLDER, FORMFACTOR_DEFINITIONS, FormattedBookInMarkdownTranspiler, GENERIC_PIPELINE_INTERFACE, GeneratorFormfactorDefinition, GenericFormfactorDefinition, HTTP_STATUS_CODES, ImageGeneratorFormfactorDefinition, KnowledgeScrapeError, LIMITS, LimitReachedError, MANDATORY_CSV_SETTINGS, MAX_FILENAME_LENGTH, MODEL_ORDERS, MODEL_TRUST_LEVELS, MODEL_VARIANTS, MatcherFormfactorDefinition, MemoryStorage, MissingToolsError, 
MultipleLlmExecutionTools, NAME, NETWORK_LIMITS, NonTaskSectionTypes, NotAllowed, NotFoundError, NotYetImplementedCommitmentDefinition, NotYetImplementedError, ORDER_OF_PIPELINE_JSON, OpenAiSdkTranspiler, PADDING_LINES, PENDING_VALUE_PLACEHOLDER, PLAYGROUND_APP_ID, PROMPTBOOK_CHAT_COLOR, PROMPTBOOK_COLOR, PROMPTBOOK_ENGINE_VERSION, PROMPTBOOK_ERRORS, PROMPTBOOK_LEGAL_ENTITY, PROMPTBOOK_LOGO_URL, PROMPTBOOK_SYNTAX_COLORS, PUBLIC_AGENTS_SERVERS, ParseError, PipelineExecutionError, PipelineLogicError, PipelineUrlError, PrefixStorage, PromptbookFetchError, RESERVED_PARAMETER_NAMES, RemoteAgent, SET_IS_VERBOSE, SectionTypes, SheetsFormfactorDefinition, TIME_INTERVALS, TaskTypes, TextFormatParser, TranslatorFormfactorDefinition, UNCERTAIN_USAGE, UNCERTAIN_ZERO_VALUE, USER_CHAT_COLOR, UnexpectedError, WrappedError, ZERO_USAGE, ZERO_VALUE, _AgentMetadata, _AgentRegistration, _AnthropicClaudeMetadataRegistration, _AzureOpenAiMetadataRegistration, _BoilerplateScraperMetadataRegistration, _DeepseekMetadataRegistration, _DocumentScraperMetadataRegistration, _GoogleMetadataRegistration, _LegacyDocumentScraperMetadataRegistration, _MarkdownScraperMetadataRegistration, _MarkitdownScraperMetadataRegistration, _OllamaMetadataRegistration, _OpenAiAssistantMetadataRegistration, _OpenAiCompatibleMetadataRegistration, _OpenAiMetadataRegistration, _PdfScraperMetadataRegistration, _WebsiteScraperMetadataRegistration, aboutPromptbookInformation, addUsage, book, cacheLlmTools, compilePipeline, computeAgentHash, computeCosineSimilarity, countUsage, createAgentLlmExecutionTools, createAgentModelRequirements,
|
|
28157
|
+
export { $bookTranspilersRegister, $generateBookBoilerplate, $llmToolsMetadataRegister, $llmToolsRegister, $scrapersMetadataRegister, $scrapersRegister, ADMIN_EMAIL, ADMIN_GITHUB_NAME, API_REQUEST_TIMEOUT, AbstractFormatError, Agent, AgentCollectionInSupabase, AgentLlmExecutionTools, AuthenticationError, BIG_DATASET_TRESHOLD, BOOK_LANGUAGE_VERSION, BlackholeStorage, BoilerplateError, BoilerplateFormfactorDefinition, CLAIM, CLI_APP_ID, COLOR_CONSTANTS, CORE_AGENTS_SERVER, CORE_AGENTS_SERVER_WELL_KNOWN_AGENT_NAMES, CallbackInterfaceTools, ChatbotFormfactorDefinition, CollectionError, CompletionFormfactorDefinition, CsvFormatError, CsvFormatParser, DEFAULT_AGENTS_DIRNAME, DEFAULT_BOOK, DEFAULT_BOOKS_DIRNAME, DEFAULT_BOOK_OUTPUT_PARAMETER_NAME, DEFAULT_BOOK_TITLE, DEFAULT_CSV_SETTINGS, DEFAULT_DOWNLOAD_CACHE_DIRNAME, DEFAULT_EXECUTION_CACHE_DIRNAME, DEFAULT_GET_PIPELINE_COLLECTION_FUNCTION_NAME, DEFAULT_INTERMEDIATE_FILES_STRATEGY, DEFAULT_IS_AUTO_INSTALLED, DEFAULT_IS_VERBOSE, DEFAULT_MAX_CONCURRENT_UPLOADS, DEFAULT_MAX_EXECUTION_ATTEMPTS, DEFAULT_MAX_FILE_SIZE, DEFAULT_MAX_KNOWLEDGE_SOURCES_SCRAPING_DEPTH, DEFAULT_MAX_KNOWLEDGE_SOURCES_SCRAPING_TOTAL, DEFAULT_MAX_PARALLEL_COUNT, DEFAULT_MAX_RECURSION, DEFAULT_MAX_REQUESTS_PER_MINUTE, DEFAULT_PIPELINE_COLLECTION_BASE_FILENAME, DEFAULT_PROMPT_TASK_TITLE, DEFAULT_REMOTE_SERVER_URL, DEFAULT_SCRAPE_CACHE_DIRNAME, DEFAULT_TASK_SIMULATED_DURATION_MS, DEFAULT_TASK_TITLE, DatabaseError, EXPECTATION_UNITS, EnvironmentMismatchError, ExecutionReportStringOptionsDefaults, ExpectError, FAILED_VALUE_PLACEHOLDER, FORMFACTOR_DEFINITIONS, FormattedBookInMarkdownTranspiler, GENERIC_PIPELINE_INTERFACE, GeneratorFormfactorDefinition, GenericFormfactorDefinition, HTTP_STATUS_CODES, ImageGeneratorFormfactorDefinition, KnowledgeScrapeError, LIMITS, LimitReachedError, MANDATORY_CSV_SETTINGS, MAX_FILENAME_LENGTH, MODEL_ORDERS, MODEL_TRUST_LEVELS, MODEL_VARIANTS, MatcherFormfactorDefinition, MemoryStorage, MissingToolsError, 
MultipleLlmExecutionTools, NAME, NETWORK_LIMITS, NonTaskSectionTypes, NotAllowed, NotFoundError, NotYetImplementedCommitmentDefinition, NotYetImplementedError, ORDER_OF_PIPELINE_JSON, OpenAiSdkTranspiler, PADDING_LINES, PENDING_VALUE_PLACEHOLDER, PLAYGROUND_APP_ID, PROMPTBOOK_CHAT_COLOR, PROMPTBOOK_COLOR, PROMPTBOOK_ENGINE_VERSION, PROMPTBOOK_ERRORS, PROMPTBOOK_LEGAL_ENTITY, PROMPTBOOK_LOGO_URL, PROMPTBOOK_SYNTAX_COLORS, PUBLIC_AGENTS_SERVERS, ParseError, PipelineExecutionError, PipelineLogicError, PipelineUrlError, PrefixStorage, PromptbookFetchError, RESERVED_PARAMETER_NAMES, RemoteAgent, SET_IS_VERBOSE, SectionTypes, SheetsFormfactorDefinition, TIME_INTERVALS, TaskTypes, TextFormatParser, TranslatorFormfactorDefinition, UNCERTAIN_USAGE, UNCERTAIN_ZERO_VALUE, USER_CHAT_COLOR, UnexpectedError, WrappedError, ZERO_USAGE, ZERO_VALUE, _AgentMetadata, _AgentRegistration, _AnthropicClaudeMetadataRegistration, _AzureOpenAiMetadataRegistration, _BoilerplateScraperMetadataRegistration, _DeepseekMetadataRegistration, _DocumentScraperMetadataRegistration, _GoogleMetadataRegistration, _LegacyDocumentScraperMetadataRegistration, _MarkdownScraperMetadataRegistration, _MarkitdownScraperMetadataRegistration, _OllamaMetadataRegistration, _OpenAiAssistantMetadataRegistration, _OpenAiCompatibleMetadataRegistration, _OpenAiMetadataRegistration, _PdfScraperMetadataRegistration, _WebsiteScraperMetadataRegistration, aboutPromptbookInformation, addUsage, book, cacheLlmTools, compilePipeline, computeAgentHash, computeCosineSimilarity, countUsage, createAgentLlmExecutionTools, createAgentModelRequirements, createBasicAgentModelRequirements, createDefaultAgentName, createEmptyAgentModelRequirements, createLlmToolsFromConfiguration, createPipelineCollectionFromJson, createPipelineCollectionFromPromise, createPipelineCollectionFromUrl, createPipelineExecutor, createPipelineSubcollection, embeddingVectorToString, executionReportJsonToString, extractParameterNamesFromTask, filterModels, 
generatePlaceholderAgentProfileImageUrl, getAllCommitmentDefinitions, getAllCommitmentTypes, getAllCommitmentsToolTitles, getCommitmentDefinition, getGroupedCommitmentDefinitions, getPipelineInterface, getSingleLlmExecutionTools, identificationToPromptbookToken, isCommitmentSupported, isPassingExpectations, isPipelineImplementingInterface, isPipelineInterfacesEqual, isPipelinePrepared, isValidBook, isValidPipelineString, joinLlmExecutionTools, limitTotalUsage, makeKnowledgeSourceHandler, migratePipeline, normalizeAgentName, padBook, parseAgentSource, parseParameters, parsePipeline, pipelineCollectionToJson, pipelineJsonToString, prepareKnowledgePieces, preparePersona, preparePipeline, prettifyPipelineString, promptbookFetch, promptbookTokenToIdentification, unpreparePipeline, usageToHuman, usageToWorktime, validateBook, validatePipeline, validatePipelineString };
|
|
26882
28158
|
//# sourceMappingURL=index.es.js.map
|