@promptbook/javascript 0.110.0-3 → 0.110.0-5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/esm/index.es.js +1 -1
- package/esm/typings/src/_packages/openai.index.d.ts +0 -4
- package/esm/typings/src/_packages/types.index.d.ts +0 -4
- package/esm/typings/src/book-components/Chat/Chat/ChatActionsBar.d.ts +4 -0
- package/esm/typings/src/book-components/Chat/Chat/ChatMessageItem.d.ts +1 -1
- package/esm/typings/src/book-components/Chat/Chat/ChatMessageList.d.ts +0 -2
- package/esm/typings/src/book-components/Chat/Chat/ChatProps.d.ts +0 -7
- package/esm/typings/src/book-components/Chat/SourceChip/SourceChip.d.ts +5 -1
- package/esm/typings/src/book-components/Chat/hooks/useChatActionsOverlap.d.ts +6 -3
- package/esm/typings/src/book-components/Chat/utils/collectTeamToolCallSummary.d.ts +69 -0
- package/esm/typings/src/book-components/Chat/utils/getToolCallChipletInfo.d.ts +13 -6
- package/esm/typings/src/book-components/Chat/utils/parseCitationsFromContent.d.ts +9 -0
- package/esm/typings/src/book-components/Chat/utils/toolCallParsing.d.ts +4 -0
- package/esm/typings/src/llm-providers/agent/Agent.d.ts +0 -7
- package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.d.ts +0 -1
- package/esm/typings/src/llm-providers/agent/AgentOptions.d.ts +9 -0
- package/esm/typings/src/llm-providers/agent/CreateAgentLlmExecutionToolsOptions.d.ts +9 -0
- package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionTools.d.ts +40 -1
- package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionToolsOptions.d.ts +30 -0
- package/esm/typings/src/utils/agents/resolveAgentAvatarImageUrl.d.ts +29 -0
- package/esm/typings/src/version.d.ts +1 -1
- package/package.json +2 -5
- package/umd/index.umd.js +1 -1
- package/esm/typings/src/llm-providers/openai/OpenAiAgentExecutionTools.d.ts +0 -43
- package/esm/typings/src/llm-providers/openai/createOpenAiAgentExecutionTools.d.ts +0 -11
package/esm/index.es.js
CHANGED

@@ -18,7 +18,7 @@ const BOOK_LANGUAGE_VERSION = '2.0.0';
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.110.0-3';
+const PROMPTBOOK_ENGINE_VERSION = '0.110.0-5';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
package/esm/typings/src/_packages/openai.index.d.ts
CHANGED

@@ -1,10 +1,8 @@
 import { BOOK_LANGUAGE_VERSION, PROMPTBOOK_ENGINE_VERSION } from '../version';
-import { createOpenAiAgentExecutionTools } from '../llm-providers/openai/createOpenAiAgentExecutionTools';
 import { createOpenAiAssistantExecutionTools } from '../llm-providers/openai/createOpenAiAssistantExecutionTools';
 import { createOpenAiCompatibleExecutionTools } from '../llm-providers/openai/createOpenAiCompatibleExecutionTools';
 import { createOpenAiExecutionTools } from '../llm-providers/openai/createOpenAiExecutionTools';
 import { OPENAI_MODELS } from '../llm-providers/openai/openai-models';
-import { OpenAiAgentExecutionTools } from '../llm-providers/openai/OpenAiAgentExecutionTools';
 import { OpenAiAssistantExecutionTools } from '../llm-providers/openai/OpenAiAssistantExecutionTools';
 import type { OpenAiAssistantExecutionToolsOptions } from '../llm-providers/openai/OpenAiAssistantExecutionToolsOptions';
 import { OpenAiCompatibleExecutionTools } from '../llm-providers/openai/OpenAiCompatibleExecutionTools';
@@ -17,12 +15,10 @@ import { _OpenAiRegistration } from '../llm-providers/openai/register-constructor';
 import { _OpenAiAssistantRegistration } from '../llm-providers/openai/register-constructor';
 import { _OpenAiCompatibleRegistration } from '../llm-providers/openai/register-constructor';
 export { BOOK_LANGUAGE_VERSION, PROMPTBOOK_ENGINE_VERSION };
-export { createOpenAiAgentExecutionTools };
 export { createOpenAiAssistantExecutionTools };
 export { createOpenAiCompatibleExecutionTools };
 export { createOpenAiExecutionTools };
 export { OPENAI_MODELS };
-export { OpenAiAgentExecutionTools };
 export { OpenAiAssistantExecutionTools };
 export type { OpenAiAssistantExecutionToolsOptions };
 export { OpenAiCompatibleExecutionTools };
package/esm/typings/src/_packages/types.index.d.ts
CHANGED

@@ -33,7 +33,6 @@ import type { SourceChipProps } from '../book-components/Chat/SourceChip/SourceChip';
 import type { ChatToolCall } from '../book-components/Chat/types/ChatMessage';
 import type { ChatMessage } from '../book-components/Chat/types/ChatMessage';
 import type { ChatParticipant } from '../book-components/Chat/types/ChatParticipant';
-import type { ToolCallChipletInfo } from '../book-components/Chat/utils/getToolCallChipletInfo';
 import type { ParsedCitation } from '../book-components/Chat/utils/parseCitationsFromContent';
 import type { MessageButton } from '../book-components/Chat/utils/parseMessageButtons';
 import type { TeamToolResult } from '../book-components/Chat/utils/toolCallParsing';
@@ -139,7 +138,6 @@ import type { AzureOpenAiExecutionToolsOptions } from '../llm-providers/azure-openai/AzureOpenAiExecutionToolsOptions';
 import type { DeepseekExecutionToolsOptions } from '../llm-providers/deepseek/DeepseekExecutionToolsOptions';
 import type { GoogleExecutionToolsOptions } from '../llm-providers/google/GoogleExecutionToolsOptions';
 import type { OllamaExecutionToolsOptions } from '../llm-providers/ollama/OllamaExecutionToolsOptions';
-import type { OpenAiAgentExecutionToolsOptions } from '../llm-providers/openai/OpenAiAgentExecutionTools';
 import type { OpenAiAssistantExecutionToolsOptions } from '../llm-providers/openai/OpenAiAssistantExecutionToolsOptions';
 import type { OpenAiCompatibleExecutionToolsOptions } from '../llm-providers/openai/OpenAiCompatibleExecutionToolsOptions';
 import type { OpenAiCompatibleExecutionToolsNonProxiedOptions } from '../llm-providers/openai/OpenAiCompatibleExecutionToolsOptions';
@@ -436,7 +434,6 @@ export type { SourceChipProps };
 export type { ChatToolCall };
 export type { ChatMessage };
 export type { ChatParticipant };
-export type { ToolCallChipletInfo };
 export type { ParsedCitation };
 export type { MessageButton };
 export type { TeamToolResult };
@@ -542,7 +539,6 @@ export type { AzureOpenAiExecutionToolsOptions };
 export type { DeepseekExecutionToolsOptions };
 export type { GoogleExecutionToolsOptions };
 export type { OllamaExecutionToolsOptions };
-export type { OpenAiAgentExecutionToolsOptions };
 export type { OpenAiAssistantExecutionToolsOptions };
 export type { OpenAiCompatibleExecutionToolsOptions };
 export type { OpenAiCompatibleExecutionToolsNonProxiedOptions };
package/esm/typings/src/book-components/Chat/Chat/ChatActionsBar.d.ts
CHANGED

@@ -21,6 +21,10 @@ export type ChatActionsBarProps = {
     saveFormats?: Array<string_chat_format_name>;
     isSaveButtonEnabled: boolean;
     shouldFadeActions: boolean;
+    /**
+     * Disables action interactions while scroll is active.
+     */
+    shouldDisableActions: boolean;
     onButtonClick: (handler?: (event: MouseEvent<HTMLButtonElement>) => void) => (event: MouseEvent<HTMLButtonElement>) => void;
     soundSystem?: ChatSoundSystem;
 };
package/esm/typings/src/book-components/Chat/Chat/ChatMessageItem.d.ts
CHANGED

@@ -9,7 +9,7 @@ import type { ChatProps } from './ChatProps';
  *
  * @private props for internal subcomponent
  */
-type ChatMessageItemProps = Pick<ChatProps, 'onMessage' | 'participants'
+type ChatMessageItemProps = Pick<ChatProps, 'onMessage' | 'participants'> & {
     message: ChatMessage;
     participant: ChatParticipant | undefined;
     isLastMessage: boolean;
package/esm/typings/src/book-components/Chat/Chat/ChatMessageList.d.ts
CHANGED

@@ -5,14 +5,12 @@ import type { ChatMessage } from '../types/ChatMessage';
 import type { ChatParticipant } from '../types/ChatParticipant';
 import type { ParsedCitation } from '../utils/parseCitationsFromContent';
 import type { ChatProps } from './ChatProps';
-import { Agent } from '../../../llm-providers/agent/Agent';
 /**
  * Props for the Chat message list container.
  *
  * @private component of `<Chat/>`
  */
 export type ChatMessageListProps = {
-    agent?: Agent;
     messages: ReadonlyArray<ChatMessage>;
     participants: ReadonlyArray<ChatParticipant>;
     expandedMessageId: id | null;
package/esm/typings/src/book-components/Chat/Chat/ChatProps.d.ts
CHANGED

@@ -6,7 +6,6 @@ import { string_color } from '../../../types/typeAliases';
 import type { string_chat_format_name } from '../save/_common/string_chat_format_name';
 import type { ChatMessage } from '../types/ChatMessage';
 import type { ChatParticipant } from '../types/ChatParticipant';
-import type { Agent } from '../../../llm-providers/agent/Agent';
 /**
  * Interface for sound system that can be passed to Chat component
  * This allows the chat to trigger sounds without tight coupling
@@ -26,12 +25,6 @@ export type ChatSoundSystem = {
  * @public exported from `@promptbook/components`
  */
 export type ChatProps = {
-    /**
-     * The agent that is used in the chat
-     *
-     * Note: This is not used directly but passed to subcomponents
-     */
-    readonly agent?: Agent;
     /**
      * Optional callback to create a new agent from the template.
      * If provided, renders the [Use this template] button.
package/esm/typings/src/book-components/Chat/SourceChip/SourceChip.d.ts
CHANGED

@@ -15,6 +15,10 @@ export type SourceChipProps = {
      * Additional CSS class name
      */
     className?: string;
+    /**
+     * Optional suffix text to display after the citation label.
+     */
+    suffix?: string;
 };
 /**
  * SourceChip component - displays a chip with source document information
@@ -32,4 +36,4 @@ export type SourceChipProps = {
  *
  * @private utility of `ChatMessageItem` component
  */
-export declare function SourceChip({ citation, onClick, className }: SourceChipProps): import("react/jsx-runtime").JSX.Element;
+export declare function SourceChip({ citation, onClick, className, suffix }: SourceChipProps): import("react/jsx-runtime").JSX.Element;
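For orientation, a minimal sketch of how the new `suffix` prop could be rendered; since `SourceChip` is `@private`, this mirrors internal wiring rather than a public API, and the `citation`, `openSource` and `referenceCount` values are assumptions for illustration.

```tsx
// Hypothetical usage inside ChatMessageItem (illustrative only):
// `suffix` appends extra text after the citation label, e.g. a reference count.
<SourceChip
    citation={citation}                  // a ParsedCitation for this source (assumed)
    onClick={() => openSource(citation)} // assumed click handler
    className="chat-source-chip"
    suffix={` ×${referenceCount}`}       // new in 0.110.0-5
/>
```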
package/esm/typings/src/book-components/Chat/hooks/useChatActionsOverlap.d.ts
CHANGED

@@ -39,10 +39,13 @@ export type ChatActionsOverlapResult = {
      */
     handleChatScroll: (event: UIEvent<HTMLDivElement>) => void;
     /**
-     *
-     * now there is just one state `shouldFadeActions` which is calculated based on the scroll position and overlap.
+     * Whether the actions toolbar is currently being scrolled.
      */
-
+    isActionsScrolling: boolean;
+    /**
+     * Whether the actions toolbar overlaps the first visible message.
+     */
+    isActionsOverlapping: boolean;
 };
 /**
  * Tracks action toolbar overlap while coordinating with chat auto-scroll.
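A minimal sketch of how the two new flags might be wired into the `ChatActionsBar` props added earlier in this diff; the hook arguments and the surrounding JSX are assumptions, since both APIs are internal to `<Chat/>`.

```tsx
// Hypothetical wiring inside <Chat/> (illustrative only):
// `isActionsScrolling` feeds the new `shouldDisableActions` prop so clicks are ignored
// mid-scroll, while `isActionsOverlapping` keeps driving the existing fade behaviour.
const { handleChatScroll, isActionsScrolling, isActionsOverlapping } =
    useChatActionsOverlap(/* hook options are not shown in this diff */);

return (
    <div onScroll={handleChatScroll}>
        <ChatActionsBar
            isSaveButtonEnabled={true}
            shouldFadeActions={isActionsOverlapping}
            shouldDisableActions={isActionsScrolling} // new in 0.110.0-5
            onButtonClick={(handler) => (event) => handler?.(event)}
        />
        {/* ...message list... */}
    </div>
);
```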
package/esm/typings/src/book-components/Chat/utils/collectTeamToolCallSummary.d.ts
ADDED

@@ -0,0 +1,69 @@
+import { type ToolCall } from '../../../types/ToolCall';
+import type { ParsedCitation } from './parseCitationsFromContent';
+/**
+ * Origin metadata for a tool call or citation executed by a teammate.
+ *
+ * @private utility of `<Chat/>`
+ */
+export type ToolCallOrigin = {
+    /**
+     * Human-readable label for the teammate.
+     */
+    label: string;
+    /**
+     * Optional teammate URL.
+     */
+    url?: string;
+    /**
+     * Optional tool name for the teammate.
+     */
+    toolName?: string;
+};
+/**
+ * Tool call data enriched with its teammate origin.
+ *
+ * @private utility of `<Chat/>`
+ */
+export type TransitiveToolCall = {
+    /**
+     * Tool call executed by the teammate.
+     */
+    toolCall: ToolCall;
+    /**
+     * Teammate origin metadata for the tool call.
+     */
+    origin: ToolCallOrigin;
+};
+/**
+ * Citation data enriched with its teammate origin.
+ *
+ * @private utility of `<Chat/>`
+ */
+export type TransitiveCitation = ParsedCitation & {
+    /**
+     * Teammate origin metadata for the citation.
+     */
+    origin: ToolCallOrigin;
+};
+/**
+ * Aggregated teammate tool calls and citations derived from TEAM tool results.
+ *
+ * @private utility of `<Chat/>`
+ */
+export type TeamToolCallSummary = {
+    /**
+     * Tool calls executed by teammates, flattened transitively.
+     */
+    toolCalls: TransitiveToolCall[];
+    /**
+     * Citations referenced by teammates, flattened transitively.
+     */
+    citations: TransitiveCitation[];
+};
+/**
+ * Collects tool calls and citations from TEAM tool call results, resolving nested teammate chains.
+ *
+ * @param toolCalls - Tool calls from the top-level agent message.
+ * @private utility of `<Chat/>`
+ */
+export declare function collectTeamToolCallSummary(toolCalls: ReadonlyArray<ToolCall> | undefined): TeamToolCallSummary;
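A short sketch of consuming the new helper; `message.toolCalls` and the logging are illustrative, while the function and result shape come straight from the declarations above.

```ts
import { collectTeamToolCallSummary } from './collectTeamToolCallSummary'; // relative path as in these typings

// Aggregate everything the teammates behind a TEAM tool call did, including
// nested teammate chains, from the tool calls attached to the top-level message:
const summary = collectTeamToolCallSummary(message.toolCalls);

for (const { toolCall, origin } of summary.toolCalls) {
    console.info(`Teammate "${origin.label}" executed a tool call`, toolCall, origin.url);
}
console.info(`${summary.citations.length} teammate citation(s) collected`);
```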
package/esm/typings/src/book-components/Chat/utils/getToolCallChipletInfo.d.ts
CHANGED

@@ -1,11 +1,9 @@
 import { type ToolCall } from '../../../types/ToolCall';
-import type { Agent } from '../../../llm-providers/agent/Agent';
 import type { AgentChipData } from '../AgentChip';
-/**
- * Utility to format tool call information for user-friendly display.
- */
 /**
  * Tool call chiplet information including agent data for team tools
+ *
+ * @private utility of `<Chat/>`
  */
 export type ToolCallChipletInfo = {
     /**
@@ -23,10 +21,19 @@ export type ToolCallChipletInfo = {
      */
     wrapInBrackets?: boolean;
 };
+/**
+ * Builds display text for a tool call chiplet.
+ *
+ * @param chipletInfo - Chiplet metadata for the tool call.
+ *
+ * @private utility of `<Chat/>`
+ */
+export declare function buildToolCallChipText(chipletInfo: ToolCallChipletInfo): string;
 /**
  * Technical to user-friendly tool names and emojis
  *
- * @private [🧠] Maybe public?
+ * @private utility of `<Chat/>` [🧠] Maybe public?
+ *
  */
 export declare const TOOL_TITLES: Record<string, {
     title: string;
@@ -38,4 +45,4 @@ export declare const TOOL_TITLES: Record<string, {
  *
  * @private [🧠] Maybe public?
  */
-export declare function getToolCallChipletInfo(toolCall: ToolCall
+export declare function getToolCallChipletInfo(toolCall: ToolCall): ToolCallChipletInfo;
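The old `getToolCallChipletInfo` signature (its second parameter is cut off in this diff) is narrowed to a single `ToolCall` argument, and the new `buildToolCallChipText` helper turns the resulting info into display text. A sketch, with `toolCall` assumed to be an existing `ToolCall` value:

```ts
import { buildToolCallChipText, getToolCallChipletInfo } from './getToolCallChipletInfo'; // relative path as in these typings

// Derive chiplet metadata from the tool call alone (no extra arguments anymore):
const chipletInfo = getToolCallChipletInfo(toolCall);

// Format it into the text shown on the chip:
const chipText = buildToolCallChipText(chipletInfo);
```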
package/esm/typings/src/book-components/Chat/utils/parseCitationsFromContent.d.ts
CHANGED

@@ -39,6 +39,15 @@ export declare function parseCitationsFromContent(content: string): ParsedCitation[];
  * @private utility for internal use
  */
 export declare function stripCitationsFromContent(content: string): string;
+/**
+ * Deduplicates citations by source while preserving the first-seen order.
+ *
+ * @param citations - Parsed citations to deduplicate.
+ * @returns Deduplicated citations in original order.
+ *
+ * @private utility for internal use
+ */
+export declare function dedupeCitationsBySource(citations: ReadonlyArray<ParsedCitation>): ParsedCitation[];
 /**
  * Extracts citations from a chat message if not already present
  *
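A sketch chaining the existing parser with the new dedupe helper; `messageContent` is assumed.

```ts
import { dedupeCitationsBySource, parseCitationsFromContent } from './parseCitationsFromContent'; // relative path as in these typings

// Parse citations out of a message, then collapse repeated sources while
// keeping the first occurrence of each source in its original order:
const citations = parseCitationsFromContent(messageContent);
const uniqueCitations = dedupeCitationsBySource(citations);
```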
package/esm/typings/src/book-components/Chat/utils/toolCallParsing.d.ts
CHANGED

@@ -25,6 +25,10 @@ export type TeamToolResult = {
     };
     request?: string;
     response?: string;
+    /**
+     * Tool calls executed by the teammate while answering.
+     */
+    toolCalls?: ReadonlyArray<ToolCall>;
     error?: string | null;
     conversation?: Array<{
         sender?: string;
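For illustration, a TEAM tool result carrying the new optional `toolCalls` field; the concrete values are invented, only the field names come from the `TeamToolResult` shape above (written as `Partial` because the type's required members are not all visible in this hunk).

```ts
import { type ToolCall } from '../../../types/ToolCall';
import type { TeamToolResult } from './toolCallParsing'; // relative path as in these typings

declare const teammateToolCalls: ReadonlyArray<ToolCall>; // placeholder for real tool calls

// A teammate's answer can now report the tool calls it made while answering,
// which is what `collectTeamToolCallSummary` aggregates further up the chain:
const teamResult: Partial<TeamToolResult> = {
    request: 'Summarize the latest report',
    response: 'The report shows…',
    toolCalls: teammateToolCalls, // new in 0.110.0-5
    error: null,
};
```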
package/esm/typings/src/llm-providers/agent/Agent.d.ts
CHANGED

@@ -14,7 +14,6 @@ import type { AgentOptions } from './AgentOptions';
  * - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
  * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
  * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
- * - `OpenAiAgentExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with agent capabilities (using Responses API), recommended for usage in `Agent` or `AgentLlmExecutionTools`
  * - `OpenAiAssistantExecutionTools` - (Deprecated) which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities
  * - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
  *
@@ -81,12 +80,6 @@ export declare class Agent extends AgentLlmExecutionTools implements LlmExecutionTools {
      * Human-readable titles for tool functions
      */
     toolTitles: Record<string, string>;
-    /**
-     * Externals prepared for the agent, like OpenAI assistant, etc.
-     */
-    preparedExternals: {
-        openaiAssistantId?: string;
-    };
     /**
      * Not used in Agent, always returns empty array
      */
package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.d.ts
CHANGED

@@ -16,7 +16,6 @@ import type { CreateAgentLlmExecutionToolsOptions } from './CreateAgentLlmExecutionToolsOptions';
  * - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
  * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
  * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
- * - `OpenAiAgentExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with agent capabilities (using Responses API), recommended for usage in `Agent` or `AgentLlmExecutionTools`
  * - `OpenAiAssistantExecutionTools` - (Deprecated) which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities
  * - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
  *
package/esm/typings/src/llm-providers/agent/AgentOptions.d.ts
CHANGED

@@ -13,6 +13,15 @@ export type AgentOptions = CommonToolsOptions & {
      * Here the agent has access to various LLM models, browser, scrapers, LibreOffice, tools, etc.
      */
     executionTools: ExecutionTools;
+    /**
+     * How to manage OpenAI assistant preparation when using OpenAiAssistantExecutionTools.
+     *
+     * Use `external` when an external cache manager already created the assistant and
+     * the agent should use it as-is.
+     *
+     * @default internal
+     */
+    assistantPreparationMode?: 'internal' | 'external';
     /**
      * The source of the agent
      */
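A sketch of opting into externally managed assistant preparation; only `assistantPreparationMode` comes from this diff, and the options object is written as `Partial` because the full option set is not visible here.

```ts
import type { AgentOptions } from './AgentOptions'; // relative path as in these typings

declare const executionTools: AgentOptions['executionTools']; // an existing ExecutionTools instance

// When an external cache manager has already created the OpenAI assistant,
// tell the agent to use it as-is instead of preparing one itself:
const agentOptions: Partial<AgentOptions> = {
    executionTools,
    assistantPreparationMode: 'external', // new in 0.110.0-5; defaults to 'internal'
};
```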
package/esm/typings/src/llm-providers/agent/CreateAgentLlmExecutionToolsOptions.d.ts
CHANGED

@@ -10,6 +10,15 @@ export type CreateAgentLlmExecutionToolsOptions = CommonToolsOptions & {
      * The underlying LLM execution tools to wrap
      */
     llmTools: LlmExecutionTools | OpenAiAssistantExecutionTools;
+    /**
+     * How to manage OpenAI assistant preparation when using OpenAiAssistantExecutionTools.
+     *
+     * Use `external` when an external cache manager already created the assistant and
+     * the agent should use it as-is.
+     *
+     * @default internal
+     */
+    assistantPreparationMode?: 'internal' | 'external';
     /**
      * The agent source string that defines the agent's behavior
      */
package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionTools.d.ts
CHANGED

@@ -18,7 +18,6 @@ import { OpenAiExecutionTools } from './OpenAiExecutionTools';
  * - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
  *
  * @public exported from `@promptbook/openai`
- * @deprecated Use `OpenAiAgentExecutionTools` instead which uses the new OpenAI Responses API
  */
 export declare class OpenAiAssistantExecutionTools extends OpenAiExecutionTools implements LlmExecutionTools {
     readonly assistantId: string_token;
@@ -43,6 +42,46 @@ export declare class OpenAiAssistantExecutionTools extends OpenAiExecutionTools
      * Get an existing assistant tool wrapper
      */
     getAssistant(assistantId: string_token): OpenAiAssistantExecutionTools;
+    /**
+     * Returns the per-knowledge-source download timeout in milliseconds.
+     */
+    private getKnowledgeSourceDownloadTimeoutMs;
+    /**
+     * Returns the max concurrency for knowledge source uploads.
+     */
+    private getKnowledgeSourceUploadMaxConcurrency;
+    /**
+     * Returns the polling interval in milliseconds for vector store uploads.
+     */
+    private getKnowledgeSourceUploadPollIntervalMs;
+    /**
+     * Returns the overall upload timeout in milliseconds for vector store uploads.
+     */
+    private getKnowledgeSourceUploadTimeoutMs;
+    /**
+     * Returns true if we should continue even if vector store ingestion stalls.
+     */
+    private shouldContinueOnVectorStoreStall;
+    /**
+     * Returns assistant-specific options with extended settings.
+     */
+    private get assistantOptions();
+    /**
+     * Downloads a knowledge source URL into a File for vector store upload.
+     */
+    private downloadKnowledgeSourceFile;
+    /**
+     * Logs vector store file batch diagnostics to help trace ingestion stalls or failures.
+     */
+    private logVectorStoreFileBatchDiagnostics;
+    /**
+     * Uploads knowledge source files to the vector store and polls until processing completes.
+     */
+    private uploadKnowledgeSourceFilesToVectorStore;
+    /**
+     * Creates a vector store and uploads knowledge sources, returning its ID.
+     */
+    private createVectorStoreWithKnowledgeSources;
     createNewAssistant(options: {
         /**
          * Name of the new assistant
package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionToolsOptions.d.ts
CHANGED

@@ -17,4 +17,34 @@ export type OpenAiAssistantExecutionToolsOptions = OpenAiCompatibleExecutionTool
      * Which assistant to use
      */
     readonly assistantId: string_token;
+    /**
+     * Per-knowledge-source download timeout in milliseconds when preparing assistants.
+     *
+     * @default 30000
+     */
+    readonly knowledgeSourceDownloadTimeoutMs?: number;
+    /**
+     * Max concurrency for uploading knowledge source files to the vector store.
+     *
+     * @default 5
+     */
+    readonly knowledgeSourceUploadMaxConcurrency?: number;
+    /**
+     * Poll interval in milliseconds when waiting for vector store file batch processing.
+     *
+     * @default 5000
+     */
+    readonly knowledgeSourceUploadPollIntervalMs?: number;
+    /**
+     * Overall timeout in milliseconds for vector store file batch processing.
+     *
+     * @default 900000
+     */
+    readonly knowledgeSourceUploadTimeoutMs?: number;
+    /**
+     * Whether we should continue even if vector store ingestion stalls.
+     *
+     * @default true
+     */
+    readonly shouldContinueOnVectorStoreStall?: boolean;
 };
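A sketch of the new knowledge-source ingestion knobs; the values are illustrative (defaults are noted in comments), and the remaining required fields of the options type (API key, `assistantId`, …) are omitted via `Partial`.

```ts
import type { OpenAiAssistantExecutionToolsOptions } from './OpenAiAssistantExecutionToolsOptions'; // relative path as in these typings

// Tuning how knowledge sources are downloaded and pushed into the assistant's vector store:
const tuning: Partial<OpenAiAssistantExecutionToolsOptions> = {
    knowledgeSourceDownloadTimeoutMs: 60_000,   // default 30000
    knowledgeSourceUploadMaxConcurrency: 3,     // default 5
    knowledgeSourceUploadPollIntervalMs: 2_000, // default 5000
    knowledgeSourceUploadTimeoutMs: 600_000,    // default 900000
    shouldContinueOnVectorStoreStall: false,    // default true (by default a stalled ingestion does not abort preparation)
};
```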
package/esm/typings/src/utils/agents/resolveAgentAvatarImageUrl.d.ts
ADDED

@@ -0,0 +1,29 @@
+import type { AgentBasicInformation } from '../../book-2.0/agent-source/AgentBasicInformation';
+import type { string_url, string_url_image } from '../../types/typeAliases';
+/**
+ * Options for resolving agent avatar URLs.
+ *
+ * @private utility of `<Chat/>`
+ */
+export type ResolveAgentAvatarImageUrlOptions = {
+    /**
+     * Agent metadata used for avatar resolution.
+     */
+    readonly agent: Pick<AgentBasicInformation, 'agentName' | 'permanentId' | 'meta'>;
+    /**
+     * Optional base URL used to resolve relative meta images and placeholders.
+     */
+    readonly baseUrl?: string_url;
+};
+/**
+ * Resolve the fallback avatar URL for an agent.
+ *
+ * @private utility of `<Chat/>`
+ */
+export declare function resolveAgentAvatarFallbackUrl(options: ResolveAgentAvatarImageUrlOptions): string_url_image | null;
+/**
+ * Resolve the best avatar URL for an agent, preferring META IMAGE and falling back to placeholders.
+ *
+ * @private utility of `<Chat/>`
+ */
+export declare function resolveAgentAvatarImageUrl(options: ResolveAgentAvatarImageUrlOptions): string_url_image | null;
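A sketch of resolving an avatar URL; `agent` and `baseUrl` are declared rather than constructed because the `AgentBasicInformation` and `string_url` shapes are not part of this diff.

```ts
import {
    resolveAgentAvatarImageUrl,
    type ResolveAgentAvatarImageUrlOptions,
} from './resolveAgentAvatarImageUrl'; // relative path as in these typings

// Agent metadata (agentName, permanentId, meta) taken from wherever the chat already has it:
declare const agent: ResolveAgentAvatarImageUrlOptions['agent'];
declare const baseUrl: ResolveAgentAvatarImageUrlOptions['baseUrl'];

// Prefer the agent's META IMAGE and fall back to a placeholder resolved against `baseUrl`;
// returns null when no avatar can be resolved.
const avatarUrl = resolveAgentAvatarImageUrl({ agent, baseUrl });
```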
package/esm/typings/src/version.d.ts
CHANGED

@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
 export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
 /**
  * Represents the version string of the Promptbook engine.
- * It follows semantic versioning (e.g., `0.110.0-
+ * It follows semantic versioning (e.g., `0.110.0-4`).
  *
  * @generated
  */
package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
     "name": "@promptbook/javascript",
-    "version": "0.110.0-3",
+    "version": "0.110.0-5",
     "description": "Promptbook: Turn your company's scattered knowledge into AI ready books",
     "private": false,
     "sideEffects": false,
@@ -90,14 +90,11 @@
         "node": ">=18.18.0",
         "npm": ">=8.0.0"
     },
-    "overrides": {
-        "jsdom": "26.1.0"
-    },
     "main": "./umd/index.umd.js",
     "module": "./esm/index.es.js",
     "typings": "./esm/typings/src/_packages/javascript.index.d.ts",
     "peerDependencies": {
-        "@promptbook/core": "0.110.0-3"
+        "@promptbook/core": "0.110.0-5"
     },
     "dependencies": {
         "crypto": "1.0.1",
package/umd/index.umd.js
CHANGED

@@ -22,7 +22,7 @@
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.110.0-3';
+const PROMPTBOOK_ENGINE_VERSION = '0.110.0-5';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
package/esm/typings/src/llm-providers/openai/OpenAiAgentExecutionTools.d.ts
REMOVED

@@ -1,43 +0,0 @@
-import OpenAI from 'openai';
-import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
-import type { ChatPromptResult } from '../../execution/PromptResult';
-import type { Prompt } from '../../types/Prompt';
-import type { string_markdown, string_markdown_text, string_title } from '../../types/typeAliases';
-import type { OpenAiCompatibleExecutionToolsNonProxiedOptions } from './OpenAiCompatibleExecutionToolsOptions';
-import { OpenAiExecutionTools } from './OpenAiExecutionTools';
-/**
- * Options for OpenAiAgentExecutionTools
- */
-export type OpenAiAgentExecutionToolsOptions = OpenAiCompatibleExecutionToolsNonProxiedOptions & {
-    /**
-     * ID of the vector store to use for file search
-     */
-    readonly vectorStoreId?: string;
-};
-/**
- * Execution Tools for calling OpenAI API using the Responses API (Agents)
- *
- * @public exported from `@promptbook/openai`
- */
-export declare class OpenAiAgentExecutionTools extends OpenAiExecutionTools implements LlmExecutionTools {
-    readonly vectorStoreId?: string;
-    constructor(options: OpenAiAgentExecutionToolsOptions);
-    get title(): string_title & string_markdown_text;
-    get description(): string_markdown;
-    /**
-     * Calls OpenAI API to use a chat model with streaming.
-     */
-    callChatModelStream(prompt: Prompt, onProgress: (chunk: ChatPromptResult) => void): Promise<ChatPromptResult>;
-    /**
-     * Creates a vector store from knowledge sources
-     */
-    static createVectorStore(client: OpenAI, name: string, knowledgeSources: ReadonlyArray<string>): Promise<string>;
-    /**
-     * Discriminant for type guards
-     */
-    protected get discriminant(): string;
-    /**
-     * Type guard to check if given `LlmExecutionTools` are instanceof `OpenAiAgentExecutionTools`
-     */
-    static isOpenAiAgentExecutionTools(llmExecutionTools: LlmExecutionTools): llmExecutionTools is OpenAiAgentExecutionTools;
-}
package/esm/typings/src/llm-providers/openai/createOpenAiAgentExecutionTools.d.ts
REMOVED

@@ -1,11 +0,0 @@
-import { OpenAiAgentExecutionTools } from './OpenAiAgentExecutionTools';
-import type { OpenAiAgentExecutionToolsOptions } from './OpenAiAgentExecutionTools';
-/**
- * Execution Tools for calling OpenAI API using Responses API
- *
- * @public exported from `@promptbook/openai`
- */
-export declare const createOpenAiAgentExecutionTools: ((options: OpenAiAgentExecutionToolsOptions) => OpenAiAgentExecutionTools) & {
-    packageName: string;
-    className: string;
-};
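With `OpenAiAgentExecutionTools` and `createOpenAiAgentExecutionTools` deleted (and their `@promptbook/openai` exports dropped above), callers need to switch to one of the remaining factories. A minimal migration sketch, assuming the assistant-based factory accepts the `OpenAiAssistantExecutionToolsOptions` shown earlier in this diff; the `apiKey` option name is an assumption inherited from the OpenAI-compatible options, not something this diff shows.

```ts
// Before (removed in 0.110.0-5):
// import { createOpenAiAgentExecutionTools } from '@promptbook/openai';
// const llmTools = createOpenAiAgentExecutionTools({ apiKey, vectorStoreId });

// After — using the assistant-based factory that remains exported:
import { createOpenAiAssistantExecutionTools } from '@promptbook/openai';

const llmTools = createOpenAiAssistantExecutionTools({
    apiKey: process.env.OPENAI_API_KEY!, // assumed option name from the OpenAI-compatible options
    assistantId: 'asst_...',             // which assistant to use (per OpenAiAssistantExecutionToolsOptions)
});
```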