@promptbook/javascript 0.110.0-5 → 0.110.0-8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (22)
  1. package/esm/index.es.js +1 -1
  2. package/esm/typings/src/_packages/openai.index.d.ts +8 -0
  3. package/esm/typings/src/_packages/types.index.d.ts +4 -0
  4. package/esm/typings/src/book-components/Chat/AgentChip/AgentChip.d.ts +5 -1
  5. package/esm/typings/src/book-components/Chat/LlmChat/LlmChatProps.d.ts +4 -1
  6. package/esm/typings/src/collection/agent-collection/constructors/agent-collection-in-supabase/AgentsDatabaseSchema.d.ts +0 -3
  7. package/esm/typings/src/execution/LlmExecutionTools.d.ts +2 -1
  8. package/esm/typings/src/llm-providers/agent/Agent.d.ts +1 -0
  9. package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.d.ts +5 -0
  10. package/esm/typings/src/llm-providers/agent/AgentOptions.d.ts +4 -3
  11. package/esm/typings/src/llm-providers/agent/CreateAgentLlmExecutionToolsOptions.d.ts +7 -5
  12. package/esm/typings/src/llm-providers/agent/RemoteAgent.d.ts +2 -1
  13. package/esm/typings/src/llm-providers/openai/OpenAiAgentKitExecutionTools.d.ts +111 -0
  14. package/esm/typings/src/llm-providers/openai/OpenAiAgentKitExecutionToolsOptions.d.ts +15 -0
  15. package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionTools.d.ts +3 -42
  16. package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionToolsOptions.d.ts +2 -33
  17. package/esm/typings/src/llm-providers/openai/OpenAiVectorStoreHandler.d.ts +135 -0
  18. package/esm/typings/src/llm-providers/openai/utils/mapToolsToOpenAi.d.ts +1 -1
  19. package/esm/typings/src/utils/toolCalls/getToolCallIdentity.d.ts +10 -0
  20. package/esm/typings/src/version.d.ts +1 -1
  21. package/package.json +5 -2
  22. package/umd/index.umd.js +1 -1
package/esm/index.es.js CHANGED
@@ -18,7 +18,7 @@ const BOOK_LANGUAGE_VERSION = '2.0.0';
18
18
  * @generated
19
19
  * @see https://github.com/webgptorg/promptbook
20
20
  */
21
- const PROMPTBOOK_ENGINE_VERSION = '0.110.0-5';
21
+ const PROMPTBOOK_ENGINE_VERSION = '0.110.0-8';
22
22
  /**
23
23
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
24
24
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -3,6 +3,8 @@ import { createOpenAiAssistantExecutionTools } from '../llm-providers/openai/cre
3
3
  import { createOpenAiCompatibleExecutionTools } from '../llm-providers/openai/createOpenAiCompatibleExecutionTools';
4
4
  import { createOpenAiExecutionTools } from '../llm-providers/openai/createOpenAiExecutionTools';
5
5
  import { OPENAI_MODELS } from '../llm-providers/openai/openai-models';
6
+ import { OpenAiAgentKitExecutionTools } from '../llm-providers/openai/OpenAiAgentKitExecutionTools';
7
+ import type { OpenAiAgentKitExecutionToolsOptions } from '../llm-providers/openai/OpenAiAgentKitExecutionToolsOptions';
6
8
  import { OpenAiAssistantExecutionTools } from '../llm-providers/openai/OpenAiAssistantExecutionTools';
7
9
  import type { OpenAiAssistantExecutionToolsOptions } from '../llm-providers/openai/OpenAiAssistantExecutionToolsOptions';
8
10
  import { OpenAiCompatibleExecutionTools } from '../llm-providers/openai/OpenAiCompatibleExecutionTools';
@@ -11,6 +13,8 @@ import type { OpenAiCompatibleExecutionToolsNonProxiedOptions } from '../llm-pro
11
13
  import type { OpenAiCompatibleExecutionToolsProxiedOptions } from '../llm-providers/openai/OpenAiCompatibleExecutionToolsOptions';
12
14
  import { OpenAiExecutionTools } from '../llm-providers/openai/OpenAiExecutionTools';
13
15
  import type { OpenAiExecutionToolsOptions } from '../llm-providers/openai/OpenAiExecutionToolsOptions';
16
+ import type { OpenAiVectorStoreHandlerOptions } from '../llm-providers/openai/OpenAiVectorStoreHandler';
17
+ import { OpenAiVectorStoreHandler } from '../llm-providers/openai/OpenAiVectorStoreHandler';
14
18
  import { _OpenAiRegistration } from '../llm-providers/openai/register-constructor';
15
19
  import { _OpenAiAssistantRegistration } from '../llm-providers/openai/register-constructor';
16
20
  import { _OpenAiCompatibleRegistration } from '../llm-providers/openai/register-constructor';
@@ -19,6 +23,8 @@ export { createOpenAiAssistantExecutionTools };
19
23
  export { createOpenAiCompatibleExecutionTools };
20
24
  export { createOpenAiExecutionTools };
21
25
  export { OPENAI_MODELS };
26
+ export { OpenAiAgentKitExecutionTools };
27
+ export type { OpenAiAgentKitExecutionToolsOptions };
22
28
  export { OpenAiAssistantExecutionTools };
23
29
  export type { OpenAiAssistantExecutionToolsOptions };
24
30
  export { OpenAiCompatibleExecutionTools };
@@ -27,6 +33,8 @@ export type { OpenAiCompatibleExecutionToolsNonProxiedOptions };
27
33
  export type { OpenAiCompatibleExecutionToolsProxiedOptions };
28
34
  export { OpenAiExecutionTools };
29
35
  export type { OpenAiExecutionToolsOptions };
36
+ export type { OpenAiVectorStoreHandlerOptions };
37
+ export { OpenAiVectorStoreHandler };
30
38
  export { _OpenAiRegistration };
31
39
  export { _OpenAiAssistantRegistration };
32
40
  export { _OpenAiCompatibleRegistration };
@@ -138,11 +138,13 @@ import type { AzureOpenAiExecutionToolsOptions } from '../llm-providers/azure-op
138
138
  import type { DeepseekExecutionToolsOptions } from '../llm-providers/deepseek/DeepseekExecutionToolsOptions';
139
139
  import type { GoogleExecutionToolsOptions } from '../llm-providers/google/GoogleExecutionToolsOptions';
140
140
  import type { OllamaExecutionToolsOptions } from '../llm-providers/ollama/OllamaExecutionToolsOptions';
141
+ import type { OpenAiAgentKitExecutionToolsOptions } from '../llm-providers/openai/OpenAiAgentKitExecutionToolsOptions';
141
142
  import type { OpenAiAssistantExecutionToolsOptions } from '../llm-providers/openai/OpenAiAssistantExecutionToolsOptions';
142
143
  import type { OpenAiCompatibleExecutionToolsOptions } from '../llm-providers/openai/OpenAiCompatibleExecutionToolsOptions';
143
144
  import type { OpenAiCompatibleExecutionToolsNonProxiedOptions } from '../llm-providers/openai/OpenAiCompatibleExecutionToolsOptions';
144
145
  import type { OpenAiCompatibleExecutionToolsProxiedOptions } from '../llm-providers/openai/OpenAiCompatibleExecutionToolsOptions';
145
146
  import type { OpenAiExecutionToolsOptions } from '../llm-providers/openai/OpenAiExecutionToolsOptions';
147
+ import type { OpenAiVectorStoreHandlerOptions } from '../llm-providers/openai/OpenAiVectorStoreHandler';
146
148
  import type { VercelExecutionToolsOptions } from '../llm-providers/vercel/VercelExecutionToolsOptions';
147
149
  import type { VercelProvider } from '../llm-providers/vercel/VercelProvider';
148
150
  import type { IsPipelineImplementingInterfaceOptions } from '../pipeline/PipelineInterface/isPipelineImplementingInterface';
@@ -539,11 +541,13 @@ export type { AzureOpenAiExecutionToolsOptions };
539
541
  export type { DeepseekExecutionToolsOptions };
540
542
  export type { GoogleExecutionToolsOptions };
541
543
  export type { OllamaExecutionToolsOptions };
544
+ export type { OpenAiAgentKitExecutionToolsOptions };
542
545
  export type { OpenAiAssistantExecutionToolsOptions };
543
546
  export type { OpenAiCompatibleExecutionToolsOptions };
544
547
  export type { OpenAiCompatibleExecutionToolsNonProxiedOptions };
545
548
  export type { OpenAiCompatibleExecutionToolsProxiedOptions };
546
549
  export type { OpenAiExecutionToolsOptions };
550
+ export type { OpenAiVectorStoreHandlerOptions };
547
551
  export type { VercelExecutionToolsOptions };
548
552
  export type { VercelProvider };
549
553
  export type { IsPipelineImplementingInterfaceOptions };
@@ -45,6 +45,10 @@ export type AgentChipProps = {
45
45
  * Additional CSS class name
46
46
  */
47
47
  className?: string;
48
+ /**
49
+ * Optional suffix appended to the agent label (e.g., " (2x)").
50
+ */
51
+ labelSuffix?: string;
48
52
  };
49
53
  /**
50
54
  * AgentChip component - displays a chip with agent avatar and name
@@ -64,4 +68,4 @@ export type AgentChipProps = {
64
68
  *
65
69
  * @private utility of `ChatMessageItem` component
66
70
  */
67
- export declare function AgentChip({ agent, isOngoing, isClickable, onClick, className }: AgentChipProps): import("react/jsx-runtime").JSX.Element;
71
+ export declare function AgentChip({ agent, isOngoing, isClickable, onClick, className, labelSuffix, }: AgentChipProps): import("react/jsx-runtime").JSX.Element;
@@ -69,5 +69,8 @@ export type LlmChatProps = Omit<ChatProps, 'messages' | 'onMessage' | 'onChange'
69
69
  * @param error - The error that occurred
70
70
  * @param retry - Function to retry the last failed message
71
71
  */
72
- onError?(error: unknown, retry: () => void): void;
72
+ onError?(error: unknown, retry: () => void, failedMessage: {
73
+ content: string;
74
+ attachments: ChatMessage['attachments'];
75
+ }): void;
73
76
  };
@@ -36,7 +36,6 @@ export type AgentsDatabaseSchema = {
36
36
  promptbookEngineVersion: string;
37
37
  usage: Json | null;
38
38
  preparedModelRequirements: Json | null;
39
- preparedExternals: Json | null;
40
39
  folderId: number | null;
41
40
  sortOrder: number;
42
41
  deletedAt: string | null;
@@ -54,7 +53,6 @@ export type AgentsDatabaseSchema = {
54
53
  promptbookEngineVersion: string;
55
54
  usage?: Json | null;
56
55
  preparedModelRequirements?: Json | null;
57
- preparedExternals?: Json | null;
58
56
  folderId?: number | null;
59
57
  sortOrder?: number;
60
58
  deletedAt?: string | null;
@@ -72,7 +70,6 @@ export type AgentsDatabaseSchema = {
72
70
  promptbookEngineVersion?: string;
73
71
  usage?: Json | null;
74
72
  preparedModelRequirements?: Json | null;
75
- preparedExternals?: Json | null;
76
73
  folderId?: number | null;
77
74
  sortOrder?: number;
78
75
  deletedAt?: string | null;
@@ -13,7 +13,8 @@ import type { ChatPromptResult, CompletionPromptResult, EmbeddingPromptResult, I
13
13
  * - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
14
14
  * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
15
15
  * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
16
- * - `OpenAiAssistantExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities, recommended for usage in `Agent` or `AgentLlmExecutionTools`
16
+ * - `OpenAiAssistantExecutionTools` - (Deprecated) which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities
17
+ * - `OpenAiAgentKitExecutionTools` - which is a specific implementation of `LlmExecutionTools` backed by OpenAI AgentKit
17
18
  * - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
18
19
  *
19
20
  * @see https://github.com/webgptorg/promptbook#llm-execution-tools
@@ -15,6 +15,7 @@ import type { AgentOptions } from './AgentOptions';
15
15
  * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
16
16
  * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
17
17
  * - `OpenAiAssistantExecutionTools` - (Deprecated) which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities
18
+ * - `OpenAiAgentKitExecutionTools` - which is a specific implementation of `LlmExecutionTools` backed by OpenAI AgentKit
18
19
  * - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
19
20
  *
20
21
  * @public exported from `@promptbook/core`
@@ -17,12 +17,17 @@ import type { CreateAgentLlmExecutionToolsOptions } from './CreateAgentLlmExecut
17
17
  * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
18
18
  * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
19
19
  * - `OpenAiAssistantExecutionTools` - (Deprecated) which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities
20
+ * - `OpenAiAgentKitExecutionTools` - which is a specific implementation of `LlmExecutionTools` backed by OpenAI AgentKit
20
21
  * - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
21
22
  *
22
23
  * @public exported from `@promptbook/core`
23
24
  */
24
25
  export declare class AgentLlmExecutionTools implements LlmExecutionTools {
25
26
  protected readonly options: CreateAgentLlmExecutionToolsOptions;
27
+ /**
28
+ * Cached AgentKit agents to avoid rebuilding identical instances.
29
+ */
30
+ private static agentKitAgentCache;
26
31
  /**
27
32
  * Cache of OpenAI assistants to avoid creating duplicates
28
33
  */
@@ -14,10 +14,11 @@ export type AgentOptions = CommonToolsOptions & {
14
14
  */
15
15
  executionTools: ExecutionTools;
16
16
  /**
17
- * How to manage OpenAI assistant preparation when using OpenAiAssistantExecutionTools.
17
+ * How to manage OpenAI assistant/AgentKit preparation when using OpenAiAssistantExecutionTools
18
+ * or OpenAiAgentKitExecutionTools.
18
19
  *
19
- * Use `external` when an external cache manager already created the assistant and
20
- * the agent should use it as-is.
20
+ * Use `external` when an external cache manager already created the assistant/AgentKit agent
21
+ * and the agent should use it as-is.
21
22
  *
22
23
  * @default internal
23
24
  */
@@ -1,7 +1,8 @@
1
1
  import type { string_book } from '../../book-2.0/agent-source/string_book';
2
2
  import type { CommonToolsOptions } from '../../execution/CommonToolsOptions';
3
3
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
4
- import { OpenAiAssistantExecutionTools } from '../openai/OpenAiAssistantExecutionTools';
4
+ import type { OpenAiAgentKitExecutionTools } from '../openai/OpenAiAgentKitExecutionTools';
5
+ import type { OpenAiAssistantExecutionTools } from '../openai/OpenAiAssistantExecutionTools';
5
6
  /**
6
7
  * Options for creating AgentLlmExecutionTools
7
8
  */
@@ -9,12 +10,13 @@ export type CreateAgentLlmExecutionToolsOptions = CommonToolsOptions & {
9
10
  /**
10
11
  * The underlying LLM execution tools to wrap
11
12
  */
12
- llmTools: LlmExecutionTools | OpenAiAssistantExecutionTools;
13
+ llmTools: LlmExecutionTools | OpenAiAssistantExecutionTools | OpenAiAgentKitExecutionTools;
13
14
  /**
14
- * How to manage OpenAI assistant preparation when using OpenAiAssistantExecutionTools.
15
+ * How to manage OpenAI assistant/AgentKit preparation when using OpenAiAssistantExecutionTools
16
+ * or OpenAiAgentKitExecutionTools.
15
17
  *
16
- * Use `external` when an external cache manager already created the assistant and
17
- * the agent should use it as-is.
18
+ * Use `external` when an external cache manager already created the assistant/AgentKit agent
19
+ * and the agent should use it as-is.
18
20
  *
19
21
  * @default internal
20
22
  */
@@ -10,7 +10,8 @@ import type { RemoteAgentOptions } from './RemoteAgentOptions';
10
10
  * - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
11
11
  * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
12
12
  * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
13
- * - `OpenAiAssistantExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities, recommended for usage in `Agent` or `AgentLlmExecutionTools`
13
+ * - `OpenAiAssistantExecutionTools` - (Deprecated) which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities
14
+ * - `OpenAiAgentKitExecutionTools` - which is a specific implementation of `LlmExecutionTools` backed by OpenAI AgentKit
14
15
  * - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
15
16
  *
16
17
  * @public exported from `@promptbook/core`
@@ -0,0 +1,111 @@
1
+ import { Agent as AgentFromKit } from '@openai/agents';
2
+ import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
3
+ import type { ChatPromptResult } from '../../execution/PromptResult';
4
+ import type { ModelRequirements } from '../../types/ModelRequirements';
5
+ import type { Prompt } from '../../types/Prompt';
6
+ import type { string_markdown, string_markdown_text, string_title } from '../../types/typeAliases';
7
+ import type { OpenAiAgentKitExecutionToolsOptions } from './OpenAiAgentKitExecutionToolsOptions';
8
+ import { OpenAiVectorStoreHandler } from './OpenAiVectorStoreHandler';
9
+ /**
10
+ * Alias for OpenAI AgentKit agent to avoid naming confusion with Promptbook agents.
11
+ */
12
+ type OpenAiAgentKitAgent = AgentFromKit;
13
+ /**
14
+ * Prepared AgentKit agent details.
15
+ */
16
+ type OpenAiAgentKitPreparedAgent = {
17
+ readonly agent: OpenAiAgentKitAgent;
18
+ readonly vectorStoreId?: string;
19
+ };
20
+ /**
21
+ * Execution tools for OpenAI AgentKit (Agents SDK).
22
+ *
23
+ * @public exported from `@promptbook/openai`
24
+ */
25
+ export declare class OpenAiAgentKitExecutionTools extends OpenAiVectorStoreHandler implements LlmExecutionTools {
26
+ private preparedAgentKitAgent;
27
+ private readonly agentKitModelName;
28
+ /**
29
+ * Creates OpenAI AgentKit execution tools.
30
+ */
31
+ constructor(options: OpenAiAgentKitExecutionToolsOptions);
32
+ get title(): string_title & string_markdown_text;
33
+ get description(): string_markdown;
34
+ /**
35
+ * Calls OpenAI AgentKit with a chat prompt (non-streaming).
36
+ */
37
+ callChatModel(prompt: Prompt): Promise<ChatPromptResult>;
38
+ /**
39
+ * Calls OpenAI AgentKit with a chat prompt (streaming).
40
+ */
41
+ callChatModelStream(prompt: Prompt, onProgress: (chunk: ChatPromptResult) => void): Promise<ChatPromptResult>;
42
+ /**
43
+ * Returns a prepared AgentKit agent when the server wants to manage caching externally.
44
+ */
45
+ getPreparedAgentKitAgent(): OpenAiAgentKitPreparedAgent | null;
46
+ /**
47
+ * Stores a prepared AgentKit agent for later reuse by external cache managers.
48
+ */
49
+ setPreparedAgentKitAgent(preparedAgent: OpenAiAgentKitPreparedAgent): void;
50
+ /**
51
+ * Creates a new tools instance bound to a prepared AgentKit agent.
52
+ */
53
+ getPreparedAgentTools(preparedAgent: OpenAiAgentKitPreparedAgent): OpenAiAgentKitExecutionTools;
54
+ /**
55
+ * Prepares an AgentKit agent with optional knowledge sources and tool definitions.
56
+ */
57
+ prepareAgentKitAgent(options: {
58
+ readonly name: string_title;
59
+ readonly instructions: string_markdown;
60
+ readonly knowledgeSources?: ReadonlyArray<string>;
61
+ readonly tools?: ModelRequirements['tools'];
62
+ readonly vectorStoreId?: string;
63
+ readonly storeAsPrepared?: boolean;
64
+ }): Promise<OpenAiAgentKitPreparedAgent>;
65
+ /**
66
+ * Ensures the AgentKit SDK is wired to the OpenAI client and API key.
67
+ */
68
+ private ensureAgentKitDefaults;
69
+ /**
70
+ * Builds the tool list for AgentKit, including hosted file search when applicable.
71
+ */
72
+ private buildAgentKitTools;
73
+ /**
74
+ * Resolves the configured script tools for tool execution.
75
+ */
76
+ private resolveScriptTools;
77
+ /**
78
+ * Runs a prepared AgentKit agent and streams results back to the caller.
79
+ */
80
+ callChatModelStreamWithPreparedAgent(options: {
81
+ readonly openAiAgentKitAgent: OpenAiAgentKitAgent;
82
+ readonly prompt: Prompt;
83
+ readonly rawPromptContent?: string;
84
+ readonly onProgress: (chunk: ChatPromptResult) => void;
85
+ }): Promise<ChatPromptResult>;
86
+ /**
87
+ * Builds AgentKit input items from the prompt and optional thread.
88
+ */
89
+ private buildAgentKitInputItems;
90
+ /**
91
+ * Builds the user message content for AgentKit runs, including file inputs when provided.
92
+ */
93
+ private buildAgentKitUserContent;
94
+ /**
95
+ * Normalizes AgentKit tool outputs into a string for Promptbook tool call results.
96
+ */
97
+ private formatAgentKitToolOutput;
98
+ /**
99
+ * Returns AgentKit-specific options.
100
+ */
101
+ private get agentKitOptions();
102
+ /**
103
+ * Discriminant for type guards.
104
+ */
105
+ protected get discriminant(): string;
106
+ /**
107
+ * Type guard to check if given `LlmExecutionTools` are instanceof `OpenAiAgentKitExecutionTools`.
108
+ */
109
+ static isOpenAiAgentKitExecutionTools(llmExecutionTools: LlmExecutionTools): llmExecutionTools is OpenAiAgentKitExecutionTools;
110
+ }
111
+ export {};
@@ -0,0 +1,15 @@
1
+ import type { string_model_name } from '../../types/typeAliases';
2
+ import type { OpenAiVectorStoreHandlerOptions } from './OpenAiVectorStoreHandler';
3
+ /**
4
+ * Options for `OpenAiAgentKitExecutionTools`.
5
+ *
6
+ * @public exported from `@promptbook/openai`
7
+ */
8
+ export type OpenAiAgentKitExecutionToolsOptions = OpenAiVectorStoreHandlerOptions & {
9
+ /**
10
+ * Base model name used for AgentKit agents.
11
+ *
12
+ * @default gpt-5.2
13
+ */
14
+ readonly agentKitModelName?: string_model_name;
15
+ };
@@ -4,7 +4,7 @@ import type { ModelRequirements } from '../../types/ModelRequirements';
4
4
  import type { Prompt } from '../../types/Prompt';
5
5
  import type { string_markdown, string_markdown_text, string_title, string_token } from '../../types/typeAliases';
6
6
  import type { OpenAiAssistantExecutionToolsOptions } from './OpenAiAssistantExecutionToolsOptions';
7
- import { OpenAiExecutionTools } from './OpenAiExecutionTools';
7
+ import { OpenAiVectorStoreHandler } from './OpenAiVectorStoreHandler';
8
8
  /**
9
9
  * Execution Tools for calling OpenAI API Assistants
10
10
  *
@@ -17,9 +17,10 @@ import { OpenAiExecutionTools } from './OpenAiExecutionTools';
17
17
  * - `OpenAiAssistantExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities, recommended for usage in `Agent` or `AgentLlmExecutionTools`
18
18
  * - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
19
19
  *
20
+ * @deprecated Use `OpenAiAgentKitExecutionTools` instead.
20
21
  * @public exported from `@promptbook/openai`
21
22
  */
22
- export declare class OpenAiAssistantExecutionTools extends OpenAiExecutionTools implements LlmExecutionTools {
23
+ export declare class OpenAiAssistantExecutionTools extends OpenAiVectorStoreHandler implements LlmExecutionTools {
23
24
  readonly assistantId: string_token;
24
25
  private readonly isCreatingNewAssistantsAllowed;
25
26
  /**
@@ -42,46 +43,6 @@ export declare class OpenAiAssistantExecutionTools extends OpenAiExecutionTools
42
43
  * Get an existing assistant tool wrapper
43
44
  */
44
45
  getAssistant(assistantId: string_token): OpenAiAssistantExecutionTools;
45
- /**
46
- * Returns the per-knowledge-source download timeout in milliseconds.
47
- */
48
- private getKnowledgeSourceDownloadTimeoutMs;
49
- /**
50
- * Returns the max concurrency for knowledge source uploads.
51
- */
52
- private getKnowledgeSourceUploadMaxConcurrency;
53
- /**
54
- * Returns the polling interval in milliseconds for vector store uploads.
55
- */
56
- private getKnowledgeSourceUploadPollIntervalMs;
57
- /**
58
- * Returns the overall upload timeout in milliseconds for vector store uploads.
59
- */
60
- private getKnowledgeSourceUploadTimeoutMs;
61
- /**
62
- * Returns true if we should continue even if vector store ingestion stalls.
63
- */
64
- private shouldContinueOnVectorStoreStall;
65
- /**
66
- * Returns assistant-specific options with extended settings.
67
- */
68
- private get assistantOptions();
69
- /**
70
- * Downloads a knowledge source URL into a File for vector store upload.
71
- */
72
- private downloadKnowledgeSourceFile;
73
- /**
74
- * Logs vector store file batch diagnostics to help trace ingestion stalls or failures.
75
- */
76
- private logVectorStoreFileBatchDiagnostics;
77
- /**
78
- * Uploads knowledge source files to the vector store and polls until processing completes.
79
- */
80
- private uploadKnowledgeSourceFilesToVectorStore;
81
- /**
82
- * Creates a vector store and uploads knowledge sources, returning its ID.
83
- */
84
- private createVectorStoreWithKnowledgeSources;
85
46
  createNewAssistant(options: {
86
47
  /**
87
48
  * Name of the new assistant
@@ -1,12 +1,11 @@
1
- import type { ClientOptions } from 'openai';
2
1
  import type { string_token } from '../../types/typeAliases';
3
- import type { OpenAiCompatibleExecutionToolsOptions } from './OpenAiCompatibleExecutionToolsOptions';
2
+ import type { OpenAiVectorStoreHandlerOptions } from './OpenAiVectorStoreHandler';
4
3
  /**
5
4
  * Options for `createOpenAiAssistantExecutionTools` and `OpenAiAssistantExecutionTools`
6
5
  *
7
6
  * @public exported from `@promptbook/openai`
8
7
  */
9
- export type OpenAiAssistantExecutionToolsOptions = OpenAiCompatibleExecutionToolsOptions & ClientOptions & {
8
+ export type OpenAiAssistantExecutionToolsOptions = OpenAiVectorStoreHandlerOptions & {
10
9
  /**
11
10
  * Whether creating new assistants is allowed
12
11
  *
@@ -17,34 +16,4 @@ export type OpenAiAssistantExecutionToolsOptions = OpenAiCompatibleExecutionTool
17
16
  * Which assistant to use
18
17
  */
19
18
  readonly assistantId: string_token;
20
- /**
21
- * Per-knowledge-source download timeout in milliseconds when preparing assistants.
22
- *
23
- * @default 30000
24
- */
25
- readonly knowledgeSourceDownloadTimeoutMs?: number;
26
- /**
27
- * Max concurrency for uploading knowledge source files to the vector store.
28
- *
29
- * @default 5
30
- */
31
- readonly knowledgeSourceUploadMaxConcurrency?: number;
32
- /**
33
- * Poll interval in milliseconds when waiting for vector store file batch processing.
34
- *
35
- * @default 5000
36
- */
37
- readonly knowledgeSourceUploadPollIntervalMs?: number;
38
- /**
39
- * Overall timeout in milliseconds for vector store file batch processing.
40
- *
41
- * @default 900000
42
- */
43
- readonly knowledgeSourceUploadTimeoutMs?: number;
44
- /**
45
- * Whether we should continue even if vector store ingestion stalls.
46
- *
47
- * @default true
48
- */
49
- readonly shouldContinueOnVectorStoreStall?: boolean;
50
19
  };
@@ -0,0 +1,135 @@
1
+ import type { ClientOptions } from 'openai';
2
+ import OpenAI from 'openai';
3
+ import { TODO_any } from '../../_packages/types.index';
4
+ import type { string_title } from '../../types/typeAliases';
5
+ import type { OpenAiCompatibleExecutionToolsOptions } from './OpenAiCompatibleExecutionToolsOptions';
6
+ import { OpenAiExecutionTools } from './OpenAiExecutionTools';
7
+ /**
8
+ * Metadata for uploaded knowledge source files used for vector store diagnostics.
9
+ */
10
+ type KnowledgeSourceUploadMetadata = {
11
+ readonly fileId: string;
12
+ readonly filename: string;
13
+ readonly sizeBytes?: number;
14
+ };
15
+ /**
16
+ * Shared options for OpenAI vector store handling.
17
+ *
18
+ * @public exported from `@promptbook/openai`
19
+ */
20
+ export type OpenAiVectorStoreHandlerOptions = OpenAiCompatibleExecutionToolsOptions & ClientOptions & {
21
+ /**
22
+ * Per-knowledge-source download timeout in milliseconds when preparing vector stores.
23
+ *
24
+ * @default 30000
25
+ */
26
+ readonly knowledgeSourceDownloadTimeoutMs?: number;
27
+ /**
28
+ * Max concurrency for uploading knowledge source files to the vector store.
29
+ *
30
+ * @default 5
31
+ */
32
+ readonly knowledgeSourceUploadMaxConcurrency?: number;
33
+ /**
34
+ * Poll interval in milliseconds when waiting for vector store file batch processing.
35
+ *
36
+ * @default 5000
37
+ */
38
+ readonly knowledgeSourceUploadPollIntervalMs?: number;
39
+ /**
40
+ * Overall timeout in milliseconds for vector store file batch processing.
41
+ *
42
+ * @default 900000
43
+ */
44
+ readonly knowledgeSourceUploadTimeoutMs?: number;
45
+ /**
46
+ * Whether we should continue even if vector store ingestion stalls.
47
+ *
48
+ * @default true
49
+ */
50
+ readonly shouldContinueOnVectorStoreStall?: boolean;
51
+ };
52
+ /**
53
+ * Base class for OpenAI execution tools that need hosted vector stores.
54
+ *
55
+ * @public exported from `@promptbook/openai`
56
+ */
57
+ export declare abstract class OpenAiVectorStoreHandler extends OpenAiExecutionTools {
58
+ /**
59
+ * Returns the per-knowledge-source download timeout in milliseconds.
60
+ */
61
+ protected getKnowledgeSourceDownloadTimeoutMs(): number;
62
+ /**
63
+ * Returns the max concurrency for knowledge source uploads.
64
+ */
65
+ protected getKnowledgeSourceUploadMaxConcurrency(): number;
66
+ /**
67
+ * Returns the polling interval in milliseconds for vector store uploads.
68
+ */
69
+ protected getKnowledgeSourceUploadPollIntervalMs(): number;
70
+ /**
71
+ * Returns the overall upload timeout in milliseconds for vector store uploads.
72
+ */
73
+ protected getKnowledgeSourceUploadTimeoutMs(): number;
74
+ /**
75
+ * Returns true if we should continue even if vector store ingestion stalls.
76
+ */
77
+ protected shouldContinueOnVectorStoreStall(): boolean;
78
+ /**
79
+ * Returns vector-store-specific options with extended settings.
80
+ */
81
+ protected get vectorStoreOptions(): OpenAiVectorStoreHandlerOptions;
82
+ /**
83
+ * Returns the OpenAI vector stores API surface, supporting stable and beta SDKs.
84
+ */
85
+ protected getVectorStoresApi(client: OpenAI): TODO_any;
86
+ /**
87
+ * Downloads a knowledge source URL into a File for vector store upload.
88
+ */
89
+ protected downloadKnowledgeSourceFile(options: {
90
+ readonly source: string;
91
+ readonly timeoutMs: number;
92
+ readonly logLabel: string;
93
+ }): Promise<{
94
+ readonly file: File;
95
+ readonly sizeBytes: number;
96
+ readonly filename: string;
97
+ readonly elapsedMs: number;
98
+ } | null>;
99
+ /**
100
+ * Logs vector store file batch diagnostics to help trace ingestion stalls or failures.
101
+ */
102
+ protected logVectorStoreFileBatchDiagnostics(options: {
103
+ readonly client: OpenAI;
104
+ readonly vectorStoreId: string;
105
+ readonly batchId: string;
106
+ readonly uploadedFiles: ReadonlyArray<KnowledgeSourceUploadMetadata>;
107
+ readonly logLabel: string;
108
+ readonly reason: 'stalled' | 'timeout' | 'failed';
109
+ }): Promise<void>;
110
+ /**
111
+ * Uploads knowledge source files to the vector store and polls until processing completes.
112
+ */
113
+ protected uploadKnowledgeSourceFilesToVectorStore(options: {
114
+ readonly client: OpenAI;
115
+ readonly vectorStoreId: string;
116
+ readonly files: ReadonlyArray<File>;
117
+ readonly totalBytes: number;
118
+ readonly logLabel: string;
119
+ }): Promise<TODO_any | null>;
120
+ /**
121
+ * Creates a vector store and uploads knowledge sources, returning its ID.
122
+ */
123
+ protected createVectorStoreWithKnowledgeSources(options: {
124
+ readonly client: OpenAI;
125
+ readonly name: string_title;
126
+ readonly knowledgeSources: ReadonlyArray<string>;
127
+ readonly logLabel: string;
128
+ }): Promise<{
129
+ readonly vectorStoreId: string;
130
+ readonly uploadedFileCount: number;
131
+ readonly skippedCount: number;
132
+ readonly totalBytes: number;
133
+ }>;
134
+ }
135
+ export {};
@@ -5,4 +5,4 @@ import type { LlmToolDefinition } from '../../../types/LlmToolDefinition';
5
5
  *
6
6
  * @private
7
7
  */
8
- export declare function mapToolsToOpenAi(tools: ReadonlyArray<LlmToolDefinition>): Array<OpenAI.Chat.Completions.ChatCompletionTool>;
8
+ export declare function mapToolsToOpenAi(tools: ReadonlyArray<LlmToolDefinition>): Array<OpenAI.Chat.Completions.ChatCompletionTool & OpenAI.Beta.AssistantTool>;
@@ -0,0 +1,10 @@
1
+ import type { ToolCall } from '../../types/ToolCall';
2
+ /**
3
+ * Builds a stable identity string for tool calls across partial updates.
4
+ *
5
+ * @param toolCall - Tool call entry to identify.
6
+ * @returns Stable identity string for deduplication.
7
+ *
8
+ * @private function of <Chat/>
9
+ */
10
+ export declare function getToolCallIdentity(toolCall: ToolCall): string;
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
15
15
  export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
16
16
  /**
17
17
  * Represents the version string of the Promptbook engine.
18
- * It follows semantic versioning (e.g., `0.110.0-4`).
18
+ * It follows semantic versioning (e.g., `0.110.0-7`).
19
19
  *
20
20
  * @generated
21
21
  */
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@promptbook/javascript",
3
- "version": "0.110.0-5",
3
+ "version": "0.110.0-8",
4
4
  "description": "Promptbook: Turn your company's scattered knowledge into AI ready books",
5
5
  "private": false,
6
6
  "sideEffects": false,
@@ -90,11 +90,14 @@
90
90
  "node": ">=18.18.0",
91
91
  "npm": ">=8.0.0"
92
92
  },
93
+ "overrides": {
94
+ "zod": "$zod"
95
+ },
93
96
  "main": "./umd/index.umd.js",
94
97
  "module": "./esm/index.es.js",
95
98
  "typings": "./esm/typings/src/_packages/javascript.index.d.ts",
96
99
  "peerDependencies": {
97
- "@promptbook/core": "0.110.0-5"
100
+ "@promptbook/core": "0.110.0-8"
98
101
  },
99
102
  "dependencies": {
100
103
  "crypto": "1.0.1",
package/umd/index.umd.js CHANGED
@@ -22,7 +22,7 @@
22
22
  * @generated
23
23
  * @see https://github.com/webgptorg/promptbook
24
24
  */
25
- const PROMPTBOOK_ENGINE_VERSION = '0.110.0-5';
25
+ const PROMPTBOOK_ENGINE_VERSION = '0.110.0-8';
26
26
  /**
27
27
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
28
28
  * Note: [💞] Ignore a discrepancy between file name and entity name