@promptbook/legacy-documents 0.105.0-26 → 0.105.0-30

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/esm/index.es.js CHANGED
@@ -26,7 +26,7 @@ const BOOK_LANGUAGE_VERSION = '2.0.0';
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
- const PROMPTBOOK_ENGINE_VERSION = '0.105.0-26';
+ const PROMPTBOOK_ENGINE_VERSION = '0.105.0-30';
  /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -3,6 +3,7 @@ import { createOpenAiAssistantExecutionTools } from '../llm-providers/openai/cre
  import { createOpenAiCompatibleExecutionTools } from '../llm-providers/openai/createOpenAiCompatibleExecutionTools';
  import { createOpenAiExecutionTools } from '../llm-providers/openai/createOpenAiExecutionTools';
  import { OPENAI_MODELS } from '../llm-providers/openai/openai-models';
+ import { OpenAiAgentExecutionTools } from '../llm-providers/openai/OpenAiAgentExecutionTools';
  import { OpenAiAssistantExecutionTools } from '../llm-providers/openai/OpenAiAssistantExecutionTools';
  import type { OpenAiAssistantExecutionToolsOptions } from '../llm-providers/openai/OpenAiAssistantExecutionToolsOptions';
  import { OpenAiCompatibleExecutionTools } from '../llm-providers/openai/OpenAiCompatibleExecutionTools';
@@ -19,6 +20,7 @@ export { createOpenAiAssistantExecutionTools };
  export { createOpenAiCompatibleExecutionTools };
  export { createOpenAiExecutionTools };
  export { OPENAI_MODELS };
+ export { OpenAiAgentExecutionTools };
  export { OpenAiAssistantExecutionTools };
  export type { OpenAiAssistantExecutionToolsOptions };
  export { OpenAiCompatibleExecutionTools };
@@ -136,6 +136,7 @@ import type { AzureOpenAiExecutionToolsOptions } from '../llm-providers/azure-op
  import type { DeepseekExecutionToolsOptions } from '../llm-providers/deepseek/DeepseekExecutionToolsOptions';
  import type { GoogleExecutionToolsOptions } from '../llm-providers/google/GoogleExecutionToolsOptions';
  import type { OllamaExecutionToolsOptions } from '../llm-providers/ollama/OllamaExecutionToolsOptions';
+ import type { OpenAiAgentExecutionToolsOptions } from '../llm-providers/openai/OpenAiAgentExecutionTools';
  import type { OpenAiAssistantExecutionToolsOptions } from '../llm-providers/openai/OpenAiAssistantExecutionToolsOptions';
  import type { OpenAiCompatibleExecutionToolsOptions } from '../llm-providers/openai/OpenAiCompatibleExecutionToolsOptions';
  import type { OpenAiCompatibleExecutionToolsNonProxiedOptions } from '../llm-providers/openai/OpenAiCompatibleExecutionToolsOptions';
@@ -530,6 +531,7 @@ export type { AzureOpenAiExecutionToolsOptions };
  export type { DeepseekExecutionToolsOptions };
  export type { GoogleExecutionToolsOptions };
  export type { OllamaExecutionToolsOptions };
+ export type { OpenAiAgentExecutionToolsOptions };
  export type { OpenAiAssistantExecutionToolsOptions };
  export type { OpenAiCompatibleExecutionToolsOptions };
  export type { OpenAiCompatibleExecutionToolsNonProxiedOptions };
@@ -0,0 +1,11 @@
+ import { ChatParticipant } from '../types/ChatParticipant';
+ /**
+ * Resolves the URL for a citation source by looking up KNOWLEDGE commitments in the agent's source code.
+ *
+ * @param source - The source filename (e.g. "document.pdf")
+ * @param participants - List of chat participants to search in
+ * @returns The resolved URL if found, or null
+ *
+ * @private utility of <Chat/> component
+ */
+ export declare function resolveCitationUrl(source: string, participants: ReadonlyArray<ChatParticipant>): string | null;
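A hedged usage sketch of the new `resolveCitationUrl` helper follows; only the function signature comes from this diff, and the import path and `ChatParticipant` fields used below are assumptions for illustration:

    import { resolveCitationUrl } from '@promptbook/components'; // hypothetical import path

    // Hypothetical participants; the real `ChatParticipant` shape is not shown in this diff.
    const participants = [
        { name: 'Support agent', agentSource: 'KNOWLEDGE https://example.com/files/document.pdf' },
    ] as ReadonlyArray<any>;

    const url = resolveCitationUrl('document.pdf', participants);
    // -> 'https://example.com/files/document.pdf' when a matching KNOWLEDGE commitment exists, otherwise null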
@@ -14,7 +14,8 @@ import type { AgentOptions } from './AgentOptions';
  * - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
  * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
  * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
- * - `OpenAiAssistantExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities, recommended for usage in `Agent` or `AgentLlmExecutionTools`
+ * - `OpenAiAgentExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with agent capabilities (using Responses API), recommended for usage in `Agent` or `AgentLlmExecutionTools`
+ * - `OpenAiAssistantExecutionTools` - (Deprecated) which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities
  * - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
  *
  * @public exported from `@promptbook/core`
@@ -16,7 +16,8 @@ import type { CreateAgentLlmExecutionToolsOptions } from './CreateAgentLlmExecut
  * - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
  * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
  * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
- * - `OpenAiAssistantExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities, recommended for usage in `Agent` or `AgentLlmExecutionTools`
+ * - `OpenAiAgentExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with agent capabilities (using Responses API), recommended for usage in `Agent` or `AgentLlmExecutionTools`
+ * - `OpenAiAssistantExecutionTools` - (Deprecated) which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities
  * - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
  *
  * @public exported from `@promptbook/core`
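The updated doc comment above (repeated in both declaration files) now recommends `OpenAiAgentExecutionTools` as the OpenAI-backed layer beneath the agent abstractions. A minimal sketch of that layering; the entry points and constructor options below are assumptions, only the class names and the `LlmExecutionTools` interface come from this diff:

    import { AgentLlmExecutionTools } from '@promptbook/core';      // assumed entry point
    import { OpenAiAgentExecutionTools } from '@promptbook/openai';
    import type { LlmExecutionTools } from '@promptbook/types';     // assumed entry point

    // Innermost layer: concrete OpenAI tools backed by the Responses API.
    const openAiTools: LlmExecutionTools = new OpenAiAgentExecutionTools({
        apiKey: process.env.OPENAI_API_KEY!, // assumed option name; other required options, if any, are elided
    });

    // Middle layer: wraps any LlmExecutionTools and applies agent-specific system prompts and requirements.
    // The constructor options are placeholders; see `CreateAgentLlmExecutionToolsOptions` for the real shape.
    // const agentTools: LlmExecutionTools = new AgentLlmExecutionTools({ llmTools: openAiTools, /* agent source, ... */ });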
@@ -27,6 +28,10 @@ export declare class AgentLlmExecutionTools implements LlmExecutionTools {
  * Cache of OpenAI assistants to avoid creating duplicates
  */
  private static assistantCache;
+ /**
+ * Cache of OpenAI vector stores to avoid creating duplicates
+ */
+ private static vectorStoreCache;
  /**
  * Cached model requirements to avoid re-parsing the agent source
  */
@@ -0,0 +1,43 @@
+ import OpenAI from 'openai';
+ import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
+ import type { ChatPromptResult } from '../../execution/PromptResult';
+ import type { Prompt } from '../../types/Prompt';
+ import type { string_markdown, string_markdown_text, string_title } from '../../types/typeAliases';
+ import type { OpenAiCompatibleExecutionToolsNonProxiedOptions } from './OpenAiCompatibleExecutionToolsOptions';
+ import { OpenAiExecutionTools } from './OpenAiExecutionTools';
+ /**
+ * Options for OpenAiAgentExecutionTools
+ */
+ export type OpenAiAgentExecutionToolsOptions = OpenAiCompatibleExecutionToolsNonProxiedOptions & {
+ /**
+ * ID of the vector store to use for file search
+ */
+ readonly vectorStoreId?: string;
+ };
+ /**
+ * Execution Tools for calling OpenAI API using the Responses API (Agents)
+ *
+ * @public exported from `@promptbook/openai`
+ */
+ export declare class OpenAiAgentExecutionTools extends OpenAiExecutionTools implements LlmExecutionTools {
+ readonly vectorStoreId?: string;
+ constructor(options: OpenAiAgentExecutionToolsOptions);
+ get title(): string_title & string_markdown_text;
+ get description(): string_markdown;
+ /**
+ * Calls OpenAI API to use a chat model with streaming.
+ */
+ callChatModelStream(prompt: Prompt, onProgress: (chunk: ChatPromptResult) => void): Promise<ChatPromptResult>;
+ /**
+ * Creates a vector store from knowledge sources
+ */
+ static createVectorStore(client: OpenAI, name: string, knowledgeSources: ReadonlyArray<string>): Promise<string>;
+ /**
+ * Discriminant for type guards
+ */
+ protected get discriminant(): string;
+ /**
+ * Type guard to check if given `LlmExecutionTools` are instanceof `OpenAiAgentExecutionTools`
+ */
+ static isOpenAiAgentExecutionTools(llmExecutionTools: LlmExecutionTools): llmExecutionTools is OpenAiAgentExecutionTools;
+ }
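The declaration above also exposes a static `createVectorStore` helper and a streaming chat call. A hedged sketch of wiring them together; only the signatures come from this diff, and option names other than `vectorStoreId` are assumptions:

    import OpenAI from 'openai';
    import { OpenAiAgentExecutionTools } from '@promptbook/openai';

    const client = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });

    // Build a vector store from knowledge source links and get its id back.
    const vectorStoreId = await OpenAiAgentExecutionTools.createVectorStore(client, 'docs-store', [
        'https://example.com/handbook.pdf',
    ]);

    const tools = new OpenAiAgentExecutionTools({
        apiKey: process.env.OPENAI_API_KEY!, // assumed to be part of OpenAiCompatibleExecutionToolsNonProxiedOptions
        vectorStoreId,                       // optional; enables file search via the Responses API
    });

    // Streaming chat; `prompt` construction is elided here, and `onProgress` receives partial ChatPromptResult chunks.
    // const result = await tools.callChatModelStream(prompt, (chunk) => { /* render partial content */ });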
@@ -18,6 +18,7 @@ import { OpenAiExecutionTools } from './OpenAiExecutionTools';
  * - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
  *
  * @public exported from `@promptbook/openai`
+ * @deprecated Use `OpenAiAgentExecutionTools` instead which uses the new OpenAI Responses API
  */
  export declare class OpenAiAssistantExecutionTools extends OpenAiExecutionTools implements LlmExecutionTools {
  readonly assistantId: string_token;
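Since `OpenAiAssistantExecutionTools` is now marked deprecated, a minimal migration sketch follows; option names other than `assistantId` and `vectorStoreId` are assumptions:

    // Before (deprecated): Assistants API, bound to a pre-created assistant
    // const tools = new OpenAiAssistantExecutionTools({ apiKey, assistantId: 'asst_...' });

    // After: Responses API; file search is configured through a vector store instead of an assistant
    // const tools = new OpenAiAgentExecutionTools({ apiKey, vectorStoreId: 'vs_...' });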
@@ -73,7 +73,7 @@ export declare abstract class OpenAiCompatibleExecutionTools implements LlmExecu
  /**
  * Calls OpenAI compatible API to use a image generation model
  */
- callImageGenerationModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements'>): Promise<ImagePromptResult>;
+ callImageGenerationModel(prompt: Prompt): Promise<ImagePromptResult>;
  /**
  * Internal method that handles parameter retry for image generation model calls
  */
@@ -129,6 +129,12 @@ export type CommonModelRequirements = {
  * Note: [🚉] This is fully serializable as JSON
  */
  readonly tools?: LlmToolDefinition[];
+ /**
+ * Optional list of knowledge source links that the model can use
+ *
+ * Note: [🚉] This is fully serializable as JSON
+ */
+ readonly knowledgeSources?: string[];
  };
  /**
  * TODO: [🧠][🈁] `seed` should maybe be somewhere else (not in `ModelRequirements`) (similar that `user` identification is not here)
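The new `knowledgeSources` field extends the common model requirements with a serializable list of knowledge source links. A minimal sketch; only `knowledgeSources` is taken from this diff, the other requirement fields are elided:

    const modelRequirements = {
        // ...the usual chat-model requirements (model name, system message, ...) go here
        knowledgeSources: [
            'https://example.com/company-handbook.pdf',
            './knowledge/faq.md',
        ],
    };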
@@ -72,6 +72,14 @@ export type ImagePrompt = CommonPrompt & {
  * Requirements for image generation model
  */
  modelRequirements: ImageGenerationModelRequirements;
+ /**
+ * Optional file attachments
+ */
+ attachments?: Array<{
+ name: string;
+ type: string;
+ url: string;
+ }>;
  };
  /**
  * Embedding prompt
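`ImagePrompt` gains an optional `attachments` array of `{ name, type, url }` records. A minimal sketch of an image prompt carrying an attachment; the other prompt fields are elided and not shown by this hunk:

    const imagePrompt = {
        // ...content, parameters and modelRequirements as usual
        attachments: [
            { name: 'brand-logo.png', type: 'image/png', url: 'https://example.com/brand-logo.png' },
        ],
    };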
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
  export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
  /**
  * Represents the version string of the Promptbook engine.
- * It follows semantic versioning (e.g., `0.105.0-25`).
+ * It follows semantic versioning (e.g., `0.105.0-28`).
  *
  * @generated
  */
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@promptbook/legacy-documents",
- "version": "0.105.0-26",
+ "version": "0.105.0-30",
  "description": "Promptbook: Turn your company's scattered knowledge into AI ready books",
  "private": false,
  "sideEffects": false,
@@ -95,7 +95,7 @@
  "module": "./esm/index.es.js",
  "typings": "./esm/typings/src/_packages/legacy-documents.index.d.ts",
  "peerDependencies": {
- "@promptbook/core": "0.105.0-26"
+ "@promptbook/core": "0.105.0-30"
  },
  "dependencies": {
  "colors": "1.4.0",
package/umd/index.umd.js CHANGED
@@ -25,7 +25,7 @@
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
- const PROMPTBOOK_ENGINE_VERSION = '0.105.0-26';
+ const PROMPTBOOK_ENGINE_VERSION = '0.105.0-30';
  /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name