@promptbook/remote-server 0.112.0-39 → 0.112.0-41

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56)
  1. package/README.md +23 -21
  2. package/esm/index.es.js +4416 -2771
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.d.ts +8 -2
  5. package/esm/src/book-components/Chat/Chat/ChatInputArea.d.ts +0 -10
  6. package/esm/src/book-components/Chat/Chat/ChatInputUploadedFile.d.ts +10 -0
  7. package/esm/src/book-components/Chat/Chat/ChatToolCallModalContent.d.ts +46 -0
  8. package/esm/src/book-components/Chat/Chat/resolveRunBrowserToolCallDetailsState.d.ts +146 -0
  9. package/esm/src/book-components/Chat/Chat/useChatInputAreaAttachments.d.ts +1 -1
  10. package/esm/src/book-components/Chat/Chat/useChatInputAreaComposer.d.ts +39 -0
  11. package/esm/src/book-components/Chat/Chat/useChatPostprocessedMessages.d.ts +17 -0
  12. package/esm/src/book-components/Chat/Chat/useChatScrollState.d.ts +34 -0
  13. package/esm/src/book-components/Chat/Chat/useChatToolCallModalState.d.ts +61 -0
  14. package/esm/src/book-components/Chat/Chat/useChatToolCallState.d.ts +35 -0
  15. package/esm/src/book-components/Chat/LlmChat/useLlmChatMessageHandler.d.ts +58 -0
  16. package/esm/src/book-components/Chat/LlmChat/useLlmChatMessages.d.ts +29 -0
  17. package/esm/src/book-components/Chat/LlmChat/useLlmChatState.d.ts +53 -0
  18. package/esm/src/collection/pipeline-collection/constructors/createPipelineCollectionFromDirectory.d.ts +7 -1
  19. package/esm/src/llm-providers/agent/AgentLlmExecutionTools.d.ts +52 -0
  20. package/esm/src/llm-providers/openai/OpenAiAssistantExecutionTools.d.ts +128 -0
  21. package/esm/src/llm-providers/openai/OpenAiCompatibleExecutionTools.d.ts +3 -3
  22. package/esm/src/llm-providers/openai/OpenAiVectorStoreHandler.d.ts +54 -0
  23. package/esm/src/llm-providers/openai/utils/OpenAiCompatibleUnsupportedParameterRetrier.d.ts +29 -0
  24. package/esm/src/llm-providers/openai/utils/callOpenAiCompatibleChatModel.d.ts +28 -0
  25. package/esm/src/types/number_usd.d.ts +1 -1
  26. package/esm/src/types/string_parameter_name.d.ts +2 -2
  27. package/esm/src/types/typeAliases.d.ts +2 -2
  28. package/esm/src/version.d.ts +1 -1
  29. package/package.json +2 -2
  30. package/umd/index.umd.js +4605 -2960
  31. package/umd/index.umd.js.map +1 -1
  32. package/umd/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.d.ts +8 -2
  33. package/umd/src/book-components/Chat/Chat/ChatInputArea.d.ts +0 -10
  34. package/umd/src/book-components/Chat/Chat/ChatInputUploadedFile.d.ts +10 -0
  35. package/umd/src/book-components/Chat/Chat/ChatToolCallModalContent.d.ts +46 -0
  36. package/umd/src/book-components/Chat/Chat/resolveRunBrowserToolCallDetailsState.d.ts +146 -0
  37. package/umd/src/book-components/Chat/Chat/useChatInputAreaAttachments.d.ts +1 -1
  38. package/umd/src/book-components/Chat/Chat/useChatInputAreaComposer.d.ts +39 -0
  39. package/umd/src/book-components/Chat/Chat/useChatPostprocessedMessages.d.ts +17 -0
  40. package/umd/src/book-components/Chat/Chat/useChatScrollState.d.ts +34 -0
  41. package/umd/src/book-components/Chat/Chat/useChatToolCallModalState.d.ts +61 -0
  42. package/umd/src/book-components/Chat/Chat/useChatToolCallState.d.ts +35 -0
  43. package/umd/src/book-components/Chat/LlmChat/useLlmChatMessageHandler.d.ts +58 -0
  44. package/umd/src/book-components/Chat/LlmChat/useLlmChatMessages.d.ts +29 -0
  45. package/umd/src/book-components/Chat/LlmChat/useLlmChatState.d.ts +53 -0
  46. package/umd/src/collection/pipeline-collection/constructors/createPipelineCollectionFromDirectory.d.ts +7 -1
  47. package/umd/src/llm-providers/agent/AgentLlmExecutionTools.d.ts +52 -0
  48. package/umd/src/llm-providers/openai/OpenAiAssistantExecutionTools.d.ts +128 -0
  49. package/umd/src/llm-providers/openai/OpenAiCompatibleExecutionTools.d.ts +3 -3
  50. package/umd/src/llm-providers/openai/OpenAiVectorStoreHandler.d.ts +54 -0
  51. package/umd/src/llm-providers/openai/utils/OpenAiCompatibleUnsupportedParameterRetrier.d.ts +29 -0
  52. package/umd/src/llm-providers/openai/utils/callOpenAiCompatibleChatModel.d.ts +28 -0
  53. package/umd/src/types/number_usd.d.ts +1 -1
  54. package/umd/src/types/string_parameter_name.d.ts +2 -2
  55. package/umd/src/types/typeAliases.d.ts +2 -2
  56. package/umd/src/version.d.ts +1 -1
@@ -40,6 +40,134 @@ export declare class OpenAiAssistantExecutionTools extends OpenAiVectorStoreHand
40
40
  * Calls OpenAI API to use a chat model with streaming.
41
41
  */
42
42
  callChatModelStream(prompt: Prompt, onProgress: (chunk: ChatPromptResult) => void, options?: CallChatModelStreamOptions): Promise<ChatPromptResult>;
43
+ /**
44
+ * Logs one assistant chat call when verbose output is enabled.
45
+ */
46
+ private logAssistantChatCall;
47
+ /**
48
+ * Validates the subset of model requirements supported by OpenAI Assistants.
49
+ */
50
+ private assertSupportedAssistantModelRequirements;
51
+ /**
52
+ * Returns true when the prompt exposes callable tools that require the Runs API flow.
53
+ */
54
+ private hasAssistantTools;
55
+ /**
56
+ * Resolves the raw user-visible prompt content sent to the assistant.
57
+ */
58
+ private createAssistantRawPromptContent;
59
+ /**
60
+ * Builds the thread history plus the current user message for one assistant call.
61
+ */
62
+ private createAssistantThreadMessages;
63
+ /**
64
+ * Converts the existing prompt thread into OpenAI assistant thread messages.
65
+ */
66
+ private createAssistantThreadHistoryMessages;
67
+ /**
68
+ * Creates the current user message, including uploaded file attachments when present.
69
+ */
70
+ private createAssistantCurrentUserMessage;
71
+ /**
72
+ * Runs assistant calls with tools through the non-streaming Runs API.
73
+ */
74
+ private callChatModelStreamWithTools;
75
+ /**
76
+ * Builds the non-streaming assistant request payload used when tool calls are enabled.
77
+ */
78
+ private createAssistantToolRunRequest;
79
+ /**
80
+ * Starts the assistant run and keeps polling until the run completes or fails.
81
+ */
82
+ private executeAssistantToolRun;
83
+ /**
84
+ * Polls one assistant run, executing and submitting tool outputs when OpenAI requests them.
85
+ */
86
+ private waitForAssistantToolRun;
87
+ /**
88
+ * Executes all required assistant tool calls and submits their outputs back to OpenAI.
89
+ */
90
+ private submitAssistantRequiredToolOutputs;
91
+ /**
92
+ * Waits a bit and then fetches the latest assistant run status.
93
+ */
94
+ private retrieveAssistantRunAfterDelay;
95
+ /**
96
+ * Executes each function tool requested by the assistant and records progress snapshots.
97
+ */
98
+ private executeAssistantRequiredToolCalls;
99
+ /**
100
+ * Executes one function tool requested by the assistant.
101
+ */
102
+ private executeAssistantRequiredToolCall;
103
+ /**
104
+ * Creates the initial pending snapshot for one assistant tool call.
105
+ */
106
+ private createPendingAssistantToolCall;
107
+ /**
108
+ * Resolves the configured script tools for assistant tool execution.
109
+ */
110
+ private resolveAssistantScriptTools;
111
+ /**
112
+ * Executes the configured script tool for one assistant-requested function call.
113
+ */
114
+ private executeAssistantFunctionTool;
115
+ /**
116
+ * Finalizes one assistant tool-call snapshot after execution ends.
117
+ */
118
+ private createCompletedAssistantToolCall;
119
+ /**
120
+ * Extracts the latest assistant text response from a completed thread.
121
+ */
122
+ private extractCompletedAssistantTextContent;
123
+ /**
124
+ * Runs assistant calls without tools through the streaming Assistants API.
125
+ */
126
+ private callChatModelStreamWithoutTools;
127
+ /**
128
+ * Builds the streaming assistant request payload used when no tool execution flow is needed.
129
+ */
130
+ private createAssistantStreamingRequest;
131
+ /**
132
+ * Registers verbose stream diagnostics plus incremental text progress forwarding.
133
+ */
134
+ private attachAssistantStreamListeners;
135
+ /**
136
+ * Resolves the final visible assistant text from a streaming response.
137
+ */
138
+ private resolveAssistantStreamingResultContent;
139
+ /**
140
+ * Extracts the single text content block returned by the assistant stream.
141
+ */
142
+ private extractSingleAssistantTextContentBlock;
143
+ /**
144
+ * Rewrites file citation markers to use retrieved filenames instead of generic source labels.
145
+ */
146
+ private replaceAssistantFileCitationMarkers;
147
+ /**
148
+ * Returns one citation filename, caching OpenAI file lookups across annotations.
149
+ */
150
+ private retrieveAssistantCitationFilename;
151
+ /**
152
+ * Emits one assistant progress chunk with shared timing and prompt metadata.
153
+ */
154
+ private emitAssistantProgress;
155
+ /**
156
+ * Creates the final assistant prompt result with uncertain usage plus measured duration.
157
+ */
158
+ private createAssistantPromptResult;
159
+ /**
160
+ * Computes the usage payload for assistant responses.
161
+ */
162
+ private createAssistantUsage;
163
+ /**
164
+ * Wraps the final assistant prompt result in the standard exported JSON envelope.
165
+ */
166
+ private exportAssistantPromptResult;
167
+ /**
168
+ * Logs one assistant request payload when verbose output is enabled.
169
+ */
170
+ private logVerboseAssistantRequest;
43
171
  /**
44
172
  * Get an existing assistant tool wrapper
45
173
  */
@@ -49,11 +49,11 @@ export declare abstract class OpenAiCompatibleExecutionTools implements LlmExecu
49
49
  /**
50
50
  * Calls OpenAI compatible API to use a chat model with streaming.
51
51
  */
52
- callChatModelStream(prompt: Prompt, onProgress: (chunk: ChatPromptResult) => void, options?: CallChatModelStreamOptions): Promise<ChatPromptResult>;
52
+ callChatModelStream(prompt: Prompt, onProgress: (chunk: ChatPromptResult) => void, _options?: CallChatModelStreamOptions): Promise<ChatPromptResult>;
53
53
  /**
54
- * Internal method that handles parameter retry for chat model calls
54
+ * Executes one OpenAI request under the shared rate limiter and network retry policy.
55
55
  */
56
- private callChatModelWithRetry;
56
+ private executeRateLimitedRequest;
57
57
  /**
58
58
  * Calls OpenAI API to use a complete model.
59
59
  */
@@ -6,6 +6,8 @@ import type { OpenAiCompatibleExecutionToolsOptions } from './OpenAiCompatibleEx
6
6
  import { OpenAiExecutionTools } from './OpenAiExecutionTools';
7
7
  /**
8
8
  * Metadata for uploaded knowledge source files used for vector store diagnostics.
9
+ *
10
+ * @private internal utility of `OpenAiVectorStoreHandler`
9
11
  */
10
12
  type KnowledgeSourceUploadMetadata = {
11
13
  readonly fileId: string;
@@ -117,6 +119,58 @@ export declare abstract class OpenAiVectorStoreHandler extends OpenAiExecutionTo
117
119
  readonly totalBytes: number;
118
120
  readonly logLabel: string;
119
121
  }): Promise<TODO_any | null>;
122
+ /**
123
+ * Uploads the prepared knowledge-source files with bounded concurrency.
124
+ */
125
+ private uploadPreparedKnowledgeSourceFiles;
126
+ /**
127
+ * Reuses the shared iterator to upload one slice of knowledge-source files.
128
+ */
129
+ private uploadPreparedKnowledgeSourceFilesWorker;
130
+ /**
131
+ * Uploads one prepared knowledge-source file to OpenAI and records detailed diagnostics.
132
+ */
133
+ private uploadPreparedKnowledgeSourceFile;
134
+ /**
135
+ * Creates the OpenAI vector-store file batch for the uploaded knowledge-source files.
136
+ */
137
+ private createVectorStoreFileBatch;
138
+ /**
139
+ * Logs warnings for unexpected vector-store batch metadata right after creation.
140
+ */
141
+ private logCreatedVectorStoreFileBatchIssues;
142
+ /**
143
+ * Polls the created vector-store batch until ingestion completes, fails, or times out.
144
+ */
145
+ private pollVectorStoreFileBatchUntilSettled;
146
+ /**
147
+ * Resolves the next batch id to poll and retrieves the freshest OpenAI batch snapshot.
148
+ */
149
+ private retrieveVectorStoreFileBatchForPolling;
150
+ /**
151
+ * Normalizes the batch identifier returned by OpenAI and logs unusual id situations once.
152
+ */
153
+ private resolveVectorStoreFileBatchId;
154
+ /**
155
+ * Tracks observed polling progress and emits throttled verbose status logs.
156
+ */
157
+ private trackVectorStoreFileBatchProgress;
158
+ /**
159
+ * Emits deeper diagnostics when vector-store ingestion appears stalled for too long.
160
+ */
161
+ private maybeLogStalledVectorStoreFileBatch;
162
+ /**
163
+ * Handles terminal vector-store batch states that do not require further polling.
164
+ */
165
+ private handleVectorStoreFileBatchTerminalState;
166
+ /**
167
+ * Stops polling once the batch exceeds the configured timeout and handles optional cancellation.
168
+ */
169
+ private handleVectorStoreFileBatchTimeout;
170
+ /**
171
+ * Attempts to cancel a timed-out vector-store batch and logs any unusual id situation.
172
+ */
173
+ private cancelVectorStoreFileBatchAfterTimeout;
120
174
  /**
121
175
  * Creates a vector store and uploads knowledge sources, returning its ID.
122
176
  */
@@ -0,0 +1,29 @@
1
+ import type { ModelRequirements } from '../../../types/ModelRequirements';
2
+ import type { string_model_name } from '../../../types/typeAliases';
3
+ /**
4
+ * Tracks unsupported-parameter retries for one OpenAI-compatible model call.
5
+ *
6
+ * @private helper of `OpenAiCompatibleExecutionTools`
7
+ */
8
+ export declare class OpenAiCompatibleUnsupportedParameterRetrier {
9
+ private readonly isVerbose;
10
+ private readonly attemptStack;
11
+ private readonly retriedUnsupportedParameters;
12
+ constructor(isVerbose: boolean | undefined);
13
+ /**
14
+ * Resolves the next retry attempt after an unsupported-parameter failure or rethrows the final error.
15
+ */
16
+ resolveRetryOrThrow<TModelRequirements extends ModelRequirements>(options: {
17
+ readonly error: Error;
18
+ readonly modelName: string_model_name;
19
+ readonly currentModelRequirements: TModelRequirements;
20
+ }): TModelRequirements;
21
+ /**
22
+ * Rethrows the original error or wraps it with the collected retry history.
23
+ */
24
+ private throwWithAttemptHistory;
25
+ /**
26
+ * Creates the retry-history error message shared by all OpenAI-compatible model variants.
27
+ */
28
+ private createAttemptHistoryError;
29
+ }
@@ -0,0 +1,28 @@
1
+ import OpenAI from 'openai';
2
+ import type { AvailableModel } from '../../../execution/AvailableModel';
3
+ import type { ChatPromptResult } from '../../../execution/PromptResult';
4
+ import type { Usage } from '../../../execution/Usage';
5
+ import type { Prompt } from '../../../types/Prompt';
6
+ import type { string_markdown_text, string_title } from '../../../types/typeAliases';
7
+ import type { computeOpenAiUsage } from '../computeOpenAiUsage';
8
+ import type { OpenAiCompatibleExecutionToolsNonProxiedOptions } from '../OpenAiCompatibleExecutionToolsOptions';
9
+ /**
10
+ * Type describing dependencies needed by `callOpenAiCompatibleChatModel`.
11
+ */
12
+ type CallOpenAiCompatibleChatModelOptions = {
13
+ readonly prompt: Prompt;
14
+ readonly onProgress: (chunk: ChatPromptResult) => void;
15
+ readonly title: string_title & string_markdown_text;
16
+ readonly executionToolsOptions: OpenAiCompatibleExecutionToolsNonProxiedOptions;
17
+ readonly getClient: () => Promise<OpenAI>;
18
+ readonly executeRateLimitedRequest: <T>(requestFn: () => Promise<T>) => Promise<T>;
19
+ readonly computeUsage: (...args: Parameters<typeof computeOpenAiUsage>) => Usage;
20
+ readonly getDefaultChatModel: () => AvailableModel;
21
+ };
22
+ /**
23
+ * Calls the OpenAI-compatible chat model flow, including tool execution and unsupported-parameter retries.
24
+ *
25
+ * @private function of `OpenAiCompatibleExecutionTools`
26
+ */
27
+ export declare function callOpenAiCompatibleChatModel(options: CallOpenAiCompatibleChatModelOptions): Promise<ChatPromptResult>;
28
+ export {};
@@ -2,7 +2,7 @@
2
2
  * Semantic helper for US Dollars
3
3
  */
4
4
  export type number_usd = number;
5
- export type { number_bytes, number_gigabytes, number_kilobytes, number_megabytes, number_terabytes } from './number_bytes';
5
+ export type { number_bytes, number_gigabytes, number_kilobytes, number_megabytes, number_terabytes, } from './number_bytes';
6
6
  export type { number_id, number_linecol_number, number_tokens } from './number_id';
7
7
  export type { number_likeness } from './number_likeness';
8
8
  export type { number_days, number_hours, number_milliseconds, number_minutes, number_months, number_seconds, number_weeks, number_years, } from './number_milliseconds';
@@ -4,10 +4,10 @@
4
4
  * The concrete type declarations live in focused modules under `src/types`.
5
5
  */
6
6
  export type { InputParameters, Parameters, ReservedParameters } from './Parameters';
7
- export type { string_agent_hash, string_agent_name, string_agent_name_in_book, string_agent_permanent_id } from './string_agent_name';
7
+ export type { string_agent_hash, string_agent_name, string_agent_name_in_book, string_agent_permanent_id, } from './string_agent_name';
8
8
  export type { string_business_category_name } from './string_business_category_name';
9
9
  export type { string_model_name } from './string_model_name';
10
- export type { string_name, string_parameter_name, string_parameter_value, string_reserved_parameter_name } from './string_name';
10
+ export type { string_name, string_parameter_name, string_parameter_value, string_reserved_parameter_name, } from './string_name';
11
11
  export type { string_page, string_char } from './string_page';
12
12
  export type { string_chat_prompt, string_completion_prompt, string_prompt, string_prompt_image, string_system_message, string_template, string_text_prompt, } from './string_prompt';
13
13
  export type { string_persona_description, string_model_description } from './string_persona_description';
@@ -6,10 +6,10 @@ export type { string_base64, string_base_url, string_data_url, string_domain, st
6
6
  export type { string_knowledge_source_content, string_knowledge_source_link } from './string_knowledge_source_content';
7
7
  export type { string_attribute, string_attribute_value_scope, string_color, string_javascript_name, string_legal_entity, string_license, string_person_firstname, string_person_fullname, string_person_lastname, string_person_profile, string_postprocessing_function_name, string_translate_language, string_translate_name, string_translate_name_not_normalized, } from './string_person_fullname';
8
8
  export type { id, string_app_id, string_date_iso8601, string_language, string_license_token, string_password, string_pgp_key, string_promptbook_token, string_ssh_key, string_token, string_user_id, task_id, } from './string_token';
9
- export type { number_id, number_linecol_number, number_tokens, } from './number_id';
9
+ export type { number_id, number_linecol_number, number_tokens } from './number_id';
10
10
  export type { number_integer, number_negative, number_port, number_positive } from './number_positive';
11
11
  export type { number_model_temperature, number_percent, number_seed } from './number_percent';
12
12
  export type { number_likeness } from './number_likeness';
13
13
  export type { number_weeks, number_days, number_hours, number_milliseconds, number_minutes, number_months, number_seconds, number_years, } from './number_milliseconds';
14
- export type { number_bytes, number_gigabytes, number_kilobytes, number_megabytes, number_terabytes } from './number_bytes';
14
+ export type { number_bytes, number_gigabytes, number_kilobytes, number_megabytes, number_terabytes, } from './number_bytes';
15
15
  export type { number_usd } from './number_usd';
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
15
15
  export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
16
16
  /**
17
17
  * Represents the version string of the Promptbook engine.
18
- * It follows semantic versioning (e.g., `0.112.0-38`).
18
+ * It follows semantic versioning (e.g., `0.112.0-40`).
19
19
  *
20
20
  * @generated
21
21
  */
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@promptbook/remote-server",
3
- "version": "0.112.0-39",
3
+ "version": "0.112.0-41",
4
4
  "description": "Promptbook: Turn your company's scattered knowledge into AI ready books",
5
5
  "private": false,
6
6
  "sideEffects": false,
@@ -98,7 +98,7 @@
98
98
  "module": "./esm/index.es.js",
99
99
  "typings": "./esm/typings/src/_packages/remote-server.index.d.ts",
100
100
  "peerDependencies": {
101
- "@promptbook/core": "0.112.0-39"
101
+ "@promptbook/core": "0.112.0-41"
102
102
  },
103
103
  "dependencies": {
104
104
  "@mozilla/readability": "0.6.0",