@promptbook/node 0.101.0-17 → 0.101.0-19
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/esm/index.es.js +46 -39
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/components.index.d.ts +6 -0
- package/esm/typings/src/_packages/core.index.d.ts +2 -0
- package/esm/typings/src/_packages/types.index.d.ts +4 -0
- package/esm/typings/src/book-2.0/utils/generatePlaceholderAgentProfileImageUrl.d.ts +3 -0
- package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/MockedChat.d.ts +7 -1
- package/esm/typings/src/book-components/Chat/LlmChat/LlmChatProps.d.ts +13 -0
- package/esm/typings/src/book-components/Chat/hooks/index.d.ts +2 -0
- package/esm/typings/src/book-components/Chat/hooks/useChatAutoScroll.d.ts +41 -0
- package/esm/typings/src/book-components/Chat/hooks/useSendMessageToLlmChat.d.ts +44 -0
- package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +1 -2
- package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +1 -3
- package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForWizardOrCli.d.ts +1 -2
- package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts +8 -2
- package/esm/typings/src/llm-providers/_multiple/MultipleLlmExecutionTools.d.ts +5 -13
- package/esm/typings/src/llm-providers/_multiple/getSingleLlmExecutionTools.d.ts +11 -0
- package/esm/typings/src/llm-providers/_multiple/joinLlmExecutionTools.d.ts +2 -1
- package/esm/typings/src/llm-providers/mocked/$fakeTextToExpectations.d.ts +1 -0
- package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +2 -5
- package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +2 -6
- package/esm/typings/src/scrapers/markdown/MarkdownScraper.d.ts +1 -2
- package/esm/typings/src/version.d.ts +1 -1
- package/package.json +2 -2
- package/umd/index.umd.js +46 -39
- package/umd/index.umd.js.map +1 -1
- package/esm/typings/src/llm-providers/mocked/test/joker.test.d.ts +0 -4
- package/esm/typings/src/llm-providers/mocked/test/mocked-chat.test.d.ts +0 -5
- package/esm/typings/src/llm-providers/mocked/test/mocked-completion.test.d.ts +0 -4
- package/esm/typings/src/scripting/_test/postprocessing.test.d.ts +0 -1
- package/esm/typings/src/{cli/test/ptbk.test.d.ts → llm-providers/_common/utils/removeUnsupportedModelRequirements.test.d.ts} +0 -0
package/esm/typings/src/_packages/components.index.d.ts
CHANGED
@@ -15,6 +15,9 @@ import { BookEditor } from '../book-components/BookEditor/BookEditor';
 import { DEFAULT_BOOK_FONT_CLASS } from '../book-components/BookEditor/config';
 import { Chat } from '../book-components/Chat/Chat/Chat';
 import type { ChatProps } from '../book-components/Chat/Chat/ChatProps';
+import { useChatAutoScroll } from '../book-components/Chat/hooks/useChatAutoScroll';
+import type { SendMessageToLlmChatFunction } from '../book-components/Chat/hooks/useSendMessageToLlmChat';
+import { useSendMessageToLlmChat } from '../book-components/Chat/hooks/useSendMessageToLlmChat';
 import { LlmChat } from '../book-components/Chat/LlmChat/LlmChat';
 import type { LlmChatProps } from '../book-components/Chat/LlmChat/LlmChatProps';
 import type { ChatMessage } from '../book-components/Chat/types/ChatMessage';
@@ -44,6 +47,9 @@ export { BookEditor };
 export { DEFAULT_BOOK_FONT_CLASS };
 export { Chat };
 export type { ChatProps };
+export { useChatAutoScroll };
+export type { SendMessageToLlmChatFunction };
+export { useSendMessageToLlmChat };
 export { LlmChat };
 export type { LlmChatProps };
 export type { ChatMessage };
package/esm/typings/src/_packages/core.index.d.ts
CHANGED
@@ -120,6 +120,7 @@ import { createLlmToolsFromConfiguration } from '../llm-providers/_common/regist
 import { cacheLlmTools } from '../llm-providers/_common/utils/cache/cacheLlmTools';
 import { countUsage } from '../llm-providers/_common/utils/count-total-usage/countUsage';
 import { limitTotalUsage } from '../llm-providers/_common/utils/count-total-usage/limitTotalUsage';
+import { getSingleLlmExecutionTools } from '../llm-providers/_multiple/getSingleLlmExecutionTools';
 import { joinLlmExecutionTools } from '../llm-providers/_multiple/joinLlmExecutionTools';
 import { MultipleLlmExecutionTools } from '../llm-providers/_multiple/MultipleLlmExecutionTools';
 import { AgentLlmExecutionTools } from '../llm-providers/agent/AgentLlmExecutionTools';
@@ -291,6 +292,7 @@ export { createLlmToolsFromConfiguration };
 export { cacheLlmTools };
 export { countUsage };
 export { limitTotalUsage };
+export { getSingleLlmExecutionTools };
 export { joinLlmExecutionTools };
 export { MultipleLlmExecutionTools };
 export { AgentLlmExecutionTools };
package/esm/typings/src/_packages/types.index.d.ts
CHANGED
@@ -13,6 +13,8 @@ import type { MockedChatDelayConfig } from '../book-components/AvatarProfile/Ava
 import type { MockedChatProps } from '../book-components/AvatarProfile/AvatarProfile/MockedChat';
 import type { BookEditorProps } from '../book-components/BookEditor/BookEditor';
 import type { ChatProps } from '../book-components/Chat/Chat/ChatProps';
+import type { ChatAutoScrollConfig } from '../book-components/Chat/hooks/useChatAutoScroll';
+import type { SendMessageToLlmChatFunction } from '../book-components/Chat/hooks/useSendMessageToLlmChat';
 import type { LlmChatProps } from '../book-components/Chat/LlmChat/LlmChatProps';
 import type { ChatMessage } from '../book-components/Chat/types/ChatMessage';
 import type { ChatParticipant } from '../book-components/Chat/types/ChatParticipant';
@@ -340,6 +342,8 @@ export type { MockedChatDelayConfig };
 export type { MockedChatProps };
 export type { BookEditorProps };
 export type { ChatProps };
+export type { ChatAutoScrollConfig };
+export type { SendMessageToLlmChatFunction };
 export type { LlmChatProps };
 export type { ChatMessage };
 export type { ChatParticipant };
package/esm/typings/src/book-2.0/utils/generatePlaceholderAgentProfileImageUrl.d.ts
CHANGED
@@ -8,3 +8,6 @@ import { string_agent_name, string_url_image } from '../../types/typeAliases';
  * @public exported from `@promptbook/core`
  */
 export declare function generatePlaceholderAgentProfileImageUrl(agentName?: string_agent_name): string_url_image;
+/**
+ * TODO: [🤹] Figure out best placeholder image generator https://i.pravatar.cc/1000?u=568
+ */
package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/MockedChat.d.ts
CHANGED
@@ -31,7 +31,13 @@ export type MockedChatDelayConfig = {
  *
  * @public exported from `@promptbook/components`
  */
-export type MockedChatProps = ChatProps & {
+export type MockedChatProps = Omit<ChatProps, 'onReset' | /*'onMessage' | */ 'onUseTemplate' | 'isVoiceRecognitionButtonShown'> & {
+    /**
+     * Whether to show the reset button
+     *
+     * @default false
+     */
+    isResetShown?: boolean;
     /**
      * Optional delays configuration for emulating typing behavior
      */
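
A minimal type-level sketch of the `MockedChatProps` change; the import source is assumed from the `@public exported from @promptbook/components` annotation above:

```ts
import type { MockedChatProps } from '@promptbook/components'; // <- assumed export location

// `onReset`, `onUseTemplate` and `isVoiceRecognitionButtonShown` are now omitted from the props;
// the reset button is toggled declaratively instead:
const propsSketch: Partial<MockedChatProps> = {
    isResetShown: true, // <- @default false
};
```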
package/esm/typings/src/book-components/Chat/LlmChat/LlmChatProps.d.ts
CHANGED
@@ -2,6 +2,7 @@ import type { LlmExecutionTools } from '../../../execution/LlmExecutionTools';
 import type { ChatProps } from '../Chat/ChatProps';
 import type { ChatMessage } from '../types/ChatMessage';
 import type { ChatParticipant } from '../types/ChatParticipant';
+import type { SendMessageToLlmChatFunction } from '../hooks/useSendMessageToLlmChat';
 /**
  * Props for LlmChat component, derived from ChatProps but with LLM-specific modifications
  *
@@ -17,8 +18,20 @@ export type LlmChatProps = Omit<ChatProps, 'messages' | 'onMessage' | 'onChange'
      * When provided, the conversation will be saved and restored from localStorage
      */
     readonly persistenceKey?: string;
+    /**
+     * Optional initial messages to pre-populate the chat.
+     * - They can include both USER and ASSISTANT messages.
+     * - They are only used when there is no persisted conversation (persistence takes precedence).
+     * - They are not automatically persisted until the user sends a new message.
+     */
+    readonly initialMessages?: ReadonlyArray<ChatMessage>;
     /**
      * Called when the chat state changes (messages, participants, etc.)
      */
     onChange?(messages: ReadonlyArray<ChatMessage>, participants: ReadonlyArray<ChatParticipant>): void;
+    /**
+     * Optional external sendMessage function produced by useSendMessageToLlmChat hook.
+     * When provided, LlmChat will attach its internal handler to it (no React context needed).
+     */
+    readonly sendMessage?: SendMessageToLlmChatFunction;
 };
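
A hedged usage sketch combining the two new props (`initialMessages`, `sendMessage`) with the existing `persistenceKey`. The `llmTools` prop and the hook come from this diff; the import paths and the surrounding component are assumptions:

```tsx
import type { LlmExecutionTools } from '@promptbook/types'; // <- assumed export location
import { LlmChat, useSendMessageToLlmChat } from '@promptbook/components';
import type { ChatMessage } from '@promptbook/components';

export function SupportChat({ llmTools }: { llmTools: LlmExecutionTools }) {
    // External trigger for the chat - no React context needed
    const sendMessage = useSendMessageToLlmChat();

    // Pre-seeded USER and ASSISTANT messages (the ChatMessage shape is not shown in this diff)
    const initialMessages: ReadonlyArray<ChatMessage> = [];

    return (
        <>
            <button onClick={() => sendMessage('What can you do?')}>Quick question</button>
            <LlmChat
                llmTools={llmTools}
                persistenceKey="support-chat" // <- conversation saved/restored from localStorage
                initialMessages={initialMessages} // <- used only when nothing is persisted yet
                sendMessage={sendMessage} // <- LlmChat attaches its internal handler to this
            />
        </>
    );
}
```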
package/esm/typings/src/book-components/Chat/hooks/useChatAutoScroll.d.ts
ADDED
@@ -0,0 +1,41 @@
+/**
+ * Configuration for the auto-scroll behavior
+ */
+export type ChatAutoScrollConfig = {
+    /**
+     * Threshold in pixels from bottom to consider as "at bottom"
+     * @default 100
+     */
+    bottomThreshold?: number;
+    /**
+     * Whether to use smooth scrolling
+     * @default true
+     */
+    smoothScroll?: boolean;
+    /**
+     * Delay before checking scroll position after new messages (in milliseconds)
+     * @default 100
+     */
+    scrollCheckDelay?: number;
+};
+/**
+ * Hook for managing auto-scroll behavior in chat components
+ *
+ * This hook provides:
+ * - Automatic scrolling to bottom when new messages arrive (if user is already at bottom)
+ * - Detection of when user scrolls away from bottom
+ * - Scroll-to-bottom functionality with smooth animation
+ * - Mobile-optimized scrolling behavior
+ *
+ * @public exported from `@promptbook/components`
+ */
+export declare function useChatAutoScroll(config?: ChatAutoScrollConfig): {
+    isAutoScrolling: boolean;
+    chatMessagesRef: (element: HTMLDivElement | null) => void;
+    handleScroll: (event: React.UIEvent<HTMLDivElement>) => void;
+    handleMessagesChange: () => void;
+    scrollToBottom: () => void;
+    enableAutoScroll: () => void;
+    disableAutoScroll: () => void;
+    isMobile: boolean;
+};
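
A sketch of wiring the hook's return values into a scrollable container. Only the hook's API comes from the typings above; the markup and message shape are illustrative:

```tsx
import { useEffect } from 'react';
import { useChatAutoScroll } from '@promptbook/components';

export function MessageList({ messages }: { messages: ReadonlyArray<{ id: string; text: string }> }) {
    const { chatMessagesRef, handleScroll, handleMessagesChange, isAutoScrolling, scrollToBottom } =
        useChatAutoScroll({ bottomThreshold: 150 });

    // Let the hook re-check the scroll position whenever the message list changes
    useEffect(() => {
        handleMessagesChange();
    }, [messages, handleMessagesChange]);

    return (
        <div ref={chatMessagesRef} onScroll={handleScroll} style={{ overflowY: 'auto', maxHeight: 400 }}>
            {messages.map(({ id, text }) => (
                <p key={id}>{text}</p>
            ))}
            {!isAutoScrolling && <button onClick={scrollToBottom}>Jump to latest</button>}
        </div>
    );
}
```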
package/esm/typings/src/book-components/Chat/hooks/useSendMessageToLlmChat.d.ts
ADDED
@@ -0,0 +1,44 @@
+/**
+ * Function type for sending a message to LlmChat.
+ *
+ * Implementation detail: The returned function is "attachable".
+ * LlmChat will call the internal `_attach` method (if present) to bind
+ * its real message handler. Messages sent before attachment are queued
+ * and flushed after attachment.
+ *
+ * @public exported from `@promptbook/components`
+ */
+export type SendMessageToLlmChatFunction = {
+    /**
+     * Send a message to the bound LlmChat instance (or queue it until attached).
+     */
+    (message: string): void;
+    /**
+     * Internal method used by the <LlmChat/> component to attach its handler.
+     * Not intended for consumer usage.
+     *
+     * @internal
+     */
+    _attach?: (handler: (message: string) => Promise<void> | void) => void;
+};
+/**
+ * Hook to create a sendMessage function for an <LlmChat/> component WITHOUT needing any React Context.
+ *
+ * Usage pattern:
+ * ```tsx
+ * const sendMessage = useSendMessageToLlmChat();
+ * return (
+ *     <>
+ *         <button onClick={() => sendMessage('Hello!')}>Hello</button>
+ *         <LlmChat llmTools={llmTools} sendMessage={sendMessage} />
+ *     </>
+ * );
+ * ```
+ *
+ * - No provider wrapping needed.
+ * - Safe to call before the <LlmChat/> mounts (messages will be queued).
+ * - Keeps DRY by letting <LlmChat/> reuse its internal `handleMessage` logic.
+ *
+ * @public exported from `@promptbook/components`
+ */
+export declare function useSendMessageToLlmChat(): SendMessageToLlmChatFunction;
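
The "attachable function" contract described above can be restated as a standalone sketch. This is a hypothetical re-implementation of the queue-and-flush behavior, not the package's actual internals:

```ts
import type { SendMessageToLlmChatFunction } from '@promptbook/components';

function createAttachableSendMessage(): SendMessageToLlmChatFunction {
    let attachedHandler: ((message: string) => Promise<void> | void) | null = null;
    const queuedMessages: Array<string> = [];

    const sendMessage = ((message: string) => {
        if (attachedHandler !== null) {
            void attachedHandler(message);
        } else {
            queuedMessages.push(message); // <- queued until <LlmChat/> mounts and attaches
        }
    }) as SendMessageToLlmChatFunction;

    sendMessage._attach = (handler) => {
        attachedHandler = handler;
        // Flush messages that were sent before attachment, in order
        for (const message of queuedMessages.splice(0)) {
            void handler(message);
        }
    };

    return sendMessage;
}
```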
package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts
CHANGED
@@ -1,8 +1,7 @@
 import type { PartialDeep, Promisable, ReadonlyDeep, WritableDeep } from 'type-fest';
 import type { PipelineJson } from '../../pipeline/PipelineJson/PipelineJson';
 import type { TaskJson } from '../../pipeline/PipelineJson/TaskJson';
-import type { Parameters } from '../../types/typeAliases';
-import type { string_parameter_name } from '../../types/typeAliases';
+import type { Parameters, string_parameter_name } from '../../types/typeAliases';
 import type { TODO_string } from '../../utils/organization/TODO_string';
 import type { ExecutionReportJson } from '../execution-report/ExecutionReportJson';
 import type { PipelineExecutorResult } from '../PipelineExecutorResult';
package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts
CHANGED
@@ -1,9 +1,7 @@
 import type { ReadonlyDeep } from 'type-fest';
 import type { PipelineJson } from '../../pipeline/PipelineJson/PipelineJson';
 import type { TaskJson } from '../../pipeline/PipelineJson/TaskJson';
-import type { Parameters } from '../../types/typeAliases';
-import type { string_markdown } from '../../types/typeAliases';
-import type { string_parameter_value } from '../../types/typeAliases';
+import type { Parameters, string_markdown, string_parameter_value } from '../../types/typeAliases';
 import type { ExecutionTools } from '../ExecutionTools';
 /**
  * Options for retrieving relevant knowledge for a specific task during pipeline execution.
package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForWizardOrCli.d.ts
CHANGED
@@ -1,7 +1,6 @@
 import { Promisable } from 'type-fest';
 import type { Identification } from '../../../remote-server/socket-types/_subtypes/Identification';
-import type { string_app_id } from '../../../types/typeAliases';
-import type { string_url } from '../../../types/typeAliases';
+import type { string_app_id, string_url } from '../../../types/typeAliases';
 import type { really_any } from '../../../utils/organization/really_any';
 import type { CacheLlmToolsOptions } from '../utils/cache/CacheLlmToolsOptions';
 import type { LlmExecutionToolsWithTotalUsage } from '../utils/count-total-usage/LlmExecutionToolsWithTotalUsage';
package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts
CHANGED
@@ -1,4 +1,4 @@
-import type { string_user_id } from '../../../types/typeAliases';
+import type { string_markdown_text, string_mime_type_with_wildcard, string_user_id } from '../../../types/typeAliases';
 import { MultipleLlmExecutionTools } from '../../_multiple/MultipleLlmExecutionTools';
 import type { LlmToolsConfiguration } from './LlmToolsConfiguration';
 /**
@@ -7,12 +7,18 @@ import type { LlmToolsConfiguration } from './LlmToolsConfiguration';
  * @private internal type for `$provideLlmToolsFromEnv` and `$provideLlmToolsForTestingAndScriptsAndPlayground`
  */
 export type CreateLlmToolsFromConfigurationOptions = {
+    /**
+     * Title of the LLM tools
+     *
+     * @default 'LLM Tools from Configuration'
+     */
+    readonly title?: string_mime_type_with_wildcard & string_markdown_text;
     /**
      * This will will be passed to the created `LlmExecutionTools`
      *
      * @default false
      */
-    isVerbose?: boolean;
+    readonly isVerbose?: boolean;
     /**
      * Identifier of the end user
      *
package/esm/typings/src/llm-providers/_multiple/MultipleLlmExecutionTools.d.ts
CHANGED
@@ -1,16 +1,8 @@
 import type { AvailableModel } from '../../execution/AvailableModel';
 import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
-import type { ChatPromptResult } from '../../execution/PromptResult';
-import type { CompletionPromptResult } from '../../execution/PromptResult';
-import type { EmbeddingPromptResult } from '../../execution/PromptResult';
-import type { PromptResult } from '../../execution/PromptResult';
-import type { ChatPrompt } from '../../types/Prompt';
-import type { CompletionPrompt } from '../../types/Prompt';
-import type { EmbeddingPrompt } from '../../types/Prompt';
-import type { Prompt } from '../../types/Prompt';
-import type { string_markdown } from '../../types/typeAliases';
-import type { string_markdown_text } from '../../types/typeAliases';
-import type { string_title } from '../../types/typeAliases';
+import type { ChatPromptResult, CompletionPromptResult, EmbeddingPromptResult, PromptResult } from '../../execution/PromptResult';
+import type { ChatPrompt, CompletionPrompt, EmbeddingPrompt, Prompt } from '../../types/Prompt';
+import type { string_markdown, string_markdown_text, string_title } from '../../types/typeAliases';
 /**
  * Multiple LLM Execution Tools is a proxy server that uses multiple execution tools internally and exposes the executor interface externally.
  *
@@ -18,6 +10,7 @@ import type { string_title } from '../../types/typeAliases';
  * @public exported from `@promptbook/core`
  */
 export declare class MultipleLlmExecutionTools implements LlmExecutionTools {
+    readonly title: string_title & string_markdown_text;
     /**
      * Array of execution tools in order of priority
      */
@@ -25,8 +18,7 @@ export declare class MultipleLlmExecutionTools implements LlmExecutionTools {
     /**
      * Gets array of execution tools in order of priority
     */
-    constructor(...llmExecutionTools: ReadonlyArray<LlmExecutionTools>);
-    get title(): string_title & string_markdown_text;
+    constructor(title: string_title & string_markdown_text, ...llmExecutionTools: ReadonlyArray<LlmExecutionTools>);
     get description(): string_markdown;
     get profile(): {
         name: string;
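
This is a breaking signature change: `title` moves from a hardcoded getter to the first constructor argument. A migration sketch (the provider instances are placeholders, not part of this diff):

```ts
import { MultipleLlmExecutionTools } from '@promptbook/core';
import type { LlmExecutionTools } from '@promptbook/types'; // <- assumed export location

declare const openAiTools: LlmExecutionTools; // <- placeholder provider instances
declare const anthropicTools: LlmExecutionTools;

// 0.101.0-17: new MultipleLlmExecutionTools(openAiTools, anthropicTools)
//             (title was always 'Multiple LLM Providers')
// 0.101.0-19: the title is explicit and comes first
const tools = new MultipleLlmExecutionTools('My LLM providers', openAiTools, anthropicTools);
```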
package/esm/typings/src/llm-providers/_multiple/getSingleLlmExecutionTools.d.ts
ADDED
@@ -0,0 +1,11 @@
+import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
+import { MultipleLlmExecutionTools } from './MultipleLlmExecutionTools';
+/**
+ * Just returns the given `LlmExecutionTools` or joins multiple into one
+ *
+ * @public exported from `@promptbook/core`
+ */
+export declare function getSingleLlmExecutionTools(oneOrMoreLlmExecutionTools: undefined | LlmExecutionTools | ReadonlyArray<LlmExecutionTools>): LlmExecutionTools | MultipleLlmExecutionTools;
+/**
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
+ */
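
The behavior, restated from the UMD implementation further below (placeholder instances again; import locations assumed):

```ts
import { getSingleLlmExecutionTools } from '@promptbook/core';
import type { LlmExecutionTools } from '@promptbook/types'; // <- assumed export location

declare const openAiTools: LlmExecutionTools;
declare const anthropicTools: LlmExecutionTools;

getSingleLlmExecutionTools(openAiTools); // -> openAiTools itself (a single tool passes through)
getSingleLlmExecutionTools([openAiTools]); // -> openAiTools itself (a single-element array unwraps)
getSingleLlmExecutionTools([openAiTools, anthropicTools]); // -> MultipleLlmExecutionTools joining both
getSingleLlmExecutionTools(undefined); // -> joins zero tools (the UMD source warns for this case)
```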
package/esm/typings/src/llm-providers/_multiple/joinLlmExecutionTools.d.ts
CHANGED
@@ -1,4 +1,5 @@
 import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
+import { string_markdown_text, string_title } from '../../types/typeAliases';
 import { MultipleLlmExecutionTools } from './MultipleLlmExecutionTools';
 /**
  * Joins multiple LLM Execution Tools into one
@@ -15,7 +16,7 @@ import { MultipleLlmExecutionTools } from './MultipleLlmExecutionTools';
  *
  * @public exported from `@promptbook/core`
  */
-export declare function joinLlmExecutionTools(...llmExecutionTools: ReadonlyArray<LlmExecutionTools>): MultipleLlmExecutionTools;
+export declare function joinLlmExecutionTools(title: string_title & string_markdown_text, ...llmExecutionTools: ReadonlyArray<LlmExecutionTools>): MultipleLlmExecutionTools;
 /**
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
 */
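
Another breaking change for direct callers: the title is now the first argument.

```ts
import { joinLlmExecutionTools } from '@promptbook/core';
import type { LlmExecutionTools } from '@promptbook/types'; // <- assumed export location

declare const primaryTools: LlmExecutionTools; // <- placeholders
declare const fallbackTools: LlmExecutionTools;

// 0.101.0-17: joinLlmExecutionTools(primaryTools, fallbackTools)
// 0.101.0-19:
const joined = joinLlmExecutionTools('Primary with fallback', primaryTools, fallbackTools);
```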
package/esm/typings/src/llm-providers/mocked/$fakeTextToExpectations.d.ts
CHANGED
@@ -11,5 +11,6 @@ import type { string_postprocessing_function_name } from '../../types/typeAliase
  */
 export declare function $fakeTextToExpectations(expectations: Expectations, postprocessingFunctionNames?: ReadonlyArray<string_postprocessing_function_name>): Promise<string>;
 /**
+ * TODO: Do not use LoremIpsum, but use some faked text that looks more human-promptbook-like
  * TODO: [๐] Unite object for expecting amount and format - use here also a format
  */
package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts
CHANGED
@@ -1,12 +1,9 @@
 import type { AvailableModel } from '../../execution/AvailableModel';
 import type { CommonToolsOptions } from '../../execution/CommonToolsOptions';
 import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
-import type { ChatPromptResult } from '../../execution/PromptResult';
-import type { CompletionPromptResult } from '../../execution/PromptResult';
+import type { ChatPromptResult, CompletionPromptResult } from '../../execution/PromptResult';
 import type { Prompt } from '../../types/Prompt';
-import type { string_markdown } from '../../types/typeAliases';
-import type { string_markdown_text } from '../../types/typeAliases';
-import type { string_title } from '../../types/typeAliases';
+import type { string_markdown, string_markdown_text, string_title } from '../../types/typeAliases';
 /**
  * Mocked execution Tools for just echoing the requests for testing purposes.
  *
package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts
CHANGED
@@ -1,13 +1,9 @@
 import type { AvailableModel } from '../../execution/AvailableModel';
 import type { CommonToolsOptions } from '../../execution/CommonToolsOptions';
 import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
-import type { ChatPromptResult } from '../../execution/PromptResult';
-import type { CompletionPromptResult } from '../../execution/PromptResult';
-import type { EmbeddingPromptResult } from '../../execution/PromptResult';
+import type { ChatPromptResult, CompletionPromptResult, EmbeddingPromptResult } from '../../execution/PromptResult';
 import type { Prompt } from '../../types/Prompt';
-import type { string_markdown } from '../../types/typeAliases';
-import type { string_markdown_text } from '../../types/typeAliases';
-import type { string_title } from '../../types/typeAliases';
+import type { string_markdown, string_markdown_text, string_title } from '../../types/typeAliases';
 /**
  * Mocked execution Tools for just faking expected responses for testing purposes
  *
package/esm/typings/src/scrapers/markdown/MarkdownScraper.d.ts
CHANGED
@@ -1,6 +1,5 @@
 import type { KnowledgePiecePreparedJson } from '../../pipeline/PipelineJson/KnowledgePieceJson';
-import type { Scraper } from '../_common/Scraper';
-import type { ScraperSourceHandler } from '../_common/Scraper';
+import type { Scraper, ScraperSourceHandler } from '../_common/Scraper';
 import type { ExecutionTools } from '../../execution/ExecutionTools';
 import type { PrepareAndScrapeOptions } from '../../prepare/PrepareAndScrapeOptions';
 import type { ScraperAndConverterMetadata } from '../_common/register/ScraperAndConverterMetadata';
package/esm/typings/src/version.d.ts
CHANGED
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
 export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
 /**
  * Represents the version string of the Promptbook engine.
- * It follows semantic versioning (e.g., `0.101.0-
+ * It follows semantic versioning (e.g., `0.101.0-18`).
  *
  * @generated
  */
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
     "name": "@promptbook/node",
-    "version": "0.101.0-17",
+    "version": "0.101.0-19",
     "description": "Promptbook: Run AI apps in plain human language across multiple models and platforms",
     "private": false,
     "sideEffects": false,
@@ -93,7 +93,7 @@
     "module": "./esm/index.es.js",
     "typings": "./esm/typings/src/_packages/node.index.d.ts",
     "peerDependencies": {
-        "@promptbook/core": "0.101.0-17"
+        "@promptbook/core": "0.101.0-19"
     },
     "dependencies": {
         "colors": "1.4.0",
package/umd/index.umd.js
CHANGED
@@ -45,7 +45,7 @@
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.101.0-17';
+const PROMPTBOOK_ENGINE_VERSION = '0.101.0-19';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [๐] Ignore a discrepancy between file name and entity name
@@ -3027,6 +3027,25 @@
     return mappedParameters;
 }
 
+/**
+ * Takes an item or an array of items and returns an array of items
+ *
+ * 1) Any item except array and undefined returns array with that one item (also null)
+ * 2) Undefined returns empty array
+ * 3) Array returns itself
+ *
+ * @private internal utility
+ */
+function arrayableToArray(input) {
+    if (input === undefined) {
+        return [];
+    }
+    if (input instanceof Array) {
+        return input;
+    }
+    return [input];
+}
+
 /**
  * Predefined profiles for LLM providers to maintain consistency across the application
  * These profiles represent each provider as a virtual persona in chat interfaces
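
The helper's contract, restated as assertions (it is `@private`, shown here only to clarify what `getSingleLlmExecutionTools` below relies on):

```ts
arrayableToArray(undefined); // -> []
arrayableToArray(null); // -> [null] (any non-array, non-undefined item is wrapped, even null)
arrayableToArray('one'); // -> ['one']
arrayableToArray([1, 2]); // -> [1, 2] (arrays pass through unchanged)
```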
@@ -3107,12 +3126,10 @@
     /**
      * Gets array of execution tools in order of priority
      */
-    constructor(...llmExecutionTools) {
+    constructor(title, ...llmExecutionTools) {
+        this.title = title;
         this.llmExecutionTools = llmExecutionTools;
     }
-    get title() {
-        return 'Multiple LLM Providers';
-    }
     get description() {
         const innerModelsTitlesAndDescriptions = this.llmExecutionTools
             .map(({ title, description }, index) => {
@@ -3198,7 +3215,7 @@
                     return await llmExecutionTools.callEmbeddingModel(prompt);
                 // <- case [๐ค]:
                 default:
-                    throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}"`);
+                    throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}" in ${llmExecutionTools.title}`);
             }
         }
         catch (error) {
@@ -3219,7 +3236,7 @@
             // 2) AnthropicClaude throw PipelineExecutionError: Parameter `{knowledge}` is not defined
             // 3) ...
             spaceTrim__default["default"]((block) => `
-                All execution tools failed:
+                All execution tools of ${this.title} failed:
 
                 ${block(errors
                     .map(({ error, llmExecutionTools }, i) => `${i + 1}) **${llmExecutionTools.title}** thrown **${error.name || 'Error'}:** ${error.message}`)
@@ -3228,11 +3245,11 @@
             `));
         }
         else if (this.llmExecutionTools.length === 0) {
-            throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\``);
+            throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\` into ${this.title}`);
         }
         else {
             throw new PipelineExecutionError(spaceTrim__default["default"]((block) => `
-                You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}"
+                You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}" into ${this.title}
 
                 Available \`LlmExecutionTools\`:
                 ${block(this.description)}
@@ -3262,7 +3279,7 @@
  *
  * @public exported from `@promptbook/core`
  */
-function joinLlmExecutionTools(...llmExecutionTools) {
+function joinLlmExecutionTools(title, ...llmExecutionTools) {
     if (llmExecutionTools.length === 0) {
         const warningMessage = spaceTrim__default["default"](`
             You have not provided any \`LlmExecutionTools\`
@@ -3294,30 +3311,27 @@
         };
         */
     }
-    return new MultipleLlmExecutionTools(...llmExecutionTools);
+    return new MultipleLlmExecutionTools(title || 'Multiple LLM Providers joined by `joinLlmExecutionTools`', ...llmExecutionTools);
 }
 /**
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
  */
 
 /**
- * Takes an item or an array of items and returns an array of items
- *
- * 1) Any item except array and undefined returns array with that one item (also null)
- * 2) Undefined returns empty array
- * 3) Array returns itself
+ * Just returns the given `LlmExecutionTools` or joins multiple into one
  *
- * @private internal utility
+ * @public exported from `@promptbook/core`
  */
-function arrayableToArray(input) {
-    if (input === undefined) {
-        return [];
-    }
-    if (input instanceof Array) {
-        return input;
-    }
-    return [input];
+function getSingleLlmExecutionTools(oneOrMoreLlmExecutionTools) {
+    const _llms = arrayableToArray(oneOrMoreLlmExecutionTools);
+    const llmTools = _llms.length === 1
+        ? _llms[0]
+        : joinLlmExecutionTools('Multiple LLM Providers joined by `getSingleLlmExecutionTools`', ..._llms);
+    return llmTools;
 }
+/**
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
+ */
 
 /**
  * Just says that the variable is not used but should be kept
@@ -4020,9 +4034,7 @@
         $scriptPipelineExecutionErrors: [],
         $failedResults: [], // Track all failed attempts
     };
-
-    const _llms = arrayableToArray(tools.llm);
-    const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+    const llmTools = getSingleLlmExecutionTools(tools.llm);
     attempts: for (let attemptIndex = -jokerParameterNames.length; attemptIndex < maxAttempts; attemptIndex++) {
         const isJokerAttempt = attemptIndex < 0;
         const jokerParameterName = jokerParameterNames[jokerParameterNames.length + attemptIndex];
@@ -4542,9 +4554,7 @@
         return ''; // <- Note: Np knowledge present, return empty string
     }
     try {
-
-        const _llms = arrayableToArray(tools.llm);
-        const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+        const llmTools = getSingleLlmExecutionTools(tools.llm);
         const taskEmbeddingPrompt = {
             title: 'Knowledge Search',
             modelRequirements: {
@@ -5324,8 +5334,7 @@
         pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book'),
         tools,
     });
-    const _llms = arrayableToArray(tools.llm);
-    const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+    const llmTools = getSingleLlmExecutionTools(tools.llm);
     const availableModels = (await llmTools.listModels())
         .filter(({ modelVariant }) => modelVariant === 'CHAT')
         .map(({ modelName, modelDescription }) => ({
@@ -6225,9 +6234,7 @@
     if (tools === undefined || tools.llm === undefined) {
         throw new MissingToolsError('LLM tools are required for preparing the pipeline');
     }
-
-    const _llms = arrayableToArray(tools.llm);
-    const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+    const llmTools = getSingleLlmExecutionTools(tools.llm);
     const llmToolsWithUsage = countUsage(llmTools);
     // <- TODO: [๐ฏ]
     /*
@@ -10582,7 +10589,7 @@
  * @public exported from `@promptbook/core`
  */
 function createLlmToolsFromConfiguration(configuration, options = {}) {
-    const { isVerbose = DEFAULT_IS_VERBOSE, userId } = options;
+    const { title = 'LLM Tools from Configuration', isVerbose = DEFAULT_IS_VERBOSE, userId } = options;
     const llmTools = configuration.map((llmConfiguration) => {
         const registeredItem = $llmToolsRegister
             .list()
@@ -10614,7 +10621,7 @@
             ...llmConfiguration.options,
         });
     });
-    return joinLlmExecutionTools(...llmTools);
+    return joinLlmExecutionTools(title, ...llmTools);
 }
 /**
  * TODO: [๐] Together with `createLlmToolsFromConfiguration` + 'EXECUTION_TOOLS_CLASSES' gets to `@promptbook/core` ALL model providers, make this more efficient
@@ -11214,7 +11221,7 @@
         throw new EnvironmentMismatchError('Function `$getExecutionToolsForNode` works only in Node.js environment');
     }
     const fs = $provideFilesystemForNode();
-    const llm = await $provideLlmToolsFromEnv(options);
+    const llm = await $provideLlmToolsFromEnv({ title: 'LLM Tools for Node.js', ...options });
     const executables = await $provideExecutablesForNode();
     const tools = {
         llm,
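
End-to-end, the default title now threads from `$getExecutionToolsForNode` through `$provideLlmToolsFromEnv` into the joined tools. A usage sketch, assuming provider API keys are present in the environment and that `$getExecutionToolsForNode` is exported from `@promptbook/node`:

```ts
import { $getExecutionToolsForNode } from '@promptbook/node'; // <- assumed export location

const tools = await $getExecutionToolsForNode();
// `tools.llm` is now titled 'LLM Tools for Node.js' unless overridden via options
const models = await tools.llm.listModels();
```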