@promptbook/wizard 0.101.0-17 → 0.101.0-19
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/esm/index.es.js +50 -43
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/components.index.d.ts +6 -0
- package/esm/typings/src/_packages/core.index.d.ts +2 -0
- package/esm/typings/src/_packages/types.index.d.ts +4 -0
- package/esm/typings/src/book-2.0/utils/generatePlaceholderAgentProfileImageUrl.d.ts +3 -0
- package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/MockedChat.d.ts +7 -1
- package/esm/typings/src/book-components/Chat/LlmChat/LlmChatProps.d.ts +13 -0
- package/esm/typings/src/book-components/Chat/hooks/index.d.ts +2 -0
- package/esm/typings/src/book-components/Chat/hooks/useChatAutoScroll.d.ts +41 -0
- package/esm/typings/src/book-components/Chat/hooks/useSendMessageToLlmChat.d.ts +44 -0
- package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +1 -2
- package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +1 -3
- package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForWizardOrCli.d.ts +1 -2
- package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts +8 -2
- package/esm/typings/src/llm-providers/_multiple/MultipleLlmExecutionTools.d.ts +5 -13
- package/esm/typings/src/llm-providers/_multiple/getSingleLlmExecutionTools.d.ts +11 -0
- package/esm/typings/src/llm-providers/_multiple/joinLlmExecutionTools.d.ts +2 -1
- package/esm/typings/src/llm-providers/mocked/$fakeTextToExpectations.d.ts +1 -0
- package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +2 -5
- package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +2 -6
- package/esm/typings/src/scrapers/markdown/MarkdownScraper.d.ts +1 -2
- package/esm/typings/src/version.d.ts +1 -1
- package/package.json +2 -2
- package/umd/index.umd.js +50 -43
- package/umd/index.umd.js.map +1 -1
- package/esm/typings/src/llm-providers/mocked/test/joker.test.d.ts +0 -4
- package/esm/typings/src/llm-providers/mocked/test/mocked-chat.test.d.ts +0 -5
- package/esm/typings/src/llm-providers/mocked/test/mocked-completion.test.d.ts +0 -4
- package/esm/typings/src/scripting/_test/postprocessing.test.d.ts +0 -1
- /package/esm/typings/src/{cli/test/ptbk.test.d.ts → llm-providers/_common/utils/removeUnsupportedModelRequirements.test.d.ts} +0 -0
package/esm/typings/src/_packages/components.index.d.ts
CHANGED
@@ -15,6 +15,9 @@ import { BookEditor } from '../book-components/BookEditor/BookEditor';
 import { DEFAULT_BOOK_FONT_CLASS } from '../book-components/BookEditor/config';
 import { Chat } from '../book-components/Chat/Chat/Chat';
 import type { ChatProps } from '../book-components/Chat/Chat/ChatProps';
+import { useChatAutoScroll } from '../book-components/Chat/hooks/useChatAutoScroll';
+import type { SendMessageToLlmChatFunction } from '../book-components/Chat/hooks/useSendMessageToLlmChat';
+import { useSendMessageToLlmChat } from '../book-components/Chat/hooks/useSendMessageToLlmChat';
 import { LlmChat } from '../book-components/Chat/LlmChat/LlmChat';
 import type { LlmChatProps } from '../book-components/Chat/LlmChat/LlmChatProps';
 import type { ChatMessage } from '../book-components/Chat/types/ChatMessage';
@@ -44,6 +47,9 @@ export { BookEditor };
 export { DEFAULT_BOOK_FONT_CLASS };
 export { Chat };
 export type { ChatProps };
+export { useChatAutoScroll };
+export type { SendMessageToLlmChatFunction };
+export { useSendMessageToLlmChat };
 export { LlmChat };
 export type { LlmChatProps };
 export type { ChatMessage };
package/esm/typings/src/_packages/core.index.d.ts
CHANGED
@@ -120,6 +120,7 @@ import { createLlmToolsFromConfiguration } from '../llm-providers/_common/regist
 import { cacheLlmTools } from '../llm-providers/_common/utils/cache/cacheLlmTools';
 import { countUsage } from '../llm-providers/_common/utils/count-total-usage/countUsage';
 import { limitTotalUsage } from '../llm-providers/_common/utils/count-total-usage/limitTotalUsage';
+import { getSingleLlmExecutionTools } from '../llm-providers/_multiple/getSingleLlmExecutionTools';
 import { joinLlmExecutionTools } from '../llm-providers/_multiple/joinLlmExecutionTools';
 import { MultipleLlmExecutionTools } from '../llm-providers/_multiple/MultipleLlmExecutionTools';
 import { AgentLlmExecutionTools } from '../llm-providers/agent/AgentLlmExecutionTools';
@@ -291,6 +292,7 @@ export { createLlmToolsFromConfiguration };
 export { cacheLlmTools };
 export { countUsage };
 export { limitTotalUsage };
+export { getSingleLlmExecutionTools };
 export { joinLlmExecutionTools };
 export { MultipleLlmExecutionTools };
 export { AgentLlmExecutionTools };
package/esm/typings/src/_packages/types.index.d.ts
CHANGED
@@ -13,6 +13,8 @@ import type { MockedChatDelayConfig } from '../book-components/AvatarProfile/Ava
 import type { MockedChatProps } from '../book-components/AvatarProfile/AvatarProfile/MockedChat';
 import type { BookEditorProps } from '../book-components/BookEditor/BookEditor';
 import type { ChatProps } from '../book-components/Chat/Chat/ChatProps';
+import type { ChatAutoScrollConfig } from '../book-components/Chat/hooks/useChatAutoScroll';
+import type { SendMessageToLlmChatFunction } from '../book-components/Chat/hooks/useSendMessageToLlmChat';
 import type { LlmChatProps } from '../book-components/Chat/LlmChat/LlmChatProps';
 import type { ChatMessage } from '../book-components/Chat/types/ChatMessage';
 import type { ChatParticipant } from '../book-components/Chat/types/ChatParticipant';
@@ -340,6 +342,8 @@ export type { MockedChatDelayConfig };
 export type { MockedChatProps };
 export type { BookEditorProps };
 export type { ChatProps };
+export type { ChatAutoScrollConfig };
+export type { SendMessageToLlmChatFunction };
 export type { LlmChatProps };
 export type { ChatMessage };
 export type { ChatParticipant };
package/esm/typings/src/book-2.0/utils/generatePlaceholderAgentProfileImageUrl.d.ts
CHANGED
@@ -8,3 +8,6 @@ import { string_agent_name, string_url_image } from '../../types/typeAliases';
  * @public exported from `@promptbook/core`
  */
 export declare function generatePlaceholderAgentProfileImageUrl(agentName?: string_agent_name): string_url_image;
+/**
+ * TODO: [🤹] Figure out best placeholder image generator https://i.pravatar.cc/1000?u=568
+ */
package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/MockedChat.d.ts
CHANGED
@@ -31,7 +31,13 @@ export type MockedChatDelayConfig = {
  *
  * @public exported from `@promptbook/components`
  */
-export type MockedChatProps = ChatProps & {
+export type MockedChatProps = Omit<ChatProps, 'onReset' | /*'onMessage' | */ 'onUseTemplate' | 'isVoiceRecognitionButtonShown'> & {
+    /**
+     * Whether to show the reset button
+     *
+     * @default false
+     */
+    isResetShown?: boolean;
     /**
      * Optional delays configuration for emulating typing behavior
      */
package/esm/typings/src/book-components/Chat/LlmChat/LlmChatProps.d.ts
CHANGED
@@ -2,6 +2,7 @@ import type { LlmExecutionTools } from '../../../execution/LlmExecutionTools';
 import type { ChatProps } from '../Chat/ChatProps';
 import type { ChatMessage } from '../types/ChatMessage';
 import type { ChatParticipant } from '../types/ChatParticipant';
+import type { SendMessageToLlmChatFunction } from '../hooks/useSendMessageToLlmChat';
 /**
  * Props for LlmChat component, derived from ChatProps but with LLM-specific modifications
  *
@@ -17,8 +18,20 @@ export type LlmChatProps = Omit<ChatProps, 'messages' | 'onMessage' | 'onChange'
      * When provided, the conversation will be saved and restored from localStorage
      */
     readonly persistenceKey?: string;
+    /**
+     * Optional initial messages to pre-populate the chat.
+     * - They can include both USER and ASSISTANT messages.
+     * - They are only used when there is no persisted conversation (persistence takes precedence).
+     * - They are not automatically persisted until the user sends a new message.
+     */
+    readonly initialMessages?: ReadonlyArray<ChatMessage>;
     /**
      * Called when the chat state changes (messages, participants, etc.)
      */
     onChange?(messages: ReadonlyArray<ChatMessage>, participants: ReadonlyArray<ChatParticipant>): void;
+    /**
+     * Optional external sendMessage function produced by useSendMessageToLlmChat hook.
+     * When provided, LlmChat will attach its internal handler to it (no React context needed).
+     */
+    readonly sendMessage?: SendMessageToLlmChatFunction;
 };
package/esm/typings/src/book-components/Chat/hooks/useChatAutoScroll.d.ts
ADDED
@@ -0,0 +1,41 @@
+/**
+ * Configuration for the auto-scroll behavior
+ */
+export type ChatAutoScrollConfig = {
+    /**
+     * Threshold in pixels from bottom to consider as "at bottom"
+     * @default 100
+     */
+    bottomThreshold?: number;
+    /**
+     * Whether to use smooth scrolling
+     * @default true
+     */
+    smoothScroll?: boolean;
+    /**
+     * Delay before checking scroll position after new messages (in milliseconds)
+     * @default 100
+     */
+    scrollCheckDelay?: number;
+};
+/**
+ * Hook for managing auto-scroll behavior in chat components
+ *
+ * This hook provides:
+ * - Automatic scrolling to bottom when new messages arrive (if user is already at bottom)
+ * - Detection of when user scrolls away from bottom
+ * - Scroll-to-bottom functionality with smooth animation
+ * - Mobile-optimized scrolling behavior
+ *
+ * @public exported from `@promptbook/components`
+ */
+export declare function useChatAutoScroll(config?: ChatAutoScrollConfig): {
+    isAutoScrolling: boolean;
+    chatMessagesRef: (element: HTMLDivElement | null) => void;
+    handleScroll: (event: React.UIEvent<HTMLDivElement>) => void;
+    handleMessagesChange: () => void;
+    scrollToBottom: () => void;
+    enableAutoScroll: () => void;
+    disableAutoScroll: () => void;
+    isMobile: boolean;
+};
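For orientation, a sketch of wiring this hook into a custom message list. The `MessageList` component, its message shape, and the inline styles are illustrative only; the hook name, its config fields, and its return fields come from the typings above:

```tsx
import { useChatAutoScroll } from '@promptbook/components';
import { useEffect } from 'react';

function MessageList({ messages }: { messages: ReadonlyArray<{ id: string; content: string }> }) {
    const { chatMessagesRef, handleScroll, handleMessagesChange, isAutoScrolling, scrollToBottom } =
        useChatAutoScroll({ bottomThreshold: 150, smoothScroll: true });

    // Let the hook re-check the scroll position whenever the message list changes
    useEffect(() => {
        handleMessagesChange();
    }, [messages, handleMessagesChange]);

    return (
        <div ref={chatMessagesRef} onScroll={handleScroll} style={{ overflowY: 'auto', height: 400 }}>
            {messages.map((message) => (
                <p key={message.id}>{message.content}</p>
            ))}
            {!isAutoScrolling && <button onClick={scrollToBottom}>Jump to latest</button>}
        </div>
    );
}
```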
package/esm/typings/src/book-components/Chat/hooks/useSendMessageToLlmChat.d.ts
ADDED
@@ -0,0 +1,44 @@
+/**
+ * Function type for sending a message to LlmChat.
+ *
+ * Implementation detail: The returned function is "attachable".
+ * LlmChat will call the internal `_attach` method (if present) to bind
+ * its real message handler. Messages sent before attachment are queued
+ * and flushed after attachment.
+ *
+ * @public exported from `@promptbook/components`
+ */
+export type SendMessageToLlmChatFunction = {
+    /**
+     * Send a message to the bound LlmChat instance (or queue it until attached).
+     */
+    (message: string): void;
+    /**
+     * Internal method used by the <LlmChat/> component to attach its handler.
+     * Not intended for consumer usage.
+     *
+     * @internal
+     */
+    _attach?: (handler: (message: string) => Promise<void> | void) => void;
+};
+/**
+ * Hook to create a sendMessage function for an <LlmChat/> component WITHOUT needing any React Context.
+ *
+ * Usage pattern:
+ * ```tsx
+ * const sendMessage = useSendMessageToLlmChat();
+ * return (
+ *     <>
+ *         <button onClick={() => sendMessage('Hello!')}>Hello</button>
+ *         <LlmChat llmTools={llmTools} sendMessage={sendMessage} />
+ *     </>
+ * );
+ * ```
+ *
+ * - No provider wrapping needed.
+ * - Safe to call before the <LlmChat/> mounts (messages will be queued).
+ * - Keeps DRY by letting <LlmChat/> reuse its internal `handleMessage` logic.
+ *
+ * @public exported from `@promptbook/components`
+ */
+export declare function useSendMessageToLlmChat(): SendMessageToLlmChatFunction;
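The "attachable function" contract documented above can be pictured with a small sketch. This illustrates the described queue-then-flush behavior only; it is not the package's actual implementation:

```ts
import type { SendMessageToLlmChatFunction } from '@promptbook/components';

function createAttachableSender(): SendMessageToLlmChatFunction {
    let handler: ((message: string) => Promise<void> | void) | null = null;
    const queue: string[] = [];

    const send = ((message: string) => {
        if (handler) {
            void handler(message);
        } else {
            queue.push(message); // before <LlmChat/> attaches, messages are only queued
        }
    }) as SendMessageToLlmChatFunction;

    send._attach = (newHandler) => {
        handler = newHandler;
        // Flush everything that was sent before attachment, in order
        for (const message of queue.splice(0)) {
            void newHandler(message);
        }
    };

    return send;
}
```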
package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts
CHANGED
@@ -1,8 +1,7 @@
 import type { PartialDeep, Promisable, ReadonlyDeep, WritableDeep } from 'type-fest';
 import type { PipelineJson } from '../../pipeline/PipelineJson/PipelineJson';
 import type { TaskJson } from '../../pipeline/PipelineJson/TaskJson';
-import type { Parameters } from '../../types/typeAliases';
-import type { string_parameter_name } from '../../types/typeAliases';
+import type { Parameters, string_parameter_name } from '../../types/typeAliases';
 import type { TODO_string } from '../../utils/organization/TODO_string';
 import type { ExecutionReportJson } from '../execution-report/ExecutionReportJson';
 import type { PipelineExecutorResult } from '../PipelineExecutorResult';
package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts
CHANGED
@@ -1,9 +1,7 @@
 import type { ReadonlyDeep } from 'type-fest';
 import type { PipelineJson } from '../../pipeline/PipelineJson/PipelineJson';
 import type { TaskJson } from '../../pipeline/PipelineJson/TaskJson';
-import type { Parameters } from '../../types/typeAliases';
-import type { string_markdown } from '../../types/typeAliases';
-import type { string_parameter_value } from '../../types/typeAliases';
+import type { Parameters, string_markdown, string_parameter_value } from '../../types/typeAliases';
 import type { ExecutionTools } from '../ExecutionTools';
 /**
  * Options for retrieving relevant knowledge for a specific task during pipeline execution.
package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForWizardOrCli.d.ts
CHANGED
@@ -1,7 +1,6 @@
 import { Promisable } from 'type-fest';
 import type { Identification } from '../../../remote-server/socket-types/_subtypes/Identification';
-import type { string_app_id } from '../../../types/typeAliases';
-import type { string_url } from '../../../types/typeAliases';
+import type { string_app_id, string_url } from '../../../types/typeAliases';
 import type { really_any } from '../../../utils/organization/really_any';
 import type { CacheLlmToolsOptions } from '../utils/cache/CacheLlmToolsOptions';
 import type { LlmExecutionToolsWithTotalUsage } from '../utils/count-total-usage/LlmExecutionToolsWithTotalUsage';
package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts
CHANGED
@@ -1,4 +1,4 @@
-import type { string_user_id } from '../../../types/typeAliases';
+import type { string_markdown_text, string_mime_type_with_wildcard, string_user_id } from '../../../types/typeAliases';
 import { MultipleLlmExecutionTools } from '../../_multiple/MultipleLlmExecutionTools';
 import type { LlmToolsConfiguration } from './LlmToolsConfiguration';
 /**
@@ -7,12 +7,18 @@ import type { LlmToolsConfiguration } from './LlmToolsConfiguration';
 * @private internal type for `$provideLlmToolsFromEnv` and `$provideLlmToolsForTestingAndScriptsAndPlayground`
 */
 export type CreateLlmToolsFromConfigurationOptions = {
+    /**
+     * Title of the LLM tools
+     *
+     * @default 'LLM Tools from Configuration'
+     */
+    readonly title?: string_mime_type_with_wildcard & string_markdown_text;
     /**
      * This will will be passed to the created `LlmExecutionTools`
      *
      * @default false
     */
-    isVerbose?: boolean;
+    readonly isVerbose?: boolean;
     /**
      * Identifier of the end user
      *
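A hedged sketch of the new `title` option in use. The `@promptbook/types` import path for `LlmToolsConfiguration` and the env-sourced configuration are assumptions; `createLlmToolsFromConfiguration` itself is exported from `@promptbook/core` per the diff above:

```ts
import { createLlmToolsFromConfiguration } from '@promptbook/core';
import type { LlmToolsConfiguration } from '@promptbook/types'; // <- assumed import path

declare const configuration: LlmToolsConfiguration; // e.g. read from environment variables

const llmTools = createLlmToolsFromConfiguration(configuration, {
    title: 'LLM Tools for my app', // <- new in this release; defaults to 'LLM Tools from Configuration'
    isVerbose: true,
});
```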
package/esm/typings/src/llm-providers/_multiple/MultipleLlmExecutionTools.d.ts
CHANGED
@@ -1,16 +1,8 @@
 import type { AvailableModel } from '../../execution/AvailableModel';
 import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
-import type { ChatPromptResult } from '../../execution/PromptResult';
-import type { CompletionPromptResult } from '../../execution/PromptResult';
-import type { EmbeddingPromptResult } from '../../execution/PromptResult';
-import type { PromptResult } from '../../execution/PromptResult';
-import type { ChatPrompt } from '../../types/Prompt';
-import type { CompletionPrompt } from '../../types/Prompt';
-import type { EmbeddingPrompt } from '../../types/Prompt';
-import type { Prompt } from '../../types/Prompt';
-import type { string_markdown } from '../../types/typeAliases';
-import type { string_markdown_text } from '../../types/typeAliases';
-import type { string_title } from '../../types/typeAliases';
+import type { ChatPromptResult, CompletionPromptResult, EmbeddingPromptResult, PromptResult } from '../../execution/PromptResult';
+import type { ChatPrompt, CompletionPrompt, EmbeddingPrompt, Prompt } from '../../types/Prompt';
+import type { string_markdown, string_markdown_text, string_title } from '../../types/typeAliases';
@@ -18,6 +10,7 @@ import type { string_title } from '../../types/typeAliases';
 * @public exported from `@promptbook/core`
 */
 export declare class MultipleLlmExecutionTools implements LlmExecutionTools {
+    readonly title: string_title & string_markdown_text;
     /**
      * Array of execution tools in order of priority
      */
@@ -25,8 +18,7 @@ export declare class MultipleLlmExecutionTools implements LlmExecutionTools {
     /**
      * Gets array of execution tools in order of priority
      */
-    constructor(...llmExecutionTools: ReadonlyArray<LlmExecutionTools>);
-    get title(): string_title & string_markdown_text;
+    constructor(title: string_title & string_markdown_text, ...llmExecutionTools: ReadonlyArray<LlmExecutionTools>);
     get description(): string_markdown;
     get profile(): {
         name: string;
package/esm/typings/src/llm-providers/_multiple/getSingleLlmExecutionTools.d.ts
ADDED
@@ -0,0 +1,11 @@
+import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
+import { MultipleLlmExecutionTools } from './MultipleLlmExecutionTools';
+/**
+ * Just returns the given `LlmExecutionTools` or joins multiple into one
+ *
+ * @public exported from `@promptbook/core`
+ */
+export declare function getSingleLlmExecutionTools(oneOrMoreLlmExecutionTools: undefined | LlmExecutionTools | ReadonlyArray<LlmExecutionTools>): LlmExecutionTools | MultipleLlmExecutionTools;
+/**
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
+ */
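Per the UMD implementation further below, a single tool is returned unchanged while an array is joined. A sketch with placeholder provider instances (the `@promptbook/types` import path for the type is an assumption):

```ts
import { getSingleLlmExecutionTools } from '@promptbook/core';
import type { LlmExecutionTools } from '@promptbook/types'; // <- assumed import path

declare const openAiTools: LlmExecutionTools; // placeholder providers
declare const claudeTools: LlmExecutionTools;

const single = getSingleLlmExecutionTools(openAiTools); // returned as-is
const joined = getSingleLlmExecutionTools([openAiTools, claudeTools]); // wrapped in MultipleLlmExecutionTools
// `undefined` produces an empty join, which only fails later, once a model call is attempted
```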
package/esm/typings/src/llm-providers/_multiple/joinLlmExecutionTools.d.ts
CHANGED
@@ -1,4 +1,5 @@
 import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
+import { string_markdown_text, string_title } from '../../types/typeAliases';
 import { MultipleLlmExecutionTools } from './MultipleLlmExecutionTools';
 /**
  * Joins multiple LLM Execution Tools into one
@@ -15,7 +16,7 @@ import { MultipleLlmExecutionTools } from './MultipleLlmExecutionTools';
 *
 * @public exported from `@promptbook/core`
 */
-export declare function joinLlmExecutionTools(...llmExecutionTools: ReadonlyArray<LlmExecutionTools>): MultipleLlmExecutionTools;
+export declare function joinLlmExecutionTools(title: string_title & string_markdown_text, ...llmExecutionTools: ReadonlyArray<LlmExecutionTools>): MultipleLlmExecutionTools;
 /**
 * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
 */
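Note this is a breaking signature change: the human-readable `title` is now the first parameter, mirroring the new `MultipleLlmExecutionTools` constructor above. A before/after sketch with placeholder tools:

```ts
import { joinLlmExecutionTools } from '@promptbook/core';
import type { LlmExecutionTools } from '@promptbook/types'; // <- assumed import path

declare const openAiTools: LlmExecutionTools; // placeholder provider instances
declare const claudeTools: LlmExecutionTools;

// Before (0.101.0-17):
// const llmTools = joinLlmExecutionTools(openAiTools, claudeTools);

// After (0.101.0-19):
const llmTools = joinLlmExecutionTools('My joined LLM providers', openAiTools, claudeTools);
```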
package/esm/typings/src/llm-providers/mocked/$fakeTextToExpectations.d.ts
CHANGED
@@ -11,5 +11,6 @@ import type { string_postprocessing_function_name } from '../../types/typeAliase
 */
 export declare function $fakeTextToExpectations(expectations: Expectations, postprocessingFunctionNames?: ReadonlyArray<string_postprocessing_function_name>): Promise<string>;
 /**
+ * TODO: Do not use LoremIpsum, but use some faked text that looks more human-promptbook-like
  * TODO: [๐] Unite object for expecting amount and format - use here also a format
 */
package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts
CHANGED
@@ -1,12 +1,9 @@
 import type { AvailableModel } from '../../execution/AvailableModel';
 import type { CommonToolsOptions } from '../../execution/CommonToolsOptions';
 import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
-import type { ChatPromptResult } from '../../execution/PromptResult';
-import type { CompletionPromptResult } from '../../execution/PromptResult';
+import type { ChatPromptResult, CompletionPromptResult } from '../../execution/PromptResult';
 import type { Prompt } from '../../types/Prompt';
-import type { string_markdown } from '../../types/typeAliases';
-import type { string_markdown_text } from '../../types/typeAliases';
-import type { string_title } from '../../types/typeAliases';
+import type { string_markdown, string_markdown_text, string_title } from '../../types/typeAliases';
 /**
  * Mocked execution Tools for just echoing the requests for testing purposes.
  *
package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts
CHANGED
@@ -1,13 +1,9 @@
 import type { AvailableModel } from '../../execution/AvailableModel';
 import type { CommonToolsOptions } from '../../execution/CommonToolsOptions';
 import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
-import type { ChatPromptResult } from '../../execution/PromptResult';
-import type { CompletionPromptResult } from '../../execution/PromptResult';
-import type { EmbeddingPromptResult } from '../../execution/PromptResult';
+import type { ChatPromptResult, CompletionPromptResult, EmbeddingPromptResult } from '../../execution/PromptResult';
 import type { Prompt } from '../../types/Prompt';
-import type { string_markdown } from '../../types/typeAliases';
-import type { string_markdown_text } from '../../types/typeAliases';
-import type { string_title } from '../../types/typeAliases';
+import type { string_markdown, string_markdown_text, string_title } from '../../types/typeAliases';
 /**
  * Mocked execution Tools for just faking expected responses for testing purposes
  *
package/esm/typings/src/scrapers/markdown/MarkdownScraper.d.ts
CHANGED
@@ -1,6 +1,5 @@
 import type { KnowledgePiecePreparedJson } from '../../pipeline/PipelineJson/KnowledgePieceJson';
-import type { Scraper } from '../_common/Scraper';
-import type { ScraperSourceHandler } from '../_common/Scraper';
+import type { Scraper, ScraperSourceHandler } from '../_common/Scraper';
 import type { ExecutionTools } from '../../execution/ExecutionTools';
 import type { PrepareAndScrapeOptions } from '../../prepare/PrepareAndScrapeOptions';
 import type { ScraperAndConverterMetadata } from '../_common/register/ScraperAndConverterMetadata';
package/esm/typings/src/version.d.ts
CHANGED
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
 export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
 /**
  * Represents the version string of the Promptbook engine.
- * It follows semantic versioning (e.g., `0.101.0-
+ * It follows semantic versioning (e.g., `0.101.0-18`).
  *
  * @generated
 */
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
     "name": "@promptbook/wizard",
-    "version": "0.101.0-17",
+    "version": "0.101.0-19",
     "description": "Promptbook: Run AI apps in plain human language across multiple models and platforms",
     "private": false,
     "sideEffects": false,
@@ -95,7 +95,7 @@
     "module": "./esm/index.es.js",
     "typings": "./esm/typings/src/_packages/wizard.index.d.ts",
     "peerDependencies": {
-        "@promptbook/core": "0.101.0-17"
+        "@promptbook/core": "0.101.0-19"
     },
     "dependencies": {
         "@ai-sdk/deepseek": "0.1.6",
package/umd/index.umd.js
CHANGED
@@ -48,7 +48,7 @@
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.101.0-17';
+const PROMPTBOOK_ENGINE_VERSION = '0.101.0-19';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [๐] Ignore a discrepancy between file name and entity name
@@ -7856,6 +7856,25 @@
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
  */
 
+/**
+ * Takes an item or an array of items and returns an array of items
+ *
+ * 1) Any item except array and undefined returns array with that one item (also null)
+ * 2) Undefined returns empty array
+ * 3) Array returns itself
+ *
+ * @private internal utility
+ */
+function arrayableToArray(input) {
+    if (input === undefined) {
+        return [];
+    }
+    if (input instanceof Array) {
+        return input;
+    }
+    return [input];
+}
+
 /**
  * Multiple LLM Execution Tools is a proxy server that uses multiple execution tools internally and exposes the executor interface externally.
  *
@@ -7866,12 +7885,10 @@
     /**
      * Gets array of execution tools in order of priority
      */
-    constructor(...llmExecutionTools) {
+    constructor(title, ...llmExecutionTools) {
+        this.title = title;
         this.llmExecutionTools = llmExecutionTools;
     }
-    get title() {
-        return 'Multiple LLM Providers';
-    }
     get description() {
         const innerModelsTitlesAndDescriptions = this.llmExecutionTools
             .map(({ title, description }, index) => {
@@ -7957,7 +7974,7 @@
                     return await llmExecutionTools.callEmbeddingModel(prompt);
                 // <- case [๐ค]:
                 default:
-                    throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}"`);
+                    throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}" in ${llmExecutionTools.title}`);
             }
         }
         catch (error) {
@@ -7978,7 +7995,7 @@
             // 2) AnthropicClaude throw PipelineExecutionError: Parameter `{knowledge}` is not defined
             // 3) ...
             spaceTrim__default["default"]((block) => `
-                All execution tools failed:
+                All execution tools of ${this.title} failed:
 
                 ${block(errors
                     .map(({ error, llmExecutionTools }, i) => `${i + 1}) **${llmExecutionTools.title}** thrown **${error.name || 'Error'}:** ${error.message}`)
@@ -7987,11 +8004,11 @@
             `));
         }
         else if (this.llmExecutionTools.length === 0) {
-            throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\``);
+            throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\` into ${this.title}`);
         }
         else {
             throw new PipelineExecutionError(spaceTrim__default["default"]((block) => `
-                You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}"
+                You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}" into ${this.title}
 
                 Available \`LlmExecutionTools\`:
                 ${block(this.description)}
@@ -8021,7 +8038,7 @@
 *
 * @public exported from `@promptbook/core`
 */
-function joinLlmExecutionTools(...llmExecutionTools) {
+function joinLlmExecutionTools(title, ...llmExecutionTools) {
    if (llmExecutionTools.length === 0) {
        const warningMessage = spaceTrim__default["default"](`
            You have not provided any \`LlmExecutionTools\`
@@ -8053,30 +8070,27 @@
        };
        */
    }
-    return new MultipleLlmExecutionTools(...llmExecutionTools);
+    return new MultipleLlmExecutionTools(title || 'Multiple LLM Providers joined by `joinLlmExecutionTools`', ...llmExecutionTools);
 }
 /**
 * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
 */
 
 /**
- * Takes an item or an array of items and returns an array of items
- *
- * 1) Any item except array and undefined returns array with that one item (also null)
- * 2) Undefined returns empty array
- * 3) Array returns itself
+ * Just returns the given `LlmExecutionTools` or joins multiple into one
  *
- * @private internal utility
+ * @public exported from `@promptbook/core`
 */
-function arrayableToArray(input) {
-    if (input === undefined) {
-        return [];
-    }
-    if (input instanceof Array) {
-        return input;
-    }
-    return [input];
+function getSingleLlmExecutionTools(oneOrMoreLlmExecutionTools) {
+    const _llms = arrayableToArray(oneOrMoreLlmExecutionTools);
+    const llmTools = _llms.length === 1
+        ? _llms[0]
+        : joinLlmExecutionTools('Multiple LLM Providers joined by `getSingleLlmExecutionTools`', ..._llms);
+    return llmTools;
 }
+/**
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
+ */
 
 /**
  * Prepares the persona for the pipeline
@@ -8095,8 +8109,7 @@
        pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book'),
        tools,
    });
-    const _llms = arrayableToArray(tools.llm);
-    const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+    const llmTools = getSingleLlmExecutionTools(tools.llm);
    const availableModels = (await llmTools.listModels())
        .filter(({ modelVariant }) => modelVariant === 'CHAT')
        .map(({ modelName, modelDescription }) => ({
@@ -8713,9 +8726,7 @@
    if (tools === undefined || tools.llm === undefined) {
        throw new MissingToolsError('LLM tools are required for preparing the pipeline');
    }
-
-    const _llms = arrayableToArray(tools.llm);
-    const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+    const llmTools = getSingleLlmExecutionTools(tools.llm);
    const llmToolsWithUsage = countUsage(llmTools);
    // <- TODO: [๐ฏ]
    /*
@@ -9585,9 +9596,7 @@
        $scriptPipelineExecutionErrors: [],
        $failedResults: [], // Track all failed attempts
    };
-
-    const _llms = arrayableToArray(tools.llm);
-    const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+    const llmTools = getSingleLlmExecutionTools(tools.llm);
    attempts: for (let attemptIndex = -jokerParameterNames.length; attemptIndex < maxAttempts; attemptIndex++) {
        const isJokerAttempt = attemptIndex < 0;
        const jokerParameterName = jokerParameterNames[jokerParameterNames.length + attemptIndex];
@@ -10107,9 +10116,7 @@
        return ''; // <- Note: Np knowledge present, return empty string
    }
    try {
-
-        const _llms = arrayableToArray(tools.llm);
-        const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+        const llmTools = getSingleLlmExecutionTools(tools.llm);
        const taskEmbeddingPrompt = {
            title: 'Knowledge Search',
            modelRequirements: {
@@ -10826,9 +10833,7 @@
        throw new MissingToolsError('LLM tools are required for scraping external files');
        // <- Note: This scraper is used in all other scrapers, so saying "external files" not "markdown files"
    }
-
-    const _llms = arrayableToArray(llm);
-    const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+    const llmTools = getSingleLlmExecutionTools(llm);
    // TODO: [๐ผ] In future use `ptbk make` and made getPipelineCollection
    const collection = createCollectionFromJson(...PipelineCollection);
    const prepareKnowledgeFromMarkdownExecutor = createPipelineExecutor({
@@ -12967,7 +12972,7 @@
 * @public exported from `@promptbook/core`
 */
 function createLlmToolsFromConfiguration(configuration, options = {}) {
-    const { isVerbose = DEFAULT_IS_VERBOSE, userId } = options;
+    const { title = 'LLM Tools from Configuration', isVerbose = DEFAULT_IS_VERBOSE, userId } = options;
    const llmTools = configuration.map((llmConfiguration) => {
        const registeredItem = $llmToolsRegister
            .list()
@@ -12999,7 +13004,7 @@
            ...llmConfiguration.options,
        });
    });
-    return joinLlmExecutionTools(...llmTools);
+    return joinLlmExecutionTools(title, ...llmTools);
 }
 /**
 * TODO: [๐] Together with `createLlmToolsFromConfiguration` + 'EXECUTION_TOOLS_CLASSES' gets to `@promptbook/core` ALL model providers, make this more efficient
@@ -13116,7 +13121,9 @@
        });
    }
    else if (strategy === 'BRING_YOUR_OWN_KEYS') {
-        llmExecutionTools = await $provideLlmToolsFromEnv(
+        llmExecutionTools = await $provideLlmToolsFromEnv({
+            title: 'LLM Tools for wizard or CLI with BYOK strategy',
+        });
    }
    else {
        throw new UnexpectedError(`\`$provideLlmToolsForWizardOrCli\` wrong strategy "${strategy}"`);
@@ -17149,7 +17156,7 @@
        throw new EnvironmentMismatchError('Function `$getExecutionToolsForNode` works only in Node.js environment');
    }
    const fs = $provideFilesystemForNode();
-    const llm = await $provideLlmToolsFromEnv(options);
+    const llm = await $provideLlmToolsFromEnv({ title: 'LLM Tools for Node.js', ...options });
    const executables = await $provideExecutablesForNode();
    const tools = {
        llm,