@promptbook/legacy-documents 0.101.0-17 → 0.101.0-19
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/esm/index.es.js +44 -39
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/components.index.d.ts +6 -0
- package/esm/typings/src/_packages/core.index.d.ts +2 -0
- package/esm/typings/src/_packages/types.index.d.ts +4 -0
- package/esm/typings/src/book-2.0/utils/generatePlaceholderAgentProfileImageUrl.d.ts +3 -0
- package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/MockedChat.d.ts +7 -1
- package/esm/typings/src/book-components/Chat/LlmChat/LlmChatProps.d.ts +13 -0
- package/esm/typings/src/book-components/Chat/hooks/index.d.ts +2 -0
- package/esm/typings/src/book-components/Chat/hooks/useChatAutoScroll.d.ts +41 -0
- package/esm/typings/src/book-components/Chat/hooks/useSendMessageToLlmChat.d.ts +44 -0
- package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +1 -2
- package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +1 -3
- package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForWizardOrCli.d.ts +1 -2
- package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts +8 -2
- package/esm/typings/src/llm-providers/_multiple/MultipleLlmExecutionTools.d.ts +5 -13
- package/esm/typings/src/llm-providers/_multiple/getSingleLlmExecutionTools.d.ts +11 -0
- package/esm/typings/src/llm-providers/_multiple/joinLlmExecutionTools.d.ts +2 -1
- package/esm/typings/src/llm-providers/mocked/$fakeTextToExpectations.d.ts +1 -0
- package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +2 -5
- package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +2 -6
- package/esm/typings/src/scrapers/markdown/MarkdownScraper.d.ts +1 -2
- package/esm/typings/src/version.d.ts +1 -1
- package/package.json +2 -2
- package/umd/index.umd.js +44 -39
- package/umd/index.umd.js.map +1 -1
- package/esm/typings/src/llm-providers/mocked/test/joker.test.d.ts +0 -4
- package/esm/typings/src/llm-providers/mocked/test/mocked-chat.test.d.ts +0 -5
- package/esm/typings/src/llm-providers/mocked/test/mocked-completion.test.d.ts +0 -4
- package/esm/typings/src/scripting/_test/postprocessing.test.d.ts +0 -1
- /package/esm/typings/src/{cli/test/ptbk.test.d.ts → llm-providers/_common/utils/removeUnsupportedModelRequirements.test.d.ts} +0 -0
package/esm/typings/src/_packages/components.index.d.ts
CHANGED

@@ -15,6 +15,9 @@ import { BookEditor } from '../book-components/BookEditor/BookEditor';
 import { DEFAULT_BOOK_FONT_CLASS } from '../book-components/BookEditor/config';
 import { Chat } from '../book-components/Chat/Chat/Chat';
 import type { ChatProps } from '../book-components/Chat/Chat/ChatProps';
+import { useChatAutoScroll } from '../book-components/Chat/hooks/useChatAutoScroll';
+import type { SendMessageToLlmChatFunction } from '../book-components/Chat/hooks/useSendMessageToLlmChat';
+import { useSendMessageToLlmChat } from '../book-components/Chat/hooks/useSendMessageToLlmChat';
 import { LlmChat } from '../book-components/Chat/LlmChat/LlmChat';
 import type { LlmChatProps } from '../book-components/Chat/LlmChat/LlmChatProps';
 import type { ChatMessage } from '../book-components/Chat/types/ChatMessage';
@@ -44,6 +47,9 @@ export { BookEditor };
 export { DEFAULT_BOOK_FONT_CLASS };
 export { Chat };
 export type { ChatProps };
+export { useChatAutoScroll };
+export type { SendMessageToLlmChatFunction };
+export { useSendMessageToLlmChat };
 export { LlmChat };
 export type { LlmChatProps };
 export type { ChatMessage };

package/esm/typings/src/_packages/core.index.d.ts
CHANGED

@@ -120,6 +120,7 @@ import { createLlmToolsFromConfiguration } from '../llm-providers/_common/register/createLlmToolsFromConfiguration';
 import { cacheLlmTools } from '../llm-providers/_common/utils/cache/cacheLlmTools';
 import { countUsage } from '../llm-providers/_common/utils/count-total-usage/countUsage';
 import { limitTotalUsage } from '../llm-providers/_common/utils/count-total-usage/limitTotalUsage';
+import { getSingleLlmExecutionTools } from '../llm-providers/_multiple/getSingleLlmExecutionTools';
 import { joinLlmExecutionTools } from '../llm-providers/_multiple/joinLlmExecutionTools';
 import { MultipleLlmExecutionTools } from '../llm-providers/_multiple/MultipleLlmExecutionTools';
 import { AgentLlmExecutionTools } from '../llm-providers/agent/AgentLlmExecutionTools';
@@ -291,6 +292,7 @@ export { createLlmToolsFromConfiguration };
 export { cacheLlmTools };
 export { countUsage };
 export { limitTotalUsage };
+export { getSingleLlmExecutionTools };
 export { joinLlmExecutionTools };
 export { MultipleLlmExecutionTools };
 export { AgentLlmExecutionTools };

package/esm/typings/src/_packages/types.index.d.ts
CHANGED

@@ -13,6 +13,8 @@ import type { MockedChatDelayConfig } from '../book-components/AvatarProfile/AvatarProfile/MockedChat';
 import type { MockedChatProps } from '../book-components/AvatarProfile/AvatarProfile/MockedChat';
 import type { BookEditorProps } from '../book-components/BookEditor/BookEditor';
 import type { ChatProps } from '../book-components/Chat/Chat/ChatProps';
+import type { ChatAutoScrollConfig } from '../book-components/Chat/hooks/useChatAutoScroll';
+import type { SendMessageToLlmChatFunction } from '../book-components/Chat/hooks/useSendMessageToLlmChat';
 import type { LlmChatProps } from '../book-components/Chat/LlmChat/LlmChatProps';
 import type { ChatMessage } from '../book-components/Chat/types/ChatMessage';
 import type { ChatParticipant } from '../book-components/Chat/types/ChatParticipant';
@@ -340,6 +342,8 @@ export type { MockedChatDelayConfig };
 export type { MockedChatProps };
 export type { BookEditorProps };
 export type { ChatProps };
+export type { ChatAutoScrollConfig };
+export type { SendMessageToLlmChatFunction };
 export type { LlmChatProps };
 export type { ChatMessage };
 export type { ChatParticipant };

package/esm/typings/src/book-2.0/utils/generatePlaceholderAgentProfileImageUrl.d.ts
CHANGED

@@ -8,3 +8,6 @@ import { string_agent_name, string_url_image } from '../../types/typeAliases';
  * @public exported from `@promptbook/core`
  */
 export declare function generatePlaceholderAgentProfileImageUrl(agentName?: string_agent_name): string_url_image;
+/**
+ * TODO: [🤹] Figure out best placeholder image generator https://i.pravatar.cc/1000?u=568
+ */

package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/MockedChat.d.ts
CHANGED

@@ -31,7 +31,13 @@ export type MockedChatDelayConfig = {
  *
  * @public exported from `@promptbook/components`
  */
-export type MockedChatProps = ChatProps & {
+export type MockedChatProps = Omit<ChatProps, 'onReset' | /*'onMessage' | */ 'onUseTemplate' | 'isVoiceRecognitionButtonShown'> & {
+    /**
+     * Whether to show the reset button
+     *
+     * @default false
+     */
+    isResetShown?: boolean;
     /**
      * Optional delays configuration for emulating typing behavior
      */

package/esm/typings/src/book-components/Chat/LlmChat/LlmChatProps.d.ts
CHANGED

@@ -2,6 +2,7 @@ import type { LlmExecutionTools } from '../../../execution/LlmExecutionTools';
 import type { ChatProps } from '../Chat/ChatProps';
 import type { ChatMessage } from '../types/ChatMessage';
 import type { ChatParticipant } from '../types/ChatParticipant';
+import type { SendMessageToLlmChatFunction } from '../hooks/useSendMessageToLlmChat';
 /**
  * Props for LlmChat component, derived from ChatProps but with LLM-specific modifications
  *
@@ -17,8 +18,20 @@ export type LlmChatProps = Omit<ChatProps, 'messages' | 'onMessage' | 'onChange'
      * When provided, the conversation will be saved and restored from localStorage
      */
     readonly persistenceKey?: string;
+    /**
+     * Optional initial messages to pre-populate the chat.
+     * - They can include both USER and ASSISTANT messages.
+     * - They are only used when there is no persisted conversation (persistence takes precedence).
+     * - They are not automatically persisted until the user sends a new message.
+     */
+    readonly initialMessages?: ReadonlyArray<ChatMessage>;
     /**
      * Called when the chat state changes (messages, participants, etc.)
      */
     onChange?(messages: ReadonlyArray<ChatMessage>, participants: ReadonlyArray<ChatParticipant>): void;
+    /**
+     * Optional external sendMessage function produced by useSendMessageToLlmChat hook.
+     * When provided, LlmChat will attach its internal handler to it (no React context needed).
+     */
+    readonly sendMessage?: SendMessageToLlmChatFunction;
 };

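Note: a minimal sketch of how the two new props compose, following the doc comments above. The `ChatMessage` literal and the chat setup are illustrative placeholders, not shapes confirmed by this diff:

```tsx
import type { LlmExecutionTools } from '@promptbook/core';
import { LlmChat } from '@promptbook/components';
import type { ChatMessage } from '@promptbook/components';

// Hypothetical seed conversation; the real `ChatMessage` fields may differ
const initialMessages = [
    { from: 'ASSISTANT', content: 'Hi! How can I help you?' },
] as unknown as ReadonlyArray<ChatMessage>;

export function SupportChat({ llmTools }: { llmTools: LlmExecutionTools }) {
    return (
        <LlmChat
            llmTools={llmTools}
            persistenceKey="support-chat" // <- A persisted conversation takes precedence...
            initialMessages={initialMessages} // <- ...so the seed is used only on first load
        />
    );
}
```
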
package/esm/typings/src/book-components/Chat/hooks/useChatAutoScroll.d.ts
ADDED

@@ -0,0 +1,41 @@
+/**
+ * Configuration for the auto-scroll behavior
+ */
+export type ChatAutoScrollConfig = {
+    /**
+     * Threshold in pixels from bottom to consider as "at bottom"
+     * @default 100
+     */
+    bottomThreshold?: number;
+    /**
+     * Whether to use smooth scrolling
+     * @default true
+     */
+    smoothScroll?: boolean;
+    /**
+     * Delay before checking scroll position after new messages (in milliseconds)
+     * @default 100
+     */
+    scrollCheckDelay?: number;
+};
+/**
+ * Hook for managing auto-scroll behavior in chat components
+ *
+ * This hook provides:
+ * - Automatic scrolling to bottom when new messages arrive (if user is already at bottom)
+ * - Detection of when user scrolls away from bottom
+ * - Scroll-to-bottom functionality with smooth animation
+ * - Mobile-optimized scrolling behavior
+ *
+ * @public exported from `@promptbook/components`
+ */
+export declare function useChatAutoScroll(config?: ChatAutoScrollConfig): {
+    isAutoScrolling: boolean;
+    chatMessagesRef: (element: HTMLDivElement | null) => void;
+    handleScroll: (event: React.UIEvent<HTMLDivElement>) => void;
+    handleMessagesChange: () => void;
+    scrollToBottom: () => void;
+    enableAutoScroll: () => void;
+    disableAutoScroll: () => void;
+    isMobile: boolean;
+};

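Note: the declaration above is enough to wire the hook into a scrollable container; a sketch of one plausible wiring (the container markup and styling are assumptions, not from the package):

```tsx
import * as React from 'react';
import { useChatAutoScroll } from '@promptbook/components';

export function MessageList({ messages }: { messages: ReadonlyArray<string> }) {
    const { chatMessagesRef, handleScroll, handleMessagesChange, isAutoScrolling, scrollToBottom } =
        useChatAutoScroll({ bottomThreshold: 80, smoothScroll: true });

    // Let the hook re-check the scroll position whenever the message list changes
    React.useEffect(() => {
        handleMessagesChange();
    }, [messages, handleMessagesChange]);

    return (
        <div ref={chatMessagesRef} onScroll={handleScroll} style={{ overflowY: 'auto', height: 400 }}>
            {messages.map((message, index) => (
                <p key={index}>{message}</p>
            ))}
            {/* Auto-scroll pauses when the user scrolls away from the bottom */}
            {!isAutoScrolling && <button onClick={scrollToBottom}>Jump to latest</button>}
        </div>
    );
}
```
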
package/esm/typings/src/book-components/Chat/hooks/useSendMessageToLlmChat.d.ts
ADDED

@@ -0,0 +1,44 @@
+/**
+ * Function type for sending a message to LlmChat.
+ *
+ * Implementation detail: The returned function is "attachable".
+ * LlmChat will call the internal `_attach` method (if present) to bind
+ * its real message handler. Messages sent before attachment are queued
+ * and flushed after attachment.
+ *
+ * @public exported from `@promptbook/components`
+ */
+export type SendMessageToLlmChatFunction = {
+    /**
+     * Send a message to the bound LlmChat instance (or queue it until attached).
+     */
+    (message: string): void;
+    /**
+     * Internal method used by the <LlmChat/> component to attach its handler.
+     * Not intended for consumer usage.
+     *
+     * @internal
+     */
+    _attach?: (handler: (message: string) => Promise<void> | void) => void;
+};
+/**
+ * Hook to create a sendMessage function for an <LlmChat/> component WITHOUT needing any React Context.
+ *
+ * Usage pattern:
+ * ```tsx
+ * const sendMessage = useSendMessageToLlmChat();
+ * return (
+ *     <>
+ *         <button onClick={() => sendMessage('Hello!')}>Hello</button>
+ *         <LlmChat llmTools={llmTools} sendMessage={sendMessage} />
+ *     </>
+ * );
+ * ```
+ *
+ * - No provider wrapping needed.
+ * - Safe to call before the <LlmChat/> mounts (messages will be queued).
+ * - Keeps DRY by letting <LlmChat/> reuse its internal `handleMessage` logic.
+ *
+ * @public exported from `@promptbook/components`
+ */
+export declare function useSendMessageToLlmChat(): SendMessageToLlmChatFunction;

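Note: the `_attach` contract above is easiest to see as code. An illustrative re-implementation of the queue-then-flush behavior the comments describe (the real hook's internals may differ):

```ts
import type { SendMessageToLlmChatFunction } from '@promptbook/components';

function createAttachableSendMessage(): SendMessageToLlmChatFunction {
    let attachedHandler: ((message: string) => Promise<void> | void) | null = null;
    const queue: Array<string> = [];

    const sendMessage = ((message: string) => {
        if (attachedHandler === null) {
            queue.push(message); // <- Sent before <LlmChat/> attached its handler; keep for later
        } else {
            void attachedHandler(message);
        }
    }) as SendMessageToLlmChatFunction;

    // <LlmChat/> calls this on mount; everything queued so far is flushed in order
    sendMessage._attach = (handler) => {
        attachedHandler = handler;
        for (const message of queue.splice(0)) {
            void handler(message);
        }
    };

    return sendMessage;
}
```
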
package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts
CHANGED

@@ -1,8 +1,7 @@
 import type { PartialDeep, Promisable, ReadonlyDeep, WritableDeep } from 'type-fest';
 import type { PipelineJson } from '../../pipeline/PipelineJson/PipelineJson';
 import type { TaskJson } from '../../pipeline/PipelineJson/TaskJson';
-import type { Parameters } from '../../types/typeAliases';
-import type { string_parameter_name } from '../../types/typeAliases';
+import type { Parameters, string_parameter_name } from '../../types/typeAliases';
 import type { TODO_string } from '../../utils/organization/TODO_string';
 import type { ExecutionReportJson } from '../execution-report/ExecutionReportJson';
 import type { PipelineExecutorResult } from '../PipelineExecutorResult';

package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts
CHANGED

@@ -1,9 +1,7 @@
 import type { ReadonlyDeep } from 'type-fest';
 import type { PipelineJson } from '../../pipeline/PipelineJson/PipelineJson';
 import type { TaskJson } from '../../pipeline/PipelineJson/TaskJson';
-import type { Parameters } from '../../types/typeAliases';
-import type { string_markdown } from '../../types/typeAliases';
-import type { string_parameter_value } from '../../types/typeAliases';
+import type { Parameters, string_markdown, string_parameter_value } from '../../types/typeAliases';
 import type { ExecutionTools } from '../ExecutionTools';
 /**
  * Options for retrieving relevant knowledge for a specific task during pipeline execution.

package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForWizardOrCli.d.ts
CHANGED

@@ -1,7 +1,6 @@
 import { Promisable } from 'type-fest';
 import type { Identification } from '../../../remote-server/socket-types/_subtypes/Identification';
-import type { string_app_id } from '../../../types/typeAliases';
-import type { string_url } from '../../../types/typeAliases';
+import type { string_app_id, string_url } from '../../../types/typeAliases';
 import type { really_any } from '../../../utils/organization/really_any';
 import type { CacheLlmToolsOptions } from '../utils/cache/CacheLlmToolsOptions';
 import type { LlmExecutionToolsWithTotalUsage } from '../utils/count-total-usage/LlmExecutionToolsWithTotalUsage';

package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts
CHANGED

@@ -1,4 +1,4 @@
-import type { string_user_id } from '../../../types/typeAliases';
+import type { string_markdown_text, string_mime_type_with_wildcard, string_user_id } from '../../../types/typeAliases';
 import { MultipleLlmExecutionTools } from '../../_multiple/MultipleLlmExecutionTools';
 import type { LlmToolsConfiguration } from './LlmToolsConfiguration';
 /**
@@ -7,12 +7,18 @@ import type { LlmToolsConfiguration } from './LlmToolsConfiguration';
  * @private internal type for `$provideLlmToolsFromEnv` and `$provideLlmToolsForTestingAndScriptsAndPlayground`
  */
 export type CreateLlmToolsFromConfigurationOptions = {
+    /**
+     * Title of the LLM tools
+     *
+     * @default 'LLM Tools from Configuration'
+     */
+    readonly title?: string_mime_type_with_wildcard & string_markdown_text;
     /**
      * This will will be passed to the created `LlmExecutionTools`
      *
     * @default false
      */
-    isVerbose?: boolean;
+    readonly isVerbose?: boolean;
     /**
      * Identifier of the end user
      *

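Note: assuming `createLlmToolsFromConfiguration(configuration, options)` keeps its existing two-argument shape and that `LlmToolsConfiguration` is importable as shown (neither is part of this diff), the widened options would be used like this:

```ts
import { createLlmToolsFromConfiguration } from '@promptbook/core';
import type { LlmToolsConfiguration } from '@promptbook/core';

declare const configuration: LlmToolsConfiguration; // <- Provided elsewhere; shape elided here

const llmTools = createLlmToolsFromConfiguration(configuration, {
    title: 'Playground LLM Tools', // <- New in this release; defaults to 'LLM Tools from Configuration'
    isVerbose: true,
});
```
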
package/esm/typings/src/llm-providers/_multiple/MultipleLlmExecutionTools.d.ts
CHANGED

@@ -1,16 +1,8 @@
 import type { AvailableModel } from '../../execution/AvailableModel';
 import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
-import type { ChatPromptResult } from '../../execution/PromptResult';
-import type { CompletionPromptResult } from '../../execution/PromptResult';
-import type { EmbeddingPromptResult } from '../../execution/PromptResult';
-import type { PromptResult } from '../../execution/PromptResult';
-import type { ChatPrompt } from '../../types/Prompt';
-import type { CompletionPrompt } from '../../types/Prompt';
-import type { EmbeddingPrompt } from '../../types/Prompt';
-import type { Prompt } from '../../types/Prompt';
-import type { string_markdown } from '../../types/typeAliases';
-import type { string_markdown_text } from '../../types/typeAliases';
-import type { string_title } from '../../types/typeAliases';
+import type { ChatPromptResult, CompletionPromptResult, EmbeddingPromptResult, PromptResult } from '../../execution/PromptResult';
+import type { ChatPrompt, CompletionPrompt, EmbeddingPrompt, Prompt } from '../../types/Prompt';
+import type { string_markdown, string_markdown_text, string_title } from '../../types/typeAliases';
 /**
  * Multiple LLM Execution Tools is a proxy server that uses multiple execution tools internally and exposes the executor interface externally.
  *
@@ -18,6 +10,7 @@ import type { string_title } from '../../types/typeAliases';
  * @public exported from `@promptbook/core`
  */
 export declare class MultipleLlmExecutionTools implements LlmExecutionTools {
+    readonly title: string_title & string_markdown_text;
     /**
      * Array of execution tools in order of priority
      */
@@ -25,8 +18,7 @@ export declare class MultipleLlmExecutionTools implements LlmExecutionTools {
     /**
      * Gets array of execution tools in order of priority
      */
-    constructor(...llmExecutionTools: ReadonlyArray<LlmExecutionTools>);
-    get title(): string_title & string_markdown_text;
+    constructor(title: string_title & string_markdown_text, ...llmExecutionTools: ReadonlyArray<LlmExecutionTools>);
     get description(): string_markdown;
     get profile(): {
         name: string;

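Note: this is a breaking change for any code constructing the class directly: the title, previously hard-coded to 'Multiple LLM Providers' (see the `index.umd.js` hunk below), is now the required first constructor argument. A migration sketch with hypothetical provider instances:

```ts
import type { LlmExecutionTools } from '@promptbook/core';
import { MultipleLlmExecutionTools } from '@promptbook/core';

declare const openAiTools: LlmExecutionTools; // <- Hypothetical provider instances
declare const anthropicTools: LlmExecutionTools;

// Before (0.101.0-17): new MultipleLlmExecutionTools(openAiTools, anthropicTools)
// After (0.101.0-19): the title comes first
const tools = new MultipleLlmExecutionTools(
    'My LLM Providers', // <- Surfaces in error messages such as `All execution tools of ${title} failed`
    openAiTools,
    anthropicTools,
);
```
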
package/esm/typings/src/llm-providers/_multiple/getSingleLlmExecutionTools.d.ts
ADDED

@@ -0,0 +1,11 @@
+import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
+import { MultipleLlmExecutionTools } from './MultipleLlmExecutionTools';
+/**
+ * Just returns the given `LlmExecutionTools` or joins multiple into one
+ *
+ * @public exported from `@promptbook/core`
+ */
+export declare function getSingleLlmExecutionTools(oneOrMoreLlmExecutionTools: undefined | LlmExecutionTools | ReadonlyArray<LlmExecutionTools>): LlmExecutionTools | MultipleLlmExecutionTools;
+/**
+ * TODO: [👷‍♀️] @@@ Manual about construction of llmTools
+ */

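Note: this new helper centralizes the one-or-many normalization that was previously inlined at several call sites (see the repeated `arrayableToArray` + `joinLlmExecutionTools` removals in `index.umd.js` below). Its behavior, per the declaration here and the bundled implementation, with hypothetical provider instances:

```ts
import type { LlmExecutionTools } from '@promptbook/core';
import { getSingleLlmExecutionTools } from '@promptbook/core';

declare const openAiTools: LlmExecutionTools; // <- Hypothetical provider instances
declare const anthropicTools: LlmExecutionTools;

// A single tool (or a one-element array) is returned unchanged
const single = getSingleLlmExecutionTools(openAiTools);

// Several tools are joined into one MultipleLlmExecutionTools facade
const multiple = getSingleLlmExecutionTools([openAiTools, anthropicTools]);

// `undefined` yields a joined wrapper over zero tools, which warns and fails on first use
const none = getSingleLlmExecutionTools(undefined);
```
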
package/esm/typings/src/llm-providers/_multiple/joinLlmExecutionTools.d.ts
CHANGED

@@ -1,4 +1,5 @@
 import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
+import { string_markdown_text, string_title } from '../../types/typeAliases';
 import { MultipleLlmExecutionTools } from './MultipleLlmExecutionTools';
 /**
  * Joins multiple LLM Execution Tools into one
@@ -15,7 +16,7 @@ import { MultipleLlmExecutionTools } from './MultipleLlmExecutionTools';
  *
  * @public exported from `@promptbook/core`
  */
-export declare function joinLlmExecutionTools(...llmExecutionTools: ReadonlyArray<LlmExecutionTools>): MultipleLlmExecutionTools;
+export declare function joinLlmExecutionTools(title: string_title & string_markdown_text, ...llmExecutionTools: ReadonlyArray<LlmExecutionTools>): MultipleLlmExecutionTools;
 /**
  * TODO: [👷‍♀️] @@@ Manual about construction of llmTools
  */

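Note: same breaking pattern as the `MultipleLlmExecutionTools` constructor above: callers now pass the title first.

```ts
import type { LlmExecutionTools } from '@promptbook/core';
import { joinLlmExecutionTools } from '@promptbook/core';

declare const openAiTools: LlmExecutionTools; // <- Hypothetical provider instances
declare const anthropicTools: LlmExecutionTools;

// Before (0.101.0-17): joinLlmExecutionTools(openAiTools, anthropicTools)
const joined = joinLlmExecutionTools('OpenAI + Anthropic fallback chain', openAiTools, anthropicTools);
```
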
package/esm/typings/src/llm-providers/mocked/$fakeTextToExpectations.d.ts
CHANGED

@@ -11,5 +11,6 @@ import type { string_postprocessing_function_name } from '../../types/typeAliases';
  */
 export declare function $fakeTextToExpectations(expectations: Expectations, postprocessingFunctionNames?: ReadonlyArray<string_postprocessing_function_name>): Promise<string>;
 /**
+ * TODO: Do not use LoremIpsum, but use some faked text that looks more human-promptbook-like
  * TODO: [๐] Unite object for expecting amount and format - use here also a format
  */

package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts
CHANGED

@@ -1,12 +1,9 @@
 import type { AvailableModel } from '../../execution/AvailableModel';
 import type { CommonToolsOptions } from '../../execution/CommonToolsOptions';
 import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
-import type { ChatPromptResult } from '../../execution/PromptResult';
-import type { CompletionPromptResult } from '../../execution/PromptResult';
+import type { ChatPromptResult, CompletionPromptResult } from '../../execution/PromptResult';
 import type { Prompt } from '../../types/Prompt';
-import type { string_markdown } from '../../types/typeAliases';
-import type { string_markdown_text } from '../../types/typeAliases';
-import type { string_title } from '../../types/typeAliases';
+import type { string_markdown, string_markdown_text, string_title } from '../../types/typeAliases';
 /**
  * Mocked execution Tools for just echoing the requests for testing purposes.
  *

package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts
CHANGED

@@ -1,13 +1,9 @@
 import type { AvailableModel } from '../../execution/AvailableModel';
 import type { CommonToolsOptions } from '../../execution/CommonToolsOptions';
 import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
-import type { ChatPromptResult } from '../../execution/PromptResult';
-import type { CompletionPromptResult } from '../../execution/PromptResult';
-import type { EmbeddingPromptResult } from '../../execution/PromptResult';
+import type { ChatPromptResult, CompletionPromptResult, EmbeddingPromptResult } from '../../execution/PromptResult';
 import type { Prompt } from '../../types/Prompt';
-import type { string_markdown } from '../../types/typeAliases';
-import type { string_markdown_text } from '../../types/typeAliases';
-import type { string_title } from '../../types/typeAliases';
+import type { string_markdown, string_markdown_text, string_title } from '../../types/typeAliases';
 /**
  * Mocked execution Tools for just faking expected responses for testing purposes
  *

package/esm/typings/src/scrapers/markdown/MarkdownScraper.d.ts
CHANGED

@@ -1,6 +1,5 @@
 import type { KnowledgePiecePreparedJson } from '../../pipeline/PipelineJson/KnowledgePieceJson';
-import type { Scraper } from '../_common/Scraper';
-import type { ScraperSourceHandler } from '../_common/Scraper';
+import type { Scraper, ScraperSourceHandler } from '../_common/Scraper';
 import type { ExecutionTools } from '../../execution/ExecutionTools';
 import type { PrepareAndScrapeOptions } from '../../prepare/PrepareAndScrapeOptions';
 import type { ScraperAndConverterMetadata } from '../_common/register/ScraperAndConverterMetadata';

package/esm/typings/src/version.d.ts
CHANGED

@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
 export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
 /**
  * Represents the version string of the Promptbook engine.
- * It follows semantic versioning (e.g., `0.101.0-
+ * It follows semantic versioning (e.g., `0.101.0-18`).
  *
  * @generated
  */

package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
     "name": "@promptbook/legacy-documents",
-    "version": "0.101.0-17",
+    "version": "0.101.0-19",
     "description": "Promptbook: Run AI apps in plain human language across multiple models and platforms",
     "private": false,
     "sideEffects": false,
@@ -95,7 +95,7 @@
     "module": "./esm/index.es.js",
     "typings": "./esm/typings/src/_packages/legacy-documents.index.d.ts",
     "peerDependencies": {
-        "@promptbook/core": "0.101.0-17"
+        "@promptbook/core": "0.101.0-19"
     },
     "dependencies": {
         "colors": "1.4.0",

package/umd/index.umd.js
CHANGED

@@ -25,7 +25,7 @@
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.101.0-17';
+const PROMPTBOOK_ENGINE_VERSION = '0.101.0-19';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [🚉] Ignore a discrepancy between file name and entity name
@@ -3120,6 +3120,25 @@
  * TODO: [👷‍♀️] @@@ Manual about construction of llmTools
  */

+/**
+ * Takes an item or an array of items and returns an array of items
+ *
+ * 1) Any item except array and undefined returns array with that one item (also null)
+ * 2) Undefined returns empty array
+ * 3) Array returns itself
+ *
+ * @private internal utility
+ */
+function arrayableToArray(input) {
+    if (input === undefined) {
+        return [];
+    }
+    if (input instanceof Array) {
+        return input;
+    }
+    return [input];
+}
+
 /**
  * Predefined profiles for LLM providers to maintain consistency across the application
  * These profiles represent each provider as a virtual persona in chat interfaces
@@ -3200,12 +3219,10 @@
     /**
      * Gets array of execution tools in order of priority
      */
-    constructor(...llmExecutionTools) {
+    constructor(title, ...llmExecutionTools) {
+        this.title = title;
         this.llmExecutionTools = llmExecutionTools;
     }
-    get title() {
-        return 'Multiple LLM Providers';
-    }
     get description() {
         const innerModelsTitlesAndDescriptions = this.llmExecutionTools
             .map(({ title, description }, index) => {
@@ -3291,7 +3308,7 @@
                     return await llmExecutionTools.callEmbeddingModel(prompt);
                 // <- case [๐ค]:
                 default:
-                    throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}"`);
+                    throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}" in ${llmExecutionTools.title}`);
             }
         }
         catch (error) {
@@ -3312,7 +3329,7 @@
             // 2) AnthropicClaude throw PipelineExecutionError: Parameter `{knowledge}` is not defined
             // 3) ...
             spaceTrim__default["default"]((block) => `
-                All execution tools failed:
+                All execution tools of ${this.title} failed:

                 ${block(errors
                     .map(({ error, llmExecutionTools }, i) => `${i + 1}) **${llmExecutionTools.title}** thrown **${error.name || 'Error'}:** ${error.message}`)
@@ -3321,11 +3338,11 @@
             `));
         }
         else if (this.llmExecutionTools.length === 0) {
-            throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools
+            throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\` into ${this.title}`);
         }
         else {
             throw new PipelineExecutionError(spaceTrim__default["default"]((block) => `
-                You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}"
+                You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}" into ${this.title}

                 Available \`LlmExecutionTools\`:
                 ${block(this.description)}
@@ -3355,7 +3372,7 @@
  *
  * @public exported from `@promptbook/core`
  */
-function joinLlmExecutionTools(...llmExecutionTools) {
+function joinLlmExecutionTools(title, ...llmExecutionTools) {
     if (llmExecutionTools.length === 0) {
         const warningMessage = spaceTrim__default["default"](`
             You have not provided any \`LlmExecutionTools\`
@@ -3387,30 +3404,27 @@
         };
         */
     }
-    return new MultipleLlmExecutionTools(...llmExecutionTools);
+    return new MultipleLlmExecutionTools(title || 'Multiple LLM Providers joined by `joinLlmExecutionTools`', ...llmExecutionTools);
 }
 /**
  * TODO: [👷‍♀️] @@@ Manual about construction of llmTools
  */

 /**
- * Takes an item or an array of items and returns an array of items
- *
- * 1) Any item except array and undefined returns array with that one item (also null)
- * 2) Undefined returns empty array
- * 3) Array returns itself
+ * Just returns the given `LlmExecutionTools` or joins multiple into one
  *
- * @private internal utility
+ * @public exported from `@promptbook/core`
  */
-function arrayableToArray(input) {
-    if (input === undefined) {
-        return [];
-    }
-    if (input instanceof Array) {
-        return input;
-    }
-    return [input];
+function getSingleLlmExecutionTools(oneOrMoreLlmExecutionTools) {
+    const _llms = arrayableToArray(oneOrMoreLlmExecutionTools);
+    const llmTools = _llms.length === 1
+        ? _llms[0]
+        : joinLlmExecutionTools('Multiple LLM Providers joined by `getSingleLlmExecutionTools`', ..._llms);
+    return llmTools;
 }
+/**
+ * TODO: [👷‍♀️] @@@ Manual about construction of llmTools
+ */

 /**
  * Prepares the persona for the pipeline
@@ -3429,8 +3443,7 @@
         pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book'),
         tools,
     });
-    const _llms = arrayableToArray(tools.llm);
-    const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+    const llmTools = getSingleLlmExecutionTools(tools.llm);
     const availableModels = (await llmTools.listModels())
         .filter(({ modelVariant }) => modelVariant === 'CHAT')
         .map(({ modelName, modelDescription }) => ({
@@ -4192,9 +4205,7 @@
     if (tools === undefined || tools.llm === undefined) {
         throw new MissingToolsError('LLM tools are required for preparing the pipeline');
     }
-
-    const _llms = arrayableToArray(tools.llm);
-    const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+    const llmTools = getSingleLlmExecutionTools(tools.llm);
     const llmToolsWithUsage = countUsage(llmTools);
     // <- TODO: [๐ฏ]
     /*
@@ -5337,9 +5348,7 @@
         $scriptPipelineExecutionErrors: [],
         $failedResults: [], // Track all failed attempts
     };
-
-    const _llms = arrayableToArray(tools.llm);
-    const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+    const llmTools = getSingleLlmExecutionTools(tools.llm);
     attempts: for (let attemptIndex = -jokerParameterNames.length; attemptIndex < maxAttempts; attemptIndex++) {
         const isJokerAttempt = attemptIndex < 0;
         const jokerParameterName = jokerParameterNames[jokerParameterNames.length + attemptIndex];
@@ -5859,9 +5868,7 @@
         return ''; // <- Note: Np knowledge present, return empty string
     }
     try {
-
-        const _llms = arrayableToArray(tools.llm);
-        const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+        const llmTools = getSingleLlmExecutionTools(tools.llm);
        const taskEmbeddingPrompt = {
             title: 'Knowledge Search',
             modelRequirements: {
@@ -6578,9 +6585,7 @@
         throw new MissingToolsError('LLM tools are required for scraping external files');
         // <- Note: This scraper is used in all other scrapers, so saying "external files" not "markdown files"
     }
-
-    const _llms = arrayableToArray(llm);
-    const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+    const llmTools = getSingleLlmExecutionTools(llm);
     // TODO: [๐ผ] In future use `ptbk make` and made getPipelineCollection
     const collection = createCollectionFromJson(...PipelineCollection);
     const prepareKnowledgeFromMarkdownExecutor = createPipelineExecutor({