@promptbook/javascript 0.101.0-17 โ†’ 0.101.0-19

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29) hide show
  1. package/esm/index.es.js +1 -1
  2. package/esm/typings/src/_packages/components.index.d.ts +6 -0
  3. package/esm/typings/src/_packages/core.index.d.ts +2 -0
  4. package/esm/typings/src/_packages/types.index.d.ts +4 -0
  5. package/esm/typings/src/book-2.0/utils/generatePlaceholderAgentProfileImageUrl.d.ts +3 -0
  6. package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/MockedChat.d.ts +7 -1
  7. package/esm/typings/src/book-components/Chat/LlmChat/LlmChatProps.d.ts +13 -0
  8. package/esm/typings/src/book-components/Chat/hooks/index.d.ts +2 -0
  9. package/esm/typings/src/book-components/Chat/hooks/useChatAutoScroll.d.ts +41 -0
  10. package/esm/typings/src/book-components/Chat/hooks/useSendMessageToLlmChat.d.ts +44 -0
  11. package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +1 -2
  12. package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +1 -3
  13. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForWizardOrCli.d.ts +1 -2
  14. package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts +8 -2
  15. package/esm/typings/src/llm-providers/_multiple/MultipleLlmExecutionTools.d.ts +5 -13
  16. package/esm/typings/src/llm-providers/_multiple/getSingleLlmExecutionTools.d.ts +11 -0
  17. package/esm/typings/src/llm-providers/_multiple/joinLlmExecutionTools.d.ts +2 -1
  18. package/esm/typings/src/llm-providers/mocked/$fakeTextToExpectations.d.ts +1 -0
  19. package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +2 -5
  20. package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +2 -6
  21. package/esm/typings/src/scrapers/markdown/MarkdownScraper.d.ts +1 -2
  22. package/esm/typings/src/version.d.ts +1 -1
  23. package/package.json +2 -2
  24. package/umd/index.umd.js +1 -1
  25. package/esm/typings/src/llm-providers/mocked/test/joker.test.d.ts +0 -4
  26. package/esm/typings/src/llm-providers/mocked/test/mocked-chat.test.d.ts +0 -5
  27. package/esm/typings/src/llm-providers/mocked/test/mocked-completion.test.d.ts +0 -4
  28. package/esm/typings/src/scripting/_test/postprocessing.test.d.ts +0 -1
  29. /package/esm/typings/src/{cli/test/ptbk.test.d.ts โ†’ llm-providers/_common/utils/removeUnsupportedModelRequirements.test.d.ts} +0 -0
package/esm/index.es.js CHANGED
@@ -16,7 +16,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
16
16
  * @generated
17
17
  * @see https://github.com/webgptorg/promptbook
18
18
  */
19
- const PROMPTBOOK_ENGINE_VERSION = '0.101.0-17';
19
+ const PROMPTBOOK_ENGINE_VERSION = '0.101.0-19';
20
20
  /**
21
21
  * TODO: string_promptbook_version should be constrained to all versions of Promptbook engine
22
22
  * Note: [๐Ÿ’ž] Ignore a discrepancy between file name and entity name
@@ -15,6 +15,9 @@ import { BookEditor } from '../book-components/BookEditor/BookEditor';
15
15
  import { DEFAULT_BOOK_FONT_CLASS } from '../book-components/BookEditor/config';
16
16
  import { Chat } from '../book-components/Chat/Chat/Chat';
17
17
  import type { ChatProps } from '../book-components/Chat/Chat/ChatProps';
18
+ import { useChatAutoScroll } from '../book-components/Chat/hooks/useChatAutoScroll';
19
+ import type { SendMessageToLlmChatFunction } from '../book-components/Chat/hooks/useSendMessageToLlmChat';
20
+ import { useSendMessageToLlmChat } from '../book-components/Chat/hooks/useSendMessageToLlmChat';
18
21
  import { LlmChat } from '../book-components/Chat/LlmChat/LlmChat';
19
22
  import type { LlmChatProps } from '../book-components/Chat/LlmChat/LlmChatProps';
20
23
  import type { ChatMessage } from '../book-components/Chat/types/ChatMessage';
@@ -44,6 +47,9 @@ export { BookEditor };
44
47
  export { DEFAULT_BOOK_FONT_CLASS };
45
48
  export { Chat };
46
49
  export type { ChatProps };
50
+ export { useChatAutoScroll };
51
+ export type { SendMessageToLlmChatFunction };
52
+ export { useSendMessageToLlmChat };
47
53
  export { LlmChat };
48
54
  export type { LlmChatProps };
49
55
  export type { ChatMessage };
@@ -120,6 +120,7 @@ import { createLlmToolsFromConfiguration } from '../llm-providers/_common/regist
120
120
  import { cacheLlmTools } from '../llm-providers/_common/utils/cache/cacheLlmTools';
121
121
  import { countUsage } from '../llm-providers/_common/utils/count-total-usage/countUsage';
122
122
  import { limitTotalUsage } from '../llm-providers/_common/utils/count-total-usage/limitTotalUsage';
123
+ import { getSingleLlmExecutionTools } from '../llm-providers/_multiple/getSingleLlmExecutionTools';
123
124
  import { joinLlmExecutionTools } from '../llm-providers/_multiple/joinLlmExecutionTools';
124
125
  import { MultipleLlmExecutionTools } from '../llm-providers/_multiple/MultipleLlmExecutionTools';
125
126
  import { AgentLlmExecutionTools } from '../llm-providers/agent/AgentLlmExecutionTools';
@@ -291,6 +292,7 @@ export { createLlmToolsFromConfiguration };
291
292
  export { cacheLlmTools };
292
293
  export { countUsage };
293
294
  export { limitTotalUsage };
295
+ export { getSingleLlmExecutionTools };
294
296
  export { joinLlmExecutionTools };
295
297
  export { MultipleLlmExecutionTools };
296
298
  export { AgentLlmExecutionTools };
@@ -13,6 +13,8 @@ import type { MockedChatDelayConfig } from '../book-components/AvatarProfile/Ava
13
13
  import type { MockedChatProps } from '../book-components/AvatarProfile/AvatarProfile/MockedChat';
14
14
  import type { BookEditorProps } from '../book-components/BookEditor/BookEditor';
15
15
  import type { ChatProps } from '../book-components/Chat/Chat/ChatProps';
16
+ import type { ChatAutoScrollConfig } from '../book-components/Chat/hooks/useChatAutoScroll';
17
+ import type { SendMessageToLlmChatFunction } from '../book-components/Chat/hooks/useSendMessageToLlmChat';
16
18
  import type { LlmChatProps } from '../book-components/Chat/LlmChat/LlmChatProps';
17
19
  import type { ChatMessage } from '../book-components/Chat/types/ChatMessage';
18
20
  import type { ChatParticipant } from '../book-components/Chat/types/ChatParticipant';
@@ -340,6 +342,8 @@ export type { MockedChatDelayConfig };
340
342
  export type { MockedChatProps };
341
343
  export type { BookEditorProps };
342
344
  export type { ChatProps };
345
+ export type { ChatAutoScrollConfig };
346
+ export type { SendMessageToLlmChatFunction };
343
347
  export type { LlmChatProps };
344
348
  export type { ChatMessage };
345
349
  export type { ChatParticipant };
@@ -8,3 +8,6 @@ import { string_agent_name, string_url_image } from '../../types/typeAliases';
8
8
  * @public exported from `@promptbook/core`
9
9
  */
10
10
  export declare function generatePlaceholderAgentProfileImageUrl(agentName?: string_agent_name): string_url_image;
11
+ /**
12
+ * TODO: [๐Ÿคน] Figure out best placeholder image generator https://i.pravatar.cc/1000?u=568
13
+ */
@@ -31,7 +31,13 @@ export type MockedChatDelayConfig = {
31
31
  *
32
32
  * @public exported from `@promptbook/components`
33
33
  */
34
- export type MockedChatProps = ChatProps & {
34
+ export type MockedChatProps = Omit<ChatProps, 'onReset' | /*'onMessage' | */ 'onUseTemplate' | 'isVoiceRecognitionButtonShown'> & {
35
+ /**
36
+ * Whether to show the reset button
37
+ *
38
+ * @default false
39
+ */
40
+ isResetShown?: boolean;
35
41
  /**
36
42
  * Optional delays configuration for emulating typing behavior
37
43
  */
@@ -2,6 +2,7 @@ import type { LlmExecutionTools } from '../../../execution/LlmExecutionTools';
2
2
  import type { ChatProps } from '../Chat/ChatProps';
3
3
  import type { ChatMessage } from '../types/ChatMessage';
4
4
  import type { ChatParticipant } from '../types/ChatParticipant';
5
+ import type { SendMessageToLlmChatFunction } from '../hooks/useSendMessageToLlmChat';
5
6
  /**
6
7
  * Props for LlmChat component, derived from ChatProps but with LLM-specific modifications
7
8
  *
@@ -17,8 +18,20 @@ export type LlmChatProps = Omit<ChatProps, 'messages' | 'onMessage' | 'onChange'
17
18
  * When provided, the conversation will be saved and restored from localStorage
18
19
  */
19
20
  readonly persistenceKey?: string;
21
+ /**
22
+ * Optional initial messages to pre-populate the chat.
23
+ * - They can include both USER and ASSISTANT messages.
24
+ * - They are only used when there is no persisted conversation (persistence takes precedence).
25
+ * - They are not automatically persisted until the user sends a new message.
26
+ */
27
+ readonly initialMessages?: ReadonlyArray<ChatMessage>;
20
28
  /**
21
29
  * Called when the chat state changes (messages, participants, etc.)
22
30
  */
23
31
  onChange?(messages: ReadonlyArray<ChatMessage>, participants: ReadonlyArray<ChatParticipant>): void;
32
+ /**
33
+ * Optional external sendMessage function produced by useSendMessageToLlmChat hook.
34
+ * When provided, LlmChat will attach its internal handler to it (no React context needed).
35
+ */
36
+ readonly sendMessage?: SendMessageToLlmChatFunction;
24
37
  };
@@ -0,0 +1,2 @@
1
+ export * from './useChatAutoScroll';
2
+ export * from './useSendMessageToLlmChat';
@@ -0,0 +1,41 @@
1
+ /**
2
+ * Configuration for the auto-scroll behavior
3
+ */
4
+ export type ChatAutoScrollConfig = {
5
+ /**
6
+ * Threshold in pixels from bottom to consider as "at bottom"
7
+ * @default 100
8
+ */
9
+ bottomThreshold?: number;
10
+ /**
11
+ * Whether to use smooth scrolling
12
+ * @default true
13
+ */
14
+ smoothScroll?: boolean;
15
+ /**
16
+ * Delay before checking scroll position after new messages (in milliseconds)
17
+ * @default 100
18
+ */
19
+ scrollCheckDelay?: number;
20
+ };
21
+ /**
22
+ * Hook for managing auto-scroll behavior in chat components
23
+ *
24
+ * This hook provides:
25
+ * - Automatic scrolling to bottom when new messages arrive (if user is already at bottom)
26
+ * - Detection of when user scrolls away from bottom
27
+ * - Scroll-to-bottom functionality with smooth animation
28
+ * - Mobile-optimized scrolling behavior
29
+ *
30
+ * @public exported from `@promptbook/components`
31
+ */
32
+ export declare function useChatAutoScroll(config?: ChatAutoScrollConfig): {
33
+ isAutoScrolling: boolean;
34
+ chatMessagesRef: (element: HTMLDivElement | null) => void;
35
+ handleScroll: (event: React.UIEvent<HTMLDivElement>) => void;
36
+ handleMessagesChange: () => void;
37
+ scrollToBottom: () => void;
38
+ enableAutoScroll: () => void;
39
+ disableAutoScroll: () => void;
40
+ isMobile: boolean;
41
+ };
@@ -0,0 +1,44 @@
1
+ /**
2
+ * Function type for sending a message to LlmChat.
3
+ *
4
+ * Implementation detail: The returned function is "attachable".
5
+ * LlmChat will call the internal `_attach` method (if present) to bind
6
+ * its real message handler. Messages sent before attachment are queued
7
+ * and flushed after attachment.
8
+ *
9
+ * @public exported from `@promptbook/components`
10
+ */
11
+ export type SendMessageToLlmChatFunction = {
12
+ /**
13
+ * Send a message to the bound LlmChat instance (or queue it until attached).
14
+ */
15
+ (message: string): void;
16
+ /**
17
+ * Internal method used by the <LlmChat/> component to attach its handler.
18
+ * Not intended for consumer usage.
19
+ *
20
+ * @internal
21
+ */
22
+ _attach?: (handler: (message: string) => Promise<void> | void) => void;
23
+ };
24
+ /**
25
+ * Hook to create a sendMessage function for an <LlmChat/> component WITHOUT needing any React Context.
26
+ *
27
+ * Usage pattern:
28
+ * ```tsx
29
+ * const sendMessage = useSendMessageToLlmChat();
30
+ * return (
31
+ * <>
32
+ * <button onClick={() => sendMessage('Hello!')}>Hello</button>
33
+ * <LlmChat llmTools={llmTools} sendMessage={sendMessage} />
34
+ * </>
35
+ * );
36
+ * ```
37
+ *
38
+ * - No provider wrapping needed.
39
+ * - Safe to call before the <LlmChat/> mounts (messages will be queued).
40
+ * - Keeps DRY by letting <LlmChat/> reuse its internal `handleMessage` logic.
41
+ *
42
+ * @public exported from `@promptbook/components`
43
+ */
44
+ export declare function useSendMessageToLlmChat(): SendMessageToLlmChatFunction;
@@ -1,8 +1,7 @@
1
1
  import type { PartialDeep, Promisable, ReadonlyDeep, WritableDeep } from 'type-fest';
2
2
  import type { PipelineJson } from '../../pipeline/PipelineJson/PipelineJson';
3
3
  import type { TaskJson } from '../../pipeline/PipelineJson/TaskJson';
4
- import type { Parameters } from '../../types/typeAliases';
5
- import type { string_parameter_name } from '../../types/typeAliases';
4
+ import type { Parameters, string_parameter_name } from '../../types/typeAliases';
6
5
  import type { TODO_string } from '../../utils/organization/TODO_string';
7
6
  import type { ExecutionReportJson } from '../execution-report/ExecutionReportJson';
8
7
  import type { PipelineExecutorResult } from '../PipelineExecutorResult';
@@ -1,9 +1,7 @@
1
1
  import type { ReadonlyDeep } from 'type-fest';
2
2
  import type { PipelineJson } from '../../pipeline/PipelineJson/PipelineJson';
3
3
  import type { TaskJson } from '../../pipeline/PipelineJson/TaskJson';
4
- import type { Parameters } from '../../types/typeAliases';
5
- import type { string_markdown } from '../../types/typeAliases';
6
- import type { string_parameter_value } from '../../types/typeAliases';
4
+ import type { Parameters, string_markdown, string_parameter_value } from '../../types/typeAliases';
7
5
  import type { ExecutionTools } from '../ExecutionTools';
8
6
  /**
9
7
  * Options for retrieving relevant knowledge for a specific task during pipeline execution.
@@ -1,7 +1,6 @@
1
1
  import { Promisable } from 'type-fest';
2
2
  import type { Identification } from '../../../remote-server/socket-types/_subtypes/Identification';
3
- import type { string_app_id } from '../../../types/typeAliases';
4
- import type { string_url } from '../../../types/typeAliases';
3
+ import type { string_app_id, string_url } from '../../../types/typeAliases';
5
4
  import type { really_any } from '../../../utils/organization/really_any';
6
5
  import type { CacheLlmToolsOptions } from '../utils/cache/CacheLlmToolsOptions';
7
6
  import type { LlmExecutionToolsWithTotalUsage } from '../utils/count-total-usage/LlmExecutionToolsWithTotalUsage';
@@ -1,4 +1,4 @@
1
- import type { string_user_id } from '../../../types/typeAliases';
1
+ import type { string_markdown_text, string_mime_type_with_wildcard, string_user_id } from '../../../types/typeAliases';
2
2
  import { MultipleLlmExecutionTools } from '../../_multiple/MultipleLlmExecutionTools';
3
3
  import type { LlmToolsConfiguration } from './LlmToolsConfiguration';
4
4
  /**
@@ -7,12 +7,18 @@ import type { LlmToolsConfiguration } from './LlmToolsConfiguration';
7
7
  * @private internal type for `$provideLlmToolsFromEnv` and `$provideLlmToolsForTestingAndScriptsAndPlayground`
8
8
  */
9
9
  export type CreateLlmToolsFromConfigurationOptions = {
10
+ /**
11
+ * Title of the LLM tools
12
+ *
13
+ * @default 'LLM Tools from Configuration'
14
+ */
15
+ readonly title?: string_mime_type_with_wildcard & string_markdown_text;
10
16
  /**
11
17
  * This will be passed to the created `LlmExecutionTools`
12
18
  *
13
19
  * @default false
14
20
  */
15
- isVerbose?: boolean;
21
+ readonly isVerbose?: boolean;
16
22
  /**
17
23
  * Identifier of the end user
18
24
  *
@@ -1,16 +1,8 @@
1
1
  import type { AvailableModel } from '../../execution/AvailableModel';
2
2
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
3
- import type { ChatPromptResult } from '../../execution/PromptResult';
4
- import type { CompletionPromptResult } from '../../execution/PromptResult';
5
- import type { EmbeddingPromptResult } from '../../execution/PromptResult';
6
- import type { PromptResult } from '../../execution/PromptResult';
7
- import type { ChatPrompt } from '../../types/Prompt';
8
- import type { CompletionPrompt } from '../../types/Prompt';
9
- import type { EmbeddingPrompt } from '../../types/Prompt';
10
- import type { Prompt } from '../../types/Prompt';
11
- import type { string_markdown } from '../../types/typeAliases';
12
- import type { string_markdown_text } from '../../types/typeAliases';
13
- import type { string_title } from '../../types/typeAliases';
3
+ import type { ChatPromptResult, CompletionPromptResult, EmbeddingPromptResult, PromptResult } from '../../execution/PromptResult';
4
+ import type { ChatPrompt, CompletionPrompt, EmbeddingPrompt, Prompt } from '../../types/Prompt';
5
+ import type { string_markdown, string_markdown_text, string_title } from '../../types/typeAliases';
14
6
  /**
15
7
  * Multiple LLM Execution Tools is a proxy server that uses multiple execution tools internally and exposes the executor interface externally.
16
8
  *
@@ -18,6 +10,7 @@ import type { string_title } from '../../types/typeAliases';
18
10
  * @public exported from `@promptbook/core`
19
11
  */
20
12
  export declare class MultipleLlmExecutionTools implements LlmExecutionTools {
13
+ readonly title: string_title & string_markdown_text;
21
14
  /**
22
15
  * Array of execution tools in order of priority
23
16
  */
@@ -25,8 +18,7 @@ export declare class MultipleLlmExecutionTools implements LlmExecutionTools {
25
18
  /**
26
19
  * Gets array of execution tools in order of priority
27
20
  */
28
- constructor(...llmExecutionTools: ReadonlyArray<LlmExecutionTools>);
29
- get title(): string_title & string_markdown_text;
21
+ constructor(title: string_title & string_markdown_text, ...llmExecutionTools: ReadonlyArray<LlmExecutionTools>);
30
22
  get description(): string_markdown;
31
23
  get profile(): {
32
24
  name: string;
@@ -0,0 +1,11 @@
1
+ import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
2
+ import { MultipleLlmExecutionTools } from './MultipleLlmExecutionTools';
3
+ /**
4
+ * Just returns the given `LlmExecutionTools` or joins multiple into one
5
+ *
6
+ * @public exported from `@promptbook/core`
7
+ */
8
+ export declare function getSingleLlmExecutionTools(oneOrMoreLlmExecutionTools: undefined | LlmExecutionTools | ReadonlyArray<LlmExecutionTools>): LlmExecutionTools | MultipleLlmExecutionTools;
9
+ /**
10
+ * TODO: [๐Ÿ‘ทโ€โ™‚๏ธ] @@@ Manual about construction of llmTools
11
+ */
@@ -1,4 +1,5 @@
1
1
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
2
+ import { string_markdown_text, string_title } from '../../types/typeAliases';
2
3
  import { MultipleLlmExecutionTools } from './MultipleLlmExecutionTools';
3
4
  /**
4
5
  * Joins multiple LLM Execution Tools into one
@@ -15,7 +16,7 @@ import { MultipleLlmExecutionTools } from './MultipleLlmExecutionTools';
15
16
  *
16
17
  * @public exported from `@promptbook/core`
17
18
  */
18
- export declare function joinLlmExecutionTools(...llmExecutionTools: ReadonlyArray<LlmExecutionTools>): MultipleLlmExecutionTools;
19
+ export declare function joinLlmExecutionTools(title: string_title & string_markdown_text, ...llmExecutionTools: ReadonlyArray<LlmExecutionTools>): MultipleLlmExecutionTools;
19
20
  /**
20
21
  * TODO: [๐Ÿ‘ทโ€โ™‚๏ธ] @@@ Manual about construction of llmTools
21
22
  */
@@ -11,5 +11,6 @@ import type { string_postprocessing_function_name } from '../../types/typeAliase
11
11
  */
12
12
  export declare function $fakeTextToExpectations(expectations: Expectations, postprocessingFunctionNames?: ReadonlyArray<string_postprocessing_function_name>): Promise<string>;
13
13
  /**
14
+ * TODO: Do not use LoremIpsum, but use some faked text that looks more human-promptbook-like
14
15
  * TODO: [๐Ÿ’] Unite object for expecting amount and format - use here also a format
15
16
  */
@@ -1,12 +1,9 @@
1
1
  import type { AvailableModel } from '../../execution/AvailableModel';
2
2
  import type { CommonToolsOptions } from '../../execution/CommonToolsOptions';
3
3
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
4
- import type { ChatPromptResult } from '../../execution/PromptResult';
5
- import type { CompletionPromptResult } from '../../execution/PromptResult';
4
+ import type { ChatPromptResult, CompletionPromptResult } from '../../execution/PromptResult';
6
5
  import type { Prompt } from '../../types/Prompt';
7
- import type { string_markdown } from '../../types/typeAliases';
8
- import type { string_markdown_text } from '../../types/typeAliases';
9
- import type { string_title } from '../../types/typeAliases';
6
+ import type { string_markdown, string_markdown_text, string_title } from '../../types/typeAliases';
10
7
  /**
11
8
  * Mocked execution Tools for just echoing the requests for testing purposes.
12
9
  *
@@ -1,13 +1,9 @@
1
1
  import type { AvailableModel } from '../../execution/AvailableModel';
2
2
  import type { CommonToolsOptions } from '../../execution/CommonToolsOptions';
3
3
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
4
- import type { ChatPromptResult } from '../../execution/PromptResult';
5
- import type { CompletionPromptResult } from '../../execution/PromptResult';
6
- import type { EmbeddingPromptResult } from '../../execution/PromptResult';
4
+ import type { ChatPromptResult, CompletionPromptResult, EmbeddingPromptResult } from '../../execution/PromptResult';
7
5
  import type { Prompt } from '../../types/Prompt';
8
- import type { string_markdown } from '../../types/typeAliases';
9
- import type { string_markdown_text } from '../../types/typeAliases';
10
- import type { string_title } from '../../types/typeAliases';
6
+ import type { string_markdown, string_markdown_text, string_title } from '../../types/typeAliases';
11
7
  /**
12
8
  * Mocked execution Tools for just faking expected responses for testing purposes
13
9
  *
@@ -1,6 +1,5 @@
1
1
  import type { KnowledgePiecePreparedJson } from '../../pipeline/PipelineJson/KnowledgePieceJson';
2
- import type { Scraper } from '../_common/Scraper';
3
- import type { ScraperSourceHandler } from '../_common/Scraper';
2
+ import type { Scraper, ScraperSourceHandler } from '../_common/Scraper';
4
3
  import type { ExecutionTools } from '../../execution/ExecutionTools';
5
4
  import type { PrepareAndScrapeOptions } from '../../prepare/PrepareAndScrapeOptions';
6
5
  import type { ScraperAndConverterMetadata } from '../_common/register/ScraperAndConverterMetadata';
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
15
15
  export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
16
16
  /**
17
17
  * Represents the version string of the Promptbook engine.
18
- * It follows semantic versioning (e.g., `0.101.0-16`).
18
+ * It follows semantic versioning (e.g., `0.101.0-18`).
19
19
  *
20
20
  * @generated
21
21
  */
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@promptbook/javascript",
3
- "version": "0.101.0-17",
3
+ "version": "0.101.0-19",
4
4
  "description": "Promptbook: Run AI apps in plain human language across multiple models and platforms",
5
5
  "private": false,
6
6
  "sideEffects": false,
@@ -94,7 +94,7 @@
94
94
  "module": "./esm/index.es.js",
95
95
  "typings": "./esm/typings/src/_packages/javascript.index.d.ts",
96
96
  "peerDependencies": {
97
- "@promptbook/core": "0.101.0-17"
97
+ "@promptbook/core": "0.101.0-19"
98
98
  },
99
99
  "dependencies": {
100
100
  "crypto": "1.0.1",
package/umd/index.umd.js CHANGED
@@ -22,7 +22,7 @@
22
22
  * @generated
23
23
  * @see https://github.com/webgptorg/promptbook
24
24
  */
25
- const PROMPTBOOK_ENGINE_VERSION = '0.101.0-17';
25
+ const PROMPTBOOK_ENGINE_VERSION = '0.101.0-19';
26
26
  /**
27
27
  * TODO: string_promptbook_version should be constrained to all versions of Promptbook engine
28
28
  * Note: [๐Ÿ’ž] Ignore a discrepancy between file name and entity name
@@ -1,4 +0,0 @@
1
- export {};
2
- /**
3
- * TODO: [๐Ÿง ] What should be name of this test "MockedEchoExecutionTools.test.ts" or "createPipelineExecutor.test.ts"
4
- */
@@ -1,5 +0,0 @@
1
- export {};
2
- /**
3
- * TODO: [๐Ÿง ] What should be name of this test "MockedEchoExecutionTools.test.ts" or "createPipelineExecutor.test.ts"
4
- * Note: [๐Ÿค–] For each new model variant consider adding new testing unit like "faked-completion.test.ts", "mocked-chat.test.ts" and "mocked-completion.test.ts"
5
- */
@@ -1,4 +0,0 @@
1
- export {};
2
- /**
3
- * TODO: [๐Ÿง ] What should be name of this test "MockedEchoExecutionTools.test.ts" or "createPipelineExecutor.test.ts"
4
- */