@promptbook/fake-llm 0.101.0-17 → 0.101.0-19
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/esm/index.es.js +97 -78
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/components.index.d.ts +6 -0
- package/esm/typings/src/_packages/core.index.d.ts +2 -0
- package/esm/typings/src/_packages/types.index.d.ts +4 -0
- package/esm/typings/src/book-2.0/utils/generatePlaceholderAgentProfileImageUrl.d.ts +3 -0
- package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/MockedChat.d.ts +7 -1
- package/esm/typings/src/book-components/Chat/LlmChat/LlmChatProps.d.ts +13 -0
- package/esm/typings/src/book-components/Chat/hooks/index.d.ts +2 -0
- package/esm/typings/src/book-components/Chat/hooks/useChatAutoScroll.d.ts +41 -0
- package/esm/typings/src/book-components/Chat/hooks/useSendMessageToLlmChat.d.ts +44 -0
- package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +1 -2
- package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +1 -3
- package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForWizardOrCli.d.ts +1 -2
- package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts +8 -2
- package/esm/typings/src/llm-providers/_multiple/MultipleLlmExecutionTools.d.ts +5 -13
- package/esm/typings/src/llm-providers/_multiple/getSingleLlmExecutionTools.d.ts +11 -0
- package/esm/typings/src/llm-providers/_multiple/joinLlmExecutionTools.d.ts +2 -1
- package/esm/typings/src/llm-providers/mocked/$fakeTextToExpectations.d.ts +1 -0
- package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +2 -5
- package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +2 -6
- package/esm/typings/src/scrapers/markdown/MarkdownScraper.d.ts +1 -2
- package/esm/typings/src/version.d.ts +1 -1
- package/package.json +2 -2
- package/umd/index.umd.js +100 -81
- package/umd/index.umd.js.map +1 -1
- package/esm/typings/src/llm-providers/mocked/test/joker.test.d.ts +0 -4
- package/esm/typings/src/llm-providers/mocked/test/mocked-chat.test.d.ts +0 -5
- package/esm/typings/src/llm-providers/mocked/test/mocked-completion.test.d.ts +0 -4
- package/esm/typings/src/scripting/_test/postprocessing.test.d.ts +0 -1
- /package/esm/typings/src/{cli/test/ptbk.test.d.ts → llm-providers/_common/utils/removeUnsupportedModelRequirements.test.d.ts} +0 -0
package/esm/typings/src/book-components/Chat/hooks/useChatAutoScroll.d.ts
ADDED
@@ -0,0 +1,41 @@
+/**
+ * Configuration for the auto-scroll behavior
+ */
+export type ChatAutoScrollConfig = {
+    /**
+     * Threshold in pixels from bottom to consider as "at bottom"
+     * @default 100
+     */
+    bottomThreshold?: number;
+    /**
+     * Whether to use smooth scrolling
+     * @default true
+     */
+    smoothScroll?: boolean;
+    /**
+     * Delay before checking scroll position after new messages (in milliseconds)
+     * @default 100
+     */
+    scrollCheckDelay?: number;
+};
+/**
+ * Hook for managing auto-scroll behavior in chat components
+ *
+ * This hook provides:
+ * - Automatic scrolling to bottom when new messages arrive (if user is already at bottom)
+ * - Detection of when user scrolls away from bottom
+ * - Scroll-to-bottom functionality with smooth animation
+ * - Mobile-optimized scrolling behavior
+ *
+ * @public exported from `@promptbook/components`
+ */
+export declare function useChatAutoScroll(config?: ChatAutoScrollConfig): {
+    isAutoScrolling: boolean;
+    chatMessagesRef: (element: HTMLDivElement | null) => void;
+    handleScroll: (event: React.UIEvent<HTMLDivElement>) => void;
+    handleMessagesChange: () => void;
+    scrollToBottom: () => void;
+    enableAutoScroll: () => void;
+    disableAutoScroll: () => void;
+    isMobile: boolean;
+};
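For orientation, a minimal sketch of wiring the new hook into a chat view (the component and `messages` prop are hypothetical; only the hook's API comes from the typings above):

```tsx
import * as React from 'react';
import { useChatAutoScroll } from '@promptbook/components';

// Hypothetical chat view; `messages` is an illustrative prop
export function ChatWindow({ messages }: { messages: ReadonlyArray<string> }) {
    const { chatMessagesRef, handleScroll, handleMessagesChange, scrollToBottom, isAutoScrolling } =
        useChatAutoScroll({ bottomThreshold: 100, smoothScroll: true });

    // Re-check the scroll position whenever the message list changes
    React.useEffect(() => {
        handleMessagesChange();
    }, [messages, handleMessagesChange]);

    return (
        <div ref={chatMessagesRef} onScroll={handleScroll}>
            {messages.map((message, i) => (
                <p key={i}>{message}</p>
            ))}
            {!isAutoScrolling && <button onClick={scrollToBottom}>Jump to latest</button>}
        </div>
    );
}
```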
package/esm/typings/src/book-components/Chat/hooks/useSendMessageToLlmChat.d.ts
ADDED
@@ -0,0 +1,44 @@
+/**
+ * Function type for sending a message to LlmChat.
+ *
+ * Implementation detail: The returned function is "attachable".
+ * LlmChat will call the internal `_attach` method (if present) to bind
+ * its real message handler. Messages sent before attachment are queued
+ * and flushed after attachment.
+ *
+ * @public exported from `@promptbook/components`
+ */
+export type SendMessageToLlmChatFunction = {
+    /**
+     * Send a message to the bound LlmChat instance (or queue it until attached).
+     */
+    (message: string): void;
+    /**
+     * Internal method used by the <LlmChat/> component to attach its handler.
+     * Not intended for consumer usage.
+     *
+     * @internal
+     */
+    _attach?: (handler: (message: string) => Promise<void> | void) => void;
+};
+/**
+ * Hook to create a sendMessage function for an <LlmChat/> component WITHOUT needing any React Context.
+ *
+ * Usage pattern:
+ * ```tsx
+ * const sendMessage = useSendMessageToLlmChat();
+ * return (
+ *     <>
+ *         <button onClick={() => sendMessage('Hello!')}>Hello</button>
+ *         <LlmChat llmTools={llmTools} sendMessage={sendMessage} />
+ *     </>
+ * );
+ * ```
+ *
+ * - No provider wrapping needed.
+ * - Safe to call before the <LlmChat/> mounts (messages will be queued).
+ * - Keeps DRY by letting <LlmChat/> reuse its internal `handleMessage` logic.
+ *
+ * @public exported from `@promptbook/components`
+ */
+export declare function useSendMessageToLlmChat(): SendMessageToLlmChatFunction;
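A sketch of the queue-then-attach lifecycle these typings describe (illustrative only; `_attach` is marked `@internal` and is normally called by `<LlmChat/>` itself, not by consumer code):

```ts
const sendMessage = useSendMessageToLlmChat();

sendMessage('Hello before mount'); // <LlmChat/> not mounted yet, so this is queued

// When <LlmChat/> mounts, it binds its real handler; queued messages are then flushed:
sendMessage._attach?.(async (message) => {
    // 'Hello before mount' arrives here first, then later messages go through directly
    console.info('LlmChat received:', message);
});
```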
package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts
CHANGED
@@ -1,8 +1,7 @@
 import type { PartialDeep, Promisable, ReadonlyDeep, WritableDeep } from 'type-fest';
 import type { PipelineJson } from '../../pipeline/PipelineJson/PipelineJson';
 import type { TaskJson } from '../../pipeline/PipelineJson/TaskJson';
-import type { Parameters } from '../../types/typeAliases';
-import type { string_parameter_name } from '../../types/typeAliases';
+import type { Parameters, string_parameter_name } from '../../types/typeAliases';
 import type { TODO_string } from '../../utils/organization/TODO_string';
 import type { ExecutionReportJson } from '../execution-report/ExecutionReportJson';
 import type { PipelineExecutorResult } from '../PipelineExecutorResult';
package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts
CHANGED
@@ -1,9 +1,7 @@
 import type { ReadonlyDeep } from 'type-fest';
 import type { PipelineJson } from '../../pipeline/PipelineJson/PipelineJson';
 import type { TaskJson } from '../../pipeline/PipelineJson/TaskJson';
-import type { Parameters } from '../../types/typeAliases';
-import type { string_markdown } from '../../types/typeAliases';
-import type { string_parameter_value } from '../../types/typeAliases';
+import type { Parameters, string_markdown, string_parameter_value } from '../../types/typeAliases';
 import type { ExecutionTools } from '../ExecutionTools';
 /**
  * Options for retrieving relevant knowledge for a specific task during pipeline execution.
package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForWizardOrCli.d.ts
CHANGED
@@ -1,7 +1,6 @@
 import { Promisable } from 'type-fest';
 import type { Identification } from '../../../remote-server/socket-types/_subtypes/Identification';
-import type { string_app_id } from '../../../types/typeAliases';
-import type { string_url } from '../../../types/typeAliases';
+import type { string_app_id, string_url } from '../../../types/typeAliases';
 import type { really_any } from '../../../utils/organization/really_any';
 import type { CacheLlmToolsOptions } from '../utils/cache/CacheLlmToolsOptions';
 import type { LlmExecutionToolsWithTotalUsage } from '../utils/count-total-usage/LlmExecutionToolsWithTotalUsage';
package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts
CHANGED
@@ -1,4 +1,4 @@
-import type { string_user_id } from '../../../types/typeAliases';
+import type { string_markdown_text, string_mime_type_with_wildcard, string_user_id } from '../../../types/typeAliases';
 import { MultipleLlmExecutionTools } from '../../_multiple/MultipleLlmExecutionTools';
 import type { LlmToolsConfiguration } from './LlmToolsConfiguration';
 /**
@@ -7,12 +7,18 @@ import type { LlmToolsConfiguration } from './LlmToolsConfiguration';
  * @private internal type for `$provideLlmToolsFromEnv` and `$provideLlmToolsForTestingAndScriptsAndPlayground`
  */
 export type CreateLlmToolsFromConfigurationOptions = {
+    /**
+     * Title of the LLM tools
+     *
+     * @default 'LLM Tools from Configuration'
+     */
+    readonly title?: string_mime_type_with_wildcard & string_markdown_text;
     /**
      * This will will be passed to the created `LlmExecutionTools`
      *
      * @default false
     */
-    isVerbose?: boolean;
+    readonly isVerbose?: boolean;
     /**
      * Identifier of the end user
     *
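A sketch of the new `title` option in use, assuming the function keeps a `createLlmToolsFromConfiguration(configuration, options)` shape (both the function and this options type are marked internal, and `configuration` below is a placeholder):

```ts
declare const configuration: LlmToolsConfiguration; // <- placeholder value

const llmTools = createLlmToolsFromConfiguration(configuration, {
    title: 'LLM Tools from Configuration', // <- new in this release
    isVerbose: true, // <- options properties are now declared `readonly`
});
```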
package/esm/typings/src/llm-providers/_multiple/MultipleLlmExecutionTools.d.ts
CHANGED
@@ -1,16 +1,8 @@
 import type { AvailableModel } from '../../execution/AvailableModel';
 import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
-import type { ChatPromptResult } from '../../execution/PromptResult';
-import type { CompletionPromptResult } from '../../execution/PromptResult';
-import type { EmbeddingPromptResult } from '../../execution/PromptResult';
-import type { PromptResult } from '../../execution/PromptResult';
-import type { ChatPrompt } from '../../types/Prompt';
-import type { CompletionPrompt } from '../../types/Prompt';
-import type { EmbeddingPrompt } from '../../types/Prompt';
-import type { Prompt } from '../../types/Prompt';
-import type { string_markdown } from '../../types/typeAliases';
-import type { string_markdown_text } from '../../types/typeAliases';
-import type { string_title } from '../../types/typeAliases';
+import type { ChatPromptResult, CompletionPromptResult, EmbeddingPromptResult, PromptResult } from '../../execution/PromptResult';
+import type { ChatPrompt, CompletionPrompt, EmbeddingPrompt, Prompt } from '../../types/Prompt';
+import type { string_markdown, string_markdown_text, string_title } from '../../types/typeAliases';
 /**
  * Multiple LLM Execution Tools is a proxy server that uses multiple execution tools internally and exposes the executor interface externally.
 *
@@ -18,6 +10,7 @@ import type { string_title } from '../../types/typeAliases';
  * @public exported from `@promptbook/core`
  */
 export declare class MultipleLlmExecutionTools implements LlmExecutionTools {
+    readonly title: string_title & string_markdown_text;
     /**
      * Array of execution tools in order of priority
      */
@@ -25,8 +18,7 @@ export declare class MultipleLlmExecutionTools implements LlmExecutionTools {
     /**
      * Gets array of execution tools in order of priority
     */
-    constructor(...llmExecutionTools: ReadonlyArray<LlmExecutionTools>);
-    get title(): string_title & string_markdown_text;
+    constructor(title: string_title & string_markdown_text, ...llmExecutionTools: ReadonlyArray<LlmExecutionTools>);
     get description(): string_markdown;
     get profile(): {
         name: string;
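The constructor now takes an explicit title as its first argument, replacing the former `title` getter. A migration sketch (the tool instances are placeholders):

```ts
declare const openAiTools: LlmExecutionTools; // <- placeholder tools
declare const anthropicTools: LlmExecutionTools;

// Before (0.101.0-17): the title came from a getter
// const tools = new MultipleLlmExecutionTools(openAiTools, anthropicTools);

// After (0.101.0-19): the title is an explicit first argument
const tools = new MultipleLlmExecutionTools('My LLM Tools', openAiTools, anthropicTools);
```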
package/esm/typings/src/llm-providers/_multiple/getSingleLlmExecutionTools.d.ts
ADDED
@@ -0,0 +1,11 @@
+import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
+import { MultipleLlmExecutionTools } from './MultipleLlmExecutionTools';
+/**
+ * Just returns the given `LlmExecutionTools` or joins multiple into one
+ *
+ * @public exported from `@promptbook/core`
+ */
+export declare function getSingleLlmExecutionTools(oneOrMoreLlmExecutionTools: undefined | LlmExecutionTools | ReadonlyArray<LlmExecutionTools>): LlmExecutionTools | MultipleLlmExecutionTools;
+/**
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
+ */
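Based only on the signature and doc comment above, the helper normalizes "one or many" tools into a single interface; a usage sketch with placeholder tools (how `undefined` is handled is not spelled out in the typings):

```ts
// A single instance passes through unchanged:
const single = getSingleLlmExecutionTools(openAiTools);

// An array is joined into one MultipleLlmExecutionTools facade:
const joined = getSingleLlmExecutionTools([openAiTools, anthropicTools]);
```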
package/esm/typings/src/llm-providers/_multiple/joinLlmExecutionTools.d.ts
CHANGED
@@ -1,4 +1,5 @@
 import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
+import { string_markdown_text, string_title } from '../../types/typeAliases';
 import { MultipleLlmExecutionTools } from './MultipleLlmExecutionTools';
 /**
  * Joins multiple LLM Execution Tools into one
@@ -15,7 +16,7 @@ import { MultipleLlmExecutionTools } from './MultipleLlmExecutionTools';
  *
  * @public exported from `@promptbook/core`
  */
-export declare function joinLlmExecutionTools(...llmExecutionTools: ReadonlyArray<LlmExecutionTools>): MultipleLlmExecutionTools;
+export declare function joinLlmExecutionTools(title: string_title & string_markdown_text, ...llmExecutionTools: ReadonlyArray<LlmExecutionTools>): MultipleLlmExecutionTools;
 /**
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
 */
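Call sites need the same argument shift as the constructor change above; a sketch:

```ts
// Before: joinLlmExecutionTools(openAiTools, anthropicTools)
// After:  the title leads the argument list
const tools = joinLlmExecutionTools('Joined LLM Tools', openAiTools, anthropicTools);
```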
package/esm/typings/src/llm-providers/mocked/$fakeTextToExpectations.d.ts
CHANGED
@@ -11,5 +11,6 @@ import type { string_postprocessing_function_name } from '../../types/typeAliase
  */
 export declare function $fakeTextToExpectations(expectations: Expectations, postprocessingFunctionNames?: ReadonlyArray<string_postprocessing_function_name>): Promise<string>;
 /**
+ * TODO: Do not use LoremIpsum, but use some faked text that looks more human-promptbook-like
  * TODO: [💝] Unite object for expecting amount and format - use here also a format
 */
package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts
CHANGED
@@ -1,12 +1,9 @@
 import type { AvailableModel } from '../../execution/AvailableModel';
 import type { CommonToolsOptions } from '../../execution/CommonToolsOptions';
 import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
-import type { ChatPromptResult } from '../../execution/PromptResult';
-import type { CompletionPromptResult } from '../../execution/PromptResult';
+import type { ChatPromptResult, CompletionPromptResult } from '../../execution/PromptResult';
 import type { Prompt } from '../../types/Prompt';
-import type { string_markdown } from '../../types/typeAliases';
-import type { string_markdown_text } from '../../types/typeAliases';
-import type { string_title } from '../../types/typeAliases';
+import type { string_markdown, string_markdown_text, string_title } from '../../types/typeAliases';
 /**
  * Mocked execution Tools for just echoing the requests for testing purposes.
 *
package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts
CHANGED
@@ -1,13 +1,9 @@
 import type { AvailableModel } from '../../execution/AvailableModel';
 import type { CommonToolsOptions } from '../../execution/CommonToolsOptions';
 import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
-import type { ChatPromptResult } from '../../execution/PromptResult';
-import type { CompletionPromptResult } from '../../execution/PromptResult';
-import type { EmbeddingPromptResult } from '../../execution/PromptResult';
+import type { ChatPromptResult, CompletionPromptResult, EmbeddingPromptResult } from '../../execution/PromptResult';
 import type { Prompt } from '../../types/Prompt';
-import type { string_markdown } from '../../types/typeAliases';
-import type { string_markdown_text } from '../../types/typeAliases';
-import type { string_title } from '../../types/typeAliases';
+import type { string_markdown, string_markdown_text, string_title } from '../../types/typeAliases';
 /**
  * Mocked execution Tools for just faking expected responses for testing purposes
 *
package/esm/typings/src/scrapers/markdown/MarkdownScraper.d.ts
CHANGED
@@ -1,6 +1,5 @@
 import type { KnowledgePiecePreparedJson } from '../../pipeline/PipelineJson/KnowledgePieceJson';
-import type { Scraper } from '../_common/Scraper';
-import type { ScraperSourceHandler } from '../_common/Scraper';
+import type { Scraper, ScraperSourceHandler } from '../_common/Scraper';
 import type { ExecutionTools } from '../../execution/ExecutionTools';
 import type { PrepareAndScrapeOptions } from '../../prepare/PrepareAndScrapeOptions';
 import type { ScraperAndConverterMetadata } from '../_common/register/ScraperAndConverterMetadata';
package/esm/typings/src/version.d.ts
CHANGED
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
 export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
 /**
  * Represents the version string of the Promptbook engine.
- * It follows semantic versioning (e.g., `0.101.0-
+ * It follows semantic versioning (e.g., `0.101.0-18`).
  *
  * @generated
 */
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
     "name": "@promptbook/fake-llm",
-    "version": "0.101.0-17",
+    "version": "0.101.0-19",
     "description": "Promptbook: Run AI apps in plain human language across multiple models and platforms",
     "private": false,
     "sideEffects": false,
@@ -96,7 +96,7 @@
     "module": "./esm/index.es.js",
     "typings": "./esm/typings/src/_packages/fake-llm.index.d.ts",
     "peerDependencies": {
-        "@promptbook/core": "0.101.0-17"
+        "@promptbook/core": "0.101.0-19"
    },
    "dependencies": {
        "crypto": "1.0.1",
package/umd/index.umd.js
CHANGED
@@ -1,8 +1,8 @@
 (function (global, factory) {
-    typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('spacetrim'), require('
-    typeof define === 'function' && define.amd ? define(['exports', 'spacetrim', '
-    (global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global["promptbook-fake-llm"] = {}, global.spaceTrim, global.
-    })(this, (function (exports, spaceTrim,
+    typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('spacetrim'), require('waitasecond'), require('crypto'), require('lorem-ipsum'), require('path')) :
+    typeof define === 'function' && define.amd ? define(['exports', 'spacetrim', 'waitasecond', 'crypto', 'lorem-ipsum', 'path'], factory) :
+    (global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global["promptbook-fake-llm"] = {}, global.spaceTrim, global.waitasecond, global.crypto, global.loremIpsum));
+})(this, (function (exports, spaceTrim, waitasecond, crypto, loremIpsum) { 'use strict';

 function _interopDefaultLegacy (e) { return e && typeof e === 'object' && 'default' in e ? e : { 'default': e }; }

@@ -22,82 +22,12 @@
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.101.0-17';
+const PROMPTBOOK_ENGINE_VERSION = '0.101.0-19';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
 */

-/**
- * Predefined profiles for LLM providers to maintain consistency across the application
- * These profiles represent each provider as a virtual persona in chat interfaces
- *
- * @private !!!!
- */
-const LLM_PROVIDER_PROFILES = {
-    OPENAI: {
-        name: 'OPENAI',
-        fullname: 'OpenAI GPT',
-        color: '#10a37f', // OpenAI's signature green
-        // Note: avatarSrc could be added when we have provider logos available
-    },
-    ANTHROPIC: {
-        name: 'ANTHROPIC',
-        fullname: 'Anthropic Claude',
-        color: '#d97706', // Anthropic's orange/amber color
-    },
-    AZURE_OPENAI: {
-        name: 'AZURE_OPENAI',
-        fullname: 'Azure OpenAI',
-        color: '#0078d4', // Microsoft Azure blue
-    },
-    GOOGLE: {
-        name: 'GOOGLE',
-        fullname: 'Google Gemini',
-        color: '#4285f4', // Google blue
-    },
-    DEEPSEEK: {
-        name: 'DEEPSEEK',
-        fullname: 'DeepSeek',
-        color: '#7c3aed', // Purple color for DeepSeek
-    },
-    OLLAMA: {
-        name: 'OLLAMA',
-        fullname: 'Ollama',
-        color: '#059669', // Emerald green for local models
-    },
-    REMOTE: {
-        name: 'REMOTE',
-        fullname: 'Remote Server',
-        color: '#6b7280', // Gray for remote/proxy connections
-    },
-    MOCKED_ECHO: {
-        name: 'MOCKED_ECHO',
-        fullname: 'Echo (Test)',
-        color: '#8b5cf6', // Purple for test/mock tools
-    },
-    MOCKED_FAKE: {
-        name: 'MOCKED_FAKE',
-        fullname: 'Fake LLM (Test)',
-        color: '#ec4899', // Pink for fake/test tools
-    },
-    VERCEL: {
-        name: 'VERCEL',
-        fullname: 'Vercel AI',
-        color: '#000000', // Vercel's black
-    },
-    MULTIPLE: {
-        name: 'MULTIPLE',
-        fullname: 'Multiple Providers',
-        color: '#6366f1', // Indigo for combined/multiple providers
-    },
-};
-/**
- * TODO: Refactor this - each profile must be alongside the provider definition
- * TODO: [🕛] Unite `AgentBasicInformation`, `ChatParticipant`, `LlmExecutionTools` + `LlmToolsMetadata`
- * Note: [💞] Ignore a discrepancy between file name and entity name
- */
-
 /**
  * Freezes the given object and all its nested objects recursively
  *
@@ -855,6 +785,76 @@
     return replacedTemplates;
 }

+/**
+ * Predefined profiles for LLM providers to maintain consistency across the application
+ * These profiles represent each provider as a virtual persona in chat interfaces
+ *
+ * @private !!!!
+ */
+const LLM_PROVIDER_PROFILES = {
+    OPENAI: {
+        name: 'OPENAI',
+        fullname: 'OpenAI GPT',
+        color: '#10a37f', // OpenAI's signature green
+        // Note: avatarSrc could be added when we have provider logos available
+    },
+    ANTHROPIC: {
+        name: 'ANTHROPIC',
+        fullname: 'Anthropic Claude',
+        color: '#d97706', // Anthropic's orange/amber color
+    },
+    AZURE_OPENAI: {
+        name: 'AZURE_OPENAI',
+        fullname: 'Azure OpenAI',
+        color: '#0078d4', // Microsoft Azure blue
+    },
+    GOOGLE: {
+        name: 'GOOGLE',
+        fullname: 'Google Gemini',
+        color: '#4285f4', // Google blue
+    },
+    DEEPSEEK: {
+        name: 'DEEPSEEK',
+        fullname: 'DeepSeek',
+        color: '#7c3aed', // Purple color for DeepSeek
+    },
+    OLLAMA: {
+        name: 'OLLAMA',
+        fullname: 'Ollama',
+        color: '#059669', // Emerald green for local models
+    },
+    REMOTE: {
+        name: 'REMOTE',
+        fullname: 'Remote Server',
+        color: '#6b7280', // Gray for remote/proxy connections
+    },
+    MOCKED_ECHO: {
+        name: 'MOCKED_ECHO',
+        fullname: 'Echo (Test)',
+        color: '#8b5cf6', // Purple for test/mock tools
+    },
+    MOCKED_FAKE: {
+        name: 'MOCKED_FAKE',
+        fullname: 'Fake LLM (Test)',
+        color: '#ec4899', // Pink for fake/test tools
+    },
+    VERCEL: {
+        name: 'VERCEL',
+        fullname: 'Vercel AI',
+        color: '#000000', // Vercel's black
+    },
+    MULTIPLE: {
+        name: 'MULTIPLE',
+        fullname: 'Multiple Providers',
+        color: '#6366f1', // Indigo for combined/multiple providers
+    },
+};
+/**
+ * TODO: Refactor this - each profile must be alongside the provider definition
+ * TODO: [🕛] Unite `AgentBasicInformation`, `ChatParticipant`, `LlmExecutionTools` + `LlmToolsMetadata`
+ * Note: [💞] Ignore a discrepancy between file name and entity name
+ */
+
 /**
  * Mocked execution Tools for just echoing the requests for testing purposes.
  *
@@ -917,6 +917,10 @@
             content: spaceTrim.spaceTrim((block) => `
                 You said:
                 ${block(rawPromptContent)}
+
+                [1️⃣ Say that again!](?message=${encodeURIComponent(rawPromptContent)})
+                [2️⃣ Say that twice!](?message=${encodeURIComponent(rawPromptContent).repeat(2)})
+                [3️⃣ Say that thrice!](?message=${encodeURIComponent(rawPromptContent).repeat(3)})
             `),
             modelName,
             timing: {
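Illustrative expansion of the new quick-reply links (not package output; note that "twice"/"thrice" call `.repeat(n)` on the URI-encoded text inside a single `message` parameter):

```ts
const rawPromptContent = 'Hello world';
console.info(`[1️⃣ Say that again!](?message=${encodeURIComponent(rawPromptContent)})`);
// -> [1️⃣ Say that again!](?message=Hello%20world)
console.info(`[2️⃣ Say that twice!](?message=${encodeURIComponent(rawPromptContent).repeat(2)})`);
// -> [2️⃣ Say that twice!](?message=Hello%20worldHello%20world)
```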
@@ -2372,11 +2376,20 @@
  * @private internal utility for MockedFackedLlmExecutionTools
  */
 async function $fakeTextToExpectations(expectations, postprocessingFunctionNames) {
+    console.info({ expectations });
+    // If no expectations are set, return one paragraph
+    if (!expectations || Object.keys(expectations).length === 0) {
+        const lorem = new loremIpsum.LoremIpsum({
+            wordsPerSentence: { min: 5, max: 15 },
+            sentencesPerParagraph: { min: 5, max: 15 },
+        });
+        return lorem.generateParagraphs(1);
+    }
     const lorem = new loremIpsum.LoremIpsum({
         wordsPerSentence: { min: 5, max: 15 },
         sentencesPerParagraph: { min: 5, max: 15 },
     });
-    let
+    let loremWords = [];
     let text = '';
     for (let loopLimit = CHARACTER_LOOP_LIMIT; loopLimit-- > 0;) {
         let textToCheck = text;
@@ -2398,11 +2411,14 @@
         if (isPassingExpectations(expectations, textToCheck)) {
             return text; // <- Note: Returning the text because the postprocessing
         }
-        if (
-            loremText = lorem.generateParagraphs(1)
+        if (loremWords.length === 0) {
+            const loremText = lorem.generateParagraphs(1);
+            loremWords = loremText.split(/\s+/);
+        }
+        const nextWord = loremWords.shift();
+        if (nextWord) {
+            text += (text ? ' ' : '') + nextWord;
         }
-        text += loremText.substring(0, 1);
-        loremText = loremText.substring(1);
     }
     throw new LimitReachedError(spaceTrim.spaceTrim((block) => `
         Can not generate fake text to met the expectations
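Net effect of the two hunks above: the candidate text now grows one lorem-ipsum word per iteration (instead of one character), refilling the `loremWords` queue from a fresh paragraph whenever it runs dry. A usage sketch against the signature from `$fakeTextToExpectations.d.ts` above:

```ts
// Grows lorem-ipsum text word-by-word until the expectations pass
// (or throws LimitReachedError if CHARACTER_LOOP_LIMIT is exhausted):
const text = await $fakeTextToExpectations({ words: { min: 10, max: 20 } });
console.info(text.split(/\s+/).length); // somewhere between 10 and 20
```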
@@ -2417,6 +2433,7 @@
     `));
 }
 /**
+ * TODO: Do not use LoremIpsum, but use some faked text that looks more human-promptbook-like
  * TODO: [💝] Unite object for expecting amount and format - use here also a format
  */

@@ -2471,7 +2488,9 @@
     const usage = ZERO_USAGE;
     // <- TODO: [🧠] Compute here at least words, characters,... etc
     const content = await $fakeTextToExpectations(prompt.expectations || {
-
+        words: { min: 5, max: 1000 },
+        sentences: { min: 5, max: 20 },
+        paragraphs: { min: 1, max: 1 },
     }, prompt.postprocessingFunctionNames);
     const result = {
         content,