@promptbook/wizard 0.101.0-2 → 0.101.0-21
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/esm/index.es.js +423 -250
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/components.index.d.ts +20 -0
- package/esm/typings/src/_packages/core.index.d.ts +14 -0
- package/esm/typings/src/_packages/types.index.d.ts +14 -0
- package/esm/typings/src/book-2.0/agent-source/AgentBasicInformation.d.ts +41 -3
- package/esm/typings/src/book-2.0/agent-source/AgentModelRequirements.d.ts +3 -0
- package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirements.d.ts +4 -22
- package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.d.ts +1 -26
- package/esm/typings/src/book-2.0/agent-source/parseParameters.d.ts +13 -0
- package/esm/typings/src/book-2.0/commitments/ACTION/ACTION.d.ts +2 -8
- package/esm/typings/src/book-2.0/commitments/DELETE/DELETE.d.ts +35 -0
- package/esm/typings/src/book-2.0/commitments/FORMAT/FORMAT.d.ts +2 -8
- package/esm/typings/src/book-2.0/commitments/GOAL/GOAL.d.ts +33 -0
- package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/KNOWLEDGE.d.ts +0 -6
- package/esm/typings/src/book-2.0/commitments/MEMORY/MEMORY.d.ts +34 -0
- package/esm/typings/src/book-2.0/commitments/MESSAGE/MESSAGE.d.ts +35 -0
- package/esm/typings/src/book-2.0/commitments/META/META.d.ts +56 -0
- package/esm/typings/src/book-2.0/commitments/META_IMAGE/META_IMAGE.d.ts +0 -6
- package/esm/typings/src/book-2.0/commitments/META_LINK/META_LINK.d.ts +0 -6
- package/esm/typings/src/book-2.0/commitments/MODEL/MODEL.d.ts +25 -10
- package/esm/typings/src/book-2.0/commitments/NOTE/NOTE.d.ts +2 -8
- package/esm/typings/src/book-2.0/commitments/PERSONA/PERSONA.d.ts +2 -8
- package/esm/typings/src/book-2.0/commitments/RULE/RULE.d.ts +0 -12
- package/esm/typings/src/book-2.0/commitments/SAMPLE/SAMPLE.d.ts +0 -12
- package/esm/typings/src/book-2.0/commitments/SCENARIO/SCENARIO.d.ts +34 -0
- package/esm/typings/src/book-2.0/commitments/STYLE/STYLE.d.ts +2 -8
- package/esm/typings/src/book-2.0/commitments/_base/createEmptyAgentModelRequirements.d.ts +1 -1
- package/esm/typings/src/book-2.0/commitments/index.d.ts +7 -3
- package/esm/typings/src/book-2.0/utils/generatePlaceholderAgentProfileImageUrl.d.ts +3 -0
- package/esm/typings/src/book-components/AvatarProfile/AvatarChip/AvatarChip.d.ts +5 -2
- package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/AvatarProfile.d.ts +3 -0
- package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/MockedChat.d.ts +63 -0
- package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/index.d.ts +3 -0
- package/esm/typings/src/book-components/BookEditor/BookEditor.d.ts +18 -0
- package/esm/typings/src/book-components/BookEditor/BookEditorInner.d.ts +2 -12
- package/esm/typings/src/book-components/Chat/Chat/ChatProps.d.ts +29 -0
- package/esm/typings/src/book-components/Chat/LlmChat/LlmChatProps.d.ts +13 -0
- package/esm/typings/src/book-components/Chat/hooks/index.d.ts +2 -0
- package/esm/typings/src/book-components/Chat/hooks/useChatAutoScroll.d.ts +41 -0
- package/esm/typings/src/book-components/Chat/hooks/useSendMessageToLlmChat.d.ts +44 -0
- package/esm/typings/src/book-components/Chat/utils/parseMessageButtons.d.ts +22 -0
- package/esm/typings/src/book-components/icons/PauseIcon.d.ts +8 -0
- package/esm/typings/src/book-components/icons/PlayIcon.d.ts +8 -0
- package/esm/typings/src/execution/PromptResult.d.ts +2 -4
- package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +1 -2
- package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +1 -3
- package/esm/typings/src/formats/csv/CsvFormatError.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsConfigurationFromEnv.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForTestingAndScriptsAndPlayground.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForWizardOrCli.d.ts +1 -2
- package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts +8 -2
- package/esm/typings/src/llm-providers/_common/utils/removeUnsupportedModelRequirements.d.ts +25 -0
- package/esm/typings/src/llm-providers/_multiple/MultipleLlmExecutionTools.d.ts +7 -18
- package/esm/typings/src/llm-providers/_multiple/getSingleLlmExecutionTools.d.ts +11 -0
- package/esm/typings/src/llm-providers/_multiple/joinLlmExecutionTools.d.ts +2 -1
- package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.d.ts +58 -0
- package/esm/typings/src/llm-providers/agent/createAgentLlmExecutionTools.d.ts +29 -0
- package/esm/typings/src/llm-providers/agent/playground/playground.d.ts +8 -0
- package/esm/typings/src/llm-providers/agent/register-configuration.d.ts +11 -0
- package/esm/typings/src/llm-providers/agent/register-constructor.d.ts +13 -0
- package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +3 -8
- package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +4 -5
- package/esm/typings/src/llm-providers/mocked/$fakeTextToExpectations.d.ts +1 -0
- package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +4 -10
- package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +4 -6
- package/esm/typings/src/llm-providers/ollama/OllamaExecutionTools.d.ts +3 -3
- package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionTools.d.ts +16 -8
- package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +3 -8
- package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +5 -14
- package/esm/typings/src/personas/preparePersona.d.ts +1 -0
- package/esm/typings/src/remote-server/openapi-types.d.ts +31 -31
- package/esm/typings/src/scrapers/markdown/MarkdownScraper.d.ts +1 -2
- package/esm/typings/src/types/ModelRequirements.d.ts +2 -4
- package/esm/typings/src/utils/color/utils/colorSaturation.d.ts +1 -1
- package/esm/typings/src/utils/editable/edit-pipeline-string/addPipelineCommand.d.ts +1 -1
- package/esm/typings/src/utils/markdown/humanizeAiText.d.ts +1 -1
- package/esm/typings/src/utils/markdown/promptbookifyAiText.d.ts +2 -2
- package/esm/typings/src/version.d.ts +1 -1
- package/package.json +2 -2
- package/umd/index.umd.js +423 -250
- package/umd/index.umd.js.map +1 -1
- package/esm/typings/src/book-2.0/utils/extractAgentMetadata.d.ts +0 -17
- package/esm/typings/src/book-2.0/utils/extractProfileImageFromSystemMessage.d.ts +0 -12
- package/esm/typings/src/book-components/Chat/examples/ChatMarkdownDemo.d.ts +0 -16
- package/esm/typings/src/expectations/drafts/isDomainNameFree.d.ts +0 -10
- package/esm/typings/src/expectations/drafts/isGithubNameFree.d.ts +0 -10
- package/esm/typings/src/llm-providers/_common/profiles/llmProviderProfiles.d.ts +0 -81
- /package/esm/typings/src/llm-providers/_common/{profiles/test/llmProviderProfiles.test.d.ts → utils/removeUnsupportedModelRequirements.test.d.ts} +0 -0
@@ -1,9 +1,7 @@
 import type { ReadonlyDeep } from 'type-fest';
 import type { PipelineJson } from '../../pipeline/PipelineJson/PipelineJson';
 import type { TaskJson } from '../../pipeline/PipelineJson/TaskJson';
-import type { Parameters } from '../../types/typeAliases';
-import type { string_markdown } from '../../types/typeAliases';
-import type { string_parameter_value } from '../../types/typeAliases';
+import type { Parameters, string_markdown, string_parameter_value } from '../../types/typeAliases';
 import type { ExecutionTools } from '../ExecutionTools';
 /**
  * Options for retrieving relevant knowledge for a specific task during pipeline execution.
package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsConfigurationFromEnv.d.ts CHANGED
@@ -16,4 +16,4 @@ import type { LlmToolsConfiguration } from './LlmToolsConfiguration';
 export declare function $provideLlmToolsConfigurationFromEnv(): Promise<LlmToolsConfiguration>;
 /**
  * Note: [ðĒ] Code in this file should never be never released in packages that could be imported into browser environment
- */
+ */
@@ -1,7 +1,6 @@
 import { Promisable } from 'type-fest';
 import type { Identification } from '../../../remote-server/socket-types/_subtypes/Identification';
-import type { string_app_id } from '../../../types/typeAliases';
-import type { string_url } from '../../../types/typeAliases';
+import type { string_app_id, string_url } from '../../../types/typeAliases';
 import type { really_any } from '../../../utils/organization/really_any';
 import type { CacheLlmToolsOptions } from '../utils/cache/CacheLlmToolsOptions';
 import type { LlmExecutionToolsWithTotalUsage } from '../utils/count-total-usage/LlmExecutionToolsWithTotalUsage';
@@ -1,4 +1,4 @@
-import type { string_user_id } from '../../../types/typeAliases';
+import type { string_markdown_text, string_mime_type_with_wildcard, string_user_id } from '../../../types/typeAliases';
 import { MultipleLlmExecutionTools } from '../../_multiple/MultipleLlmExecutionTools';
 import type { LlmToolsConfiguration } from './LlmToolsConfiguration';
 /**
@@ -7,12 +7,18 @@ import type { LlmToolsConfiguration } from './LlmToolsConfiguration';
  * @private internal type for `$provideLlmToolsFromEnv` and `$provideLlmToolsForTestingAndScriptsAndPlayground`
  */
 export type CreateLlmToolsFromConfigurationOptions = {
+    /**
+     * Title of the LLM tools
+     *
+     * @default 'LLM Tools from Configuration'
+     */
+    readonly title?: string_mime_type_with_wildcard & string_markdown_text;
     /**
      * This will will be passed to the created `LlmExecutionTools`
      *
      * @default false
      */
-    isVerbose?: boolean;
+    readonly isVerbose?: boolean;
     /**
      * Identifier of the end user
      *
@@ -0,0 +1,25 @@
+import type { ModelRequirements } from '../../../types/ModelRequirements';
+/**
+ * Parses an OpenAI error message to identify which parameter is unsupported
+ *
+ * @param errorMessage The error message from OpenAI API
+ * @returns The parameter name that is unsupported, or null if not an unsupported parameter error
+ * @private utility of LLM Tools
+ */
+export declare function parseUnsupportedParameterError(errorMessage: string): string | null;
+/**
+ * Creates a copy of model requirements with the specified parameter removed
+ *
+ * @param modelRequirements Original model requirements
+ * @param unsupportedParameter The parameter to remove
+ * @returns New model requirements without the unsupported parameter
+ * @private utility of LLM Tools
+ */
+export declare function removeUnsupportedModelRequirement(modelRequirements: ModelRequirements, unsupportedParameter: string): ModelRequirements;
+/**
+ * Checks if an error is an "Unsupported value" error from OpenAI
+ * @param error The error to check
+ * @returns true if this is an unsupported parameter error
+ * @private utility of LLM Tools
+ */
+export declare function isUnsupportedParameterError(error: Error): boolean;
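The helpers above are declared `@private utility of LLM Tools`, so they are not importable from the public packages. The sketch below only illustrates how their contracts fit together; the wrapper function, variable names, and the `@promptbook/types` entry point for `ModelRequirements` are assumptions, not part of this diff.

    import type { ModelRequirements } from '@promptbook/types';

    // Signatures copied from the declarations above; the real implementations are package-private.
    declare function isUnsupportedParameterError(error: Error): boolean;
    declare function parseUnsupportedParameterError(errorMessage: string): string | null;
    declare function removeUnsupportedModelRequirement(
        modelRequirements: ModelRequirements,
        unsupportedParameter: string,
    ): ModelRequirements;

    /**
     * Hypothetical helper: if the provider rejected a parameter, return requirements without it,
     * otherwise return `null` to signal that the error should be rethrown unchanged.
     */
    function stripRejectedParameter(modelRequirements: ModelRequirements, error: Error): ModelRequirements | null {
        if (!isUnsupportedParameterError(error)) {
            return null; // Not an "Unsupported value" error
        }
        const unsupportedParameter = parseUnsupportedParameterError(error.message);
        return unsupportedParameter === null
            ? null
            : removeUnsupportedModelRequirement(modelRequirements, unsupportedParameter);
    }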
@@ -1,16 +1,9 @@
+import type { ChatParticipant } from '../../book-components/Chat/types/ChatParticipant';
 import type { AvailableModel } from '../../execution/AvailableModel';
 import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
-import type { ChatPromptResult } from '../../execution/PromptResult';
-import type { CompletionPromptResult } from '../../execution/PromptResult';
-import type { EmbeddingPromptResult } from '../../execution/PromptResult';
-import type { PromptResult } from '../../execution/PromptResult';
-import type { ChatPrompt } from '../../types/Prompt';
-import type { CompletionPrompt } from '../../types/Prompt';
-import type { EmbeddingPrompt } from '../../types/Prompt';
-import type { Prompt } from '../../types/Prompt';
-import type { string_markdown } from '../../types/typeAliases';
-import type { string_markdown_text } from '../../types/typeAliases';
-import type { string_title } from '../../types/typeAliases';
+import type { ChatPromptResult, CompletionPromptResult, EmbeddingPromptResult, PromptResult } from '../../execution/PromptResult';
+import type { ChatPrompt, CompletionPrompt, EmbeddingPrompt, Prompt } from '../../types/Prompt';
+import type { string_markdown, string_markdown_text, string_title } from '../../types/typeAliases';
 /**
  * Multiple LLM Execution Tools is a proxy server that uses multiple execution tools internally and exposes the executor interface externally.
  *
@@ -18,6 +11,7 @@ import type { string_title } from '../../types/typeAliases';
  * @public exported from `@promptbook/core`
  */
 export declare class MultipleLlmExecutionTools implements LlmExecutionTools {
+    readonly title: string_title & string_markdown_text;
     /**
      * Array of execution tools in order of priority
      */
@@ -25,14 +19,9 @@ export declare class MultipleLlmExecutionTools implements LlmExecutionTools {
     /**
      * Gets array of execution tools in order of priority
      */
-    constructor(...llmExecutionTools: ReadonlyArray<LlmExecutionTools>);
-    get title(): string_title & string_markdown_text;
+    constructor(title: string_title & string_markdown_text, ...llmExecutionTools: ReadonlyArray<LlmExecutionTools>);
     get description(): string_markdown;
-    get profile(): {
-        name: string;
-        fullname: string;
-        color: string;
-    };
+    get profile(): ChatParticipant;
     /**
      * Check the configuration of all execution tools
      */
@@ -0,0 +1,11 @@
+import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
+import { MultipleLlmExecutionTools } from './MultipleLlmExecutionTools';
+/**
+ * Just returns the given `LlmExecutionTools` or joins multiple into one
+ *
+ * @public exported from `@promptbook/core`
+ */
+export declare function getSingleLlmExecutionTools(oneOrMoreLlmExecutionTools: undefined | LlmExecutionTools | ReadonlyArray<LlmExecutionTools>): LlmExecutionTools | MultipleLlmExecutionTools;
+/**
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
+ */
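A minimal usage sketch of the new helper; `getSingleLlmExecutionTools` is `@public exported from @promptbook/core`, but the tool instances below and the `@promptbook/types` import are placeholders.

    import { getSingleLlmExecutionTools } from '@promptbook/core';
    import type { LlmExecutionTools } from '@promptbook/types';

    // Placeholders for any concrete providers (OpenAI, Anthropic, mocked tools, ...)
    declare const openAiTools: LlmExecutionTools;
    declare const anthropicTools: LlmExecutionTools;

    const single = getSingleLlmExecutionTools(openAiTools);                   // a single tools object passes through
    const joined = getSingleLlmExecutionTools([openAiTools, anthropicTools]); // an array is joined into MultipleLlmExecutionTools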
@@ -1,4 +1,5 @@
 import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
+import { string_markdown_text, string_title } from '../../types/typeAliases';
 import { MultipleLlmExecutionTools } from './MultipleLlmExecutionTools';
 /**
  * Joins multiple LLM Execution Tools into one
@@ -15,7 +16,7 @@ import { MultipleLlmExecutionTools } from './MultipleLlmExecutionTools';
  *
  * @public exported from `@promptbook/core`
  */
-export declare function joinLlmExecutionTools(...llmExecutionTools: ReadonlyArray<LlmExecutionTools>): MultipleLlmExecutionTools;
+export declare function joinLlmExecutionTools(title: string_title & string_markdown_text, ...llmExecutionTools: ReadonlyArray<LlmExecutionTools>): MultipleLlmExecutionTools;
 /**
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
  */
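The signature change above means existing `joinLlmExecutionTools(...tools)` calls need a leading title. A sketch under the new signature (tool instances and the `@promptbook/types` import are placeholders):

    import { joinLlmExecutionTools } from '@promptbook/core';
    import type { LlmExecutionTools } from '@promptbook/types';

    declare const openAiTools: LlmExecutionTools;
    declare const anthropicTools: LlmExecutionTools;

    // Before (0.101.0-2):  joinLlmExecutionTools(openAiTools, anthropicTools)
    // After  (0.101.0-21): the title comes first and is also exposed as `MultipleLlmExecutionTools.title`
    const llmTools = joinLlmExecutionTools('My LLM tools', openAiTools, anthropicTools);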
@@ -0,0 +1,58 @@
+import type { Promisable } from 'type-fest';
+import type { string_book } from '../../book-2.0/agent-source/string_book';
+import type { ChatParticipant } from '../../book-components/Chat/types/ChatParticipant';
+import type { AvailableModel } from '../../execution/AvailableModel';
+import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
+import type { ChatPromptResult } from '../../execution/PromptResult';
+import type { Prompt } from '../../types/Prompt';
+import type { string_markdown, string_markdown_text, string_model_name, string_title } from '../../types/typeAliases';
+/**
+ * Execution Tools for calling LLM models with a predefined agent "soul"
+ * This wraps underlying LLM execution tools and applies agent-specific system prompts and requirements
+ *
+ * @public exported from `@promptbook/core`
+ */
+export declare class AgentLlmExecutionTools implements LlmExecutionTools {
+    private readonly llmTools;
+    private readonly agentSource;
+    /**
+     * Cached model requirements to avoid re-parsing the agent source
+     */
+    private _cachedModelRequirements;
+    /**
+     * Cached parsed agent information
+     */
+    private _cachedAgentInfo;
+    /**
+     * Creates new AgentLlmExecutionTools
+     *
+     * @param llmTools The underlying LLM execution tools to wrap
+     * @param agentSource The agent source string that defines the agent's behavior
+     */
+    constructor(llmTools: LlmExecutionTools, agentSource: string_book);
+    /**
+     * Get cached or parse agent information
+     */
+    private getAgentInfo;
+    /**
+     * Get cached or create agent model requirements
+     */
+    private getAgentModelRequirements;
+    get title(): string_title & string_markdown_text;
+    get description(): string_markdown;
+    get profile(): ChatParticipant | undefined;
+    checkConfiguration(): Promisable<void>;
+    /**
+     * Returns a virtual model name representing the agent behavior
+     */
+    get modelName(): string_model_name;
+    listModels(): Promisable<ReadonlyArray<AvailableModel>>;
+    /**
+     * Calls the chat model with agent-specific system prompt and requirements
+     */
+    callChatModel(prompt: Prompt): Promise<ChatPromptResult>;
+}
+/**
+ * TODO: [ð] Implement Destroyable pattern to free resources
+ * TODO: [🧠] Adding parameter substitution support (here or should be responsibility of the underlying LLM Tools)
+ */
@@ -0,0 +1,29 @@
+import type { string_book } from '../../book-2.0/agent-source/string_book';
+import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
+import { AgentLlmExecutionTools } from './AgentLlmExecutionTools';
+/**
+ * Options for creating AgentLlmExecutionTools
+ */
+export type CreateAgentLlmExecutionToolsOptions = {
+    /**
+     * The underlying LLM execution tools to wrap
+     */
+    llmTools: LlmExecutionTools;
+    /**
+     * The agent source string that defines the agent's behavior
+     */
+    agentSource: string_book;
+};
+/**
+ * Creates new AgentLlmExecutionTools that wrap underlying LLM tools with agent-specific behavior
+ *
+ * @public exported from `@promptbook/core`
+ */
+export declare const createAgentLlmExecutionTools: ((options: CreateAgentLlmExecutionToolsOptions) => AgentLlmExecutionTools) & {
+    packageName: string;
+    className: string;
+};
+/**
+ * TODO: [🧠] Consider adding validation for agent source format
+ * TODO: [🧠] Consider adding options for caching behavior
+ */
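A sketch of wiring the new agent provider together. The factory and its options are exactly as declared above; the base tools, the book source value, and the `@promptbook/types` entry point are placeholders.

    import { createAgentLlmExecutionTools } from '@promptbook/core';
    import type { LlmExecutionTools, Prompt } from '@promptbook/types';

    // Placeholders standing in for a real provider and a real book-2.0 agent definition (`string_book`)
    declare const baseLlmTools: LlmExecutionTools;
    declare const agentSource: Parameters<typeof createAgentLlmExecutionTools>[0]['agentSource'];
    declare const chatPrompt: Prompt;

    async function main() {
        const agentTools = createAgentLlmExecutionTools({ llmTools: baseLlmTools, agentSource });

        // The wrapper behaves like any other LlmExecutionTools; only `callChatModel` is declared on it
        const result = await agentTools.callChatModel(chatPrompt);
        console.info(result);
    }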
@@ -0,0 +1,8 @@
+#!/usr/bin/env ts-node
+export {};
+/**
+ * TODO: [🧠] Add more complex agent scenarios
+ * TODO: [🧠] Add parameter substitution demo
+ * TODO: [🧠] Add multi-turn conversation demo
+ * Note: [⚫] Code in this file should never be published in any package
+ */
@@ -0,0 +1,11 @@
+/**
+ * Metadata for Agent LLM execution tools
+ *
+ * @public exported from `@promptbook/core`
+ */
+export declare const _AgentMetadata: import("../../utils/$Register").Registration;
+/**
+ * TODO: [🧠] Consider adding a special trust level for AgentLlmExecutionTools
+ * TODO: [ðķ] Naming "constructor" vs "creator" vs "factory"
+ * Note: [ð] Ignore a discrepancy between file name and entity name
+ */
@@ -0,0 +1,13 @@
+import type { Registration } from '../../utils/$Register';
+/**
+ * Registration of Agent LLM provider
+ *
+ * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available LLM tools
+ *
+ * @public exported from `@promptbook/core`
+ */
+export declare const _AgentRegistration: Registration;
+/**
+ * TODO: [ðķ] Naming "constructor" vs "creator" vs "factory"
+ * Note: [ð] Ignore a discrepancy between file name and entity name
+ */
@@ -1,11 +1,10 @@
 import Anthropic from '@anthropic-ai/sdk';
+import type { ChatParticipant } from '../../book-components/Chat/types/ChatParticipant';
 import type { AvailableModel } from '../../execution/AvailableModel';
 import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
 import type { ChatPromptResult } from '../../execution/PromptResult';
 import type { Prompt } from '../../types/Prompt';
-import type { string_markdown } from '../../types/typeAliases';
-import type { string_markdown_text } from '../../types/typeAliases';
-import type { string_title } from '../../types/typeAliases';
+import type { string_markdown, string_markdown_text, string_title } from '../../types/typeAliases';
 import type { AnthropicClaudeExecutionToolsNonProxiedOptions } from './AnthropicClaudeExecutionToolsOptions';
 /**
  * Execution Tools for calling Anthropic Claude API.
@@ -28,11 +27,7 @@ export declare class AnthropicClaudeExecutionTools implements LlmExecutionTools
     constructor(options?: AnthropicClaudeExecutionToolsNonProxiedOptions);
     get title(): string_title & string_markdown_text;
     get description(): string_markdown;
-    get profile(): {
-        name: string;
-        fullname: string;
-        color: string;
-    };
+    get profile(): ChatParticipant;
     getClient(): Promise<Anthropic>;
     /**
      * Check the `options` passed to `constructor`
@@ -1,12 +1,10 @@
 import { OpenAIClient } from '@azure/openai';
+import type { ChatParticipant } from '../../book-components/Chat/types/ChatParticipant';
 import type { AvailableModel } from '../../execution/AvailableModel';
 import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
-import type { ChatPromptResult } from '../../execution/PromptResult';
-import type { CompletionPromptResult } from '../../execution/PromptResult';
+import type { ChatPromptResult, CompletionPromptResult } from '../../execution/PromptResult';
 import type { Prompt } from '../../types/Prompt';
-import type { string_markdown } from '../../types/typeAliases';
-import type { string_markdown_text } from '../../types/typeAliases';
-import type { string_title } from '../../types/typeAliases';
+import type { string_markdown, string_markdown_text, string_title } from '../../types/typeAliases';
 import type { AzureOpenAiExecutionToolsOptions } from './AzureOpenAiExecutionToolsOptions';
 /**
  * Execution Tools for calling Azure OpenAI API.
@@ -31,6 +29,7 @@ export declare class AzureOpenAiExecutionTools implements LlmExecutionTools {
     constructor(options: AzureOpenAiExecutionToolsOptions);
     get title(): string_title & string_markdown_text;
     get description(): string_markdown;
+    get profile(): ChatParticipant;
     getClient(): Promise<OpenAIClient>;
     /**
      * Check the `options` passed to `constructor`
@@ -11,5 +11,6 @@ import type { string_postprocessing_function_name } from '../../types/typeAliase
  */
 export declare function $fakeTextToExpectations(expectations: Expectations, postprocessingFunctionNames?: ReadonlyArray<string_postprocessing_function_name>): Promise<string>;
 /**
+ * TODO: Do not use LoremIpsum, but use some faked text that looks more human-promptbook-like
  * TODO: [ð] Unite object for expecting amount and format - use here also a format
  */
@@ -1,12 +1,10 @@
+import type { ChatParticipant } from '../../book-components/Chat/types/ChatParticipant';
 import type { AvailableModel } from '../../execution/AvailableModel';
 import type { CommonToolsOptions } from '../../execution/CommonToolsOptions';
 import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
-import type { ChatPromptResult } from '../../execution/PromptResult';
-import type { CompletionPromptResult } from '../../execution/PromptResult';
+import type { ChatPromptResult, CompletionPromptResult } from '../../execution/PromptResult';
 import type { Prompt } from '../../types/Prompt';
-import type { string_markdown } from '../../types/typeAliases';
-import type { string_markdown_text } from '../../types/typeAliases';
-import type { string_title } from '../../types/typeAliases';
+import type { string_markdown, string_markdown_text, string_title } from '../../types/typeAliases';
 /**
  * Mocked execution Tools for just echoing the requests for testing purposes.
  *
@@ -17,11 +15,7 @@ export declare class MockedEchoLlmExecutionTools implements LlmExecutionTools {
     constructor(options?: CommonToolsOptions);
     get title(): string_title & string_markdown_text;
     get description(): string_markdown;
-    get profile(): {
-        name: string;
-        fullname: string;
-        color: string;
-    };
+    get profile(): ChatParticipant;
     /**
      * Does nothing, just to implement the interface
      */
@@ -1,13 +1,10 @@
+import type { ChatParticipant } from '../../book-components/Chat/types/ChatParticipant';
 import type { AvailableModel } from '../../execution/AvailableModel';
 import type { CommonToolsOptions } from '../../execution/CommonToolsOptions';
 import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
-import type { ChatPromptResult } from '../../execution/PromptResult';
-import type { CompletionPromptResult } from '../../execution/PromptResult';
-import type { EmbeddingPromptResult } from '../../execution/PromptResult';
+import type { ChatPromptResult, CompletionPromptResult, EmbeddingPromptResult } from '../../execution/PromptResult';
 import type { Prompt } from '../../types/Prompt';
-import type { string_markdown } from '../../types/typeAliases';
-import type { string_markdown_text } from '../../types/typeAliases';
-import type { string_title } from '../../types/typeAliases';
+import type { string_markdown, string_markdown_text, string_title } from '../../types/typeAliases';
 /**
  * Mocked execution Tools for just faking expected responses for testing purposes
  *
@@ -18,6 +15,7 @@ export declare class MockedFackedLlmExecutionTools implements LlmExecutionTools
     constructor(options?: CommonToolsOptions);
     get title(): string_title & string_markdown_text;
     get description(): string_markdown;
+    get profile(): ChatParticipant;
     /**
      * Does nothing, just to implement the interface
      */
@@ -1,9 +1,8 @@
+import type { ChatParticipant } from '../../book-components/Chat/types/ChatParticipant';
 import type { AvailableModel } from '../../execution/AvailableModel';
 import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
 import type { Usage } from '../../execution/Usage';
-import type { string_markdown } from '../../types/typeAliases';
-import type { string_markdown_text } from '../../types/typeAliases';
-import type { string_title } from '../../types/typeAliases';
+import type { string_markdown, string_markdown_text, string_title } from '../../types/typeAliases';
 import { computeOpenAiUsage } from '../openai/computeOpenAiUsage';
 import { OpenAiCompatibleExecutionTools } from '../openai/OpenAiCompatibleExecutionTools';
 import type { OllamaExecutionToolsOptions } from './OllamaExecutionToolsOptions';
@@ -16,6 +15,7 @@ export declare class OllamaExecutionTools extends OpenAiCompatibleExecutionTools
     constructor(ollamaOptions: OllamaExecutionToolsOptions);
     get title(): string_title & string_markdown_text;
     get description(): string_markdown;
+    get profile(): ChatParticipant;
     /**
      * List all available models (non dynamically)
      *
@@ -1,15 +1,10 @@
 import OpenAI from 'openai';
 import type { AvailableModel } from '../../execution/AvailableModel';
 import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
-import type { ChatPromptResult } from '../../execution/PromptResult';
-import type { CompletionPromptResult } from '../../execution/PromptResult';
-import type { EmbeddingPromptResult } from '../../execution/PromptResult';
+import type { ChatPromptResult, CompletionPromptResult, EmbeddingPromptResult } from '../../execution/PromptResult';
 import type { Usage } from '../../execution/Usage';
 import type { Prompt } from '../../types/Prompt';
-import type { string_markdown } from '../../types/typeAliases';
-import type { string_markdown_text } from '../../types/typeAliases';
-import type { string_model_name } from '../../types/typeAliases';
-import type { string_title } from '../../types/typeAliases';
+import type { string_markdown, string_markdown_text, string_model_name, string_title } from '../../types/typeAliases';
 import { computeOpenAiUsage } from './computeOpenAiUsage';
 import type { OpenAiCompatibleExecutionToolsNonProxiedOptions } from './OpenAiCompatibleExecutionToolsOptions';
 /**
@@ -27,6 +22,10 @@ export declare abstract class OpenAiCompatibleExecutionTools implements LlmExecu
      * Rate limiter instance
      */
     private limiter;
+    /**
+     * Tracks models and parameters that have already been retried to prevent infinite loops
+     */
+    private retriedUnsupportedParameters;
     /**
      * Creates OpenAI compatible Execution Tools.
      *
@@ -48,10 +47,18 @@ export declare abstract class OpenAiCompatibleExecutionTools implements LlmExecu
      * Calls OpenAI compatible API to use a chat model.
      */
     callChatModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements' | 'format'>): Promise<ChatPromptResult>;
+    /**
+     * Internal method that handles parameter retry for chat model calls
+     */
+    private callChatModelWithRetry;
     /**
      * Calls OpenAI API to use a complete model.
      */
     callCompletionModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements'>): Promise<CompletionPromptResult>;
+    /**
+     * Internal method that handles parameter retry for completion model calls
+     */
+    private callCompletionModelWithRetry;
     /**
      * Calls OpenAI compatible API to use a embedding model
      */
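Only the names and doc comments of these private members are published in the typings. As a rough illustration (not the actual implementation), a retry wrapper guarded by a `retriedUnsupportedParameters`-style set could look like the following, reusing the `stripRejectedParameter` sketch shown earlier next to `removeUnsupportedModelRequirements.d.ts`; all names and the `@promptbook/types` import are assumptions.

    import type { ModelRequirements } from '@promptbook/types';

    // From the earlier sketch: returns reduced requirements, or `null` when there is nothing to strip
    declare function stripRejectedParameter(modelRequirements: ModelRequirements, error: Error): ModelRequirements | null;

    const retriedUnsupportedParameters = new Set<string>(); // mirrors the new private field by name only

    async function callModelWithRetry<TResult>(
        modelName: string,
        modelRequirements: ModelRequirements,
        callApi: (modelRequirements: ModelRequirements) => Promise<TResult>,
    ): Promise<TResult> {
        try {
            return await callApi(modelRequirements);
        } catch (error) {
            if (!(error instanceof Error)) {
                throw error;
            }
            const reducedRequirements = stripRejectedParameter(modelRequirements, error);
            const retryKey = `${modelName}|${error.message}`;
            if (reducedRequirements === null || retriedUnsupportedParameters.has(retryKey)) {
                throw error; // nothing to strip, or this model/parameter combination was already retried once
            }
            retriedUnsupportedParameters.add(retryKey);
            return callModelWithRetry(modelName, reducedRequirements, callApi);
        }
    }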
@@ -85,7 +92,7 @@ export declare abstract class OpenAiCompatibleExecutionTools implements LlmExecu
     /**
      * Makes a request with retry logic for network errors like ECONNRESET
      */
-    private
+    private makeRequestWithNetworkRetry;
     /**
      * Determines if an error is retryable (network-related errors)
      */
@@ -96,4 +103,5 @@ export declare abstract class OpenAiCompatibleExecutionTools implements LlmExecu
  * TODO: [ð] Maybe make custom `OpenAiCompatibleError`
  * TODO: [🧠][ð] Maybe use `isDeterministic` from options
  * TODO: [🧠][ð°] Allow to pass `title` for tracking purposes
+ * TODO: [🧠][ðĶĒ] Make reverse adapter from LlmExecutionTools to OpenAI-compatible:
  */
@@ -1,8 +1,7 @@
+import type { ChatParticipant } from '../../book-components/Chat/types/ChatParticipant';
 import type { AvailableModel } from '../../execution/AvailableModel';
 import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
-import type { string_markdown } from '../../types/typeAliases';
-import type { string_markdown_text } from '../../types/typeAliases';
-import type { string_title } from '../../types/typeAliases';
+import type { string_markdown, string_markdown_text, string_title } from '../../types/typeAliases';
 import { computeOpenAiUsage } from './computeOpenAiUsage';
 import { OpenAiCompatibleExecutionTools } from './OpenAiCompatibleExecutionTools';
 /**
/**
|
@@ -13,11 +12,7 @@ import { OpenAiCompatibleExecutionTools } from './OpenAiCompatibleExecutionTools
|
|
13
12
|
export declare class OpenAiExecutionTools extends OpenAiCompatibleExecutionTools implements LlmExecutionTools {
|
14
13
|
get title(): string_title & string_markdown_text;
|
15
14
|
get description(): string_markdown;
|
16
|
-
get profile():
|
17
|
-
name: string;
|
18
|
-
fullname: string;
|
19
|
-
color: string;
|
20
|
-
};
|
15
|
+
get profile(): ChatParticipant;
|
21
16
|
/**
|
22
17
|
* List all available models (non dynamically)
|
23
18
|
*
|
@@ -1,15 +1,10 @@
+import type { ChatParticipant } from '../../book-components/Chat/types/ChatParticipant';
 import type { AvailableModel } from '../../execution/AvailableModel';
 import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
-import type { ChatPromptResult } from '../../execution/PromptResult';
-import type { CompletionPromptResult } from '../../execution/PromptResult';
-import type { EmbeddingPromptResult } from '../../execution/PromptResult';
+import type { ChatPromptResult, CompletionPromptResult, EmbeddingPromptResult } from '../../execution/PromptResult';
 import type { RemoteClientOptions } from '../../remote-server/types/RemoteClientOptions';
-import type { ChatPrompt } from '../../types/Prompt';
-import type { CompletionPrompt } from '../../types/Prompt';
-import type { EmbeddingPrompt } from '../../types/Prompt';
-import type { string_markdown } from '../../types/typeAliases';
-import type { string_markdown_text } from '../../types/typeAliases';
-import type { string_title } from '../../types/typeAliases';
+import type { ChatPrompt, CompletionPrompt, EmbeddingPrompt } from '../../types/Prompt';
+import type { string_markdown, string_markdown_text, string_title } from '../../types/typeAliases';
 /**
  * Remote server is a proxy server that uses its execution tools internally and exposes the executor interface externally.
  *
@@ -24,11 +19,7 @@ export declare class RemoteLlmExecutionTools<TCustomOptions = undefined> impleme
     constructor(options: RemoteClientOptions<TCustomOptions>);
     get title(): string_title & string_markdown_text;
     get description(): string_markdown;
-    get profile(): {
-        name: string;
-        fullname: string;
-        color: string;
-    };
+    get profile(): ChatParticipant;
     /**
      * Check the configuration of all execution tools
      */
@@ -10,6 +10,7 @@ import type { string_persona_description } from '../types/typeAliases';
  */
 export declare function preparePersona(personaDescription: string_persona_description, tools: Pick<ExecutionTools, 'llm'>, options: PrepareAndScrapeOptions): Promise<Pick<PersonaPreparedJson, 'modelsRequirements'>>;
 /**
+ * TODO: [ðĐ] DRY `preparePersona` and `selectBestModelFromAvailable`
  * TODO: [ð][main] If the persona was prepared with different version or different set of models, prepare it once again
  * TODO: [ðĒ] Check validity of `modelName` in pipeline
  * TODO: [ðĒ] Check validity of `systemMessage` in pipeline