@promptbook/node 0.101.0-12 → 0.101.0-14

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -121,6 +121,10 @@ import { countUsage } from '../llm-providers/_common/utils/count-total-usage/cou
 import { limitTotalUsage } from '../llm-providers/_common/utils/count-total-usage/limitTotalUsage';
 import { joinLlmExecutionTools } from '../llm-providers/_multiple/joinLlmExecutionTools';
 import { MultipleLlmExecutionTools } from '../llm-providers/_multiple/MultipleLlmExecutionTools';
+import { AgentLlmExecutionTools } from '../llm-providers/agent/AgentLlmExecutionTools';
+import { createAgentLlmExecutionTools } from '../llm-providers/agent/createAgentLlmExecutionTools';
+import { _AgentMetadata } from '../llm-providers/agent/register-configuration';
+import { _AgentRegistration } from '../llm-providers/agent/register-constructor';
 import { _AnthropicClaudeMetadataRegistration } from '../llm-providers/anthropic-claude/register-configuration';
 import { _AzureOpenAiMetadataRegistration } from '../llm-providers/azure-openai/register-configuration';
 import { _DeepseekMetadataRegistration } from '../llm-providers/deepseek/register-configuration';
@@ -287,6 +291,10 @@ export { countUsage };
 export { limitTotalUsage };
 export { joinLlmExecutionTools };
 export { MultipleLlmExecutionTools };
+export { AgentLlmExecutionTools };
+export { createAgentLlmExecutionTools };
+export { _AgentMetadata };
+export { _AgentRegistration };
 export { _AnthropicClaudeMetadataRegistration };
 export { _AzureOpenAiMetadataRegistration };
 export { _DeepseekMetadataRegistration };
@@ -93,6 +93,7 @@ import type { LlmToolsOptions } from '../llm-providers/_common/register/LlmTools
 import type { CacheItem } from '../llm-providers/_common/utils/cache/CacheItem';
 import type { CacheLlmToolsOptions } from '../llm-providers/_common/utils/cache/CacheLlmToolsOptions';
 import type { LlmExecutionToolsWithTotalUsage } from '../llm-providers/_common/utils/count-total-usage/LlmExecutionToolsWithTotalUsage';
+import type { CreateAgentLlmExecutionToolsOptions } from '../llm-providers/agent/createAgentLlmExecutionTools';
 import type { AnthropicClaudeExecutionToolsOptions } from '../llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions';
 import type { AnthropicClaudeExecutionToolsNonProxiedOptions } from '../llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions';
 import type { AnthropicClaudeExecutionToolsProxiedOptions } from '../llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions';
@@ -418,6 +419,7 @@ export type { LlmToolsOptions };
 export type { CacheItem };
 export type { CacheLlmToolsOptions };
 export type { LlmExecutionToolsWithTotalUsage };
+export type { CreateAgentLlmExecutionToolsOptions };
 export type { AnthropicClaudeExecutionToolsOptions };
 export type { AnthropicClaudeExecutionToolsNonProxiedOptions };
 export type { AnthropicClaudeExecutionToolsProxiedOptions };
@@ -1,3 +1,4 @@
+import type { AvailableModel } from '../../execution/AvailableModel';
 import type { string_model_name } from '../../types/typeAliases';
 import type { AgentModelRequirements } from './AgentModelRequirements';
 import type { string_book } from './string_book';
@@ -11,7 +12,7 @@ import type { string_book } from './string_book';
  *
  * @public exported from `@promptbook/core`
  */
-export declare function createAgentModelRequirements(agentSource: string_book, modelName?: string_model_name): Promise<AgentModelRequirements>;
+export declare function createAgentModelRequirements(agentSource: string_book, modelName?: string_model_name, availableModels?: readonly AvailableModel[]): Promise<AgentModelRequirements>;
 /**
  * Clears the cache for createAgentModelRequirements
  * Useful when agent sources are updated and cached results should be invalidated
@@ -60,3 +61,6 @@ export declare function extractAgentName(agentSource: string_book): string;
  * @private
  */
 export declare function extractAgentProfileImage(agentSource: string_book): string;
+/**
+ * TODO: [😩] DRY `preparePersona` and `selectBestModelFromAvailable`
+ */
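The notable change above is the new optional `availableModels` parameter of `createAgentModelRequirements`, which lets callers constrain model selection to models the underlying provider actually offers. A minimal sketch of such a call follows; the import paths and the already-configured `llmTools` provider are assumptions for illustration, not anything this diff fixes:

```typescript
// Sketch only — import paths and the externally provided `llmTools` are assumptions.
import { createAgentModelRequirements } from '@promptbook/core';
import type { LlmExecutionTools } from '@promptbook/types';

declare const llmTools: LlmExecutionTools; // any already-configured provider (OpenAI, Anthropic, ...)
declare const agentSource: Parameters<typeof createAgentModelRequirements>[0]; // the agent `string_book` source

// New in this version: pass the provider's own model list so the requirements
// are resolved against models that are actually available.
const availableModels = await llmTools.listModels();
const requirements = await createAgentModelRequirements(agentSource, undefined, availableModels);
console.info(requirements);
```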
@@ -0,0 +1,54 @@
+import type { Promisable } from 'type-fest';
+import type { string_book } from '../../book-2.0/agent-source/string_book';
+import type { ChatParticipant } from '../../book-components/Chat/types/ChatParticipant';
+import type { AvailableModel } from '../../execution/AvailableModel';
+import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
+import type { ChatPromptResult } from '../../execution/PromptResult';
+import type { Prompt } from '../../types/Prompt';
+import type { string_markdown, string_markdown_text, string_title } from '../../types/typeAliases';
+/**
+ * Execution Tools for calling LLM models with a predefined agent "soul"
+ * This wraps underlying LLM execution tools and applies agent-specific system prompts and requirements
+ *
+ * @public exported from `@promptbook/core`
+ */
+export declare class AgentLlmExecutionTools implements LlmExecutionTools {
+    private readonly llmTools;
+    private readonly agentSource;
+    /**
+     * Cached model requirements to avoid re-parsing the agent source
+     */
+    private _cachedModelRequirements;
+    /**
+     * Cached parsed agent information
+     */
+    private _cachedAgentInfo;
+    /**
+     * Creates new AgentLlmExecutionTools
+     *
+     * @param llmTools The underlying LLM execution tools to wrap
+     * @param agentSource The agent source string that defines the agent's behavior
+     */
+    constructor(llmTools: LlmExecutionTools, agentSource: string_book);
+    /**
+     * Get cached or parse agent information
+     */
+    private getAgentInfo;
+    /**
+     * Get cached or create agent model requirements
+     */
+    private getAgentModelRequirements;
+    get title(): string_title & string_markdown_text;
+    get description(): string_markdown;
+    get profile(): ChatParticipant | undefined;
+    checkConfiguration(): Promisable<void>;
+    listModels(): Promisable<ReadonlyArray<AvailableModel>>;
+    /**
+     * Calls the chat model with agent-specific system prompt and requirements
+     */
+    callChatModel(prompt: Prompt): Promise<ChatPromptResult>;
+}
+/**
+ * TODO: [🍚] Implement Destroyable pattern to free resources
+ * TODO: !!!! adding parameter substitution support
+ */
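This new file only ships the declaration of the wrapper class, so here is a minimal usage sketch. The `@promptbook/core`/`@promptbook/types` import paths and the externally supplied `llmTools`, `agentSource`, and `prompt` values are assumptions; none of them are established by this diff:

```typescript
// Sketch only — import paths and the externally provided values are assumptions.
import { AgentLlmExecutionTools } from '@promptbook/core';
import type { LlmExecutionTools, Prompt } from '@promptbook/types';

declare const llmTools: LlmExecutionTools; // the provider being wrapped
declare const agentSource: ConstructorParameters<typeof AgentLlmExecutionTools>[1]; // the agent `string_book` source
declare const prompt: Prompt; // a regular chat prompt, built elsewhere

const agentTools = new AgentLlmExecutionTools(llmTools, agentSource);

// The wrapper derives the agent's system prompt and model requirements from
// `agentSource` (caching both, per the private fields above), then delegates
// the actual chat call to the wrapped `llmTools`.
const result = await agentTools.callChatModel(prompt);
console.info(agentTools.title, result.content);
```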
@@ -0,0 +1,29 @@
+import type { string_book } from '../../book-2.0/agent-source/string_book';
+import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
+import { AgentLlmExecutionTools } from './AgentLlmExecutionTools';
+/**
+ * Options for creating AgentLlmExecutionTools
+ */
+export type CreateAgentLlmExecutionToolsOptions = {
+    /**
+     * The underlying LLM execution tools to wrap
+     */
+    llmTools: LlmExecutionTools;
+    /**
+     * The agent source string that defines the agent's behavior
+     */
+    agentSource: string_book;
+};
+/**
+ * Creates new AgentLlmExecutionTools that wrap underlying LLM tools with agent-specific behavior
+ *
+ * @public exported from `@promptbook/core`
+ */
+export declare const createAgentLlmExecutionTools: ((options: CreateAgentLlmExecutionToolsOptions) => AgentLlmExecutionTools) & {
+    packageName: string;
+    className: string;
+};
+/**
+ * TODO: [🧠] Consider adding validation for agent source format
+ * TODO: [🧠] Consider adding options for caching behavior
+ */
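`createAgentLlmExecutionTools` is the factory counterpart of the class above, carrying `packageName`/`className` metadata for Promptbook's provider-registration mechanism (see `_AgentRegistration` further down). A minimal sketch, assuming the `@promptbook/core` import path (the diff itself only confirms the export from this package's index):

```typescript
// Sketch only — the import path is an assumption; `options` carries the same
// externally provided `llmTools` and `agentSource` as in the previous sketch.
import { createAgentLlmExecutionTools } from '@promptbook/core';
import type { CreateAgentLlmExecutionToolsOptions } from '@promptbook/core';

declare const options: CreateAgentLlmExecutionToolsOptions; // { llmTools, agentSource }

// Functionally the factory form of `new AgentLlmExecutionTools(llmTools, agentSource)`.
const agentTools = createAgentLlmExecutionTools(options);
console.info(createAgentLlmExecutionTools.packageName, createAgentLlmExecutionTools.className, agentTools.title);
```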
@@ -0,0 +1,8 @@
+#!/usr/bin/env ts-node
+export {};
+/**
+ * TODO: [🧠] Add more complex agent scenarios
+ * TODO: [🧠] Add parameter substitution demo
+ * TODO: [🧠] Add multi-turn conversation demo
+ * Note: [⚫] Code in this file should never be published in any package
+ */
@@ -0,0 +1,11 @@
+/**
+ * Metadata for Agent LLM execution tools
+ *
+ * @public exported from `@promptbook/core`
+ */
+export declare const _AgentMetadata: import("../../utils/$Register").Registration;
+/**
+ * TODO: [🧠] Consider adding a special trust level for AgentLlmExecutionTools
+ * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
+ * Note: [💞] Ignore a discrepancy between file name and entity name
+ */
@@ -0,0 +1,13 @@
+import type { Registration } from '../../utils/$Register';
+/**
+ * Registration of Agent LLM provider
+ *
+ * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available LLM tools
+ *
+ * @public exported from `@promptbook/core`
+ */
+export declare const _AgentRegistration: Registration;
+/**
+ * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
+ * Note: [💞] Ignore a discrepancy between file name and entity name
+ */
@@ -10,6 +10,7 @@ import type { string_persona_description } from '../types/typeAliases';
  */
 export declare function preparePersona(personaDescription: string_persona_description, tools: Pick<ExecutionTools, 'llm'>, options: PrepareAndScrapeOptions): Promise<Pick<PersonaPreparedJson, 'modelsRequirements'>>;
 /**
+ * TODO: [😩] DRY `preparePersona` and `selectBestModelFromAvailable`
  * TODO: [🔃][main] If the persona was prepared with different version or different set of models, prepare it once again
  * TODO: [🏢] Check validity of `modelName` in pipeline
  * TODO: [🏢] Check validity of `systemMessage` in pipeline
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
 export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
 /**
  * Represents the version string of the Promptbook engine.
- * It follows semantic versioning (e.g., `0.101.0-11`).
+ * It follows semantic versioning (e.g., `0.101.0-13`).
  *
  * @generated
  */
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
     "name": "@promptbook/node",
-    "version": "0.101.0-12",
+    "version": "0.101.0-14",
     "description": "Promptbook: Run AI apps in plain human language across multiple models and platforms",
     "private": false,
     "sideEffects": false,
@@ -93,7 +93,7 @@
     "module": "./esm/index.es.js",
     "typings": "./esm/typings/src/_packages/node.index.d.ts",
     "peerDependencies": {
-        "@promptbook/core": "0.101.0-12"
+        "@promptbook/core": "0.101.0-14"
     },
     "dependencies": {
         "colors": "1.4.0",
package/umd/index.umd.js CHANGED
@@ -45,7 +45,7 @@
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.101.0-12';
+const PROMPTBOOK_ENGINE_VERSION = '0.101.0-14';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -5369,6 +5369,7 @@
 };
 }
 /**
+ * TODO: [😩] DRY `preparePersona` and `selectBestModelFromAvailable`
  * TODO: [🔃][main] If the persona was prepared with different version or different set of models, prepare it once again
  * TODO: [🏢] Check validity of `modelName` in pipeline
  * TODO: [🏢] Check validity of `systemMessage` in pipeline