@promptbook/components 0.104.0-11 → 0.104.0-13
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/esm/index.es.js +91 -29
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/types.index.d.ts +2 -0
- package/esm/typings/src/book-2.0/agent-source/AgentBasicInformation.d.ts +1 -0
- package/esm/typings/src/book-2.0/agent-source/AgentModelRequirements.d.ts +5 -0
- package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirements.tools.test.d.ts +1 -0
- package/esm/typings/src/book-components/Chat/save/_common/string_chat_format_name.d.ts +1 -1
- package/esm/typings/src/commands/_common/types/Command.d.ts +1 -1
- package/esm/typings/src/commitments/META/META_DESCRIPTION.d.ts +41 -0
- package/esm/typings/src/commitments/USE_SEARCH_ENGINE/USE_SEARCH_ENGINE.d.ts +2 -2
- package/esm/typings/src/commitments/_base/BookCommitment.d.ts +1 -1
- package/esm/typings/src/formfactors/_common/FormfactorDefinition.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForTestingAndScriptsAndPlayground.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForWizardOrCli.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/utils/count-total-usage/LlmExecutionToolsWithTotalUsage.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/utils/count-total-usage/limitTotalUsage.d.ts +1 -1
- package/esm/typings/src/llm-providers/_multiple/getSingleLlmExecutionTools.d.ts +1 -1
- package/esm/typings/src/llm-providers/_multiple/joinLlmExecutionTools.d.ts +1 -1
- package/esm/typings/src/llm-providers/openai/utils/mapToolsToOpenAi.d.ts +8 -0
- package/esm/typings/src/search-engines/SearchResult.d.ts +4 -4
- package/esm/typings/src/search-engines/bing/BingSearchEngine.d.ts +1 -1
- package/esm/typings/src/search-engines/dummy/DummySearchEngine.d.ts +1 -1
- package/esm/typings/src/types/LlmToolDefinition.d.ts +20 -0
- package/esm/typings/src/types/ModelRequirements.d.ts +13 -0
- package/esm/typings/src/utils/random/$randomItem.d.ts +1 -1
- package/esm/typings/src/utils/random/$randomSeed.d.ts +1 -1
- package/esm/typings/src/version.d.ts +1 -1
- package/package.json +1 -1
- package/umd/index.umd.js +91 -29
- package/umd/index.umd.js.map +1 -1
@@ -188,6 +188,7 @@ import type { BookTranspiler } from '../transpilers/_common/BookTranspiler';
 import type { BookTranspilerOptions } from '../transpilers/_common/BookTranspilerOptions';
 import type { IntermediateFilesStrategy } from '../types/IntermediateFilesStrategy';
 import type { LlmCall } from '../types/LlmCall';
+import type { LlmToolDefinition } from '../types/LlmToolDefinition';
 import type { Message } from '../types/Message';
 import type { ModelRequirements } from '../types/ModelRequirements';
 import type { CompletionModelRequirements } from '../types/ModelRequirements';
@@ -559,6 +560,7 @@ export type { BookTranspiler };
 export type { BookTranspilerOptions };
 export type { IntermediateFilesStrategy };
 export type { LlmCall };
+export type { LlmToolDefinition };
 export type { Message };
 export type { ModelRequirements };
 export type { CompletionModelRequirements };
@@ -1,3 +1,4 @@
+import type { LlmToolDefinition } from '../../types/LlmToolDefinition';
 import type { string_knowledge_source_link } from '../../types/typeAliases';
 import type { TODO_any } from '../../utils/organization/TODO_any';
 /**
@@ -45,6 +46,10 @@ export type AgentModelRequirements = {
      * Top-k sampling value for the agent's responses
      */
     readonly topK?: number;
+    /**
+     * Tools available for the agent
+     */
+    readonly tools?: ReadonlyArray<LlmToolDefinition>;
     /**
      * Arbitrary metadata storage for commitments
      * Each commitment can store its own data here
@@ -0,0 +1 @@
+export {};
@@ -3,4 +3,4 @@ import { CHAT_SAVE_FORMATS } from '../index';
  * Supported chat export formatNames
  * @public exported from `@promptbook/components`
  */
-export type string_chat_format_name = typeof CHAT_SAVE_FORMATS[number]['formatName'];
+export type string_chat_format_name = (typeof CHAT_SAVE_FORMATS)[number]['formatName'];
@@ -3,4 +3,4 @@ import { COMMANDS } from '../../index';
  * Command is one piece of the book file section which adds some logic to the task or the whole pipeline.
  * It is parsed from the markdown from ul/ol items - one command per one item.
  */
-export type Command = ReturnType<typeof COMMANDS[number]['parse']>;
+export type Command = ReturnType<(typeof COMMANDS)[number]['parse']>;
@@ -0,0 +1,41 @@
+import type { AgentModelRequirements } from '../../book-2.0/agent-source/AgentModelRequirements';
+import { BaseCommitmentDefinition } from '../_base/BaseCommitmentDefinition';
+/**
+ * META DESCRIPTION commitment definition
+ *
+ * The META DESCRIPTION commitment sets the agent's meta description for the profile page.
+ * This commitment is special because it doesn't affect the system message,
+ * but is handled separately in the parsing logic.
+ *
+ * Example usage in agent source:
+ *
+ * ```book
+ * META DESCRIPTION An AI assistant specialized in business tasks
+ * ```
+ *
+ * @private [🪔] Maybe export the commitments through some package
+ */
+export declare class MetaDescriptionCommitmentDefinition extends BaseCommitmentDefinition<'META DESCRIPTION'> {
+    constructor();
+    /**
+     * Short one-line description of META DESCRIPTION.
+     */
+    get description(): string;
+    /**
+     * Icon for this commitment.
+     */
+    get icon(): string;
+    /**
+     * Markdown documentation for META DESCRIPTION commitment.
+     */
+    get documentation(): string;
+    applyToAgentModelRequirements(requirements: AgentModelRequirements, content: string): AgentModelRequirements;
+    /**
+     * Extracts the meta description from the content
+     * This is used by the parsing logic
+     */
+    extractMetaDescription(content: string): string | null;
+}
+/**
+ * Note: [💞] Ignore a discrepancy between file name and entity name
+ */
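For orientation, a minimal sketch of how the new commitment could be exercised. The import path is hypothetical (the class is declared `@private` and not exported from any public package yet); the content string is the example from the doc comment above.

```ts
// Hypothetical import – MetaDescriptionCommitmentDefinition is @private, so this path is illustrative only
import { MetaDescriptionCommitmentDefinition } from './commitments/META/META_DESCRIPTION';

const metaDescription = new MetaDescriptionCommitmentDefinition();

// Content taken from a `META DESCRIPTION ...` line of an agent source
const description = metaDescription.extractMetaDescription('An AI assistant specialized in business tasks');
// Per the declaration above, `description` is `string | null`
```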
@@ -6,13 +6,13 @@ import { BaseCommitmentDefinition } from '../_base/BaseCommitmentDefinition';
  * The `USE SEARCH ENGINE` commitment indicates that the agent should utilize a search engine tool
  * to access and retrieve up-to-date information from the internet when necessary.
  *
- * The content following `USE SEARCH ENGINE` is
+ * The content following `USE SEARCH ENGINE` is an arbitrary text that the agent should know (e.g. search scope or instructions).
  *
  * Example usage in agent source:
  *
  * ```book
  * USE SEARCH ENGINE
- * USE SEARCH ENGINE
+ * USE SEARCH ENGINE Hledej informace o Přemyslovcích
  * ```
  *
  * @private [🪔] Maybe export the commitments through some package
@@ -7,4 +7,4 @@ import { FORMFACTOR_DEFINITIONS } from '../index';
  * Note: [🚉] This is fully serializable as JSON
  * @see https://github.com/webgptorg/promptbook/discussions/172
  */
-export type FormfactorDefinition = typeof FORMFACTOR_DEFINITIONS[number];
+export type FormfactorDefinition = (typeof FORMFACTOR_DEFINITIONS)[number];
@@ -21,7 +21,7 @@ export declare function $provideLlmToolsForTestingAndScriptsAndPlayground(option
 export {};
 /**
  * Note: [⚪] This should never be in any released package
- * TODO: [👷♂️]
+ * TODO: [👷♂️] Write a comprehensive manual about the construction of LLM tools
  * TODO: This should be maybe not under `_common` but under `utils-internal` / `utils/internal`
  * TODO: [®] DRY Register logi
  */
@@ -51,7 +51,7 @@ export declare function $provideLlmToolsForWizardOrCli(options?: ProvideLlmTools
 export {};
 /**
  * Note: [🟢] Code in this file should never be never released in packages that could be imported into browser environment
- * TODO: [👷♂️]
+ * TODO: [👷♂️] Write a comprehensive manual about the construction of LLM tools
  * TODO: [🥃] Allow `ptbk make` without llm tools
  * TODO: This should be maybe not under `_common` but under `utils-internal` / `utils/internal`
  * TODO: [®] DRY Register logic
@@ -17,6 +17,6 @@ export type LlmExecutionToolsWithTotalUsage = LlmExecutionTools & {
     spending(): Observable<Usage>;
 };
 /**
- * TODO: [👷♂️]
+ * TODO: [👷♂️] Write a comprehensive manual about the construction of LLM tools
  * Note: [🥫] Not using getter `get totalUsage` but `getTotalUsage` to allow this object to be proxied
  */
@@ -32,5 +32,5 @@ export {};
  * TODO: [🧠][💸] Maybe make some common abstraction `interceptLlmTools` and use here (or use javascript Proxy?)
  * TODO: [🧠] Is there some meaningfull way how to test this util
  * TODO: [🧠][🌯] Maybe a way how to hide ability to `get totalUsage`
- * TODO: [👷♂️]
+ * TODO: [👷♂️] Write a comprehensive manual about the construction of LLM tools
  */
@@ -8,5 +8,5 @@ import { MultipleLlmExecutionTools } from './MultipleLlmExecutionTools';
 export declare function getSingleLlmExecutionTools(oneOrMoreLlmExecutionTools: undefined | LlmExecutionTools | ReadonlyArray<LlmExecutionTools>): LlmExecutionTools | MultipleLlmExecutionTools;
 /**
  * TODO: [🙆] `getSingleLlmExecutionTools` vs `joinLlmExecutionTools` - explain difference or pick one
- * TODO: [👷♂️]
+ * TODO: [👷♂️] Write a comprehensive manual about how to construct and use LLM execution tools in Promptbook
  */
@@ -19,5 +19,5 @@ import { MultipleLlmExecutionTools } from './MultipleLlmExecutionTools';
 export declare function joinLlmExecutionTools(title: string_title & string_markdown_text, ...llmExecutionTools: ReadonlyArray<LlmExecutionTools>): MultipleLlmExecutionTools;
 /**
  * TODO: [🙆] `getSingleLlmExecutionTools` vs `joinLlmExecutionTools` - explain difference or pick one
- * TODO: [👷♂️]
+ * TODO: [👷♂️] Write a comprehensive manual about how to construct and use LLM execution tools in Promptbook
  */
@@ -0,0 +1,8 @@
+import type OpenAI from 'openai';
+import type { LlmToolDefinition } from '../../../types/LlmToolDefinition';
+/**
+ * Maps Promptbook tools to OpenAI tools.
+ *
+ * @private
+ */
+export declare function mapToolsToOpenAi(tools: ReadonlyArray<LlmToolDefinition>): Array<OpenAI.Chat.Completions.ChatCompletionTool>;
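Judging from the UMD implementation further down in this diff, the mapper wraps each Promptbook tool in OpenAI's function-tool envelope. A sketch of the expected input and output shape; the `get_weather` tool is a made-up example and the import path assumes `LlmToolDefinition` is re-exported from `@promptbook/types` (it is added to `types.index.d.ts` above).

```ts
import type { LlmToolDefinition } from '@promptbook/types';

const tools: ReadonlyArray<LlmToolDefinition> = [
    {
        name: 'get_weather',
        description: 'Get the current weather for a city',
        parameters: {
            type: 'object',
            properties: { city: { type: 'string' } },
            required: ['city'],
        },
    },
];

// mapToolsToOpenAi(tools) should then produce:
// [{ type: 'function', function: { name: 'get_weather', description: '…', parameters: { … } } }]
```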
@@ -1,18 +1,18 @@
 import type { string_url } from '../types/typeAliases';
 /**
- *
+ * Represents a search result from a search engine.
  */
 export type SearchResult = {
     /**
-     *
+     * The title of the search result.
      */
     title: string;
     /**
-     *
+     * The URL of the search result.
      */
     url: string_url;
     /**
-     *
+     * A short snippet or description of the search result.
      */
     snippet: string;
 };
@@ -3,7 +3,7 @@ import type { string_markdown, string_markdown_text, string_title } from '../../
 import type { SearchEngine } from '../SearchEngine';
 import type { SearchResult } from '../SearchResult';
 /**
- *
+ * A search engine implementation that uses the Bing Web Search API.
  *
  * @private <- TODO: !!!! Export via some package
  */
@@ -3,7 +3,7 @@ import type { string_markdown, string_markdown_text, string_title } from '../../
 import type { SearchEngine } from '../SearchEngine';
 import type { SearchResult } from '../SearchResult';
 /**
- *
+ * A dummy implementation of SearchEngine for testing purposes.
  *
  * @private <- TODO: !!!! Export via some package, maybe `@promptbook/search-engines` or `@promptbook/fake-llm`
  */
@@ -0,0 +1,20 @@
+import type { string_markdown_text, string_name } from './typeAliases';
+/**
+ * Definition of a tool that can be used by the model
+ *
+ * Note: [🚉] This is fully serializable as JSON
+ */
+export type LlmToolDefinition = {
+    /**
+     * Name of the tool
+     */
+    readonly name: string_name;
+    /**
+     * Description of the tool
+     */
+    readonly description: string_markdown_text;
+    /**
+     * Parameters of the tool in JSON Schema format
+     */
+    readonly parameters: Record<string, unknown>;
+};
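A minimal example of a value conforming to the new `LlmToolDefinition` type; the `web_search` definition mirrors the one the `USE SEARCH ENGINE` commitment injects in the UMD bundle below (import path assumed as in the previous example).

```ts
import type { LlmToolDefinition } from '@promptbook/types';

const webSearchTool: LlmToolDefinition = {
    name: 'web_search',
    description: 'Search the internet for information.',
    parameters: {
        // JSON Schema describing the tool's arguments
        type: 'object',
        properties: {
            query: { type: 'string', description: 'The search query' },
        },
        required: ['query'],
    },
};
```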
@@ -1,3 +1,4 @@
+import type { LlmToolDefinition } from './LlmToolDefinition';
 import type { ModelVariant } from './ModelVariant';
 import type { number_model_temperature, number_seed, string_model_name, string_system_message } from './typeAliases';
 /**
@@ -28,6 +29,12 @@ export type CompletionModelRequirements = CommonModelRequirements & {
      * Maximum number of tokens that can be generated by the model
      */
     readonly maxTokens?: number;
+    /**
+     * Tools available for the model
+     *
+     * Note: [🚉] This is fully serializable as JSON
+     */
+    readonly tools?: LlmToolDefinition[];
 };
 /**
  * Model requirements for the chat variant
@@ -53,6 +60,12 @@ export type ChatModelRequirements = CommonModelRequirements & {
      * Maximum number of tokens that can be generated by the model
      */
     readonly maxTokens?: number;
+    /**
+     * Tools available for the model
+     *
+     * Note: [🚉] This is fully serializable as JSON
+     */
+    readonly tools?: LlmToolDefinition[];
 };
 /**
  * Model requirements for the image generation variant
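With the new field, chat (and completion) model requirements can carry tool definitions next to the existing sampling options. A sketch; the `modelVariant` and `modelName` fields are assumed from the surrounding type and the values are illustrative.

```ts
import type { ChatModelRequirements } from '@promptbook/types';

const modelRequirements: ChatModelRequirements = {
    modelVariant: 'CHAT', // assumed discriminant of the surrounding union
    modelName: 'gpt-4o',
    temperature: 0.7,
    tools: [
        {
            name: 'web_browser',
            description: 'A tool that can browse the web.',
            parameters: {
                type: 'object',
                properties: { url: { type: 'string', description: 'The URL to browse' } },
                required: ['url'],
            },
        },
    ],
};
```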
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
 export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
 /**
  * Represents the version string of the Promptbook engine.
- * It follows semantic versioning (e.g., `0.104.0-
+ * It follows semantic versioning (e.g., `0.104.0-12`).
  *
  * @generated
  */
package/package.json
CHANGED
package/umd/index.umd.js
CHANGED
@@ -30,7 +30,7 @@
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.104.0-
+const PROMPTBOOK_ENGINE_VERSION = '0.104.0-13';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -4177,9 +4177,7 @@
         // Get existing dictionary entries from metadata
         const existingDictionary = ((_a = requirements.metadata) === null || _a === void 0 ? void 0 : _a.DICTIONARY) || '';
         // Merge the new dictionary entry with existing entries
-        const mergedDictionary = existingDictionary
-            ? `${existingDictionary}\n${trimmedContent}`
-            : trimmedContent;
+        const mergedDictionary = existingDictionary ? `${existingDictionary}\n${trimmedContent}` : trimmedContent;
         // Store the merged dictionary in metadata for debugging and inspection
         const updatedMetadata = {
             ...requirements.metadata,
@@ -6708,19 +6706,37 @@
         `);
     }
     applyToAgentModelRequirements(requirements, content) {
-        // We simply mark that browser capability is enabled in metadata
-        // Get existing metadata
-        const existingMetadata = requirements.metadata || {};
         // Get existing tools array or create new one
-        const existingTools =
-        // Add '
-        const updatedTools = existingTools.
-
+        const existingTools = requirements.tools || [];
+        // Add 'web_browser' to tools if not already present
+        const updatedTools = existingTools.some((tool) => tool.name === 'web_browser')
+            ? existingTools
+            : [
+                ...existingTools,
+                {
+                    name: 'web_browser',
+                    description: spaceTrim$1.spaceTrim(`
+                        A tool that can browse the web.
+                        Use this tool when you need to access specific websites or browse the internet.
+                    `),
+                    parameters: {
+                        type: 'object',
+                        properties: {
+                            url: {
+                                type: 'string',
+                                description: 'The URL to browse',
+                            },
+                        },
+                        required: ['url'],
+                    },
+                },
+            ];
+        // Return requirements with updated tools and metadata
         return {
             ...requirements,
+            tools: updatedTools,
             metadata: {
-                ...
-                tools: updatedTools,
+                ...requirements.metadata,
                 useBrowser: true,
             },
         };
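The hunk above changes where the browser commitment stores its tool: it now lands in `requirements.tools` instead of `metadata`, with only the `useBrowser` flag left in metadata. An illustrative before/after of `applyToAgentModelRequirements` (input values are made up; shapes follow the code above).

```ts
// Illustrative only – shapes follow the reconstructed hunk above
const before = { systemMessage: 'You are a helpful assistant.', metadata: {} };

const after = {
    systemMessage: 'You are a helpful assistant.',
    tools: [
        {
            name: 'web_browser',
            description: 'A tool that can browse the web. …',
            parameters: {
                type: 'object',
                properties: { url: { type: 'string', description: 'The URL to browse' } },
                required: ['url'],
            },
        },
    ],
    metadata: { useBrowser: true },
};
```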
@@ -6813,13 +6829,13 @@
  * The `USE SEARCH ENGINE` commitment indicates that the agent should utilize a search engine tool
  * to access and retrieve up-to-date information from the internet when necessary.
  *
- * The content following `USE SEARCH ENGINE` is
+ * The content following `USE SEARCH ENGINE` is an arbitrary text that the agent should know (e.g. search scope or instructions).
  *
  * Example usage in agent source:
  *
  * ```book
  * USE SEARCH ENGINE
- * USE SEARCH ENGINE
+ * USE SEARCH ENGINE Hledej informace o Přemyslovcích
  * ```
  *
  * @private [🪔] Maybe export the commitments through some package
@@ -6851,7 +6867,7 @@

 ## Key aspects

-- The content following \`USE SEARCH ENGINE\` is
+- The content following \`USE SEARCH ENGINE\` is an arbitrary text that the agent should know (e.g. search scope or instructions).
 - The actual search engine tool usage is handled by the agent runtime
 - Allows the agent to search for current information from the web
 - Useful for research tasks, finding facts, and accessing dynamic content
@@ -6876,20 +6892,39 @@
         `);
     }
     applyToAgentModelRequirements(requirements, content) {
-        // We simply mark that search engine capability is enabled in metadata
-        // Get existing metadata
-        const existingMetadata = requirements.metadata || {};
         // Get existing tools array or create new one
-        const existingTools =
-        // Add '
-        const updatedTools = existingTools.
-
+        const existingTools = requirements.tools || [];
+        // Add 'web_search' to tools if not already present
+        const updatedTools = existingTools.some((tool) => tool.name === 'web_search')
+            ? existingTools
+            : [
+                ...existingTools,
+                {
+                    name: 'web_search',
+                    description: spaceTrim$1.spaceTrim(`
+                        Search the internet for information.
+                        Use this tool when you need to find up-to-date information or facts that you don't know.
+                        ${!content ? '' : `Search scope / instructions: ${content}`}
+                    `),
+                    parameters: {
+                        type: 'object',
+                        properties: {
+                            query: {
+                                type: 'string',
+                                description: 'The search query',
+                            },
+                        },
+                        required: ['query'],
+                    },
+                },
+            ];
+        // Return requirements with updated tools and metadata
         return {
             ...requirements,
+            tools: updatedTools,
             metadata: {
-                ...
-
-                useSearchEngine: true,
+                ...requirements.metadata,
+                useSearchEngine: content || true,
             },
         };
     }
@@ -7373,6 +7408,10 @@
             meta.image = spaceTrim__default["default"](commitment.content);
             continue;
         }
+        if (commitment.type === 'META DESCRIPTION') {
+            meta.description = spaceTrim__default["default"](commitment.content);
+            continue;
+        }
         if (commitment.type === 'META COLOR') {
             meta.color = normalizeSeparator(commitment.content);
             continue;
@@ -11433,7 +11472,7 @@
 }
 /**
  * TODO: [🙆] `getSingleLlmExecutionTools` vs `joinLlmExecutionTools` - explain difference or pick one
- * TODO: [👷♂️]
+ * TODO: [👷♂️] Write a comprehensive manual about how to construct and use LLM execution tools in Promptbook
  */

 /**
@@ -11450,7 +11489,7 @@
 }
 /**
  * TODO: [🙆] `getSingleLlmExecutionTools` vs `joinLlmExecutionTools` - explain difference or pick one
- * TODO: [👷♂️]
+ * TODO: [👷♂️] Write a comprehensive manual about how to construct and use LLM execution tools in Promptbook
  */

var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModels",description:"List of available model names together with their descriptions as JSON",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelsRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n```json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpful assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n```\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n```json\n{availableModels}\n```\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. 
By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelsRequirements",format:"JSON",dependentParameterNames:["availableModels","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModels}` List of available model names together with their descriptions as JSON\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelsRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n\\`\\`\\`json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpful assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n\\`\\`\\`json\n{availableModels}\n\\`\\`\\`\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelsRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
@@ -16220,6 +16259,22 @@
  * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
  */

+/**
+ * Maps Promptbook tools to OpenAI tools.
+ *
+ * @private
+ */
+function mapToolsToOpenAi(tools) {
+    return tools.map((tool) => ({
+        type: 'function',
+        function: {
+            name: tool.name,
+            description: tool.description,
+            parameters: tool.parameters,
+        },
+    }));
+}
+
 /**
  * Parses an OpenAI error message to identify which parameter is unsupported
  *
@@ -16417,6 +16472,9 @@
                 },
             ],
             user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString(),
+            tools: currentModelRequirements.tools === undefined
+                ? undefined
+                : mapToolsToOpenAi(currentModelRequirements.tools),
         };
         const start = $getCurrentDate();
         if (this.options.isVerbose) {
@@ -16561,6 +16619,7 @@
         const rawPromptContent = templateParameters(content, { ...parameters, modelName });
         const rawRequest = {
             ...modelSettings,
+            model: modelName,
             prompt: rawPromptContent,
             user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString(),
         };
@@ -16815,8 +16874,8 @@
         const rawPromptContent = templateParameters(content, { ...parameters, modelName });
         const rawRequest = {
             ...modelSettings,
-            size: modelSettings.size || '1024x1024',
             prompt: rawPromptContent,
+            size: modelSettings.size || '1024x1024',
             user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString(),
             response_format: 'url', // TODO: [🧠] Maybe allow b64_json
         };
@@ -17218,6 +17277,7 @@
             thread: {
                 messages: threadMessages,
             },
+            tools: modelRequirements.tools === undefined ? undefined : mapToolsToOpenAi(modelRequirements.tools),
             // <- TODO: Add user identification here> user: this.options.user,
         };
         const start = $getCurrentDate();
@@ -17741,6 +17801,8 @@
             modelRequirements: {
                 ...chatPrompt.modelRequirements,
                 ...modelRequirements,
+                // Spread tools to convert readonly array to mutable
+                tools: modelRequirements.tools ? [...modelRequirements.tools] : chatPrompt.modelRequirements.tools,
                 // Prepend agent system message to existing system message
                 systemMessage: modelRequirements.systemMessage +
                     (chatPrompt.modelRequirements.systemMessage