@promptbook/core 0.103.0-47 → 0.103.0-48
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/esm/index.es.js +796 -575
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/core.index.d.ts +6 -0
- package/esm/typings/src/_packages/types.index.d.ts +2 -0
- package/esm/typings/src/book-2.0/agent-source/AgentBasicInformation.d.ts +7 -3
- package/esm/typings/src/book-2.0/agent-source/AgentSourceParseResult.d.ts +2 -1
- package/esm/typings/src/book-2.0/agent-source/computeAgentHash.d.ts +8 -0
- package/esm/typings/src/book-2.0/agent-source/computeAgentHash.test.d.ts +1 -0
- package/esm/typings/src/book-2.0/agent-source/createDefaultAgentName.d.ts +8 -0
- package/esm/typings/src/book-2.0/agent-source/normalizeAgentName.d.ts +9 -0
- package/esm/typings/src/book-2.0/agent-source/normalizeAgentName.test.d.ts +1 -0
- package/esm/typings/src/book-2.0/agent-source/parseAgentSourceWithCommitments.d.ts +1 -1
- package/esm/typings/src/collection/agent-collection/constructors/agent-collection-in-supabase/AgentsDatabaseSchema.d.ts +57 -32
- package/esm/typings/src/llm-providers/_common/utils/assertUniqueModels.d.ts +12 -0
- package/esm/typings/src/llm-providers/agent/Agent.d.ts +7 -2
- package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.d.ts +4 -0
- package/esm/typings/src/llm-providers/agent/RemoteAgent.d.ts +2 -2
- package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionTools.d.ts +24 -3
- package/esm/typings/src/llm-providers/openai/openai-models.test.d.ts +4 -0
- package/esm/typings/src/remote-server/startAgentServer.d.ts +1 -1
- package/esm/typings/src/remote-server/startRemoteServer.d.ts +1 -2
- package/esm/typings/src/transpilers/openai-sdk/register.d.ts +1 -1
- package/esm/typings/src/types/typeAliases.d.ts +6 -0
- package/esm/typings/src/utils/normalization/normalize-to-kebab-case.d.ts +2 -0
- package/esm/typings/src/utils/normalization/normalizeTo_PascalCase.d.ts +3 -0
- package/esm/typings/src/utils/normalization/normalizeTo_camelCase.d.ts +2 -0
- package/esm/typings/src/utils/normalization/titleToName.d.ts +2 -0
- package/esm/typings/src/version.d.ts +1 -1
- package/package.json +1 -1
- package/umd/index.umd.js +803 -579
- package/umd/index.umd.js.map +1 -1
package/umd/index.umd.js
CHANGED
@@ -1,13 +1,13 @@
(function (global, factory) {
- typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('
- typeof define === 'function' && define.amd ? define(['exports', '
- (global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global["promptbook-core"] = {}, global.spaceTrim$1, global.crypto, global.rxjs, global.waitasecond, global.
- })(this, (function (exports, spaceTrim$1, crypto, rxjs, waitasecond,
+ typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('crypto-js'), require('crypto-js/enc-hex'), require('spacetrim'), require('crypto'), require('rxjs'), require('waitasecond'), require('crypto-js/sha256'), require('path'), require('mime-types'), require('papaparse'), require('moment'), require('colors'), require('bottleneck'), require('openai')) :
+ typeof define === 'function' && define.amd ? define(['exports', 'crypto-js', 'crypto-js/enc-hex', 'spacetrim', 'crypto', 'rxjs', 'waitasecond', 'crypto-js/sha256', 'path', 'mime-types', 'papaparse', 'moment', 'colors', 'bottleneck', 'openai'], factory) :
+ (global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global["promptbook-core"] = {}, global.cryptoJs, global.hexEncoder, global.spaceTrim$1, global.crypto, global.rxjs, global.waitasecond, global.sha256, global.path, global.mimeTypes, global.papaparse, global.moment, global.colors, global.Bottleneck, global.OpenAI));
+ })(this, (function (exports, cryptoJs, hexEncoder, spaceTrim$1, crypto, rxjs, waitasecond, sha256, path, mimeTypes, papaparse, moment, colors, Bottleneck, OpenAI) { 'use strict';

function _interopDefaultLegacy (e) { return e && typeof e === 'object' && 'default' in e ? e : { 'default': e }; }

- var spaceTrim__default = /*#__PURE__*/_interopDefaultLegacy(spaceTrim$1);
var hexEncoder__default = /*#__PURE__*/_interopDefaultLegacy(hexEncoder);
+ var spaceTrim__default = /*#__PURE__*/_interopDefaultLegacy(spaceTrim$1);
var sha256__default = /*#__PURE__*/_interopDefaultLegacy(sha256);
var moment__default = /*#__PURE__*/_interopDefaultLegacy(moment);
var colors__default = /*#__PURE__*/_interopDefaultLegacy(colors);
@@ -28,12 +28,21 @@
* @generated
* @see https://github.com/webgptorg/promptbook
*/
- const PROMPTBOOK_ENGINE_VERSION = '0.103.0-
+ const PROMPTBOOK_ENGINE_VERSION = '0.103.0-48';
/**
* TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
* Note: [š] Ignore a discrepancy between file name and entity name
*/

+ /**
+ * Computes SHA-256 hash of the agent source
+ *
+ * @public exported from `@promptbook/core`
+ */
+ function computeAgentHash(agentSource) {
+ return cryptoJs.SHA256(hexEncoder__default["default"].parse(agentSource /* <- TODO: !!!!! spaceTrim */)).toString( /* hex */);
+ }
+
var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [š] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [š] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModels",description:"List of available model names together with their descriptions as JSON",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelsRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n```json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpful assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n```\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n```json\n{availableModels}\n```\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. 
By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelsRequirements",format:"JSON",dependentParameterNames:["availableModels","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModels}` List of available model names together with their descriptions as JSON\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelsRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n\\`\\`\\`json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpful assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n\\`\\`\\`json\n{availableModels}\n\\`\\`\\`\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelsRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"ā Convert Knowledge-piece to title\" but \"ā Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"ā Convert Knowledge-piece to title\" but \"ā Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];

/**
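The hunk above introduces `computeAgentHash`, which is also exported from `@promptbook/core` (see the `computeAgentHash.d.ts` typings added in the file list). A minimal usage sketch, assuming the export name matches those typings; the agent source string below is a made-up example:

```js
// Sketch only - not taken from the package documentation
import { computeAgentHash } from '@promptbook/core';

const agentSource = 'Customer Support Agent\n\nPERSONA A friendly customer-support assistant'; // <- hypothetical agent source
const agentHash = computeAgentHash(agentSource);

console.log(agentHash); // <- SHA-256 hash of the agent source, returned as a hex string
```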
@@ -4326,6 +4335,8 @@
/**
* Converts a given text to kebab-case format.
*
+ * Note: [š] This function is idempotent.
+ *
* @param text The text to be converted.
* @returns The kebab-case formatted string.
* @example 'hello-world'
@@ -4481,6 +4492,8 @@
/**
* Converts a title string into a normalized name.
*
+ * Note: [š] This function is idempotent.
+ *
* @param value The title string to be converted to a name.
* @returns A normalized name derived from the input title.
* @example 'Hello World!' -> 'hello-world'
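Both hunks above only add a JSDoc note that the normalizers are idempotent. A small sketch of what that guarantees in practice, assuming `titleToName` is imported from `@promptbook/utils` (its typings are listed at the top of this diff):

```js
// Sketch only - illustrates the "idempotent" note added above
import { titleToName } from '@promptbook/utils';

const once = titleToName('Hello World!'); // -> 'hello-world' (per the @example in the JSDoc)
const twice = titleToName(once);          // -> 'hello-world' again; re-applying the function changes nothing

console.log(once === twice); // true
```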
@@ -8760,6 +8773,7 @@
// Keep everything after the PERSONA section
cleanedMessage = lines.slice(personaEndIndex).join('\n').trim();
}
+ // TODO: [š] There should be `agentFullname` not `agentName`
// Create new system message with persona at the beginning
// Format: "You are {agentName}\n{personaContent}"
// The # PERSONA comment will be removed later by removeCommentsFromSystemMessage
@@ -9581,6 +9595,8 @@
/**
* Normalizes a given text to camelCase format.
*
+ * Note: [š] This function is idempotent.
+ *
* @param text The text to be normalized.
* @param _isFirstLetterCapital Whether the first letter should be capitalized.
* @returns The camelCase formatted string.
@@ -9669,134 +9685,545 @@
*/

/**
- *
+ * Creates a Mermaid graph based on the promptbook
*
- *
- * - `parseAgentSource` which is a lightweight parser for agent source, it parses basic information and its purpose is to be quick and synchronous. The commitments there are hardcoded.
- * - `createAgentModelRequirements` which is an asynchronous function that creates model requirements it applies each commitment one by one and works asynchronously.
+ * Note: The result is not wrapped in a Markdown code block
*
- * @public exported from `@promptbook/
+ * @public exported from `@promptbook/utils`
*/
- function
- const
-
-
-
-
-
+ function renderPromptbookMermaid(pipelineJson, options) {
+ const { linkTask = () => null } = options || {};
+ const MERMAID_PREFIX = 'pipeline_';
+ const MERMAID_KNOWLEDGE_NAME = MERMAID_PREFIX + 'knowledge';
+ const MERMAID_RESERVED_NAME = MERMAID_PREFIX + 'reserved';
+ const MERMAID_INPUT_NAME = MERMAID_PREFIX + 'input';
+ const MERMAID_OUTPUT_NAME = MERMAID_PREFIX + 'output';
+ const parameterNameToTaskName = (parameterName) => {
+ if (parameterName === 'knowledge') {
+ return MERMAID_KNOWLEDGE_NAME;
}
- if (
-
+ else if (RESERVED_PARAMETER_NAMES.includes(parameterName)) {
+ return MERMAID_RESERVED_NAME;
}
-
-
+ const parameter = pipelineJson.parameters.find((parameter) => parameter.name === parameterName);
+ if (!parameter) {
+ throw new UnexpectedError(`Could not find {${parameterName}}`);
+ // <- TODO: This causes problems when {knowledge} and other reserved parameters are used
}
-
-
- const meta = {};
- for (const commitment of parseResult.commitments) {
- if (commitment.type !== 'META') {
- continue;
+ if (parameter.isInput) {
+ return MERMAID_INPUT_NAME;
}
-
-
-
-
-
- // Generate gravatar fallback if no meta image specified
- if (!meta.image) {
- meta.image = generatePlaceholderAgentProfileImageUrl(parseResult.agentName || '!!');
- }
- // Parse parameters using unified approach - both @Parameter and {parameter} notations
- // are treated as the same syntax feature with unified representation
- const parameters = parseParameters(agentSource);
- return {
- agentName: parseResult.agentName,
- personaDescription,
- meta,
- parameters,
+ const task = pipelineJson.tasks.find((task) => task.resultingParameterName === parameterName);
+ if (!task) {
+ throw new Error(`Could not find task for {${parameterName}}`);
+ }
+ return MERMAID_PREFIX + (task.name || normalizeTo_camelCase('task-' + titleToName(task.title)));
};
+ const inputAndIntermediateParametersMermaid = pipelineJson.tasks
+ .flatMap(({ title, dependentParameterNames, resultingParameterName }) => [
+ `${parameterNameToTaskName(resultingParameterName)}("${title}")`,
+ ...dependentParameterNames.map((dependentParameterName) => `${parameterNameToTaskName(dependentParameterName)}--"{${dependentParameterName}}"-->${parameterNameToTaskName(resultingParameterName)}`),
+ ])
+ .join('\n');
+ const outputParametersMermaid = pipelineJson.parameters
+ .filter(({ isOutput }) => isOutput)
+ .map(({ name }) => `${parameterNameToTaskName(name)}--"{${name}}"-->${MERMAID_OUTPUT_NAME}`)
+ .join('\n');
+ const linksMermaid = pipelineJson.tasks
+ .map((task) => {
+ const link = linkTask(task);
+ if (link === null) {
+ return '';
+ }
+ const { href, title } = link;
+ const taskName = parameterNameToTaskName(task.resultingParameterName);
+ return `click ${taskName} href "${href}" "${title}";`;
+ })
+ .filter((line) => line !== '')
+ .join('\n');
+ const interactionPointsMermaid = Object.entries({
+ [MERMAID_INPUT_NAME]: 'Input',
+ [MERMAID_OUTPUT_NAME]: 'Output',
+ [MERMAID_RESERVED_NAME]: 'Other',
+ [MERMAID_KNOWLEDGE_NAME]: 'Knowledge',
+ })
+ .filter(([MERMAID_NAME]) => (inputAndIntermediateParametersMermaid + outputParametersMermaid).includes(MERMAID_NAME))
+ .map(([MERMAID_NAME, title]) => `${MERMAID_NAME}((${title})):::${MERMAID_NAME}`)
+ .join('\n');
+ const promptbookMermaid = spaceTrim$1.spaceTrim((block) => `
+
+ %% š® Tip: Open this on GitHub or in the VSCode website to see the Mermaid graph visually
+
+ flowchart LR
+ subgraph "${pipelineJson.title}"
+
+ %% Basic configuration
+ direction TB
+
+ %% Interaction points from pipeline to outside
+ ${block(interactionPointsMermaid)}
+
+ %% Input and intermediate parameters
+ ${block(inputAndIntermediateParametersMermaid)}
+
+
+ %% Output parameters
+ ${block(outputParametersMermaid)}
+
+ %% Links
+ ${block(linksMermaid)}
+
+ %% Styles
+ classDef ${MERMAID_INPUT_NAME} color: grey;
+ classDef ${MERMAID_OUTPUT_NAME} color: grey;
+ classDef ${MERMAID_RESERVED_NAME} color: grey;
+ classDef ${MERMAID_KNOWLEDGE_NAME} color: grey;
+
+ end;
+
+ `);
+ return promptbookMermaid;
}
/**
- * TODO: [
+ * TODO: [š§ ] FOREACH in mermaid graph
+ * TODO: [š§ ] Knowledge in mermaid graph
+ * TODO: [š§ ] Personas in mermaid graph
+ * TODO: Maybe use some Mermaid package instead of string templating
+ * TODO: [š] When more than 2 functionalities, split into separate functions
*/

/**
|
|
9725
|
-
*
|
|
9796
|
+
* Tag function for notating a prompt as template literal
|
|
9726
9797
|
*
|
|
9727
|
-
* There are
|
|
9728
|
-
*
|
|
9729
|
-
*
|
|
9798
|
+
* Note: There are 3 similar functions:
|
|
9799
|
+
* 1) `prompt` for notating single prompt exported from `@promptbook/utils`
|
|
9800
|
+
* 2) `promptTemplate` alias for `prompt`
|
|
9801
|
+
* 3) `book` for notating and validating entire books exported from `@promptbook/utils`
|
|
9730
9802
|
*
|
|
9731
|
-
* @
|
|
9803
|
+
* @param strings
|
|
9804
|
+
* @param values
|
|
9805
|
+
* @returns the prompt string
|
|
9806
|
+
* @public exported from `@promptbook/utils`
|
|
9732
9807
|
*/
|
|
9733
|
-
|
|
9734
|
-
|
|
9735
|
-
|
|
9736
|
-
if (availableModels && !modelName && llmTools) {
|
|
9737
|
-
const selectedModelName = await selectBestModelUsingPersona(agentSource, llmTools);
|
|
9738
|
-
return createAgentModelRequirementsWithCommitments(agentSource, selectedModelName);
|
|
9808
|
+
function prompt(strings, ...values) {
|
|
9809
|
+
if (values.length === 0) {
|
|
9810
|
+
return spaceTrim__default["default"](strings.join(''));
|
|
9739
9811
|
}
|
|
9740
|
-
|
|
9741
|
-
|
|
9742
|
-
|
|
9743
|
-
|
|
9744
|
-
|
|
9745
|
-
|
|
9746
|
-
|
|
9747
|
-
|
|
9748
|
-
|
|
9749
|
-
|
|
9750
|
-
* @private function of `createAgentModelRequirements`
|
|
9751
|
-
*/
|
|
9752
|
-
async function selectBestModelUsingPersona(agentSource, llmTools) {
|
|
9753
|
-
var _a;
|
|
9754
|
-
// Parse agent source to get persona description
|
|
9755
|
-
const { agentName, personaDescription } = parseAgentSource(agentSource);
|
|
9756
|
-
// Use agent name as fallback if no persona description is available
|
|
9757
|
-
const description = personaDescription || agentName || 'AI Agent';
|
|
9812
|
+
const stringsWithHiddenParameters = strings.map((stringsItem) =>
|
|
9813
|
+
// TODO: [0] DRY
|
|
9814
|
+
stringsItem.split('{').join(`${REPLACING_NONCE}beginbracket`).split('}').join(`${REPLACING_NONCE}endbracket`));
|
|
9815
|
+
const placeholderParameterNames = values.map((value, i) => `${REPLACING_NONCE}${i}`);
|
|
9816
|
+
const parameters = Object.fromEntries(values.map((value, i) => [placeholderParameterNames[i], value]));
|
|
9817
|
+
// Combine strings and values
|
|
9818
|
+
let pipelineString = stringsWithHiddenParameters.reduce((result, stringsItem, i) => placeholderParameterNames[i] === undefined
|
|
9819
|
+
? `${result}${stringsItem}`
|
|
9820
|
+
: `${result}${stringsItem}{${placeholderParameterNames[i]}}`, '');
|
|
9821
|
+
pipelineString = spaceTrim__default["default"](pipelineString);
|
|
9758
9822
|
try {
|
|
9759
|
-
|
|
9760
|
-
const { modelsRequirements } = await preparePersona(description, { llm: llmTools }, { isVerbose: false });
|
|
9761
|
-
// Extract the first model name from the requirements
|
|
9762
|
-
if (modelsRequirements.length > 0 && ((_a = modelsRequirements[0]) === null || _a === void 0 ? void 0 : _a.modelName)) {
|
|
9763
|
-
return modelsRequirements[0].modelName;
|
|
9764
|
-
}
|
|
9765
|
-
// Fallback: get available models and return the first CHAT model
|
|
9766
|
-
const availableModels = await llmTools.listModels();
|
|
9767
|
-
const chatModels = availableModels.filter(({ modelVariant }) => modelVariant === 'CHAT');
|
|
9768
|
-
if (chatModels.length === 0) {
|
|
9769
|
-
throw new Error('No CHAT models available for agent model selection');
|
|
9770
|
-
}
|
|
9771
|
-
return chatModels[0].modelName;
|
|
9823
|
+
pipelineString = templateParameters(pipelineString, parameters);
|
|
9772
9824
|
}
|
|
9773
9825
|
catch (error) {
|
|
9774
|
-
|
|
9775
|
-
|
|
9776
|
-
const availableModels = await llmTools.listModels();
|
|
9777
|
-
const chatModels = availableModels.filter(({ modelVariant }) => modelVariant === 'CHAT');
|
|
9778
|
-
if (chatModels.length === 0) {
|
|
9779
|
-
throw new Error('No CHAT models available for agent model selection');
|
|
9826
|
+
if (!(error instanceof PipelineExecutionError)) {
|
|
9827
|
+
throw error;
|
|
9780
9828
|
}
|
|
9781
|
-
|
|
9829
|
+
console.error({ pipelineString, parameters, placeholderParameterNames, error });
|
|
9830
|
+
throw new UnexpectedError(spaceTrim__default["default"]((block) => `
|
|
9831
|
+
Internal error in prompt template literal
|
|
9832
|
+
|
|
9833
|
+
${block(JSON.stringify({ strings, values }, null, 4))}}
|
|
9834
|
+
|
|
9835
|
+
`));
|
|
9782
9836
|
}
|
|
9837
|
+
// TODO: [0] DRY
|
|
9838
|
+
pipelineString = pipelineString
|
|
9839
|
+
.split(`${REPLACING_NONCE}beginbracket`)
|
|
9840
|
+
.join('{')
|
|
9841
|
+
.split(`${REPLACING_NONCE}endbracket`)
|
|
9842
|
+
.join('}');
|
|
9843
|
+
return pipelineString;
|
|
9783
9844
|
}
|
|
9784
9845
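The `prompt` tag added above space-trims the template and substitutes the interpolated values while protecting literal `{` / `}` characters in the template text from being interpreted as `{parameter}` placeholders. A short usage sketch, assuming the `@promptbook/utils` export described in its JSDoc:

```js
// Sketch only
import { prompt } from '@promptbook/utils';

const customerName = 'Alice';

const greetingPrompt = prompt`
    You are a helpful assistant.
    Greet the customer ${customerName} politely.
`;

console.log(greetingPrompt);
// -> roughly "You are a helpful assistant.\nGreet the customer Alice politely."
//    (indentation and surrounding blank lines removed by spaceTrim)
```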
/**
- *
+ * TODO: [š§ ][š“] Where is the best location for this file
+ * Note: [š] Ignore a discrepancy between file name and entity name
+ */
+
+ /**
+ * Detects if the code is running in a browser environment in main thread (Not in a web worker)
*
- *
- * @returns Array of MCP server identifiers
+ * Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
*
- * @
+ * @public exported from `@promptbook/utils`
*/
-
-
-
-
-
-
-
-
+ const $isRunningInBrowser = new Function(`
+ try {
+ return this === window;
+ } catch (e) {
+ return false;
+ }
+ `);
+ /**
+ * TODO: [šŗ]
+ */
+
+ /**
+ * Detects if the code is running in jest environment
+ *
+ * Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
+ *
+ * @public exported from `@promptbook/utils`
+ */
+ const $isRunningInJest = new Function(`
+ try {
+ return process.env.JEST_WORKER_ID !== undefined;
+ } catch (e) {
+ return false;
+ }
+ `);
+ /**
+ * TODO: [šŗ]
+ */
+
+ /**
+ * Detects if the code is running in a Node.js environment
+ *
+ * Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
+ *
+ * @public exported from `@promptbook/utils`
+ */
+ const $isRunningInNode = new Function(`
+ try {
+ return this === global;
+ } catch (e) {
+ return false;
+ }
+ `);
+ /**
+ * TODO: [šŗ]
+ */
+
+ /**
+ * Detects if the code is running in a web worker
+ *
+ * Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
+ *
+ * @public exported from `@promptbook/utils`
+ */
+ const $isRunningInWebWorker = new Function(`
+ try {
+ if (typeof WorkerGlobalScope !== 'undefined' && self instanceof WorkerGlobalScope) {
+ return true;
+ } else {
+ return false;
+ }
+ } catch (e) {
+ return false;
+ }
+ `);
+ /**
+ * TODO: [šŗ]
+ */
+
+ /**
+ * Returns information about the current runtime environment
+ *
+ * Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environments
+ *
+ * @public exported from `@promptbook/utils`
+ */
+ function $detectRuntimeEnvironment() {
+ return {
+ isRunningInBrowser: $isRunningInBrowser(),
+ isRunningInJest: $isRunningInJest(),
+ isRunningInNode: $isRunningInNode(),
+ isRunningInWebWorker: $isRunningInWebWorker(),
+ };
+ }
+ /**
+ * TODO: [šŗ] Also detect and report node version here
+ */
+
+ /**
+ * Simple wrapper `new Date().toISOString()`
+ *
+ * Note: `$` is used to indicate that this function is not a pure function - it is not deterministic because it depends on the current time
+ *
+ * @returns string_date branded type
+ * @public exported from `@promptbook/utils`
+ */
+ function $getCurrentDate() {
+ return new Date().toISOString();
+ }
+
/**
|
|
9958
|
+
* Function parseNumber will parse number from string
|
|
9959
|
+
*
|
|
9960
|
+
* Note: [š] This function is idempotent.
|
|
9961
|
+
* Unlike Number.parseInt, Number.parseFloat it will never ever result in NaN
|
|
9962
|
+
* Note: it also works only with decimal numbers
|
|
9963
|
+
*
|
|
9964
|
+
* @returns parsed number
|
|
9965
|
+
* @throws {ParseError} if the value is not a number
|
|
9966
|
+
*
|
|
9967
|
+
* @public exported from `@promptbook/utils`
|
|
9968
|
+
*/
|
|
9969
|
+
function parseNumber(value) {
|
|
9970
|
+
const originalValue = value;
|
|
9971
|
+
if (typeof value === 'number') {
|
|
9972
|
+
value = value.toString(); // <- TODO: Maybe more efficient way to do this
|
|
9973
|
+
}
|
|
9974
|
+
if (typeof value !== 'string') {
|
|
9975
|
+
return 0;
|
|
9976
|
+
}
|
|
9977
|
+
value = value.trim();
|
|
9978
|
+
if (value.startsWith('+')) {
|
|
9979
|
+
return parseNumber(value.substring(1));
|
|
9980
|
+
}
|
|
9981
|
+
if (value.startsWith('-')) {
|
|
9982
|
+
const number = parseNumber(value.substring(1));
|
|
9983
|
+
if (number === 0) {
|
|
9984
|
+
return 0; // <- Note: To prevent -0
|
|
9985
|
+
}
|
|
9986
|
+
return -number;
|
|
9987
|
+
}
|
|
9988
|
+
value = value.replace(/,/g, '.');
|
|
9989
|
+
value = value.toUpperCase();
|
|
9990
|
+
if (value === '') {
|
|
9991
|
+
return 0;
|
|
9992
|
+
}
|
|
9993
|
+
if (value === 'ā¾' || value.startsWith('INF')) {
|
|
9994
|
+
return Infinity;
|
|
9995
|
+
}
|
|
9996
|
+
if (value.includes('/')) {
|
|
9997
|
+
const [numerator_, denominator_] = value.split('/');
|
|
9998
|
+
const numerator = parseNumber(numerator_);
|
|
9999
|
+
const denominator = parseNumber(denominator_);
|
|
10000
|
+
if (denominator === 0) {
|
|
10001
|
+
throw new ParseError(`Unable to parse number from "${originalValue}" because denominator is zero`);
|
|
10002
|
+
}
|
|
10003
|
+
return numerator / denominator;
|
|
10004
|
+
}
|
|
10005
|
+
if (/^(NAN|NULL|NONE|UNDEFINED|ZERO|NO.*)$/.test(value)) {
|
|
10006
|
+
return 0;
|
|
10007
|
+
}
|
|
10008
|
+
if (value.includes('E')) {
|
|
10009
|
+
const [significand, exponent] = value.split('E');
|
|
10010
|
+
return parseNumber(significand) * 10 ** parseNumber(exponent);
|
|
10011
|
+
}
|
|
10012
|
+
if (!/^[0-9.]+$/.test(value) || value.split('.').length > 2) {
|
|
10013
|
+
throw new ParseError(`Unable to parse number from "${originalValue}"`);
|
|
10014
|
+
}
|
|
10015
|
+
const num = parseFloat(value);
|
|
10016
|
+
if (isNaN(num)) {
|
|
10017
|
+
throw new ParseError(`Unexpected NaN when parsing number from "${originalValue}"`);
|
|
10018
|
+
}
|
|
10019
|
+
return num;
|
|
10020
|
+
}
|
|
10021
|
+
/**
|
|
10022
|
+
* TODO: Maybe use sth. like safe-eval in fraction/calculation case @see https://www.npmjs.com/package/safe-eval
|
|
10023
|
+
* TODO: [š§ ][š»] Maybe export through `@promptbook/markdown-utils` not `@promptbook/utils`
|
|
10024
|
+
*/
|
|
10025
|
+
|
|
10026
|
+
/**
|
|
10027
|
+
* Removes quotes from a string
|
|
10028
|
+
*
|
|
10029
|
+
* Note: [š] This function is idempotent.
|
|
10030
|
+
* Tip: This is very useful for post-processing of the result of the LLM model
|
|
10031
|
+
* Note: This function removes only the same quotes from the beginning and the end of the string
|
|
10032
|
+
* Note: There are two similar functions:
|
|
10033
|
+
* - `removeQuotes` which removes only bounding quotes
|
|
10034
|
+
* - `unwrapResult` which removes whole introduce sentence
|
|
10035
|
+
*
|
|
10036
|
+
* @param text optionally quoted text
|
|
10037
|
+
* @returns text without quotes
|
|
10038
|
+
* @public exported from `@promptbook/utils`
|
|
10039
|
+
*/
|
|
10040
|
+
function removeQuotes(text) {
|
|
10041
|
+
if (text.startsWith('"') && text.endsWith('"')) {
|
|
10042
|
+
return text.slice(1, -1);
|
|
10043
|
+
}
|
|
10044
|
+
if (text.startsWith("'") && text.endsWith("'")) {
|
|
10045
|
+
return text.slice(1, -1);
|
|
10046
|
+
}
|
|
10047
|
+
return text;
|
|
10048
|
+
}
|
|
10049
|
+
|
|
10050
|
+
/**
|
|
10051
|
+
* Trims string from all 4 sides
|
|
10052
|
+
*
|
|
10053
|
+
* Note: This is a re-exported function from the `spacetrim` package which is
|
|
10054
|
+
* Developed by same author @hejny as this package
|
|
10055
|
+
*
|
|
10056
|
+
* @public exported from `@promptbook/utils`
|
|
10057
|
+
* @see https://github.com/hejny/spacetrim#usage
|
|
10058
|
+
*/
|
|
10059
|
+
const spaceTrim = spaceTrim$1.spaceTrim;
|
|
10060
|
+
|
|
10061
|
+
/**
|
|
10062
|
+
* Checks if the given value is a valid JavaScript identifier name.
|
|
10063
|
+
*
|
|
10064
|
+
* @param javascriptName The value to check for JavaScript identifier validity.
|
|
10065
|
+
* @returns `true` if the value is a valid JavaScript name, false otherwise.
|
|
10066
|
+
* @public exported from `@promptbook/utils`
|
|
10067
|
+
*/
|
|
10068
|
+
function isValidJavascriptName(javascriptName) {
|
|
10069
|
+
if (typeof javascriptName !== 'string') {
|
|
10070
|
+
return false;
|
|
10071
|
+
}
|
|
10072
|
+
return /^[a-zA-Z_$][0-9a-zA-Z_$]*$/i.test(javascriptName);
|
|
10073
|
+
}
|
|
10074
|
+
|
|
10075
|
+
/**
|
|
10076
|
+
* Normalizes agent name from arbitrary string to valid agent name
|
|
10077
|
+
*
|
|
10078
|
+
* Note: [š] This function is idempotent.
|
|
10079
|
+
*
|
|
10080
|
+
* @public exported from `@promptbook/core`
|
|
10081
|
+
*/
|
|
10082
|
+
function normalizeAgentName(rawAgentName) {
|
|
10083
|
+
return titleToName(spaceTrim__default["default"](rawAgentName));
|
|
10084
|
+
}
|
|
10085
|
+
|
|
10086
|
+
/**
|
|
10087
|
+
* Creates temporary default agent name based on agent source hash
|
|
10088
|
+
*
|
|
10089
|
+
* @public exported from `@promptbook/core`
|
|
10090
|
+
*/
|
|
10091
|
+
function createDefaultAgentName(agentSource) {
|
|
10092
|
+
const agentHash = computeAgentHash(agentSource);
|
|
10093
|
+
return normalizeAgentName(`Agent ${agentHash.substring(0, 6)}`);
|
|
10094
|
+
}
|
|
10095
|
+
|
|
10096
|
+
/**
|
|
10097
|
+
* Parses basic information from agent source
|
|
10098
|
+
*
|
|
10099
|
+
* There are 2 similar functions:
|
|
10100
|
+
* - `parseAgentSource` which is a lightweight parser for agent source, it parses basic information and its purpose is to be quick and synchronous. The commitments there are hardcoded.
|
|
10101
|
+
* - `createAgentModelRequirements` which is an asynchronous function that creates model requirements it applies each commitment one by one and works asynchronously.
|
|
10102
|
+
*
|
|
10103
|
+
* @public exported from `@promptbook/core`
|
|
10104
|
+
*/
|
|
10105
|
+
function parseAgentSource(agentSource) {
|
|
10106
|
+
const parseResult = parseAgentSourceWithCommitments(agentSource);
|
|
10107
|
+
// Find PERSONA and META commitments
|
|
10108
|
+
let personaDescription = null;
|
|
10109
|
+
for (const commitment of parseResult.commitments) {
|
|
10110
|
+
if (commitment.type !== 'PERSONA') {
|
|
10111
|
+
continue;
|
|
10112
|
+
}
|
|
10113
|
+
if (personaDescription === null) {
|
|
10114
|
+
personaDescription = '';
|
|
10115
|
+
}
|
|
10116
|
+
else {
|
|
10117
|
+
personaDescription += `\n\n${personaDescription}`;
|
|
10118
|
+
}
|
|
10119
|
+
personaDescription += commitment.content;
|
|
10120
|
+
}
|
|
10121
|
+
const meta = {};
|
|
10122
|
+
for (const commitment of parseResult.commitments) {
|
|
10123
|
+
if (commitment.type !== 'META') {
|
|
10124
|
+
continue;
|
|
10125
|
+
}
|
|
10126
|
+
// Parse META commitments - format is "META TYPE content"
|
|
10127
|
+
const metaTypeRaw = commitment.content.split(' ')[0] || 'NONE';
|
|
10128
|
+
const metaType = normalizeTo_camelCase(metaTypeRaw);
|
|
10129
|
+
meta[metaType] = spaceTrim__default["default"](commitment.content.substring(metaTypeRaw.length));
|
|
10130
|
+
}
|
|
10131
|
+
// Generate gravatar fallback if no meta image specified
|
|
10132
|
+
if (!meta.image) {
|
|
10133
|
+
meta.image = generatePlaceholderAgentProfileImageUrl(parseResult.agentName || '!!');
|
|
10134
|
+
}
|
|
10135
|
+
// Parse parameters using unified approach - both @Parameter and {parameter} notations
|
|
10136
|
+
// are treated as the same syntax feature with unified representation
|
|
10137
|
+
const parameters = parseParameters(agentSource);
|
|
10138
|
+
const agentHash = computeAgentHash(agentSource);
|
|
10139
|
+
return {
|
|
10140
|
+
agentName: normalizeAgentName(parseResult.agentName || createDefaultAgentName(agentSource)),
|
|
10141
|
+
agentHash,
|
|
10142
|
+
personaDescription,
|
|
10143
|
+
meta,
|
|
10144
|
+
parameters,
|
|
10145
|
+
};
|
|
10146
|
+
}
|
|
10147
|
+
/**
|
|
10148
|
+
* TODO: [š] Unite `AgentBasicInformation`, `ChatParticipant`, `LlmExecutionTools` + `LlmToolsMetadata`
|
|
10149
|
+
*/
|
|
10150
|
+
|
|
10151
|
+
/**
|
|
10152
|
+
* Creates model requirements for an agent based on its source
|
|
10153
|
+
*
|
|
10154
|
+
* There are 2 similar functions:
|
|
10155
|
+
* - `parseAgentSource` which is a lightweight parser for agent source, it parses basic information and its purpose is to be quick and synchronous. The commitments there are hardcoded.
|
|
10156
|
+
* - `createAgentModelRequirements` which is an asynchronous function that creates model requirements it applies each commitment one by one and works asynchronous.
|
|
10157
|
+
*
|
|
10158
|
+
* @public exported from `@promptbook/core`
|
|
10159
|
+
*/
|
|
10160
|
+
async function createAgentModelRequirements(agentSource, modelName, availableModels, llmTools) {
|
|
10161
|
+
// If availableModels are provided and no specific modelName is given,
|
|
10162
|
+
// use preparePersona to select the best model
|
|
10163
|
+
if (availableModels && !modelName && llmTools) {
|
|
10164
|
+
const selectedModelName = await selectBestModelUsingPersona(agentSource, llmTools);
|
|
10165
|
+
return createAgentModelRequirementsWithCommitments(agentSource, selectedModelName);
|
|
10166
|
+
}
|
|
10167
|
+
// Use the new commitment-based system with provided or default model
|
|
10168
|
+
return createAgentModelRequirementsWithCommitments(agentSource, modelName);
|
|
10169
|
+
}
|
|
10170
|
+
/**
|
|
10171
|
+
* Selects the best model using the preparePersona function
|
|
10172
|
+
* This directly uses preparePersona to ensure DRY principle
|
|
10173
|
+
*
|
|
10174
|
+
* @param agentSource The agent source to derive persona description from
|
|
10175
|
+
* @param llmTools LLM tools for preparing persona
|
|
10176
|
+
* @returns The name of the best selected model
|
|
10177
|
+
* @private function of `createAgentModelRequirements`
|
|
10178
|
+
*/
|
|
10179
|
+
async function selectBestModelUsingPersona(agentSource, llmTools) {
|
|
10180
|
+
var _a;
|
|
10181
|
+
// Parse agent source to get persona description
|
|
10182
|
+
const { agentName, personaDescription } = parseAgentSource(agentSource);
|
|
10183
|
+
// Use agent name as fallback if no persona description is available
|
|
10184
|
+
const description = personaDescription || agentName || 'AI Agent';
|
|
10185
|
+
try {
|
|
10186
|
+
// Use preparePersona directly
|
|
10187
|
+
const { modelsRequirements } = await preparePersona(description, { llm: llmTools }, { isVerbose: false });
|
|
10188
|
+
// Extract the first model name from the requirements
|
|
10189
|
+
if (modelsRequirements.length > 0 && ((_a = modelsRequirements[0]) === null || _a === void 0 ? void 0 : _a.modelName)) {
|
|
10190
|
+
return modelsRequirements[0].modelName;
|
|
10191
|
+
}
|
|
10192
|
+
// Fallback: get available models and return the first CHAT model
|
|
10193
|
+
const availableModels = await llmTools.listModels();
|
|
10194
|
+
const chatModels = availableModels.filter(({ modelVariant }) => modelVariant === 'CHAT');
|
|
10195
|
+
if (chatModels.length === 0) {
|
|
10196
|
+
throw new Error('No CHAT models available for agent model selection');
|
|
10197
|
+
}
|
|
10198
|
+
return chatModels[0].modelName;
|
|
10199
|
+
}
|
|
10200
|
+
catch (error) {
|
|
10201
|
+
console.warn('Failed to use preparePersona for model selection, falling back to first available model:', error);
|
|
10202
|
+
// Fallback: get available models and return the first CHAT model
|
|
10203
|
+
const availableModels = await llmTools.listModels();
|
|
10204
|
+
const chatModels = availableModels.filter(({ modelVariant }) => modelVariant === 'CHAT');
|
|
10205
|
+
if (chatModels.length === 0) {
|
|
10206
|
+
throw new Error('No CHAT models available for agent model selection');
|
|
10207
|
+
}
|
|
10208
|
+
return chatModels[0].modelName;
|
|
10209
|
+
}
|
|
10210
|
+
}
|
|
10211
|
+
/**
|
|
10212
|
+
* Extracts MCP servers from agent source
|
|
10213
|
+
*
|
|
10214
|
+
* @param agentSource The agent source string that may contain MCP lines
|
|
10215
|
+
* @returns Array of MCP server identifiers
|
|
10216
|
+
*
|
|
10217
|
+
* @private TODO: [š§ ] Maybe should be public
|
|
10218
|
+
*/
|
|
10219
|
+
function extractMcpServers(agentSource) {
|
|
10220
|
+
if (!agentSource) {
|
|
10221
|
+
return [];
|
|
10222
|
+
}
|
|
10223
|
+
const lines = agentSource.split('\n');
|
|
10224
|
+
const mcpRegex = /^\s*MCP\s+(.+)$/i;
|
|
10225
|
+
const mcpServers = [];
|
|
10226
|
+
// Look for MCP lines
|
|
9800
10227
|
for (const line of lines) {
|
|
9801
10228
|
const match = line.match(mcpRegex);
|
|
9802
10229
|
if (match && match[1]) {
|
|
@@ -9888,17 +10315,6 @@
// <- !!! Buttons into genesis book
// <- TODO: !!! generateBookBoilerplate and deprecate `DEFAULT_BOOK`

- /**
- * Trims string from all 4 sides
- *
- * Note: This is a re-exported function from the `spacetrim` package which is
- * Developed by same author @hejny as this package
- *
- * @public exported from `@promptbook/utils`
- * @see https://github.com/hejny/spacetrim#usage
- */
- const spaceTrim = spaceTrim$1.spaceTrim;
-
/**
* Agent collection stored in Supabase table
*
@@ -9907,7 +10323,7 @@
* @public exported from `@promptbook/core`
* <- TODO: !!! Move to `@promptbook/supabase` package
*/
- class AgentCollectionInSupabase /* TODO:
+ class AgentCollectionInSupabase /* TODO: !!!!!! implements Agent */ {
/**
* @param rootPath - path to the directory with agents
* @param tools - Execution tools to be used in !!! `Agent` itself and listing the agents
@@ -9928,9 +10344,7 @@
*/
async listAgents( /* TODO: [š§ ] Allow to pass some condition here */) {
const { isVerbose = exports.DEFAULT_IS_VERBOSE } = this.options || {};
- const selectResult = await this.supabaseClient
- .from('AgentCollection' /* <- TODO: !!!! Change to `Agent` */)
- .select('agentProfile');
+ const selectResult = await this.supabaseClient.from('Agent').select('agentName,agentProfile');
if (selectResult.error) {
throw new DatabaseError(spaceTrim((block) => `

@@ -9942,14 +10356,27 @@
if (isVerbose) {
console.info(`Found ${selectResult.data.length} agents in directory`);
}
- return selectResult.data.map((
+ return selectResult.data.map(({ agentName, agentProfile }) => {
+ if (isVerbose && agentProfile.agentName !== agentName) {
+ console.warn(spaceTrim(`
+ Agent name mismatch for agent "${agentName}". Using name from database.
+
+ agentName: "${agentName}"
+ agentProfile.agentName: "${agentProfile.agentName}"
+ `));
+ }
+ return {
+ ...agentProfile,
+ agentName,
+ };
+ });
}
/**
* !!!@@@
*/
async getAgentSource(agentName) {
const selectResult = await this.supabaseClient
- .from('
+ .from('Agent')
.select('agentSource')
.eq('agentName', agentName)
.single();
@@ -9977,65 +10404,88 @@
|
|
|
9977
10404
|
async createAgent(agentSource) {
|
|
9978
10405
|
const agentProfile = parseAgentSource(agentSource);
|
|
9979
10406
|
// <- TODO: [š]
|
|
9980
|
-
const
|
|
9981
|
-
|
|
9982
|
-
|
|
9983
|
-
|
|
10407
|
+
const { agentName, agentHash } = agentProfile;
|
|
10408
|
+
const insertAgentResult = await this.supabaseClient.from('Agent').insert({
|
|
10409
|
+
agentName,
|
|
10410
|
+
agentHash,
|
|
9984
10411
|
agentProfile,
|
|
9985
10412
|
createdAt: new Date().toISOString(),
|
|
9986
10413
|
updatedAt: null,
|
|
9987
|
-
agentVersion: 0,
|
|
9988
10414
|
promptbookEngineVersion: PROMPTBOOK_ENGINE_VERSION,
|
|
9989
10415
|
usage: ZERO_USAGE,
|
|
9990
10416
|
agentSource: agentSource,
|
|
9991
10417
|
});
|
|
9992
|
-
if (
|
|
10418
|
+
if (insertAgentResult.error) {
|
|
9993
10419
|
throw new DatabaseError(spaceTrim((block) => `
|
|
9994
10420
|
Error creating agent "${agentProfile.agentName}" in Supabase:
|
|
9995
10421
|
|
|
9996
|
-
${block(
|
|
10422
|
+
${block(insertAgentResult.error.message)}
|
|
9997
10423
|
`));
|
|
9998
10424
|
}
|
|
10425
|
+
await this.supabaseClient.from('AgentHistory').insert({
|
|
10426
|
+
createdAt: new Date().toISOString(),
|
|
10427
|
+
agentName,
|
|
10428
|
+
agentHash,
|
|
10429
|
+
previousAgentHash: null,
|
|
10430
|
+
agentSource,
|
|
10431
|
+
promptbookEngineVersion: PROMPTBOOK_ENGINE_VERSION,
|
|
10432
|
+
});
|
|
10433
|
+
// <- TODO: [š§ ] What to do with `insertAgentHistoryResult.error`, ignore? wait?
|
|
9999
10434
|
return agentProfile;
|
|
10000
10435
|
}
|
|
10001
10436
|
/**
|
|
10002
10437
|
* Updates an existing agent in the collection
|
|
10003
10438
|
*/
|
|
10004
10439
|
async updateAgentSource(agentName, agentSource) {
|
|
10005
|
-
const
|
|
10006
|
-
.from('
|
|
10007
|
-
.select('
|
|
10440
|
+
const selectPreviousAgentResult = await this.supabaseClient
|
|
10441
|
+
.from('Agent')
|
|
10442
|
+
.select('agentHash,agentName')
|
|
10008
10443
|
.eq('agentName', agentName)
|
|
10009
10444
|
.single();
|
|
10010
|
-
if (
|
|
10011
|
-
throw new
|
|
10445
|
+
if (selectPreviousAgentResult.error) {
|
|
10446
|
+
throw new DatabaseError(spaceTrim((block) => `
|
|
10447
|
+
|
|
10448
|
+
Error fetching agent "${agentName}" from Supabase:
|
|
10449
|
+
|
|
10450
|
+
${block(selectPreviousAgentResult.error.message)}
|
|
10451
|
+
`));
|
|
10452
|
+
// <- TODO: !!! First check if the error is "not found" and throw `NotFoundError` instead then throw `DatabaseError`
|
|
10012
10453
|
}
|
|
10454
|
+
selectPreviousAgentResult.data.agentName;
|
|
10455
|
+
const previousAgentHash = selectPreviousAgentResult.data.agentHash;
|
|
10013
10456
|
const agentProfile = parseAgentSource(agentSource);
|
|
10014
|
-
// TODO:
|
|
10015
|
-
|
|
10016
|
-
const
|
|
10017
|
-
|
|
10018
|
-
.from('AgentCollection' /* <- TODO: !!!! Change to `Agent` */)
|
|
10457
|
+
// <- TODO: [š]
|
|
10458
|
+
const { agentHash } = agentProfile;
|
|
10459
|
+
const updateAgentResult = await this.supabaseClient
|
|
10460
|
+
.from('Agent')
|
|
10019
10461
|
.update({
|
|
10020
10462
|
// TODO: !!!! Compare not update> agentName: agentProfile.agentName || '!!!!!' /* <- TODO: !!!! Remove */,
|
|
10021
10463
|
agentProfile,
|
|
10022
10464
|
updatedAt: new Date().toISOString(),
|
|
10023
|
-
|
|
10465
|
+
agentHash: agentProfile.agentHash,
|
|
10024
10466
|
agentSource,
|
|
10025
10467
|
promptbookEngineVersion: PROMPTBOOK_ENGINE_VERSION,
|
|
10026
10468
|
})
|
|
10027
10469
|
.eq('agentName', agentName);
|
|
10028
|
-
|
|
10029
|
-
console.log('!!!
|
|
10030
|
-
console.log('!!!
|
|
10031
|
-
|
|
10032
|
-
if (updateResult.error) {
|
|
10470
|
+
// console.log('!!! updateAgent', updateResult);
|
|
10471
|
+
// console.log('!!! old', oldAgentSource);
|
|
10472
|
+
// console.log('!!! new', newAgentSource);
|
|
10473
|
+
if (updateAgentResult.error) {
|
|
10033
10474
|
throw new DatabaseError(spaceTrim((block) => `
|
|
10034
10475
|
Error updating agent "${agentName}" in Supabase:
|
|
10035
10476
|
|
|
10036
|
-
${block(
|
|
10477
|
+
${block(updateAgentResult.error.message)}
|
|
10037
10478
|
`));
|
|
10038
10479
|
}
|
|
10480
|
+
await this.supabaseClient.from('AgentHistory').insert({
|
|
10481
|
+
createdAt: new Date().toISOString(),
|
|
10482
|
+
agentName,
|
|
10483
|
+
agentHash,
|
|
10484
|
+
previousAgentHash,
|
|
10485
|
+
agentSource,
|
|
10486
|
+
promptbookEngineVersion: PROMPTBOOK_ENGINE_VERSION,
|
|
10487
|
+
});
|
|
10488
|
+
// <- TODO: [š§ ] What to do with `insertAgentHistoryResult.error`, ignore? wait?
|
|
10039
10489
|
}
|
|
10040
10490
|
// TODO: !!!! public async getAgentSourceSubject(agentName: string_agent_name): Promise<BehaviorSubject<string_book>>
|
|
10041
10491
|
// Use Supabase realtime logic
|
|
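Note: Both the create path and the `updateAgentSource` path above now append a row to the `AgentHistory` table, chaining versions through `previousAgentHash`. Below is a minimal sketch of reading that chain back with a standard `@supabase/supabase-js` client against the same tables; the client setup, environment variable names, and the `listAgentHistory` helper are illustrative assumptions, not part of the package.

import { createClient } from '@supabase/supabase-js';

// Illustrative client setup; the URL/key sources are assumptions for this sketch
const supabaseClient = createClient(process.env.SUPABASE_URL!, process.env.SUPABASE_ANON_KEY!);

// Reads back the audit trail written by the inserts above, oldest version first
async function listAgentHistory(agentName: string) {
    const { data, error } = await supabaseClient
        .from('AgentHistory')
        .select('createdAt,agentHash,previousAgentHash,promptbookEngineVersion')
        .eq('agentName', agentName)
        .order('createdAt', { ascending: true });

    if (error) {
        throw new Error(`Error fetching history of agent "${agentName}": ${error.message}`);
    }

    // The first row has previousAgentHash === null; each later row points at the hash it replaced
    return data;
}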
@@ -10681,83 +11131,14 @@
|
|
|
10681
11131
|
};
|
|
10682
11132
|
|
|
10683
11133
|
/**
|
|
10684
|
-
* Units of text measurement
|
|
10685
|
-
*
|
|
10686
|
-
* @see https://github.com/webgptorg/promptbook/discussions/30
|
|
10687
|
-
* @public exported from `@promptbook/core`
|
|
10688
|
-
*/
|
|
10689
|
-
const EXPECTATION_UNITS = ['CHARACTERS', 'WORDS', 'SENTENCES', 'LINES', 'PARAGRAPHS', 'PAGES'];
|
|
10690
|
-
/**
|
|
10691
|
-
* TODO: [š] Unite object for expecting amount and format - remove format
|
|
10692
|
-
*/
|
|
10693
|
-
|
|
10694
|
-
/**
|
|
10695
|
-
* Function parseNumber will parse number from string
|
|
10696
|
-
*
|
|
10697
|
-
* Note: [š] This function is idempotent.
|
|
10698
|
-
* Unlike Number.parseInt, Number.parseFloat it will never ever result in NaN
|
|
10699
|
-
* Note: it also works only with decimal numbers
|
|
10700
|
-
*
|
|
10701
|
-
* @returns parsed number
|
|
10702
|
-
* @throws {ParseError} if the value is not a number
|
|
11134
|
+
* Units of text measurement
|
|
10703
11135
|
*
|
|
10704
|
-
* @
|
|
11136
|
+
* @see https://github.com/webgptorg/promptbook/discussions/30
|
|
11137
|
+
* @public exported from `@promptbook/core`
|
|
10705
11138
|
*/
|
|
10706
|
-
|
|
10707
|
-
const originalValue = value;
|
|
10708
|
-
if (typeof value === 'number') {
|
|
10709
|
-
value = value.toString(); // <- TODO: Maybe more efficient way to do this
|
|
10710
|
-
}
|
|
10711
|
-
if (typeof value !== 'string') {
|
|
10712
|
-
return 0;
|
|
10713
|
-
}
|
|
10714
|
-
value = value.trim();
|
|
10715
|
-
if (value.startsWith('+')) {
|
|
10716
|
-
return parseNumber(value.substring(1));
|
|
10717
|
-
}
|
|
10718
|
-
if (value.startsWith('-')) {
|
|
10719
|
-
const number = parseNumber(value.substring(1));
|
|
10720
|
-
if (number === 0) {
|
|
10721
|
-
return 0; // <- Note: To prevent -0
|
|
10722
|
-
}
|
|
10723
|
-
return -number;
|
|
10724
|
-
}
|
|
10725
|
-
value = value.replace(/,/g, '.');
|
|
10726
|
-
value = value.toUpperCase();
|
|
10727
|
-
if (value === '') {
|
|
10728
|
-
return 0;
|
|
10729
|
-
}
|
|
10730
|
-
if (value === 'ā¾' || value.startsWith('INF')) {
|
|
10731
|
-
return Infinity;
|
|
10732
|
-
}
|
|
10733
|
-
if (value.includes('/')) {
|
|
10734
|
-
const [numerator_, denominator_] = value.split('/');
|
|
10735
|
-
const numerator = parseNumber(numerator_);
|
|
10736
|
-
const denominator = parseNumber(denominator_);
|
|
10737
|
-
if (denominator === 0) {
|
|
10738
|
-
throw new ParseError(`Unable to parse number from "${originalValue}" because denominator is zero`);
|
|
10739
|
-
}
|
|
10740
|
-
return numerator / denominator;
|
|
10741
|
-
}
|
|
10742
|
-
if (/^(NAN|NULL|NONE|UNDEFINED|ZERO|NO.*)$/.test(value)) {
|
|
10743
|
-
return 0;
|
|
10744
|
-
}
|
|
10745
|
-
if (value.includes('E')) {
|
|
10746
|
-
const [significand, exponent] = value.split('E');
|
|
10747
|
-
return parseNumber(significand) * 10 ** parseNumber(exponent);
|
|
10748
|
-
}
|
|
10749
|
-
if (!/^[0-9.]+$/.test(value) || value.split('.').length > 2) {
|
|
10750
|
-
throw new ParseError(`Unable to parse number from "${originalValue}"`);
|
|
10751
|
-
}
|
|
10752
|
-
const num = parseFloat(value);
|
|
10753
|
-
if (isNaN(num)) {
|
|
10754
|
-
throw new ParseError(`Unexpected NaN when parsing number from "${originalValue}"`);
|
|
10755
|
-
}
|
|
10756
|
-
return num;
|
|
10757
|
-
}
|
|
11139
|
+
const EXPECTATION_UNITS = ['CHARACTERS', 'WORDS', 'SENTENCES', 'LINES', 'PARAGRAPHS', 'PAGES'];
|
|
10758
11140
|
/**
|
|
10759
|
-
* TODO:
|
|
10760
|
-
* TODO: [š§ ][š»] Maybe export through `@promptbook/markdown-utils` not `@promptbook/utils`
|
|
11141
|
+
* TODO: [š] Unite object for expecting amount and format - remove format
|
|
10761
11142
|
*/
|
|
10762
11143
|
|
|
10763
11144
|
/**
|
|
@@ -10902,30 +11283,6 @@
|
|
|
10902
11283
|
},
|
|
10903
11284
|
};
|
|
10904
11285
|
|
|
10905
|
-
/**
|
|
10906
|
-
* Removes quotes from a string
|
|
10907
|
-
*
|
|
10908
|
-
* Note: [š] This function is idempotent.
|
|
10909
|
-
* Tip: This is very useful for post-processing of the result of the LLM model
|
|
10910
|
-
* Note: This function removes only the same quotes from the beginning and the end of the string
|
|
10911
|
-
* Note: There are two similar functions:
|
|
10912
|
-
* - `removeQuotes` which removes only bounding quotes
|
|
10913
|
-
* - `unwrapResult` which removes whole introduce sentence
|
|
10914
|
-
*
|
|
10915
|
-
* @param text optionally quoted text
|
|
10916
|
-
* @returns text without quotes
|
|
10917
|
-
* @public exported from `@promptbook/utils`
|
|
10918
|
-
*/
|
|
10919
|
-
function removeQuotes(text) {
|
|
10920
|
-
if (text.startsWith('"') && text.endsWith('"')) {
|
|
10921
|
-
return text.slice(1, -1);
|
|
10922
|
-
}
|
|
10923
|
-
if (text.startsWith("'") && text.endsWith("'")) {
|
|
10924
|
-
return text.slice(1, -1);
|
|
10925
|
-
}
|
|
10926
|
-
return text;
|
|
10927
|
-
}
|
|
10928
|
-
|
|
10929
11286
|
/**
|
|
10930
11287
|
* Function `validateParameterName` will normalize and validate a parameter name for use in pipelines.
|
|
10931
11288
|
* It removes diacritics, emojis, and quotes, normalizes to camelCase, and checks for reserved names and invalid characters.
|
|
@@ -12112,20 +12469,6 @@
|
|
|
12112
12469
|
persona.description += spaceTrim__default["default"]('\n\n' + personaDescription);
|
|
12113
12470
|
}
|
|
12114
12471
|
|
|
12115
|
-
/**
|
|
12116
|
-
* Checks if the given value is a valid JavaScript identifier name.
|
|
12117
|
-
*
|
|
12118
|
-
* @param javascriptName The value to check for JavaScript identifier validity.
|
|
12119
|
-
* @returns `true` if the value is a valid JavaScript name, false otherwise.
|
|
12120
|
-
* @public exported from `@promptbook/utils`
|
|
12121
|
-
*/
|
|
12122
|
-
function isValidJavascriptName(javascriptName) {
|
|
12123
|
-
if (typeof javascriptName !== 'string') {
|
|
12124
|
-
return false;
|
|
12125
|
-
}
|
|
12126
|
-
return /^[a-zA-Z_$][0-9a-zA-Z_$]*$/i.test(javascriptName);
|
|
12127
|
-
}
|
|
12128
|
-
|
|
12129
12472
|
/**
|
|
12130
12473
|
* Parses the postprocess command
|
|
12131
12474
|
*
|
|
@@ -13694,114 +14037,6 @@
|
|
|
13694
14037
|
* TODO: [š] This can be part of markdown builder
|
|
13695
14038
|
*/
|
|
13696
14039
|
|
|
13697
|
-
/**
|
|
13698
|
-
* Creates a Mermaid graph based on the promptbook
|
|
13699
|
-
*
|
|
13700
|
-
* Note: The result is not wrapped in a Markdown code block
|
|
13701
|
-
*
|
|
13702
|
-
* @public exported from `@promptbook/utils`
|
|
13703
|
-
*/
|
|
13704
|
-
function renderPromptbookMermaid(pipelineJson, options) {
|
|
13705
|
-
const { linkTask = () => null } = options || {};
|
|
13706
|
-
const MERMAID_PREFIX = 'pipeline_';
|
|
13707
|
-
const MERMAID_KNOWLEDGE_NAME = MERMAID_PREFIX + 'knowledge';
|
|
13708
|
-
const MERMAID_RESERVED_NAME = MERMAID_PREFIX + 'reserved';
|
|
13709
|
-
const MERMAID_INPUT_NAME = MERMAID_PREFIX + 'input';
|
|
13710
|
-
const MERMAID_OUTPUT_NAME = MERMAID_PREFIX + 'output';
|
|
13711
|
-
const parameterNameToTaskName = (parameterName) => {
|
|
13712
|
-
if (parameterName === 'knowledge') {
|
|
13713
|
-
return MERMAID_KNOWLEDGE_NAME;
|
|
13714
|
-
}
|
|
13715
|
-
else if (RESERVED_PARAMETER_NAMES.includes(parameterName)) {
|
|
13716
|
-
return MERMAID_RESERVED_NAME;
|
|
13717
|
-
}
|
|
13718
|
-
const parameter = pipelineJson.parameters.find((parameter) => parameter.name === parameterName);
|
|
13719
|
-
if (!parameter) {
|
|
13720
|
-
throw new UnexpectedError(`Could not find {${parameterName}}`);
|
|
13721
|
-
// <- TODO: This causes problems when {knowledge} and other reserved parameters are used
|
|
13722
|
-
}
|
|
13723
|
-
if (parameter.isInput) {
|
|
13724
|
-
return MERMAID_INPUT_NAME;
|
|
13725
|
-
}
|
|
13726
|
-
const task = pipelineJson.tasks.find((task) => task.resultingParameterName === parameterName);
|
|
13727
|
-
if (!task) {
|
|
13728
|
-
throw new Error(`Could not find task for {${parameterName}}`);
|
|
13729
|
-
}
|
|
13730
|
-
return MERMAID_PREFIX + (task.name || normalizeTo_camelCase('task-' + titleToName(task.title)));
|
|
13731
|
-
};
|
|
13732
|
-
const inputAndIntermediateParametersMermaid = pipelineJson.tasks
|
|
13733
|
-
.flatMap(({ title, dependentParameterNames, resultingParameterName }) => [
|
|
13734
|
-
`${parameterNameToTaskName(resultingParameterName)}("${title}")`,
|
|
13735
|
-
...dependentParameterNames.map((dependentParameterName) => `${parameterNameToTaskName(dependentParameterName)}--"{${dependentParameterName}}"-->${parameterNameToTaskName(resultingParameterName)}`),
|
|
13736
|
-
])
|
|
13737
|
-
.join('\n');
|
|
13738
|
-
const outputParametersMermaid = pipelineJson.parameters
|
|
13739
|
-
.filter(({ isOutput }) => isOutput)
|
|
13740
|
-
.map(({ name }) => `${parameterNameToTaskName(name)}--"{${name}}"-->${MERMAID_OUTPUT_NAME}`)
|
|
13741
|
-
.join('\n');
|
|
13742
|
-
const linksMermaid = pipelineJson.tasks
|
|
13743
|
-
.map((task) => {
|
|
13744
|
-
const link = linkTask(task);
|
|
13745
|
-
if (link === null) {
|
|
13746
|
-
return '';
|
|
13747
|
-
}
|
|
13748
|
-
const { href, title } = link;
|
|
13749
|
-
const taskName = parameterNameToTaskName(task.resultingParameterName);
|
|
13750
|
-
return `click ${taskName} href "${href}" "${title}";`;
|
|
13751
|
-
})
|
|
13752
|
-
.filter((line) => line !== '')
|
|
13753
|
-
.join('\n');
|
|
13754
|
-
const interactionPointsMermaid = Object.entries({
|
|
13755
|
-
[MERMAID_INPUT_NAME]: 'Input',
|
|
13756
|
-
[MERMAID_OUTPUT_NAME]: 'Output',
|
|
13757
|
-
[MERMAID_RESERVED_NAME]: 'Other',
|
|
13758
|
-
[MERMAID_KNOWLEDGE_NAME]: 'Knowledge',
|
|
13759
|
-
})
|
|
13760
|
-
.filter(([MERMAID_NAME]) => (inputAndIntermediateParametersMermaid + outputParametersMermaid).includes(MERMAID_NAME))
|
|
13761
|
-
.map(([MERMAID_NAME, title]) => `${MERMAID_NAME}((${title})):::${MERMAID_NAME}`)
|
|
13762
|
-
.join('\n');
|
|
13763
|
-
const promptbookMermaid = spaceTrim$1.spaceTrim((block) => `
|
|
13764
|
-
|
|
13765
|
-
%% š® Tip: Open this on GitHub or in the VSCode website to see the Mermaid graph visually
|
|
13766
|
-
|
|
13767
|
-
flowchart LR
|
|
13768
|
-
subgraph "${pipelineJson.title}"
|
|
13769
|
-
|
|
13770
|
-
%% Basic configuration
|
|
13771
|
-
direction TB
|
|
13772
|
-
|
|
13773
|
-
%% Interaction points from pipeline to outside
|
|
13774
|
-
${block(interactionPointsMermaid)}
|
|
13775
|
-
|
|
13776
|
-
%% Input and intermediate parameters
|
|
13777
|
-
${block(inputAndIntermediateParametersMermaid)}
|
|
13778
|
-
|
|
13779
|
-
|
|
13780
|
-
%% Output parameters
|
|
13781
|
-
${block(outputParametersMermaid)}
|
|
13782
|
-
|
|
13783
|
-
%% Links
|
|
13784
|
-
${block(linksMermaid)}
|
|
13785
|
-
|
|
13786
|
-
%% Styles
|
|
13787
|
-
classDef ${MERMAID_INPUT_NAME} color: grey;
|
|
13788
|
-
classDef ${MERMAID_OUTPUT_NAME} color: grey;
|
|
13789
|
-
classDef ${MERMAID_RESERVED_NAME} color: grey;
|
|
13790
|
-
classDef ${MERMAID_KNOWLEDGE_NAME} color: grey;
|
|
13791
|
-
|
|
13792
|
-
end;
|
|
13793
|
-
|
|
13794
|
-
`);
|
|
13795
|
-
return promptbookMermaid;
|
|
13796
|
-
}
|
|
13797
|
-
/**
|
|
13798
|
-
* TODO: [š§ ] FOREACH in mermaid graph
|
|
13799
|
-
* TODO: [š§ ] Knowledge in mermaid graph
|
|
13800
|
-
* TODO: [š§ ] Personas in mermaid graph
|
|
13801
|
-
* TODO: Maybe use some Mermaid package instead of string templating
|
|
13802
|
-
* TODO: [š] When more than 2 functionalities, split into separate functions
|
|
13803
|
-
*/
|
|
13804
|
-
|
|
13805
14040
|
/**
|
|
13806
14041
|
* Prettyfies Promptbook string and adds Mermaid graph
|
|
13807
14042
|
*
|
|
@@ -14353,71 +14588,13 @@
|
|
|
14353
14588
|
/**
|
|
14354
14589
|
* Register for LLM tools.
|
|
14355
14590
|
*
|
|
14356
|
-
* Note: `$` is used to indicate that this interacts with the global scope
|
|
14357
|
-
* @singleton Only one instance of each register is created per build, but there can be more instances across different builds or environments.
|
|
14358
|
-
* @public exported from `@promptbook/core`
|
|
14359
|
-
*/
|
|
14360
|
-
const $llmToolsRegister = new $Register('llm_execution_tools_constructors');
|
|
14361
|
-
/**
|
|
14362
|
-
* TODO: [®] DRY Register logic
|
|
14363
|
-
*/
|
|
14364
|
-
|
|
14365
|
-
/**
|
|
14366
|
-
* Detects if the code is running in a browser environment in main thread (Not in a web worker)
|
|
14367
|
-
*
|
|
14368
|
-
* Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
|
|
14369
|
-
*
|
|
14370
|
-
* @public exported from `@promptbook/utils`
|
|
14371
|
-
*/
|
|
14372
|
-
const $isRunningInBrowser = new Function(`
|
|
14373
|
-
try {
|
|
14374
|
-
return this === window;
|
|
14375
|
-
} catch (e) {
|
|
14376
|
-
return false;
|
|
14377
|
-
}
|
|
14378
|
-
`);
|
|
14379
|
-
/**
|
|
14380
|
-
* TODO: [šŗ]
|
|
14381
|
-
*/
|
|
14382
|
-
|
|
14383
|
-
/**
|
|
14384
|
-
* Detects if the code is running in a Node.js environment
|
|
14385
|
-
*
|
|
14386
|
-
* Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
|
|
14387
|
-
*
|
|
14388
|
-
* @public exported from `@promptbook/utils`
|
|
14389
|
-
*/
|
|
14390
|
-
const $isRunningInNode = new Function(`
|
|
14391
|
-
try {
|
|
14392
|
-
return this === global;
|
|
14393
|
-
} catch (e) {
|
|
14394
|
-
return false;
|
|
14395
|
-
}
|
|
14396
|
-
`);
|
|
14397
|
-
/**
|
|
14398
|
-
* TODO: [šŗ]
|
|
14399
|
-
*/
|
|
14400
|
-
|
|
14401
|
-
/**
|
|
14402
|
-
* Detects if the code is running in a web worker
|
|
14403
|
-
*
|
|
14404
|
-
* Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
|
|
14405
|
-
*
|
|
14406
|
-
* @public exported from `@promptbook/utils`
|
|
14591
|
+
* Note: `$` is used to indicate that this interacts with the global scope
|
|
14592
|
+
* @singleton Only one instance of each register is created per build, but there can be more instances across different builds or environments.
|
|
14593
|
+
* @public exported from `@promptbook/core`
|
|
14407
14594
|
*/
|
|
14408
|
-
const $
|
|
14409
|
-
try {
|
|
14410
|
-
if (typeof WorkerGlobalScope !== 'undefined' && self instanceof WorkerGlobalScope) {
|
|
14411
|
-
return true;
|
|
14412
|
-
} else {
|
|
14413
|
-
return false;
|
|
14414
|
-
}
|
|
14415
|
-
} catch (e) {
|
|
14416
|
-
return false;
|
|
14417
|
-
}
|
|
14418
|
-
`);
|
|
14595
|
+
const $llmToolsRegister = new $Register('llm_execution_tools_constructors');
|
|
14419
14596
|
/**
|
|
14420
|
-
* TODO: [
|
|
14597
|
+
* TODO: [®] DRY Register logic
|
|
14421
14598
|
*/
|
|
14422
14599
|
|
|
14423
14600
|
/**
|
|
@@ -14653,18 +14830,6 @@
|
|
|
14653
14830
|
}
|
|
14654
14831
|
}
|
|
14655
14832
|
|
|
14656
|
-
/**
|
|
14657
|
-
* Simple wrapper `new Date().toISOString()`
|
|
14658
|
-
*
|
|
14659
|
-
* Note: `$` is used to indicate that this function is not a pure function - it is not deterministic because it depends on the current time
|
|
14660
|
-
*
|
|
14661
|
-
* @returns string_date branded type
|
|
14662
|
-
* @public exported from `@promptbook/utils`
|
|
14663
|
-
*/
|
|
14664
|
-
function $getCurrentDate() {
|
|
14665
|
-
return new Date().toISOString();
|
|
14666
|
-
}
|
|
14667
|
-
|
|
14668
14833
|
/**
|
|
14669
14834
|
* Intercepts LLM tools and counts total usage of the tools
|
|
14670
14835
|
*
|
|
@@ -15291,17 +15456,17 @@
|
|
|
15291
15456
|
},
|
|
15292
15457
|
/**/
|
|
15293
15458
|
/*/
|
|
15294
|
-
|
|
15295
|
-
|
|
15296
|
-
|
|
15297
|
-
|
|
15298
|
-
|
|
15459
|
+
{
|
|
15460
|
+
modelTitle: 'tts-1-hd-1106',
|
|
15461
|
+
modelName: 'tts-1-hd-1106',
|
|
15462
|
+
},
|
|
15463
|
+
/**/
|
|
15299
15464
|
/*/
|
|
15300
|
-
|
|
15301
|
-
|
|
15302
|
-
|
|
15303
|
-
|
|
15304
|
-
|
|
15465
|
+
{
|
|
15466
|
+
modelTitle: 'tts-1-hd',
|
|
15467
|
+
modelName: 'tts-1-hd',
|
|
15468
|
+
},
|
|
15469
|
+
/**/
|
|
15305
15470
|
/**/
|
|
15306
15471
|
{
|
|
15307
15472
|
modelVariant: 'CHAT',
|
|
@@ -16662,15 +16827,19 @@
|
|
|
16662
16827
|
},
|
|
16663
16828
|
});
|
|
16664
16829
|
}
|
|
16665
|
-
|
|
16830
|
+
/*
|
|
16831
|
+
public async playground() {
|
|
16666
16832
|
const client = await this.getClient();
|
|
16833
|
+
|
|
16667
16834
|
// List all assistants
|
|
16668
16835
|
const assistants = await client.beta.assistants.list();
|
|
16669
16836
|
console.log('!!! Assistants:', assistants);
|
|
16837
|
+
|
|
16670
16838
|
// Get details of a specific assistant
|
|
16671
16839
|
const assistantId = 'asst_MO8fhZf4dGloCfXSHeLcIik0';
|
|
16672
16840
|
const assistant = await client.beta.assistants.retrieve(assistantId);
|
|
16673
16841
|
console.log('!!! Assistant Details:', assistant);
|
|
16842
|
+
|
|
16674
16843
|
// Update an assistant
|
|
16675
16844
|
const updatedAssistant = await client.beta.assistants.update(assistantId, {
|
|
16676
16845
|
name: assistant.name + '(M)',
|
|
@@ -16680,7 +16849,18 @@
|
|
|
16680
16849
|
},
|
|
16681
16850
|
});
|
|
16682
16851
|
console.log('!!! Updated Assistant:', updatedAssistant);
|
|
16683
|
-
|
|
16852
|
+
|
|
16853
|
+
await forEver();
|
|
16854
|
+
}
|
|
16855
|
+
*/
|
|
16856
|
+
/**
|
|
16857
|
+
* Get an existing assistant tool wrapper
|
|
16858
|
+
*/
|
|
16859
|
+
getAssistant(assistantId) {
|
|
16860
|
+
return new OpenAiAssistantExecutionTools({
|
|
16861
|
+
...this.options,
|
|
16862
|
+
assistantId,
|
|
16863
|
+
});
|
|
16684
16864
|
}
|
|
16685
16865
|
async createNewAssistant(options) {
|
|
16686
16866
|
if (!this.isCreatingNewAssistantsAllowed) {
|
|
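Note: The new `getAssistant(assistantId)` method shown above only wraps an assistant that already exists in the OpenAI account; nothing is created. A hedged usage sketch follows; the `openAiTools` declaration, the assistant id, and the prompt/result shapes are placeholders for illustration, only the `getAssistant` and `callChatModel` calls mirror the code in this diff.

// Placeholder typing for the sketch; in the package this is the execution tools class shown above
declare const openAiTools: {
    getAssistant(assistantId: string): { callChatModel(prompt: unknown): Promise<{ content: string }> };
};

// Reuse an existing assistant (the id here is a placeholder, not a real assistant)
const assistantTools = openAiTools.getAssistant('asst_xxxxxxxxxxxx');
const reply = await assistantTools.callChatModel({ content: 'Hello!' });
console.log(reply.content);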
@@ -16776,6 +16956,95 @@
|
|
|
16776
16956
|
assistantId: assistant.id,
|
|
16777
16957
|
});
|
|
16778
16958
|
}
|
|
16959
|
+
async updateAssistant(options) {
|
|
16960
|
+
if (!this.isCreatingNewAssistantsAllowed) {
|
|
16961
|
+
throw new NotAllowed(`Updating assistants is not allowed. Set \`isCreatingNewAssistantsAllowed: true\` in options to enable this feature.`);
|
|
16962
|
+
}
|
|
16963
|
+
const { assistantId, name, instructions, knowledgeSources } = options;
|
|
16964
|
+
const client = await this.getClient();
|
|
16965
|
+
let vectorStoreId;
|
|
16966
|
+
// If knowledge sources are provided, create a vector store with them
|
|
16967
|
+
// TODO: [🧠] Reuse vector store creation logic from createNewAssistant
|
|
16968
|
+
if (knowledgeSources && knowledgeSources.length > 0) {
|
|
16969
|
+
if (this.options.isVerbose) {
|
|
16970
|
+
console.info(`š Creating vector store for update with ${knowledgeSources.length} knowledge sources...`);
|
|
16971
|
+
}
|
|
16972
|
+
// Create a vector store
|
|
16973
|
+
const vectorStore = await client.beta.vectorStores.create({
|
|
16974
|
+
name: `${name} Knowledge Base`,
|
|
16975
|
+
});
|
|
16976
|
+
vectorStoreId = vectorStore.id;
|
|
16977
|
+
if (this.options.isVerbose) {
|
|
16978
|
+
console.info(`✅ Vector store created: ${vectorStoreId}`);
|
|
16979
|
+
}
|
|
16980
|
+
// Upload files from knowledge sources to the vector store
|
|
16981
|
+
const fileStreams = [];
|
|
16982
|
+
for (const source of knowledgeSources) {
|
|
16983
|
+
try {
|
|
16984
|
+
// Check if it's a URL
|
|
16985
|
+
if (source.startsWith('http://') || source.startsWith('https://')) {
|
|
16986
|
+
// Download the file
|
|
16987
|
+
const response = await fetch(source);
|
|
16988
|
+
if (!response.ok) {
|
|
16989
|
+
console.error(`Failed to download ${source}: ${response.statusText}`);
|
|
16990
|
+
continue;
|
|
16991
|
+
}
|
|
16992
|
+
const buffer = await response.arrayBuffer();
|
|
16993
|
+
const filename = source.split('/').pop() || 'downloaded-file';
|
|
16994
|
+
const blob = new Blob([buffer]);
|
|
16995
|
+
const file = new File([blob], filename);
|
|
16996
|
+
fileStreams.push(file);
|
|
16997
|
+
}
|
|
16998
|
+
else {
|
|
16999
|
+
// Assume it's a local file path
|
|
17000
|
+
// Note: This will work in Node.js environment
|
|
17001
|
+
// For browser environments, this would need different handling
|
|
17002
|
+
const fs = await import('fs');
|
|
17003
|
+
const fileStream = fs.createReadStream(source);
|
|
17004
|
+
fileStreams.push(fileStream);
|
|
17005
|
+
}
|
|
17006
|
+
}
|
|
17007
|
+
catch (error) {
|
|
17008
|
+
console.error(`Error processing knowledge source ${source}:`, error);
|
|
17009
|
+
}
|
|
17010
|
+
}
|
|
17011
|
+
// Batch upload files to the vector store
|
|
17012
|
+
if (fileStreams.length > 0) {
|
|
17013
|
+
try {
|
|
17014
|
+
await client.beta.vectorStores.fileBatches.uploadAndPoll(vectorStoreId, {
|
|
17015
|
+
files: fileStreams,
|
|
17016
|
+
});
|
|
17017
|
+
if (this.options.isVerbose) {
|
|
17018
|
+
console.info(`✅ Uploaded ${fileStreams.length} files to vector store`);
|
|
17019
|
+
}
|
|
17020
|
+
}
|
|
17021
|
+
catch (error) {
|
|
17022
|
+
console.error('Error uploading files to vector store:', error);
|
|
17023
|
+
}
|
|
17024
|
+
}
|
|
17025
|
+
}
|
|
17026
|
+
const assistantUpdate = {
|
|
17027
|
+
name,
|
|
17028
|
+
instructions,
|
|
17029
|
+
tools: [/* TODO: [🧠] Maybe add { type: 'code_interpreter' }, */ { type: 'file_search' }],
|
|
17030
|
+
};
|
|
17031
|
+
if (vectorStoreId) {
|
|
17032
|
+
assistantUpdate.tool_resources = {
|
|
17033
|
+
file_search: {
|
|
17034
|
+
vector_store_ids: [vectorStoreId],
|
|
17035
|
+
},
|
|
17036
|
+
};
|
|
17037
|
+
}
|
|
17038
|
+
const assistant = await client.beta.assistants.update(assistantId, assistantUpdate);
|
|
17039
|
+
if (this.options.isVerbose) {
|
|
17040
|
+
console.log(`✅ Assistant updated: ${assistant.id}`);
|
|
17041
|
+
}
|
|
17042
|
+
return new OpenAiAssistantExecutionTools({
|
|
17043
|
+
...this.options,
|
|
17044
|
+
isCreatingNewAssistantsAllowed: false,
|
|
17045
|
+
assistantId: assistant.id,
|
|
17046
|
+
});
|
|
17047
|
+
}
|
|
16779
17048
|
/**
|
|
16780
17049
|
* Discriminant for type guards
|
|
16781
17050
|
*/
|
|
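Note: The `updateAssistant` method added above refreshes an existing assistant in place: it optionally builds a new vector store from `knowledgeSources`, uploads the referenced files, and then updates the assistant's name, instructions, and `file_search` resources. The sketch below shows a call site using the same options shape as the diff; the `openAiTools` declaration, ids, and URLs are placeholders, and the method requires an instance created with `isCreatingNewAssistantsAllowed: true`.

// Placeholder declaration for the sketch (mirrors only the members used here)
declare const openAiTools: {
    updateAssistant(options: {
        assistantId: string;
        name: string;
        instructions: string;
        knowledgeSources?: string[];
    }): Promise<{ callChatModel(prompt: unknown): Promise<{ content: string }> }>;
};

// All identifiers below are placeholders for the sketch
const refreshedAssistantTools = await openAiTools.updateAssistant({
    assistantId: 'asst_xxxxxxxxxxxx',
    name: 'Support Agent',
    instructions: 'You are a concise, friendly support agent.',
    knowledgeSources: ['https://example.com/handbook.pdf'],
});

// The returned wrapper is bound to the updated assistant and can be used for chat calls
const answer = await refreshedAssistantTools.callChatModel({ content: 'How do I reset my password?' });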
@@ -16917,27 +17186,58 @@
|
|
|
16917
17186
|
const chatPrompt = prompt;
|
|
16918
17187
|
let underlyingLlmResult;
|
|
16919
17188
|
if (OpenAiAssistantExecutionTools.isOpenAiAssistantExecutionTools(this.options.llmTools)) {
|
|
16920
|
-
|
|
16921
|
-
|
|
17189
|
+
const requirementsHash = cryptoJs.SHA256(JSON.stringify(modelRequirements)).toString();
|
|
17190
|
+
const cached = AgentLlmExecutionTools.assistantCache.get(this.title);
|
|
17191
|
+
let assistant;
|
|
17192
|
+
if (cached) {
|
|
17193
|
+
if (cached.requirementsHash === requirementsHash) {
|
|
17194
|
+
if (this.options.isVerbose) {
|
|
17195
|
+
console.log(`1️⃣ Using cached OpenAI Assistant for agent ${this.title}...`);
|
|
17196
|
+
}
|
|
17197
|
+
assistant = this.options.llmTools.getAssistant(cached.assistantId);
|
|
17198
|
+
}
|
|
17199
|
+
else {
|
|
17200
|
+
if (this.options.isVerbose) {
|
|
17201
|
+
console.log(`1️⃣ Updating OpenAI Assistant for agent ${this.title}...`);
|
|
17202
|
+
}
|
|
17203
|
+
assistant = await this.options.llmTools.updateAssistant({
|
|
17204
|
+
assistantId: cached.assistantId,
|
|
17205
|
+
name: this.title,
|
|
17206
|
+
instructions: modelRequirements.systemMessage,
|
|
17207
|
+
knowledgeSources: modelRequirements.knowledgeSources,
|
|
17208
|
+
});
|
|
17209
|
+
AgentLlmExecutionTools.assistantCache.set(this.title, {
|
|
17210
|
+
assistantId: assistant.assistantId,
|
|
17211
|
+
requirementsHash,
|
|
17212
|
+
});
|
|
17213
|
+
}
|
|
16922
17214
|
}
|
|
16923
|
-
|
|
16924
|
-
|
|
16925
|
-
|
|
16926
|
-
instructions: modelRequirements.systemMessage,
|
|
16927
|
-
knowledgeSources: modelRequirements.knowledgeSources,
|
|
16928
|
-
/*
|
|
16929
|
-
!!!
|
|
16930
|
-
metadata: {
|
|
16931
|
-
agentModelName: this.modelName,
|
|
17215
|
+
else {
|
|
17216
|
+
if (this.options.isVerbose) {
|
|
17217
|
+
console.log(`1️⃣ Creating new OpenAI Assistant for agent ${this.title}...`);
|
|
16932
17218
|
}
|
|
16933
|
-
|
|
16934
|
-
|
|
16935
|
-
|
|
17219
|
+
// <- TODO: !!! Check also `isCreatingNewAssistantsAllowed` and warn about it
|
|
17220
|
+
assistant = await this.options.llmTools.createNewAssistant({
|
|
17221
|
+
name: this.title,
|
|
17222
|
+
instructions: modelRequirements.systemMessage,
|
|
17223
|
+
knowledgeSources: modelRequirements.knowledgeSources,
|
|
17224
|
+
/*
|
|
17225
|
+
!!!
|
|
17226
|
+
metadata: {
|
|
17227
|
+
agentModelName: this.modelName,
|
|
17228
|
+
}
|
|
17229
|
+
*/
|
|
17230
|
+
});
|
|
17231
|
+
AgentLlmExecutionTools.assistantCache.set(this.title, {
|
|
17232
|
+
assistantId: assistant.assistantId,
|
|
17233
|
+
requirementsHash,
|
|
17234
|
+
});
|
|
17235
|
+
}
|
|
16936
17236
|
underlyingLlmResult = await assistant.callChatModel(chatPrompt);
|
|
16937
17237
|
}
|
|
16938
17238
|
else {
|
|
16939
17239
|
if (this.options.isVerbose) {
|
|
16940
|
-
console.log(`Creating Assistant ${this.title} on generic LLM execution tools...`);
|
|
17240
|
+
console.log(`2️⃣ Creating Assistant ${this.title} on generic LLM execution tools...`);
|
|
16941
17241
|
}
|
|
16942
17242
|
// Create modified chat prompt with agent system message
|
|
16943
17243
|
const modifiedChatPrompt = {
|
|
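Note: The block above introduces a per-process cache keyed by the agent title: an OpenAI Assistant is created once, reused while the hashed model requirements stay the same, and updated in place when the hash changes. The following is a standalone restatement of that decision rule for illustration only; in the package the real cache lives as a static field on `AgentLlmExecutionTools` and the hash is computed the same way with `crypto-js`.

import sha256 from 'crypto-js/sha256';

type AssistantCacheEntry = { assistantId: string; requirementsHash: string };
const assistantCache = new Map<string, AssistantCacheEntry>();

// Same rule as above: hash the requirements and compare against the cached entry
function decideAssistantAction(title: string, modelRequirements: object): 'reuse' | 'update' | 'create' {
    const requirementsHash = sha256(JSON.stringify(modelRequirements)).toString();
    const cached = assistantCache.get(title);
    if (!cached) {
        return 'create'; // first call for this agent title
    }
    return cached.requirementsHash === requirementsHash ? 'reuse' : 'update';
}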
@@ -16967,6 +17267,10 @@
|
|
|
16967
17267
|
return agentResult;
|
|
16968
17268
|
}
|
|
16969
17269
|
}
|
|
17270
|
+
/**
|
|
17271
|
+
* Cache of OpenAI assistants to avoid creating duplicates
|
|
17272
|
+
*/
|
|
17273
|
+
AgentLlmExecutionTools.assistantCache = new Map();
|
|
16970
17274
|
/**
|
|
16971
17275
|
* TODO: [š] Implement Destroyable pattern to free resources
|
|
16972
17276
|
* TODO: [🧠] Adding parameter substitution support (here or should be responsibility of the underlying LLM Tools)
|
|
@@ -16984,6 +17288,18 @@
|
|
|
16984
17288
|
* @public exported from `@promptbook/core`
|
|
16985
17289
|
*/
|
|
16986
17290
|
class Agent extends AgentLlmExecutionTools {
|
|
17291
|
+
/**
|
|
17292
|
+
* Name of the agent
|
|
17293
|
+
*/
|
|
17294
|
+
get agentName() {
|
|
17295
|
+
return this._agentName || createDefaultAgentName(this.agentSource.value);
|
|
17296
|
+
}
|
|
17297
|
+
/**
|
|
17298
|
+
* Computed hash of the agent source for integrity verification
|
|
17299
|
+
*/
|
|
17300
|
+
get agentHash() {
|
|
17301
|
+
return computeAgentHash(this.agentSource.value);
|
|
17302
|
+
}
|
|
16987
17303
|
/**
|
|
16988
17304
|
* Not used in Agent, always returns empty array
|
|
16989
17305
|
*/
|
|
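Note: The `agentName` and `agentHash` getters added above delegate to helpers that this release also exports from `@promptbook/core` (see the export list near the end of the diff). A small sketch of calling them directly; the book source string is only an example and the exact return shape of `normalizeAgentName` is an assumption.

import { computeAgentHash, createDefaultAgentName, normalizeAgentName } from '@promptbook/core';

// Example book source; any agent source string works here
const agentSource = `Paul the Poet

PERSONA Writes short, friendly poems on request`;

const agentHash = computeAgentHash(agentSource);          // same value the `agentHash` getter returns
const defaultName = createDefaultAgentName(agentSource);  // fallback used when no explicit agent name is set
const normalizedName = normalizeAgentName('Paul the Poet'); // assumed to turn a display name into an identifier-like form

console.log({ agentHash, defaultName, normalizedName });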
@@ -16999,10 +17315,7 @@
|
|
|
16999
17315
|
llmTools: getSingleLlmExecutionTools(options.executionTools.llm),
|
|
17000
17316
|
agentSource: agentSource.value, // <- TODO: !!!! Allow to pass BehaviorSubject<string_book> OR refresh llmExecutionTools.callChat on agentSource change
|
|
17001
17317
|
});
|
|
17002
|
-
|
|
17003
|
-
* Name of the agent
|
|
17004
|
-
*/
|
|
17005
|
-
this.agentName = null;
|
|
17318
|
+
this._agentName = undefined;
|
|
17006
17319
|
/**
|
|
17007
17320
|
* Description of the agent
|
|
17008
17321
|
*/
|
|
@@ -17011,12 +17324,12 @@
|
|
|
17011
17324
|
* Metadata like image or color
|
|
17012
17325
|
*/
|
|
17013
17326
|
this.meta = {};
|
|
17014
|
-
// TODO:
|
|
17015
|
-
// TODO:
|
|
17327
|
+
// TODO: !!!!! Add `Agent` simple "mocked" learning by appending to agent source
|
|
17328
|
+
// TODO: !!!!! Add `Agent` learning by promptbookAgent
|
|
17016
17329
|
this.agentSource = agentSource;
|
|
17017
17330
|
this.agentSource.subscribe((source) => {
|
|
17018
17331
|
const { agentName, personaDescription, meta } = parseAgentSource(source);
|
|
17019
|
-
this.
|
|
17332
|
+
this._agentName = agentName;
|
|
17020
17333
|
this.personaDescription = personaDescription;
|
|
17021
17334
|
this.meta = { ...this.meta, ...meta };
|
|
17022
17335
|
});
|
|
@@ -17090,9 +17403,9 @@
|
|
|
17090
17403
|
/**
|
|
17091
17404
|
* Represents one AI Agent
|
|
17092
17405
|
*
|
|
17093
|
-
*
|
|
17406
|
+
* !!!!!! Note: [š¦] There are several different things in Promptbook:
|
|
17094
17407
|
* - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
|
|
17095
|
-
*
|
|
17408
|
+
* !!!!!! `RemoteAgent`
|
|
17096
17409
|
* - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
|
|
17097
17410
|
* - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
|
|
17098
17411
|
* - `OpenAiAssistantExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities, recommended for usage in `Agent` or `AgentLlmExecutionTools`
|
|
@@ -17107,7 +17420,7 @@
|
|
|
17107
17420
|
// <- TODO: !!!! Maybe use promptbookFetch
|
|
17108
17421
|
const agentSourceValue = (await bookResponse.text());
|
|
17109
17422
|
const agentSource = new rxjs.BehaviorSubject(agentSourceValue);
|
|
17110
|
-
// <- TODO:
|
|
17423
|
+
// <- TODO: !!!! Support updating and self-updating
|
|
17111
17424
|
return new RemoteAgent({
|
|
17112
17425
|
...options,
|
|
17113
17426
|
executionTools: {
|
|
@@ -17168,7 +17481,7 @@
|
|
|
17168
17481
|
reader.releaseLock();
|
|
17169
17482
|
}
|
|
17170
17483
|
}
|
|
17171
|
-
// <- TODO:
|
|
17484
|
+
// <- TODO: !!!! Transfer metadata
|
|
17172
17485
|
const agentResult = {
|
|
17173
17486
|
content,
|
|
17174
17487
|
modelName: this.modelName,
|
|
@@ -17177,7 +17490,7 @@
|
|
|
17177
17490
|
rawPromptContent: {},
|
|
17178
17491
|
rawRequest: {},
|
|
17179
17492
|
rawResponse: {},
|
|
17180
|
-
// <- TODO:
|
|
17493
|
+
// <- TODO: !!!! Transfer and proxy the metadata
|
|
17181
17494
|
};
|
|
17182
17495
|
return agentResult;
|
|
17183
17496
|
}
|
|
@@ -17308,24 +17621,6 @@
|
|
|
17308
17621
|
* Note: [š] Ignore a discrepancy between file name and entity name
|
|
17309
17622
|
*/
|
|
17310
17623
|
|
|
17311
|
-
/**
|
|
17312
|
-
* Detects if the code is running in jest environment
|
|
17313
|
-
*
|
|
17314
|
-
* Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
|
|
17315
|
-
*
|
|
17316
|
-
* @public exported from `@promptbook/utils`
|
|
17317
|
-
*/
|
|
17318
|
-
const $isRunningInJest = new Function(`
|
|
17319
|
-
try {
|
|
17320
|
-
return process.env.JEST_WORKER_ID !== undefined;
|
|
17321
|
-
} catch (e) {
|
|
17322
|
-
return false;
|
|
17323
|
-
}
|
|
17324
|
-
`);
|
|
17325
|
-
/**
|
|
17326
|
-
* TODO: [šŗ]
|
|
17327
|
-
*/
|
|
17328
|
-
|
|
17329
17624
|
/**
|
|
17330
17625
|
* Registration of LLM provider metadata
|
|
17331
17626
|
*
|
|
@@ -17678,61 +17973,6 @@
|
|
|
17678
17973
|
* TODO: [š§ ][š“] Where is the best location for this file
|
|
17679
17974
|
*/
|
|
17680
17975
|
|
|
17681
|
-
/**
|
|
17682
|
-
* Tag function for notating a prompt as template literal
|
|
17683
|
-
*
|
|
17684
|
-
* Note: There are 3 similar functions:
|
|
17685
|
-
* 1) `prompt` for notating single prompt exported from `@promptbook/utils`
|
|
17686
|
-
* 2) `promptTemplate` alias for `prompt`
|
|
17687
|
-
* 3) `book` for notating and validating entire books exported from `@promptbook/utils`
|
|
17688
|
-
*
|
|
17689
|
-
* @param strings
|
|
17690
|
-
* @param values
|
|
17691
|
-
* @returns the prompt string
|
|
17692
|
-
* @public exported from `@promptbook/utils`
|
|
17693
|
-
*/
|
|
17694
|
-
function prompt(strings, ...values) {
|
|
17695
|
-
if (values.length === 0) {
|
|
17696
|
-
return spaceTrim__default["default"](strings.join(''));
|
|
17697
|
-
}
|
|
17698
|
-
const stringsWithHiddenParameters = strings.map((stringsItem) =>
|
|
17699
|
-
// TODO: [0] DRY
|
|
17700
|
-
stringsItem.split('{').join(`${REPLACING_NONCE}beginbracket`).split('}').join(`${REPLACING_NONCE}endbracket`));
|
|
17701
|
-
const placeholderParameterNames = values.map((value, i) => `${REPLACING_NONCE}${i}`);
|
|
17702
|
-
const parameters = Object.fromEntries(values.map((value, i) => [placeholderParameterNames[i], value]));
|
|
17703
|
-
// Combine strings and values
|
|
17704
|
-
let pipelineString = stringsWithHiddenParameters.reduce((result, stringsItem, i) => placeholderParameterNames[i] === undefined
|
|
17705
|
-
? `${result}${stringsItem}`
|
|
17706
|
-
: `${result}${stringsItem}{${placeholderParameterNames[i]}}`, '');
|
|
17707
|
-
pipelineString = spaceTrim__default["default"](pipelineString);
|
|
17708
|
-
try {
|
|
17709
|
-
pipelineString = templateParameters(pipelineString, parameters);
|
|
17710
|
-
}
|
|
17711
|
-
catch (error) {
|
|
17712
|
-
if (!(error instanceof PipelineExecutionError)) {
|
|
17713
|
-
throw error;
|
|
17714
|
-
}
|
|
17715
|
-
console.error({ pipelineString, parameters, placeholderParameterNames, error });
|
|
17716
|
-
throw new UnexpectedError(spaceTrim__default["default"]((block) => `
|
|
17717
|
-
Internal error in prompt template literal
|
|
17718
|
-
|
|
17719
|
-
${block(JSON.stringify({ strings, values }, null, 4))}}
|
|
17720
|
-
|
|
17721
|
-
`));
|
|
17722
|
-
}
|
|
17723
|
-
// TODO: [0] DRY
|
|
17724
|
-
pipelineString = pipelineString
|
|
17725
|
-
.split(`${REPLACING_NONCE}beginbracket`)
|
|
17726
|
-
.join('{')
|
|
17727
|
-
.split(`${REPLACING_NONCE}endbracket`)
|
|
17728
|
-
.join('}');
|
|
17729
|
-
return pipelineString;
|
|
17730
|
-
}
|
|
17731
|
-
/**
|
|
17732
|
-
* TODO: [š§ ][š“] Where is the best location for this file
|
|
17733
|
-
* Note: [š] Ignore a discrepancy between file name and entity name
|
|
17734
|
-
*/
|
|
17735
|
-
|
|
17736
17976
|
/**
|
|
17737
17977
|
* Tag function for notating a pipeline with a book\`...\ notation as template literal
|
|
17738
17978
|
*
|
|
@@ -18268,7 +18508,7 @@
|
|
|
18268
18508
|
});
|
|
18269
18509
|
|
|
18270
18510
|
const answer = response.choices[0].message.content;
|
|
18271
|
-
console.log('\\n🧠 ${agentName}:', answer, '\\n');
|
|
18511
|
+
console.log('\\n🧠 ${agentName /* <- TODO: [š] There should be `agentFullname` not `agentName` */}:', answer, '\\n');
|
|
18272
18512
|
|
|
18273
18513
|
chatHistory.push({ role: 'assistant', content: answer });
|
|
18274
18514
|
promptUser();
|
|
@@ -18287,7 +18527,7 @@
|
|
|
18287
18527
|
|
|
18288
18528
|
(async () => {
|
|
18289
18529
|
await setupKnowledge();
|
|
18290
|
-
console.log("š¤ Chat with ${agentName} (type 'exit' to quit)\\n");
|
|
18530
|
+
console.log("š¤ Chat with ${agentName /* <- TODO: [š] There should be `agentFullname` not `agentName` */} (type 'exit' to quit)\\n");
|
|
18291
18531
|
promptUser();
|
|
18292
18532
|
})();
|
|
18293
18533
|
`);
|
|
@@ -18334,7 +18574,7 @@
|
|
|
18334
18574
|
});
|
|
18335
18575
|
|
|
18336
18576
|
const answer = response.choices[0].message.content;
|
|
18337
|
-
console.log('\\n🧠 ${agentName}:', answer, '\\n');
|
|
18577
|
+
console.log('\\n🧠 ${agentName /* <- TODO: [š] There should be `agentFullname` not `agentName` */}:', answer, '\\n');
|
|
18338
18578
|
|
|
18339
18579
|
chatHistory.push({ role: 'assistant', content: answer });
|
|
18340
18580
|
promptUser();
|
|
@@ -18351,7 +18591,7 @@
|
|
|
18351
18591
|
});
|
|
18352
18592
|
}
|
|
18353
18593
|
|
|
18354
|
-
console.log("š¤ Chat with ${agentName} (type 'exit' to quit)\\n");
|
|
18594
|
+
console.log("š¤ Chat with ${agentName /* <- TODO: [š] There should be `agentFullname` not `agentName` */} (type 'exit' to quit)\\n");
|
|
18355
18595
|
promptUser();
|
|
18356
18596
|
|
|
18357
18597
|
`);
|
|
@@ -18359,25 +18599,6 @@
|
|
|
18359
18599
|
},
|
|
18360
18600
|
};
|
|
18361
18601
|
|
|
18362
|
-
/**
|
|
18363
|
-
* Returns information about the current runtime environment
|
|
18364
|
-
*
|
|
18365
|
-
* Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environments
|
|
18366
|
-
*
|
|
18367
|
-
* @public exported from `@promptbook/utils`
|
|
18368
|
-
*/
|
|
18369
|
-
function $detectRuntimeEnvironment() {
|
|
18370
|
-
return {
|
|
18371
|
-
isRunningInBrowser: $isRunningInBrowser(),
|
|
18372
|
-
isRunningInJest: $isRunningInJest(),
|
|
18373
|
-
isRunningInNode: $isRunningInNode(),
|
|
18374
|
-
isRunningInWebWorker: $isRunningInWebWorker(),
|
|
18375
|
-
};
|
|
18376
|
-
}
|
|
18377
|
-
/**
|
|
18378
|
-
* TODO: [šŗ] Also detect and report node version here
|
|
18379
|
-
*/
|
|
18380
|
-
|
|
18381
18602
|
/**
|
|
18382
18603
|
* Provide information about Promptbook, engine version, book language version, servers, ...
|
|
18383
18604
|
*
|
|
@@ -18556,7 +18777,7 @@
|
|
|
18556
18777
|
const agentSource = validateBook(spaceTrim__default["default"]((block) => `
|
|
18557
18778
|
${agentName}
|
|
18558
18779
|
|
|
18559
|
-
META COLOR ${color || '#3498db' /* <- TODO: !!!! Best default color */}
|
|
18780
|
+
META COLOR ${color || '#3498db' /* <- TODO: [🧠] !!!! Best default color */}
|
|
18560
18781
|
PERSONA ${block(personaDescription)}
|
|
18561
18782
|
`));
|
|
18562
18783
|
return agentSource;
|
|
@@ -18699,12 +18920,14 @@
|
|
|
18699
18920
|
exports.book = book;
|
|
18700
18921
|
exports.cacheLlmTools = cacheLlmTools;
|
|
18701
18922
|
exports.compilePipeline = compilePipeline;
|
|
18923
|
+
exports.computeAgentHash = computeAgentHash;
|
|
18702
18924
|
exports.computeCosineSimilarity = computeCosineSimilarity;
|
|
18703
18925
|
exports.countUsage = countUsage;
|
|
18704
18926
|
exports.createAgentLlmExecutionTools = createAgentLlmExecutionTools;
|
|
18705
18927
|
exports.createAgentModelRequirements = createAgentModelRequirements;
|
|
18706
18928
|
exports.createAgentModelRequirementsWithCommitments = createAgentModelRequirementsWithCommitments;
|
|
18707
18929
|
exports.createBasicAgentModelRequirements = createBasicAgentModelRequirements;
|
|
18930
|
+
exports.createDefaultAgentName = createDefaultAgentName;
|
|
18708
18931
|
exports.createEmptyAgentModelRequirements = createEmptyAgentModelRequirements;
|
|
18709
18932
|
exports.createLlmToolsFromConfiguration = createLlmToolsFromConfiguration;
|
|
18710
18933
|
exports.createPipelineCollectionFromJson = createPipelineCollectionFromJson;
|
|
@@ -18734,6 +18957,7 @@
|
|
|
18734
18957
|
exports.limitTotalUsage = limitTotalUsage;
|
|
18735
18958
|
exports.makeKnowledgeSourceHandler = makeKnowledgeSourceHandler;
|
|
18736
18959
|
exports.migratePipeline = migratePipeline;
|
|
18960
|
+
exports.normalizeAgentName = normalizeAgentName;
|
|
18737
18961
|
exports.padBook = padBook;
|
|
18738
18962
|
exports.parseAgentSource = parseAgentSource;
|
|
18739
18963
|
exports.parseParameters = parseParameters;
|