@promptbook/core 0.103.0-46 → 0.103.0-48
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/esm/index.es.js +1043 -779
- package/esm/index.es.js.map +1 -1
- package/esm/typings/servers.d.ts +1 -7
- package/esm/typings/src/_packages/components.index.d.ts +4 -0
- package/esm/typings/src/_packages/core.index.d.ts +22 -14
- package/esm/typings/src/_packages/types.index.d.ts +14 -6
- package/esm/typings/src/book-2.0/agent-source/AgentBasicInformation.d.ts +7 -3
- package/esm/typings/src/book-2.0/agent-source/AgentModelRequirements.d.ts +6 -1
- package/esm/typings/src/book-2.0/agent-source/AgentSourceParseResult.d.ts +3 -2
- package/esm/typings/src/book-2.0/agent-source/computeAgentHash.d.ts +8 -0
- package/esm/typings/src/book-2.0/agent-source/computeAgentHash.test.d.ts +1 -0
- package/esm/typings/src/book-2.0/agent-source/createCommitmentRegex.d.ts +1 -1
- package/esm/typings/src/book-2.0/agent-source/createDefaultAgentName.d.ts +8 -0
- package/esm/typings/src/book-2.0/agent-source/normalizeAgentName.d.ts +9 -0
- package/esm/typings/src/book-2.0/agent-source/normalizeAgentName.test.d.ts +1 -0
- package/esm/typings/src/book-2.0/agent-source/parseAgentSourceWithCommitments.d.ts +1 -1
- package/esm/typings/src/book-components/Chat/AgentChat/AgentChat.d.ts +14 -0
- package/esm/typings/src/book-components/Chat/AgentChat/AgentChat.test.d.ts +1 -0
- package/esm/typings/src/book-components/Chat/AgentChat/AgentChatProps.d.ts +13 -0
- package/esm/typings/src/collection/agent-collection/constructors/agent-collection-in-supabase/AgentCollectionInSupabase.d.ts +1 -60
- package/esm/typings/src/collection/agent-collection/constructors/agent-collection-in-supabase/AgentsDatabaseSchema.d.ts +57 -32
- package/esm/typings/src/{book-2.0/commitments → commitments}/ACTION/ACTION.d.ts +1 -1
- package/esm/typings/src/{book-2.0/commitments → commitments}/DELETE/DELETE.d.ts +1 -1
- package/esm/typings/src/{book-2.0/commitments → commitments}/FORMAT/FORMAT.d.ts +1 -1
- package/esm/typings/src/{book-2.0/commitments → commitments}/GOAL/GOAL.d.ts +1 -1
- package/esm/typings/src/{book-2.0/commitments → commitments}/KNOWLEDGE/KNOWLEDGE.d.ts +1 -5
- package/esm/typings/src/{book-2.0/commitments → commitments}/MEMORY/MEMORY.d.ts +1 -1
- package/esm/typings/src/{book-2.0/commitments → commitments}/MESSAGE/MESSAGE.d.ts +1 -1
- package/esm/typings/src/{book-2.0/commitments → commitments}/META/META.d.ts +1 -1
- package/esm/typings/src/{book-2.0/commitments → commitments}/META_IMAGE/META_IMAGE.d.ts +1 -1
- package/esm/typings/src/{book-2.0/commitments → commitments}/META_LINK/META_LINK.d.ts +1 -1
- package/esm/typings/src/{book-2.0/commitments → commitments}/MODEL/MODEL.d.ts +1 -1
- package/esm/typings/src/{book-2.0/commitments → commitments}/NOTE/NOTE.d.ts +1 -1
- package/esm/typings/src/{book-2.0/commitments → commitments}/PERSONA/PERSONA.d.ts +1 -1
- package/esm/typings/src/{book-2.0/commitments → commitments}/RULE/RULE.d.ts +1 -1
- package/esm/typings/src/{book-2.0/commitments → commitments}/SAMPLE/SAMPLE.d.ts +1 -1
- package/esm/typings/src/{book-2.0/commitments → commitments}/SCENARIO/SCENARIO.d.ts +1 -1
- package/esm/typings/src/{book-2.0/commitments → commitments}/STYLE/STYLE.d.ts +1 -1
- package/esm/typings/src/{book-2.0/commitments → commitments}/_base/BaseCommitmentDefinition.d.ts +1 -1
- package/esm/typings/src/{book-2.0/commitments → commitments}/_base/CommitmentDefinition.d.ts +1 -1
- package/esm/typings/src/{book-2.0/commitments → commitments}/_base/NotYetImplementedCommitmentDefinition.d.ts +1 -1
- package/esm/typings/src/{book-2.0/commitments → commitments}/_base/createEmptyAgentModelRequirements.d.ts +1 -1
- package/esm/typings/src/execution/LlmExecutionTools.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/utils/assertUniqueModels.d.ts +12 -0
- package/esm/typings/src/llm-providers/agent/Agent.d.ts +10 -9
- package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.d.ts +5 -1
- package/esm/typings/src/llm-providers/agent/CreateAgentLlmExecutionToolsOptions.d.ts +1 -1
- package/esm/typings/src/llm-providers/agent/RemoteAgent.d.ts +32 -0
- package/esm/typings/src/llm-providers/agent/RemoteAgentOptions.d.ts +11 -0
- package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionTools.d.ts +29 -4
- package/esm/typings/src/llm-providers/openai/openai-models.test.d.ts +4 -0
- package/esm/typings/src/remote-server/startAgentServer.d.ts +1 -1
- package/esm/typings/src/remote-server/startRemoteServer.d.ts +1 -2
- package/esm/typings/src/storage/_common/PromptbookStorage.d.ts +1 -0
- package/esm/typings/src/transpilers/openai-sdk/register.d.ts +1 -1
- package/esm/typings/src/types/typeAliases.d.ts +12 -0
- package/esm/typings/src/utils/color/internal-utils/checkChannelValue.d.ts +0 -3
- package/esm/typings/src/utils/normalization/normalize-to-kebab-case.d.ts +2 -0
- package/esm/typings/src/utils/normalization/normalizeTo_PascalCase.d.ts +3 -0
- package/esm/typings/src/utils/normalization/normalizeTo_camelCase.d.ts +2 -0
- package/esm/typings/src/utils/normalization/titleToName.d.ts +2 -0
- package/esm/typings/src/utils/random/$generateBookBoilerplate.d.ts +2 -2
- package/esm/typings/src/utils/random/$randomFullnameWithColor.d.ts +1 -1
- package/esm/typings/src/version.d.ts +1 -1
- package/package.json +1 -1
- package/umd/index.umd.js +1051 -783
- package/umd/index.umd.js.map +1 -1
- /package/esm/typings/src/{book-2.0/commitments → commitments}/_base/BookCommitment.d.ts +0 -0
- /package/esm/typings/src/{book-2.0/commitments → commitments}/_base/ParsedCommitment.d.ts +0 -0
- /package/esm/typings/src/{book-2.0/commitments → commitments}/index.d.ts +0 -0
package/esm/index.es.js
CHANGED
|
@@ -1,11 +1,11 @@
|
|
|
1
|
+
import { SHA256 } from 'crypto-js';
|
|
2
|
+
import hexEncoder from 'crypto-js/enc-hex';
|
|
1
3
|
import spaceTrim$1, { spaceTrim as spaceTrim$2 } from 'spacetrim';
|
|
2
4
|
import { randomBytes } from 'crypto';
|
|
3
5
|
import { Subject, BehaviorSubject } from 'rxjs';
|
|
4
|
-
import { forTime
|
|
5
|
-
import hexEncoder from 'crypto-js/enc-hex';
|
|
6
|
+
import { forTime } from 'waitasecond';
|
|
6
7
|
import sha256 from 'crypto-js/sha256';
|
|
7
8
|
import { basename, join, dirname, isAbsolute } from 'path';
|
|
8
|
-
import { SHA256 } from 'crypto-js';
|
|
9
9
|
import { lookup, extension } from 'mime-types';
|
|
10
10
|
import { parse, unparse } from 'papaparse';
|
|
11
11
|
import moment from 'moment';
|
|
@@ -27,12 +27,21 @@ const BOOK_LANGUAGE_VERSION = '2.0.0';
|
|
|
27
27
|
* @generated
|
|
28
28
|
* @see https://github.com/webgptorg/promptbook
|
|
29
29
|
*/
|
|
30
|
-
const PROMPTBOOK_ENGINE_VERSION = '0.103.0-
|
|
30
|
+
const PROMPTBOOK_ENGINE_VERSION = '0.103.0-48';
|
|
31
31
|
/**
|
|
32
32
|
* TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
|
|
33
33
|
* Note: [💞] Ignore a discrepancy between file name and entity name
|
|
34
34
|
*/
|
|
35
35
|
|
|
36
|
+
/**
|
|
37
|
+
* Computes SHA-256 hash of the agent source
|
|
38
|
+
*
|
|
39
|
+
* @public exported from `@promptbook/core`
|
|
40
|
+
*/
|
|
41
|
+
function computeAgentHash(agentSource) {
|
|
42
|
+
return SHA256(hexEncoder.parse(agentSource /* <- TODO: !!!!! spaceTrim */)).toString( /* hex */);
|
|
43
|
+
}
|
|
44
|
+
|
|
36
45
|
var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModels",description:"List of available model names together with their descriptions as JSON",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelsRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n```json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpful assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n```\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n```json\n{availableModels}\n```\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. 
By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelsRequirements",format:"JSON",dependentParameterNames:["availableModels","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModels}` List of available model names together with their descriptions as JSON\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelsRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n\\`\\`\\`json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpful assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n\\`\\`\\`json\n{availableModels}\n\\`\\`\\`\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelsRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
|
|
37
46
|
|
|
38
47
|
/**
|
|
@@ -159,15 +168,20 @@ class ParseError extends Error {
|
|
|
159
168
|
*/
|
|
160
169
|
const REMOTE_SERVER_URLS = [
|
|
161
170
|
{
|
|
162
|
-
title: 'Promptbook',
|
|
163
|
-
description: `
|
|
171
|
+
title: 'Promptbook.Studio',
|
|
172
|
+
description: `Server of Promptbook.studio`,
|
|
164
173
|
owner: 'AI Web, LLC <legal@ptbk.io> (https://www.ptbk.io/)',
|
|
165
|
-
isAnonymousModeAllowed: true,
|
|
166
174
|
urls: [
|
|
167
175
|
'https://promptbook.s5.ptbk.io/',
|
|
168
176
|
// Note: Servers 1-4 are not running
|
|
169
177
|
],
|
|
170
178
|
},
|
|
179
|
+
{
|
|
180
|
+
title: 'Testing Agents',
|
|
181
|
+
description: `Testing Agents server on Vercel`,
|
|
182
|
+
owner: 'AI Web, LLC <legal@ptbk.io> (https://www.ptbk.io/)',
|
|
183
|
+
urls: ['https://s6.ptbk.io/'],
|
|
184
|
+
},
|
|
171
185
|
/*
|
|
172
186
|
Note: Working on older version of Promptbook and not supported anymore
|
|
173
187
|
{
|
|
@@ -412,9 +426,6 @@ function checkChannelValue(channelName, value) {
|
|
|
412
426
|
throw new Error(`${channelName} channel is greater than 255, it is ${value}`);
|
|
413
427
|
}
|
|
414
428
|
}
|
|
415
|
-
/**
|
|
416
|
-
* TODO: [🧠][🚓] Is/which combination it better to use asserts/check, validate or is utility function?
|
|
417
|
-
*/
|
|
418
429
|
|
|
419
430
|
/**
|
|
420
431
|
* Color object represents an RGB color with alpha channel
|
|
@@ -4323,6 +4334,8 @@ function removeDiacritics(input) {
|
|
|
4323
4334
|
/**
|
|
4324
4335
|
* Converts a given text to kebab-case format.
|
|
4325
4336
|
*
|
|
4337
|
+
* Note: [🔂] This function is idempotent.
|
|
4338
|
+
*
|
|
4326
4339
|
* @param text The text to be converted.
|
|
4327
4340
|
* @returns The kebab-case formatted string.
|
|
4328
4341
|
* @example 'hello-world'
|
|
@@ -4478,6 +4491,8 @@ function removeEmojis(text) {
|
|
|
4478
4491
|
/**
|
|
4479
4492
|
* Converts a title string into a normalized name.
|
|
4480
4493
|
*
|
|
4494
|
+
* Note: [🔂] This function is idempotent.
|
|
4495
|
+
*
|
|
4481
4496
|
* @param value The title string to be converted to a name.
|
|
4482
4497
|
* @returns A normalized name derived from the input title.
|
|
4483
4498
|
* @example 'Hello World!' -> 'hello-world'
|
|
@@ -7390,40 +7405,6 @@ async function preparePersona(personaDescription, tools, options) {
|
|
|
7390
7405
|
* TODO: [🏢] Check validity of `temperature` in pipeline
|
|
7391
7406
|
*/
|
|
7392
7407
|
|
|
7393
|
-
/**
|
|
7394
|
-
* Creates an empty/basic agent model requirements object
|
|
7395
|
-
* This serves as the starting point for the reduce-like pattern
|
|
7396
|
-
* where each commitment applies its changes to build the final requirements
|
|
7397
|
-
*
|
|
7398
|
-
* @public exported from `@promptbook/core`
|
|
7399
|
-
*/
|
|
7400
|
-
function createEmptyAgentModelRequirements() {
|
|
7401
|
-
return {
|
|
7402
|
-
systemMessage: '',
|
|
7403
|
-
// modelName: 'gpt-5',
|
|
7404
|
-
modelName: 'gemini-2.5-flash-lite',
|
|
7405
|
-
temperature: 0.7,
|
|
7406
|
-
topP: 0.9,
|
|
7407
|
-
topK: 50,
|
|
7408
|
-
};
|
|
7409
|
-
}
|
|
7410
|
-
/**
|
|
7411
|
-
* Creates a basic agent model requirements with just the agent name
|
|
7412
|
-
* This is used when we have an agent name but no commitments
|
|
7413
|
-
*
|
|
7414
|
-
* @public exported from `@promptbook/core`
|
|
7415
|
-
*/
|
|
7416
|
-
function createBasicAgentModelRequirements(agentName) {
|
|
7417
|
-
const empty = createEmptyAgentModelRequirements();
|
|
7418
|
-
return {
|
|
7419
|
-
...empty,
|
|
7420
|
-
systemMessage: `You are ${agentName || 'AI Agent'}`,
|
|
7421
|
-
};
|
|
7422
|
-
}
|
|
7423
|
-
/**
|
|
7424
|
-
* TODO: [🐤] Deduplicate `AgentModelRequirements` and `ModelRequirements` model requirements
|
|
7425
|
-
*/
|
|
7426
|
-
|
|
7427
7408
|
/**
|
|
7428
7409
|
* Generates a regex pattern to match a specific commitment
|
|
7429
7410
|
*
|
|
@@ -7957,23 +7938,19 @@ class KnowledgeCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
7957
7938
|
`);
|
|
7958
7939
|
}
|
|
7959
7940
|
applyToAgentModelRequirements(requirements, content) {
|
|
7960
|
-
var _a;
|
|
7961
7941
|
const trimmedContent = content.trim();
|
|
7962
7942
|
if (!trimmedContent) {
|
|
7963
7943
|
return requirements;
|
|
7964
7944
|
}
|
|
7965
7945
|
// Check if content is a URL (external knowledge source)
|
|
7966
|
-
if (
|
|
7946
|
+
if (isValidUrl(trimmedContent)) {
|
|
7967
7947
|
// Store the URL for later async processing
|
|
7968
7948
|
const updatedRequirements = {
|
|
7969
7949
|
...requirements,
|
|
7970
|
-
|
|
7971
|
-
...requirements.
|
|
7972
|
-
|
|
7973
|
-
|
|
7974
|
-
trimmedContent,
|
|
7975
|
-
],
|
|
7976
|
-
},
|
|
7950
|
+
knowledgeSources: [
|
|
7951
|
+
...(requirements.knowledgeSources || []),
|
|
7952
|
+
trimmedContent,
|
|
7953
|
+
],
|
|
7977
7954
|
};
|
|
7978
7955
|
// Add placeholder information about knowledge sources to system message
|
|
7979
7956
|
const knowledgeInfo = `Knowledge Source URL: ${trimmedContent} (will be processed for retrieval during chat)`;
|
|
@@ -7985,18 +7962,6 @@ class KnowledgeCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
7985
7962
|
return this.appendToSystemMessage(requirements, knowledgeSection, '\n\n');
|
|
7986
7963
|
}
|
|
7987
7964
|
}
|
|
7988
|
-
/**
|
|
7989
|
-
* Check if content is a URL
|
|
7990
|
-
*/
|
|
7991
|
-
isUrl(content) {
|
|
7992
|
-
try {
|
|
7993
|
-
new URL(content);
|
|
7994
|
-
return true;
|
|
7995
|
-
}
|
|
7996
|
-
catch (_a) {
|
|
7997
|
-
return false;
|
|
7998
|
-
}
|
|
7999
|
-
}
|
|
8000
7965
|
}
|
|
8001
7966
|
/**
|
|
8002
7967
|
* Note: [💞] Ignore a discrepancy between file name and entity name
|
|
@@ -8807,6 +8772,7 @@ class PersonaCommitmentDefinition extends BaseCommitmentDefinition {
|
|
|
8807
8772
|
// Keep everything after the PERSONA section
|
|
8808
8773
|
cleanedMessage = lines.slice(personaEndIndex).join('\n').trim();
|
|
8809
8774
|
}
|
|
8775
|
+
// TODO: [🕛] There should be `agentFullname` not `agentName`
|
|
8810
8776
|
// Create new system message with persona at the beginning
|
|
8811
8777
|
// Format: "You are {agentName}\n{personaContent}"
|
|
8812
8778
|
// The # PERSONA comment will be removed later by removeCommentsFromSystemMessage
|
|
@@ -9322,6 +9288,40 @@ function isCommitmentSupported(type) {
|
|
|
9322
9288
|
* Note: [💞] Ignore a discrepancy between file name and entity name
|
|
9323
9289
|
*/
|
|
9324
9290
|
|
|
9291
|
+
/**
|
|
9292
|
+
* Creates an empty/basic agent model requirements object
|
|
9293
|
+
* This serves as the starting point for the reduce-like pattern
|
|
9294
|
+
* where each commitment applies its changes to build the final requirements
|
|
9295
|
+
*
|
|
9296
|
+
* @public exported from `@promptbook/core`
|
|
9297
|
+
*/
|
|
9298
|
+
function createEmptyAgentModelRequirements() {
|
|
9299
|
+
return {
|
|
9300
|
+
systemMessage: '',
|
|
9301
|
+
// modelName: 'gpt-5',
|
|
9302
|
+
modelName: 'gemini-2.5-flash-lite',
|
|
9303
|
+
temperature: 0.7,
|
|
9304
|
+
topP: 0.9,
|
|
9305
|
+
topK: 50,
|
|
9306
|
+
};
|
|
9307
|
+
}
|
|
9308
|
+
/**
|
|
9309
|
+
* Creates a basic agent model requirements with just the agent name
|
|
9310
|
+
* This is used when we have an agent name but no commitments
|
|
9311
|
+
*
|
|
9312
|
+
* @public exported from `@promptbook/core`
|
|
9313
|
+
*/
|
|
9314
|
+
function createBasicAgentModelRequirements(agentName) {
|
|
9315
|
+
const empty = createEmptyAgentModelRequirements();
|
|
9316
|
+
return {
|
|
9317
|
+
...empty,
|
|
9318
|
+
systemMessage: `You are ${agentName || 'AI Agent'}`,
|
|
9319
|
+
};
|
|
9320
|
+
}
|
|
9321
|
+
/**
|
|
9322
|
+
* TODO: [🐤] Deduplicate `AgentModelRequirements` and `ModelRequirements` model requirements
|
|
9323
|
+
*/
|
|
9324
|
+
|
|
9325
9325
|
/**
|
|
9326
9326
|
* Parses agent source using the new commitment system with multiline support
|
|
9327
9327
|
* This function replaces the hardcoded commitment parsing in the original parseAgentSource
|
|
@@ -9412,29 +9412,6 @@ function parseAgentSourceWithCommitments(agentSource) {
|
|
|
9412
9412
|
};
|
|
9413
9413
|
}
|
|
9414
9414
|
|
|
9415
|
-
/**
|
|
9416
|
-
* Removes comment lines (lines starting with #) from a system message
|
|
9417
|
-
* This is used to clean up the final system message before sending it to the AI model
|
|
9418
|
-
* while preserving the original content with comments in metadata
|
|
9419
|
-
*
|
|
9420
|
-
* @param systemMessage The system message that may contain comment lines
|
|
9421
|
-
* @returns The system message with comment lines removed
|
|
9422
|
-
*
|
|
9423
|
-
* @private - TODO: [🧠] Maybe should be public?
|
|
9424
|
-
*/
|
|
9425
|
-
function removeCommentsFromSystemMessage(systemMessage) {
|
|
9426
|
-
if (!systemMessage) {
|
|
9427
|
-
return systemMessage;
|
|
9428
|
-
}
|
|
9429
|
-
const lines = systemMessage.split('\n');
|
|
9430
|
-
const filteredLines = lines.filter((line) => {
|
|
9431
|
-
const trimmedLine = line.trim();
|
|
9432
|
-
// Remove lines that start with # (comments)
|
|
9433
|
-
return !trimmedLine.startsWith('#');
|
|
9434
|
-
});
|
|
9435
|
-
return filteredLines.join('\n').trim();
|
|
9436
|
-
}
|
|
9437
|
-
|
|
9438
9415
|
/**
|
|
9439
9416
|
* Parses parameters from text using both supported notations:
|
|
9440
9417
|
* 1. @Parameter - single word parameter starting with @
|
|
@@ -9493,6 +9470,29 @@ function parseParameters(text) {
|
|
|
9493
9470
|
return uniqueParameters;
|
|
9494
9471
|
}
|
|
9495
9472
|
|
|
9473
|
+
/**
|
|
9474
|
+
* Removes comment lines (lines starting with #) from a system message
|
|
9475
|
+
* This is used to clean up the final system message before sending it to the AI model
|
|
9476
|
+
* while preserving the original content with comments in metadata
|
|
9477
|
+
*
|
|
9478
|
+
* @param systemMessage The system message that may contain comment lines
|
|
9479
|
+
* @returns The system message with comment lines removed
|
|
9480
|
+
*
|
|
9481
|
+
* @private - TODO: [🧠] Maybe should be public?
|
|
9482
|
+
*/
|
|
9483
|
+
function removeCommentsFromSystemMessage(systemMessage) {
|
|
9484
|
+
if (!systemMessage) {
|
|
9485
|
+
return systemMessage;
|
|
9486
|
+
}
|
|
9487
|
+
const lines = systemMessage.split('\n');
|
|
9488
|
+
const filteredLines = lines.filter((line) => {
|
|
9489
|
+
const trimmedLine = line.trim();
|
|
9490
|
+
// Remove lines that start with # (comments)
|
|
9491
|
+
return !trimmedLine.startsWith('#');
|
|
9492
|
+
});
|
|
9493
|
+
return filteredLines.join('\n').trim();
|
|
9494
|
+
}
|
|
9495
|
+
|
|
9496
9496
|
/**
|
|
9497
9497
|
* Creates agent model requirements using the new commitment system
|
|
9498
9498
|
* This function uses a reduce-like pattern where each commitment applies its changes
|
|
@@ -9594,6 +9594,8 @@ async function createAgentModelRequirementsWithCommitments(agentSource, modelNam
|
|
|
9594
9594
|
/**
|
|
9595
9595
|
* Normalizes a given text to camelCase format.
|
|
9596
9596
|
*
|
|
9597
|
+
* Note: [🔂] This function is idempotent.
|
|
9598
|
+
*
|
|
9597
9599
|
* @param text The text to be normalized.
|
|
9598
9600
|
* @param _isFirstLetterCapital Whether the first letter should be capitalized.
|
|
9599
9601
|
* @returns The camelCase formatted string.
|
|
@@ -9682,66 +9684,477 @@ function generatePlaceholderAgentProfileImageUrl(agentName) {
|
|
|
9682
9684
|
*/
|
|
9683
9685
|
|
|
9684
9686
|
/**
|
|
9685
|
-
*
|
|
9687
|
+
* Creates a Mermaid graph based on the promptbook
|
|
9686
9688
|
*
|
|
9687
|
-
*
|
|
9688
|
-
* - `parseAgentSource` which is a lightweight parser for agent source, it parses basic information and its purpose is to be quick and synchronous. The commitments there are hardcoded.
|
|
9689
|
-
* - `createAgentModelRequirements` which is an asynchronous function that creates model requirements it applies each commitment one by one and works asynchronously.
|
|
9689
|
+
* Note: The result is not wrapped in a Markdown code block
|
|
9690
9690
|
*
|
|
9691
|
-
* @public exported from `@promptbook/
|
|
9691
|
+
* @public exported from `@promptbook/utils`
|
|
9692
9692
|
*/
|
|
9693
|
-
function
|
|
9694
|
-
const
|
|
9695
|
-
|
|
9696
|
-
|
|
9697
|
-
|
|
9698
|
-
|
|
9699
|
-
|
|
9693
|
+
function renderPromptbookMermaid(pipelineJson, options) {
|
|
9694
|
+
const { linkTask = () => null } = options || {};
|
|
9695
|
+
const MERMAID_PREFIX = 'pipeline_';
|
|
9696
|
+
const MERMAID_KNOWLEDGE_NAME = MERMAID_PREFIX + 'knowledge';
|
|
9697
|
+
const MERMAID_RESERVED_NAME = MERMAID_PREFIX + 'reserved';
|
|
9698
|
+
const MERMAID_INPUT_NAME = MERMAID_PREFIX + 'input';
|
|
9699
|
+
const MERMAID_OUTPUT_NAME = MERMAID_PREFIX + 'output';
|
|
9700
|
+
const parameterNameToTaskName = (parameterName) => {
|
|
9701
|
+
if (parameterName === 'knowledge') {
|
|
9702
|
+
return MERMAID_KNOWLEDGE_NAME;
|
|
9700
9703
|
}
|
|
9701
|
-
if (
|
|
9702
|
-
|
|
9704
|
+
else if (RESERVED_PARAMETER_NAMES.includes(parameterName)) {
|
|
9705
|
+
return MERMAID_RESERVED_NAME;
|
|
9703
9706
|
}
|
|
9704
|
-
|
|
9705
|
-
|
|
9707
|
+
const parameter = pipelineJson.parameters.find((parameter) => parameter.name === parameterName);
|
|
9708
|
+
if (!parameter) {
|
|
9709
|
+
throw new UnexpectedError(`Could not find {${parameterName}}`);
|
|
9710
|
+
// <- TODO: This causes problems when {knowledge} and other reserved parameters are used
|
|
9706
9711
|
}
|
|
9707
|
-
|
|
9708
|
-
|
|
9709
|
-
const meta = {};
|
|
9710
|
-
for (const commitment of parseResult.commitments) {
|
|
9711
|
-
if (commitment.type !== 'META') {
|
|
9712
|
-
continue;
|
|
9712
|
+
if (parameter.isInput) {
|
|
9713
|
+
return MERMAID_INPUT_NAME;
|
|
9713
9714
|
}
|
|
9714
|
-
|
|
9715
|
-
|
|
9716
|
-
|
|
9717
|
-
|
|
9718
|
-
|
|
9719
|
-
// Generate gravatar fallback if no meta image specified
|
|
9720
|
-
if (!meta.image) {
|
|
9721
|
-
meta.image = generatePlaceholderAgentProfileImageUrl(parseResult.agentName || '!!');
|
|
9722
|
-
}
|
|
9723
|
-
// Parse parameters using unified approach - both @Parameter and {parameter} notations
|
|
9724
|
-
// are treated as the same syntax feature with unified representation
|
|
9725
|
-
const parameters = parseParameters(agentSource);
|
|
9726
|
-
return {
|
|
9727
|
-
agentName: parseResult.agentName,
|
|
9728
|
-
personaDescription,
|
|
9729
|
-
meta,
|
|
9730
|
-
parameters,
|
|
9715
|
+
const task = pipelineJson.tasks.find((task) => task.resultingParameterName === parameterName);
|
|
9716
|
+
if (!task) {
|
|
9717
|
+
throw new Error(`Could not find task for {${parameterName}}`);
|
|
9718
|
+
}
|
|
9719
|
+
return MERMAID_PREFIX + (task.name || normalizeTo_camelCase('task-' + titleToName(task.title)));
|
|
9731
9720
|
};
|
|
9732
|
-
|
|
9733
|
-
|
|
9734
|
-
|
|
9735
|
-
|
|
9721
|
+
const inputAndIntermediateParametersMermaid = pipelineJson.tasks
|
|
9722
|
+
.flatMap(({ title, dependentParameterNames, resultingParameterName }) => [
|
|
9723
|
+
`${parameterNameToTaskName(resultingParameterName)}("${title}")`,
|
|
9724
|
+
...dependentParameterNames.map((dependentParameterName) => `${parameterNameToTaskName(dependentParameterName)}--"{${dependentParameterName}}"-->${parameterNameToTaskName(resultingParameterName)}`),
|
|
9725
|
+
])
|
|
9726
|
+
.join('\n');
|
|
9727
|
+
const outputParametersMermaid = pipelineJson.parameters
|
|
9728
|
+
.filter(({ isOutput }) => isOutput)
|
|
9729
|
+
.map(({ name }) => `${parameterNameToTaskName(name)}--"{${name}}"-->${MERMAID_OUTPUT_NAME}`)
|
|
9730
|
+
.join('\n');
|
|
9731
|
+
const linksMermaid = pipelineJson.tasks
|
|
9732
|
+
.map((task) => {
|
|
9733
|
+
const link = linkTask(task);
|
|
9734
|
+
if (link === null) {
|
|
9735
|
+
return '';
|
|
9736
|
+
}
|
|
9737
|
+
const { href, title } = link;
|
|
9738
|
+
const taskName = parameterNameToTaskName(task.resultingParameterName);
|
|
9739
|
+
return `click ${taskName} href "${href}" "${title}";`;
|
|
9740
|
+
})
|
|
9741
|
+
.filter((line) => line !== '')
|
|
9742
|
+
.join('\n');
|
|
9743
|
+
const interactionPointsMermaid = Object.entries({
|
|
9744
|
+
[MERMAID_INPUT_NAME]: 'Input',
|
|
9745
|
+
[MERMAID_OUTPUT_NAME]: 'Output',
|
|
9746
|
+
[MERMAID_RESERVED_NAME]: 'Other',
|
|
9747
|
+
[MERMAID_KNOWLEDGE_NAME]: 'Knowledge',
|
|
9748
|
+
})
|
|
9749
|
+
.filter(([MERMAID_NAME]) => (inputAndIntermediateParametersMermaid + outputParametersMermaid).includes(MERMAID_NAME))
|
|
9750
|
+
.map(([MERMAID_NAME, title]) => `${MERMAID_NAME}((${title})):::${MERMAID_NAME}`)
|
|
9751
|
+
.join('\n');
|
|
9752
|
+
const promptbookMermaid = spaceTrim$2((block) => `
|
|
9736
9753
|
|
|
9737
|
-
|
|
9738
|
-
|
|
9739
|
-
|
|
9740
|
-
|
|
9741
|
-
|
|
9742
|
-
|
|
9743
|
-
|
|
9744
|
-
|
|
9754
|
+
%% 🔮 Tip: Open this on GitHub or in the VSCode website to see the Mermaid graph visually
|
|
9755
|
+
|
|
9756
|
+
flowchart LR
|
|
9757
|
+
subgraph "${pipelineJson.title}"
|
|
9758
|
+
|
|
9759
|
+
%% Basic configuration
|
|
9760
|
+
direction TB
|
|
9761
|
+
|
|
9762
|
+
%% Interaction points from pipeline to outside
|
|
9763
|
+
${block(interactionPointsMermaid)}
|
|
9764
|
+
|
|
9765
|
+
%% Input and intermediate parameters
|
|
9766
|
+
${block(inputAndIntermediateParametersMermaid)}
|
|
9767
|
+
|
|
9768
|
+
|
|
9769
|
+
%% Output parameters
|
|
9770
|
+
${block(outputParametersMermaid)}
|
|
9771
|
+
|
|
9772
|
+
%% Links
|
|
9773
|
+
${block(linksMermaid)}
|
|
9774
|
+
|
|
9775
|
+
%% Styles
|
|
9776
|
+
classDef ${MERMAID_INPUT_NAME} color: grey;
|
|
9777
|
+
classDef ${MERMAID_OUTPUT_NAME} color: grey;
|
|
9778
|
+
classDef ${MERMAID_RESERVED_NAME} color: grey;
|
|
9779
|
+
classDef ${MERMAID_KNOWLEDGE_NAME} color: grey;
|
|
9780
|
+
|
|
9781
|
+
end;
|
|
9782
|
+
|
|
9783
|
+
`);
|
|
9784
|
+
return promptbookMermaid;
|
|
9785
|
+
}
|
|
9786
|
+
/**
|
|
9787
|
+
* TODO: [🧠] FOREACH in mermaid graph
|
|
9788
|
+
* TODO: [🧠] Knowledge in mermaid graph
|
|
9789
|
+
* TODO: [🧠] Personas in mermaid graph
|
|
9790
|
+
* TODO: Maybe use some Mermaid package instead of string templating
|
|
9791
|
+
* TODO: [🕌] When more than 2 functionalities, split into separate functions
|
|
9792
|
+
*/
|
|
9793
|
+
|
|
9794
|
+
/**
|
|
9795
|
+
* Tag function for notating a prompt as template literal
|
|
9796
|
+
*
|
|
9797
|
+
* Note: There are 3 similar functions:
|
|
9798
|
+
* 1) `prompt` for notating single prompt exported from `@promptbook/utils`
|
|
9799
|
+
* 2) `promptTemplate` alias for `prompt`
|
|
9800
|
+
* 3) `book` for notating and validating entire books exported from `@promptbook/utils`
|
|
9801
|
+
*
|
|
9802
|
+
* @param strings
|
|
9803
|
+
* @param values
|
|
9804
|
+
* @returns the prompt string
|
|
9805
|
+
* @public exported from `@promptbook/utils`
|
|
9806
|
+
*/
|
|
9807
|
+
function prompt(strings, ...values) {
|
|
9808
|
+
if (values.length === 0) {
|
|
9809
|
+
return spaceTrim$1(strings.join(''));
|
|
9810
|
+
}
|
|
9811
|
+
const stringsWithHiddenParameters = strings.map((stringsItem) =>
|
|
9812
|
+
// TODO: [0] DRY
|
|
9813
|
+
stringsItem.split('{').join(`${REPLACING_NONCE}beginbracket`).split('}').join(`${REPLACING_NONCE}endbracket`));
|
|
9814
|
+
const placeholderParameterNames = values.map((value, i) => `${REPLACING_NONCE}${i}`);
|
|
9815
|
+
const parameters = Object.fromEntries(values.map((value, i) => [placeholderParameterNames[i], value]));
|
|
9816
|
+
// Combine strings and values
|
|
9817
|
+
let pipelineString = stringsWithHiddenParameters.reduce((result, stringsItem, i) => placeholderParameterNames[i] === undefined
|
|
9818
|
+
? `${result}${stringsItem}`
|
|
9819
|
+
: `${result}${stringsItem}{${placeholderParameterNames[i]}}`, '');
|
|
9820
|
+
pipelineString = spaceTrim$1(pipelineString);
|
|
9821
|
+
try {
|
|
9822
|
+
pipelineString = templateParameters(pipelineString, parameters);
|
|
9823
|
+
}
|
|
9824
|
+
catch (error) {
|
|
9825
|
+
if (!(error instanceof PipelineExecutionError)) {
|
|
9826
|
+
throw error;
|
|
9827
|
+
}
|
|
9828
|
+
console.error({ pipelineString, parameters, placeholderParameterNames, error });
|
|
9829
|
+
throw new UnexpectedError(spaceTrim$1((block) => `
|
|
9830
|
+
Internal error in prompt template literal
|
|
9831
|
+
|
|
9832
|
+
${block(JSON.stringify({ strings, values }, null, 4))}}
|
|
9833
|
+
|
|
9834
|
+
`));
|
|
9835
|
+
}
|
|
9836
|
+
// TODO: [0] DRY
|
|
9837
|
+
pipelineString = pipelineString
|
|
9838
|
+
.split(`${REPLACING_NONCE}beginbracket`)
|
|
9839
|
+
.join('{')
|
|
9840
|
+
.split(`${REPLACING_NONCE}endbracket`)
|
|
9841
|
+
.join('}');
|
|
9842
|
+
return pipelineString;
|
|
9843
|
+
}
|
|
9844
|
+
/**
|
|
9845
|
+
* TODO: [🧠][🈴] Where is the best location for this file
|
|
9846
|
+
* Note: [💞] Ignore a discrepancy between file name and entity name
|
|
9847
|
+
*/
|
|
9848
|
+
|
|
9849
|
+
/**
|
|
9850
|
+
* Detects if the code is running in a browser environment in main thread (Not in a web worker)
|
|
9851
|
+
*
|
|
9852
|
+
* Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
|
|
9853
|
+
*
|
|
9854
|
+
* @public exported from `@promptbook/utils`
|
|
9855
|
+
*/
|
|
9856
|
+
const $isRunningInBrowser = new Function(`
|
|
9857
|
+
try {
|
|
9858
|
+
return this === window;
|
|
9859
|
+
} catch (e) {
|
|
9860
|
+
return false;
|
|
9861
|
+
}
|
|
9862
|
+
`);
|
|
9863
|
+
/**
|
|
9864
|
+
* TODO: [🎺]
|
|
9865
|
+
*/
|
|
9866
|
+
|
|
9867
|
+
/**
|
|
9868
|
+
* Detects if the code is running in jest environment
|
|
9869
|
+
*
|
|
9870
|
+
* Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
|
|
9871
|
+
*
|
|
9872
|
+
* @public exported from `@promptbook/utils`
|
|
9873
|
+
*/
|
|
9874
|
+
const $isRunningInJest = new Function(`
|
|
9875
|
+
try {
|
|
9876
|
+
return process.env.JEST_WORKER_ID !== undefined;
|
|
9877
|
+
} catch (e) {
|
|
9878
|
+
return false;
|
|
9879
|
+
}
|
|
9880
|
+
`);
|
|
9881
|
+
/**
|
|
9882
|
+
* TODO: [🎺]
|
|
9883
|
+
*/
|
|
9884
|
+
|
|
9885
|
+
/**
|
|
9886
|
+
* Detects if the code is running in a Node.js environment
|
|
9887
|
+
*
|
|
9888
|
+
* Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
|
|
9889
|
+
*
|
|
9890
|
+
* @public exported from `@promptbook/utils`
|
|
9891
|
+
*/
|
|
9892
|
+
const $isRunningInNode = new Function(`
|
|
9893
|
+
try {
|
|
9894
|
+
return this === global;
|
|
9895
|
+
} catch (e) {
|
|
9896
|
+
return false;
|
|
9897
|
+
}
|
|
9898
|
+
`);
|
|
9899
|
+
/**
|
|
9900
|
+
* TODO: [🎺]
|
|
9901
|
+
*/
|
|
9902
|
+
|
|
9903
|
+
/**
|
|
9904
|
+
* Detects if the code is running in a web worker
|
|
9905
|
+
*
|
|
9906
|
+
* Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
|
|
9907
|
+
*
|
|
9908
|
+
* @public exported from `@promptbook/utils`
|
|
9909
|
+
*/
|
|
9910
|
+
const $isRunningInWebWorker = new Function(`
|
|
9911
|
+
try {
|
|
9912
|
+
if (typeof WorkerGlobalScope !== 'undefined' && self instanceof WorkerGlobalScope) {
|
|
9913
|
+
return true;
|
|
9914
|
+
} else {
|
|
9915
|
+
return false;
|
|
9916
|
+
}
|
|
9917
|
+
} catch (e) {
|
|
9918
|
+
return false;
|
|
9919
|
+
}
|
|
9920
|
+
`);
|
|
9921
|
+
/**
|
|
9922
|
+
* TODO: [🎺]
|
|
9923
|
+
*/
|
|
9924
|
+
|
|
9925
|
+
/**
|
|
9926
|
+
* Returns information about the current runtime environment
|
|
9927
|
+
*
|
|
9928
|
+
* Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environments
|
|
9929
|
+
*
|
|
9930
|
+
* @public exported from `@promptbook/utils`
|
|
9931
|
+
*/
|
|
9932
|
+
function $detectRuntimeEnvironment() {
|
|
9933
|
+
return {
|
|
9934
|
+
isRunningInBrowser: $isRunningInBrowser(),
|
|
9935
|
+
isRunningInJest: $isRunningInJest(),
|
|
9936
|
+
isRunningInNode: $isRunningInNode(),
|
|
9937
|
+
isRunningInWebWorker: $isRunningInWebWorker(),
|
|
9938
|
+
};
|
|
9939
|
+
}
|
|
9940
|
+
/**
|
|
9941
|
+
* TODO: [🎺] Also detect and report node version here
|
|
9942
|
+
*/
|
|
9943
|
+
|
|
9944
|
+
/**
|
|
9945
|
+
* Simple wrapper `new Date().toISOString()`
|
|
9946
|
+
*
|
|
9947
|
+
* Note: `$` is used to indicate that this function is not a pure function - it is not deterministic because it depends on the current time
|
|
9948
|
+
*
|
|
9949
|
+
* @returns string_date branded type
|
|
9950
|
+
* @public exported from `@promptbook/utils`
|
|
9951
|
+
*/
|
|
9952
|
+
function $getCurrentDate() {
|
|
9953
|
+
return new Date().toISOString();
|
|
9954
|
+
}
|
|
9955
|
+
|
|
9956
|
+
/**
|
|
9957
|
+
* Function parseNumber will parse number from string
|
|
9958
|
+
*
|
|
9959
|
+
* Note: [🔂] This function is idempotent.
|
|
9960
|
+
* Unlike Number.parseInt, Number.parseFloat it will never ever result in NaN
|
|
9961
|
+
* Note: it also works only with decimal numbers
|
|
9962
|
+
*
|
|
9963
|
+
* @returns parsed number
|
|
9964
|
+
* @throws {ParseError} if the value is not a number
|
|
9965
|
+
*
|
|
9966
|
+
* @public exported from `@promptbook/utils`
|
|
9967
|
+
*/
|
|
9968
|
+
function parseNumber(value) {
|
|
9969
|
+
const originalValue = value;
|
|
9970
|
+
if (typeof value === 'number') {
|
|
9971
|
+
value = value.toString(); // <- TODO: Maybe more efficient way to do this
|
|
9972
|
+
}
|
|
9973
|
+
if (typeof value !== 'string') {
|
|
9974
|
+
return 0;
|
|
9975
|
+
}
|
|
9976
|
+
value = value.trim();
|
|
9977
|
+
if (value.startsWith('+')) {
|
|
9978
|
+
return parseNumber(value.substring(1));
|
|
9979
|
+
}
|
|
9980
|
+
if (value.startsWith('-')) {
|
|
9981
|
+
const number = parseNumber(value.substring(1));
|
|
9982
|
+
if (number === 0) {
|
|
9983
|
+
return 0; // <- Note: To prevent -0
|
|
9984
|
+
}
|
|
9985
|
+
return -number;
|
|
9986
|
+
}
|
|
9987
|
+
value = value.replace(/,/g, '.');
|
|
9988
|
+
value = value.toUpperCase();
|
|
9989
|
+
if (value === '') {
|
|
9990
|
+
return 0;
|
|
9991
|
+
}
|
|
9992
|
+
if (value === '♾' || value.startsWith('INF')) {
|
|
9993
|
+
return Infinity;
|
|
9994
|
+
}
|
|
9995
|
+
if (value.includes('/')) {
|
|
9996
|
+
const [numerator_, denominator_] = value.split('/');
|
|
9997
|
+
const numerator = parseNumber(numerator_);
|
|
9998
|
+
const denominator = parseNumber(denominator_);
|
|
9999
|
+
if (denominator === 0) {
|
|
10000
|
+
throw new ParseError(`Unable to parse number from "${originalValue}" because denominator is zero`);
|
|
10001
|
+
}
|
|
10002
|
+
return numerator / denominator;
|
|
10003
|
+
}
|
|
10004
|
+
if (/^(NAN|NULL|NONE|UNDEFINED|ZERO|NO.*)$/.test(value)) {
|
|
10005
|
+
return 0;
|
|
10006
|
+
}
|
|
10007
|
+
if (value.includes('E')) {
|
|
10008
|
+
const [significand, exponent] = value.split('E');
|
|
10009
|
+
return parseNumber(significand) * 10 ** parseNumber(exponent);
|
|
10010
|
+
}
|
|
10011
|
+
if (!/^[0-9.]+$/.test(value) || value.split('.').length > 2) {
|
|
10012
|
+
throw new ParseError(`Unable to parse number from "${originalValue}"`);
|
|
10013
|
+
}
|
|
10014
|
+
const num = parseFloat(value);
|
|
10015
|
+
if (isNaN(num)) {
|
|
10016
|
+
throw new ParseError(`Unexpected NaN when parsing number from "${originalValue}"`);
|
|
10017
|
+
}
|
|
10018
|
+
return num;
|
|
10019
|
+
}
|
|
10020
|
+
/**
|
|
10021
|
+
* TODO: Maybe use sth. like safe-eval in fraction/calculation case @see https://www.npmjs.com/package/safe-eval
|
|
10022
|
+
* TODO: [🧠][🌻] Maybe export through `@promptbook/markdown-utils` not `@promptbook/utils`
|
|
10023
|
+
*/
|
|
10024
|
+
|
|
10025
|
+
/**
|
|
10026
|
+
* Removes quotes from a string
|
|
10027
|
+
*
|
|
10028
|
+
* Note: [🔂] This function is idempotent.
|
|
10029
|
+
* Tip: This is very useful for post-processing of the result of the LLM model
|
|
10030
|
+
* Note: This function removes only the same quotes from the beginning and the end of the string
|
|
10031
|
+
* Note: There are two similar functions:
|
|
10032
|
+
* - `removeQuotes` which removes only bounding quotes
|
|
10033
|
+
* - `unwrapResult` which removes whole introduce sentence
|
|
10034
|
+
*
|
|
10035
|
+
* @param text optionally quoted text
|
|
10036
|
+
* @returns text without quotes
|
|
10037
|
+
* @public exported from `@promptbook/utils`
|
|
10038
|
+
*/
|
|
10039
|
+
function removeQuotes(text) {
|
|
10040
|
+
if (text.startsWith('"') && text.endsWith('"')) {
|
|
10041
|
+
return text.slice(1, -1);
|
|
10042
|
+
}
|
|
10043
|
+
if (text.startsWith("'") && text.endsWith("'")) {
|
|
10044
|
+
return text.slice(1, -1);
|
|
10045
|
+
}
|
|
10046
|
+
return text;
|
|
10047
|
+
}
|
|
10048
|
+
|
|
10049
|
+
/**
|
|
10050
|
+
* Trims string from all 4 sides
|
|
10051
|
+
*
|
|
10052
|
+
* Note: This is a re-exported function from the `spacetrim` package which is
|
|
10053
|
+
* Developed by same author @hejny as this package
|
|
10054
|
+
*
|
|
10055
|
+
* @public exported from `@promptbook/utils`
|
|
10056
|
+
* @see https://github.com/hejny/spacetrim#usage
|
|
10057
|
+
*/
|
|
10058
|
+
const spaceTrim = spaceTrim$2;
|
|
10059
|
+
|
|
10060
|
+
/**
|
|
10061
|
+
* Checks if the given value is a valid JavaScript identifier name.
|
|
10062
|
+
*
|
|
10063
|
+
* @param javascriptName The value to check for JavaScript identifier validity.
|
|
10064
|
+
* @returns `true` if the value is a valid JavaScript name, false otherwise.
|
|
10065
|
+
* @public exported from `@promptbook/utils`
|
|
10066
|
+
*/
|
|
10067
|
+
function isValidJavascriptName(javascriptName) {
|
|
10068
|
+
if (typeof javascriptName !== 'string') {
|
|
10069
|
+
return false;
|
|
10070
|
+
}
|
|
10071
|
+
return /^[a-zA-Z_$][0-9a-zA-Z_$]*$/i.test(javascriptName);
|
|
10072
|
+
}
|
|
10073
|
+
|
|
10074
|
+
/**
|
|
10075
|
+
* Normalizes agent name from arbitrary string to valid agent name
|
|
10076
|
+
*
|
|
10077
|
+
* Note: [🔂] This function is idempotent.
|
|
10078
|
+
*
|
|
10079
|
+
* @public exported from `@promptbook/core`
|
|
10080
|
+
*/
|
|
10081
|
+
function normalizeAgentName(rawAgentName) {
|
|
10082
|
+
return titleToName(spaceTrim$1(rawAgentName));
|
|
10083
|
+
}
|
|
10084
|
+
|
|
10085
|
+
/**
|
|
10086
|
+
* Creates temporary default agent name based on agent source hash
|
|
10087
|
+
*
|
|
10088
|
+
* @public exported from `@promptbook/core`
|
|
10089
|
+
*/
|
|
10090
|
+
function createDefaultAgentName(agentSource) {
|
|
10091
|
+
const agentHash = computeAgentHash(agentSource);
|
|
10092
|
+
return normalizeAgentName(`Agent ${agentHash.substring(0, 6)}`);
|
|
10093
|
+
}
|
|
10094
|
+
|
|
10095
|
+
/**
|
|
10096
|
+
* Parses basic information from agent source
|
|
10097
|
+
*
|
|
10098
|
+
* There are 2 similar functions:
|
|
10099
|
+
* - `parseAgentSource` which is a lightweight parser for agent source, it parses basic information and its purpose is to be quick and synchronous. The commitments there are hardcoded.
|
|
10100
|
+
* - `createAgentModelRequirements` which is an asynchronous function that creates model requirements it applies each commitment one by one and works asynchronously.
|
|
10101
|
+
*
|
|
10102
|
+
* @public exported from `@promptbook/core`
|
|
10103
|
+
*/
|
|
10104
|
+
function parseAgentSource(agentSource) {
|
|
10105
|
+
const parseResult = parseAgentSourceWithCommitments(agentSource);
|
|
10106
|
+
// Find PERSONA and META commitments
|
|
10107
|
+
let personaDescription = null;
|
|
10108
|
+
for (const commitment of parseResult.commitments) {
|
|
10109
|
+
if (commitment.type !== 'PERSONA') {
|
|
10110
|
+
continue;
|
|
10111
|
+
}
|
|
10112
|
+
if (personaDescription === null) {
|
|
10113
|
+
personaDescription = '';
|
|
10114
|
+
}
|
|
10115
|
+
else {
|
|
10116
|
+
personaDescription += `\n\n${personaDescription}`;
|
|
10117
|
+
}
|
|
10118
|
+
personaDescription += commitment.content;
|
|
10119
|
+
}
|
|
10120
|
+
const meta = {};
|
|
10121
|
+
for (const commitment of parseResult.commitments) {
|
|
10122
|
+
if (commitment.type !== 'META') {
|
|
10123
|
+
continue;
|
|
10124
|
+
}
|
|
10125
|
+
// Parse META commitments - format is "META TYPE content"
|
|
10126
|
+
const metaTypeRaw = commitment.content.split(' ')[0] || 'NONE';
|
|
10127
|
+
const metaType = normalizeTo_camelCase(metaTypeRaw);
|
|
10128
|
+
meta[metaType] = spaceTrim$1(commitment.content.substring(metaTypeRaw.length));
|
|
10129
|
+
}
|
|
10130
|
+
// Generate gravatar fallback if no meta image specified
|
|
10131
|
+
if (!meta.image) {
|
|
10132
|
+
meta.image = generatePlaceholderAgentProfileImageUrl(parseResult.agentName || '!!');
|
|
10133
|
+
}
|
|
10134
|
+
// Parse parameters using unified approach - both @Parameter and {parameter} notations
|
|
10135
|
+
// are treated as the same syntax feature with unified representation
|
|
10136
|
+
const parameters = parseParameters(agentSource);
|
|
10137
|
+
const agentHash = computeAgentHash(agentSource);
|
|
10138
|
+
return {
|
|
10139
|
+
agentName: normalizeAgentName(parseResult.agentName || createDefaultAgentName(agentSource)),
|
|
10140
|
+
agentHash,
|
|
10141
|
+
personaDescription,
|
|
10142
|
+
meta,
|
|
10143
|
+
parameters,
|
|
10144
|
+
};
|
|
10145
|
+
}
|
|
10146
|
+
/**
|
|
10147
|
+
* TODO: [🕛] Unite `AgentBasicInformation`, `ChatParticipant`, `LlmExecutionTools` + `LlmToolsMetadata`
|
|
10148
|
+
*/
|
|
10149
|
+
|
|
10150
|
+
/**
|
|
10151
|
+
* Creates model requirements for an agent based on its source
|
|
10152
|
+
*
|
|
10153
|
+
* There are 2 similar functions:
|
|
10154
|
+
* - `parseAgentSource` which is a lightweight parser for agent source, it parses basic information and its purpose is to be quick and synchronous. The commitments there are hardcoded.
|
|
10155
|
+
* - `createAgentModelRequirements` which is an asynchronous function that creates model requirements it applies each commitment one by one and works asynchronous.
|
|
10156
|
+
*
|
|
10157
|
+
* @public exported from `@promptbook/core`
|
|
9745
10158
|
*/
|
|
9746
10159
|
async function createAgentModelRequirements(agentSource, modelName, availableModels, llmTools) {
|
|
9747
10160
|
// If availableModels are provided and no specific modelName is given,
|
|
@@ -9901,17 +10314,6 @@ const DEFAULT_BOOK = padBook(validateBook(spaceTrim$1(`
|
|
|
9901
10314
|
// <- !!! Buttons into genesis book
|
|
9902
10315
|
// <- TODO: !!! generateBookBoilerplate and deprecate `DEFAULT_BOOK`
|
|
9903
10316
|
|
|
9904
|
-
/**
|
|
9905
|
-
* Trims string from all 4 sides
|
|
9906
|
-
*
|
|
9907
|
-
* Note: This is a re-exported function from the `spacetrim` package which is
|
|
9908
|
-
* Developed by same author @hejny as this package
|
|
9909
|
-
*
|
|
9910
|
-
* @public exported from `@promptbook/utils`
|
|
9911
|
-
* @see https://github.com/hejny/spacetrim#usage
|
|
9912
|
-
*/
|
|
9913
|
-
const spaceTrim = spaceTrim$2;
|
|
9914
|
-
|
|
9915
10317
|
/**
|
|
9916
10318
|
* Agent collection stored in Supabase table
|
|
9917
10319
|
*
|
|
@@ -9920,7 +10322,7 @@ const spaceTrim = spaceTrim$2;
|
|
|
9920
10322
|
* @public exported from `@promptbook/core`
|
|
9921
10323
|
* <- TODO: !!! Move to `@promptbook/supabase` package
|
|
9922
10324
|
*/
|
|
9923
|
-
class AgentCollectionInSupabase /* TODO:
|
|
10325
|
+
class AgentCollectionInSupabase /* TODO: !!!!!! implements Agent */ {
|
|
9924
10326
|
/**
|
|
9925
10327
|
* @param rootPath - path to the directory with agents
|
|
9926
10328
|
* @param tools - Execution tools to be used in !!! `Agent` itself and listing the agents
|
|
@@ -9936,125 +10338,62 @@ class AgentCollectionInSupabase /* TODO: !!!! implements AgentCollection */ {
  console.info(`Creating pipeline collection from supabase...`);
  }
  }
- /**
- * Cached defined execution tools
- */
- // !!! private _definedTools: ExecutionTools | null = null;
- /*
- TODO: !!! Use or remove
- /**
- * Gets or creates execution tools for the collection
- * /
- private async getTools(): Promise<ExecutionTools> {
- if (this._definedTools !== null) {
- return this._definedTools;
- }
-
- this._definedTools = {
- ...(this.tools === undefined || this.tools.fs === undefined ? await $provideExecutionToolsForNode() : {}),
- ...this.tools,
- };
- return this._definedTools;
- }
- // <- TODO: [👪] Maybe create some common abstraction *(or parent abstract class)*
- */
  /**
  * Gets all agents in the collection
  */
  async listAgents( /* TODO: [🧠] Allow to pass some condition here */) {
  const { isVerbose = DEFAULT_IS_VERBOSE } = this.options || {};
- const
-
- .select('agentProfile');
- if (result.error) {
+ const selectResult = await this.supabaseClient.from('Agent').select('agentName,agentProfile');
+ if (selectResult.error) {
  throw new DatabaseError(spaceTrim((block) => `
 
  Error fetching agents from Supabase:
 
- ${block(
+ ${block(selectResult.error.message)}
  `));
  }
  if (isVerbose) {
- console.info(`Found ${
+ console.info(`Found ${selectResult.data.length} agents in directory`);
  }
- return
-
-
-
- * /
- public async spawnAgent(agentName: string_agent_name): Promise<Agent> {
-
- // <- TODO: !!! ENOENT: no such file or directory, open 'C:\Users\me\work\ai\promptbook\agents\examples\Asistent pro LŠVP.book
- const { isVerbose = DEFAULT_IS_VERBOSE } = this.options || {};
- const tools = await this.getTools();
-
- const agentSourceValue = validateBook(await tools.fs!.readFile(agentSourcePath, 'utf-8'));
- const agentSource = new BehaviorSubject(agentSourceValue);
-
- // Note: Write file whenever agent source changes
- agentSource.subscribe(async (newSource) => {
- if (isVerbose) {
- console.info(colors.cyan(`Writing agent source to file ${agentSourcePath}`));
- }
- await forTime(500); // <- TODO: [🙌] !!! Remove
- await tools.fs!.writeFile(agentSourcePath, newSource, 'utf-8');
- });
-
- // Note: Watch file for external changes
- for await (const event of tools.fs!.watch(agentSourcePath)) {
- // <- TODO: !!!! Solve the memory freeing when the watching is no longer needed
-
- if (event.eventType !== 'change') {
- continue;
- }
+ return selectResult.data.map(({ agentName, agentProfile }) => {
+ if (isVerbose && agentProfile.agentName !== agentName) {
+ console.warn(spaceTrim(`
+ Agent name mismatch for agent "${agentName}". Using name from database.
 
-
-
-
- );
+ agentName: "${agentName}"
+ agentProfile.agentName: "${agentProfile.agentName}"
+ `));
  }
-
-
-
-
-
- // TODO: [🙌] !!!! Debug the infinite loop when file is changed externally and agent source is updated which causes file to be written again
-
- const agent = new Agent({
- ...this.options,
- agentSource,
- executionTools: this.tools || {},
+ return {
+ ...agentProfile,
+ agentName,
+ };
  });
-
- if (isVerbose) {
- console.info(colors.cyan(`Created agent "${agent.agentName}" from source file ${agentSourcePath}`));
- }
-
- return agent;
- * /
  }
- */
  /**
  * !!!@@@
  */
  async getAgentSource(agentName) {
- const
- .from('
+ const selectResult = await this.supabaseClient
+ .from('Agent')
  .select('agentSource')
  .eq('agentName', agentName)
  .single();
-
+ /*
+ if (selectResult.data===null) {
+ throw new NotFoundError(`Agent "${agentName}" not found`);
+ }
+ */
+ if (selectResult.error) {
  throw new DatabaseError(spaceTrim((block) => `
 
  Error fetching agent "${agentName}" from Supabase:
 
- ${block(
+ ${block(selectResult.error.message)}
  `));
  // <- TODO: !!! First check if the error is "not found" and throw `NotFoundError` instead then throw `DatabaseError`
  }
-
- // <- TODO: !!!! Dynamic updates
- return agentSource;
+ return selectResult.data.agentSource;
  }
  /**
  * Creates a new agent in the collection
@@ -10064,56 +10403,91 @@ class AgentCollectionInSupabase /* TODO: !!!! implements AgentCollection */ {
  async createAgent(agentSource) {
  const agentProfile = parseAgentSource(agentSource);
  // <- TODO: [🕛]
- const
-
+ const { agentName, agentHash } = agentProfile;
+ const insertAgentResult = await this.supabaseClient.from('Agent').insert({
+ agentName,
+ agentHash,
  agentProfile,
  createdAt: new Date().toISOString(),
  updatedAt: null,
- agentVersion: 0,
  promptbookEngineVersion: PROMPTBOOK_ENGINE_VERSION,
  usage: ZERO_USAGE,
  agentSource: agentSource,
  });
- if (
+ if (insertAgentResult.error) {
  throw new DatabaseError(spaceTrim((block) => `
  Error creating agent "${agentProfile.agentName}" in Supabase:
 
- ${block(
+ ${block(insertAgentResult.error.message)}
  `));
  }
+ await this.supabaseClient.from('AgentHistory').insert({
+ createdAt: new Date().toISOString(),
+ agentName,
+ agentHash,
+ previousAgentHash: null,
+ agentSource,
+ promptbookEngineVersion: PROMPTBOOK_ENGINE_VERSION,
+ });
+ // <- TODO: [🧠] What to do with `insertAgentHistoryResult.error`, ignore? wait?
  return agentProfile;
  }
  /**
  * Updates an existing agent in the collection
  */
  async updateAgentSource(agentName, agentSource) {
+ const selectPreviousAgentResult = await this.supabaseClient
+ .from('Agent')
+ .select('agentHash,agentName')
+ .eq('agentName', agentName)
+ .single();
+ if (selectPreviousAgentResult.error) {
+ throw new DatabaseError(spaceTrim((block) => `
+
+ Error fetching agent "${agentName}" from Supabase:
+
+ ${block(selectPreviousAgentResult.error.message)}
+ `));
+ // <- TODO: !!! First check if the error is "not found" and throw `NotFoundError` instead then throw `DatabaseError`
+ }
+ selectPreviousAgentResult.data.agentName;
+ const previousAgentHash = selectPreviousAgentResult.data.agentHash;
  const agentProfile = parseAgentSource(agentSource);
- // TODO:
-
- const
-
- .from('AgentCollection' /* <- TODO: !!!! Change to `Agent` */)
+ // <- TODO: [🕛]
+ const { agentHash } = agentProfile;
+ const updateAgentResult = await this.supabaseClient
+ .from('Agent')
  .update({
  // TODO: !!!! Compare not update> agentName: agentProfile.agentName || '!!!!!' /* <- TODO: !!!! Remove */,
  agentProfile,
  updatedAt: new Date().toISOString(),
-
- agentSource
+ agentHash: agentProfile.agentHash,
+ agentSource,
+ promptbookEngineVersion: PROMPTBOOK_ENGINE_VERSION,
  })
  .eq('agentName', agentName);
-
- console.log('!!!
- console.log('!!!
-
- if (result.error) {
+ // console.log('!!! updateAgent', updateResult);
+ // console.log('!!! old', oldAgentSource);
+ // console.log('!!! new', newAgentSource);
+ if (updateAgentResult.error) {
  throw new DatabaseError(spaceTrim((block) => `
  Error updating agent "${agentName}" in Supabase:
 
- ${block(
+ ${block(updateAgentResult.error.message)}
  `));
  }
+ await this.supabaseClient.from('AgentHistory').insert({
+ createdAt: new Date().toISOString(),
+ agentName,
+ agentHash,
+ previousAgentHash,
+ agentSource,
+ promptbookEngineVersion: PROMPTBOOK_ENGINE_VERSION,
+ });
+ // <- TODO: [🧠] What to do with `insertAgentHistoryResult.error`, ignore? wait?
  }
- // TODO: !!!! getAgentSourceSubject
+ // TODO: !!!! public async getAgentSourceSubject(agentName: string_agent_name): Promise<BehaviorSubject<string_book>>
+ // Use Supabase realtime logic
  /**
  * Deletes an agent from the collection
  */
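For orientation, a rough sketch of the collection API as it stands after this hunk (illustrative only; the constructor options are not visible here, so an already created `AgentCollectionInSupabase` instance named `collection` is assumed):

    // `collection` is assumed to be an AgentCollectionInSupabase instance created elsewhere
    const agentProfiles = await collection.listAgents();               // reads `Agent.agentProfile` rows
    const agentSource = await collection.getAgentSource('Agent Name'); // reads `Agent.agentSource` for one agent
    const createdProfile = await collection.createAgent(agentSource);  // inserts into `Agent` and `AgentHistory`
    await collection.updateAgentSource('Agent Name', agentSource);     // updates `Agent`, appends to `AgentHistory`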
@@ -10766,75 +11140,6 @@ const EXPECTATION_UNITS = ['CHARACTERS', 'WORDS', 'SENTENCES', 'LINES', 'PARAGRA
|
|
|
10766
11140
|
* TODO: [💝] Unite object for expecting amount and format - remove format
|
|
10767
11141
|
*/
|
|
10768
11142
|
|
|
10769
|
-
/**
|
|
10770
|
-
* Function parseNumber will parse number from string
|
|
10771
|
-
*
|
|
10772
|
-
* Note: [🔂] This function is idempotent.
|
|
10773
|
-
* Unlike Number.parseInt, Number.parseFloat it will never ever result in NaN
|
|
10774
|
-
* Note: it also works only with decimal numbers
|
|
10775
|
-
*
|
|
10776
|
-
* @returns parsed number
|
|
10777
|
-
* @throws {ParseError} if the value is not a number
|
|
10778
|
-
*
|
|
10779
|
-
* @public exported from `@promptbook/utils`
|
|
10780
|
-
*/
|
|
10781
|
-
function parseNumber(value) {
|
|
10782
|
-
const originalValue = value;
|
|
10783
|
-
if (typeof value === 'number') {
|
|
10784
|
-
value = value.toString(); // <- TODO: Maybe more efficient way to do this
|
|
10785
|
-
}
|
|
10786
|
-
if (typeof value !== 'string') {
|
|
10787
|
-
return 0;
|
|
10788
|
-
}
|
|
10789
|
-
value = value.trim();
|
|
10790
|
-
if (value.startsWith('+')) {
|
|
10791
|
-
return parseNumber(value.substring(1));
|
|
10792
|
-
}
|
|
10793
|
-
if (value.startsWith('-')) {
|
|
10794
|
-
const number = parseNumber(value.substring(1));
|
|
10795
|
-
if (number === 0) {
|
|
10796
|
-
return 0; // <- Note: To prevent -0
|
|
10797
|
-
}
|
|
10798
|
-
return -number;
|
|
10799
|
-
}
|
|
10800
|
-
value = value.replace(/,/g, '.');
|
|
10801
|
-
value = value.toUpperCase();
|
|
10802
|
-
if (value === '') {
|
|
10803
|
-
return 0;
|
|
10804
|
-
}
|
|
10805
|
-
if (value === '♾' || value.startsWith('INF')) {
|
|
10806
|
-
return Infinity;
|
|
10807
|
-
}
|
|
10808
|
-
if (value.includes('/')) {
|
|
10809
|
-
const [numerator_, denominator_] = value.split('/');
|
|
10810
|
-
const numerator = parseNumber(numerator_);
|
|
10811
|
-
const denominator = parseNumber(denominator_);
|
|
10812
|
-
if (denominator === 0) {
|
|
10813
|
-
throw new ParseError(`Unable to parse number from "${originalValue}" because denominator is zero`);
|
|
10814
|
-
}
|
|
10815
|
-
return numerator / denominator;
|
|
10816
|
-
}
|
|
10817
|
-
if (/^(NAN|NULL|NONE|UNDEFINED|ZERO|NO.*)$/.test(value)) {
|
|
10818
|
-
return 0;
|
|
10819
|
-
}
|
|
10820
|
-
if (value.includes('E')) {
|
|
10821
|
-
const [significand, exponent] = value.split('E');
|
|
10822
|
-
return parseNumber(significand) * 10 ** parseNumber(exponent);
|
|
10823
|
-
}
|
|
10824
|
-
if (!/^[0-9.]+$/.test(value) || value.split('.').length > 2) {
|
|
10825
|
-
throw new ParseError(`Unable to parse number from "${originalValue}"`);
|
|
10826
|
-
}
|
|
10827
|
-
const num = parseFloat(value);
|
|
10828
|
-
if (isNaN(num)) {
|
|
10829
|
-
throw new ParseError(`Unexpected NaN when parsing number from "${originalValue}"`);
|
|
10830
|
-
}
|
|
10831
|
-
return num;
|
|
10832
|
-
}
|
|
10833
|
-
/**
|
|
10834
|
-
* TODO: Maybe use sth. like safe-eval in fraction/calculation case @see https://www.npmjs.com/package/safe-eval
|
|
10835
|
-
* TODO: [🧠][🌻] Maybe export through `@promptbook/markdown-utils` not `@promptbook/utils`
|
|
10836
|
-
*/
|
|
10837
|
-
|
|
10838
11143
|
/**
|
|
10839
11144
|
import { WrappedError } from '../../errors/WrappedError';
|
|
10840
11145
|
import { assertsError } from '../../errors/assertsError';
|
|
@@ -10977,30 +11282,6 @@ const expectCommandParser = {
|
|
|
10977
11282
|
},
|
|
10978
11283
|
};
|
|
10979
11284
|
|
|
10980
|
-
/**
|
|
10981
|
-
* Removes quotes from a string
|
|
10982
|
-
*
|
|
10983
|
-
* Note: [🔂] This function is idempotent.
|
|
10984
|
-
* Tip: This is very useful for post-processing of the result of the LLM model
|
|
10985
|
-
* Note: This function removes only the same quotes from the beginning and the end of the string
|
|
10986
|
-
* Note: There are two similar functions:
|
|
10987
|
-
* - `removeQuotes` which removes only bounding quotes
|
|
10988
|
-
* - `unwrapResult` which removes whole introduce sentence
|
|
10989
|
-
*
|
|
10990
|
-
* @param text optionally quoted text
|
|
10991
|
-
* @returns text without quotes
|
|
10992
|
-
* @public exported from `@promptbook/utils`
|
|
10993
|
-
*/
|
|
10994
|
-
function removeQuotes(text) {
|
|
10995
|
-
if (text.startsWith('"') && text.endsWith('"')) {
|
|
10996
|
-
return text.slice(1, -1);
|
|
10997
|
-
}
|
|
10998
|
-
if (text.startsWith("'") && text.endsWith("'")) {
|
|
10999
|
-
return text.slice(1, -1);
|
|
11000
|
-
}
|
|
11001
|
-
return text;
|
|
11002
|
-
}
|
|
11003
|
-
|
|
11004
11285
|
/**
|
|
11005
11286
|
* Function `validateParameterName` will normalize and validate a parameter name for use in pipelines.
|
|
11006
11287
|
* It removes diacritics, emojis, and quotes, normalizes to camelCase, and checks for reserved names and invalid characters.
|
|
@@ -12180,25 +12461,11 @@ function $applyToTaskJson(command, $taskJson, $pipelineJson) {
|
|
|
12180
12461
|
First definition:
|
|
12181
12462
|
${persona.description}
|
|
12182
12463
|
|
|
12183
|
-
Second definition:
|
|
12184
|
-
${personaDescription}
|
|
12185
|
-
|
|
12186
|
-
`));
|
|
12187
|
-
persona.description += spaceTrim$1('\n\n' + personaDescription);
|
|
12188
|
-
}
|
|
12189
|
-
|
|
12190
|
-
/**
|
|
12191
|
-
* Checks if the given value is a valid JavaScript identifier name.
|
|
12192
|
-
*
|
|
12193
|
-
* @param javascriptName The value to check for JavaScript identifier validity.
|
|
12194
|
-
* @returns `true` if the value is a valid JavaScript name, false otherwise.
|
|
12195
|
-
* @public exported from `@promptbook/utils`
|
|
12196
|
-
*/
|
|
12197
|
-
function isValidJavascriptName(javascriptName) {
|
|
12198
|
-
if (typeof javascriptName !== 'string') {
|
|
12199
|
-
return false;
|
|
12200
|
-
}
|
|
12201
|
-
return /^[a-zA-Z_$][0-9a-zA-Z_$]*$/i.test(javascriptName);
|
|
12464
|
+
Second definition:
|
|
12465
|
+
${personaDescription}
|
|
12466
|
+
|
|
12467
|
+
`));
|
|
12468
|
+
persona.description += spaceTrim$1('\n\n' + personaDescription);
|
|
12202
12469
|
}
|
|
12203
12470
|
|
|
12204
12471
|
/**
|
|
@@ -13769,114 +14036,6 @@ function addAutoGeneratedSection(content, options) {
|
|
|
13769
14036
|
* TODO: [🏛] This can be part of markdown builder
|
|
13770
14037
|
*/
|
|
13771
14038
|
|
|
13772
|
-
/**
|
|
13773
|
-
* Creates a Mermaid graph based on the promptbook
|
|
13774
|
-
*
|
|
13775
|
-
* Note: The result is not wrapped in a Markdown code block
|
|
13776
|
-
*
|
|
13777
|
-
* @public exported from `@promptbook/utils`
|
|
13778
|
-
*/
|
|
13779
|
-
function renderPromptbookMermaid(pipelineJson, options) {
|
|
13780
|
-
const { linkTask = () => null } = options || {};
|
|
13781
|
-
const MERMAID_PREFIX = 'pipeline_';
|
|
13782
|
-
const MERMAID_KNOWLEDGE_NAME = MERMAID_PREFIX + 'knowledge';
|
|
13783
|
-
const MERMAID_RESERVED_NAME = MERMAID_PREFIX + 'reserved';
|
|
13784
|
-
const MERMAID_INPUT_NAME = MERMAID_PREFIX + 'input';
|
|
13785
|
-
const MERMAID_OUTPUT_NAME = MERMAID_PREFIX + 'output';
|
|
13786
|
-
const parameterNameToTaskName = (parameterName) => {
|
|
13787
|
-
if (parameterName === 'knowledge') {
|
|
13788
|
-
return MERMAID_KNOWLEDGE_NAME;
|
|
13789
|
-
}
|
|
13790
|
-
else if (RESERVED_PARAMETER_NAMES.includes(parameterName)) {
|
|
13791
|
-
return MERMAID_RESERVED_NAME;
|
|
13792
|
-
}
|
|
13793
|
-
const parameter = pipelineJson.parameters.find((parameter) => parameter.name === parameterName);
|
|
13794
|
-
if (!parameter) {
|
|
13795
|
-
throw new UnexpectedError(`Could not find {${parameterName}}`);
|
|
13796
|
-
// <- TODO: This causes problems when {knowledge} and other reserved parameters are used
|
|
13797
|
-
}
|
|
13798
|
-
if (parameter.isInput) {
|
|
13799
|
-
return MERMAID_INPUT_NAME;
|
|
13800
|
-
}
|
|
13801
|
-
const task = pipelineJson.tasks.find((task) => task.resultingParameterName === parameterName);
|
|
13802
|
-
if (!task) {
|
|
13803
|
-
throw new Error(`Could not find task for {${parameterName}}`);
|
|
13804
|
-
}
|
|
13805
|
-
return MERMAID_PREFIX + (task.name || normalizeTo_camelCase('task-' + titleToName(task.title)));
|
|
13806
|
-
};
|
|
13807
|
-
const inputAndIntermediateParametersMermaid = pipelineJson.tasks
|
|
13808
|
-
.flatMap(({ title, dependentParameterNames, resultingParameterName }) => [
|
|
13809
|
-
`${parameterNameToTaskName(resultingParameterName)}("${title}")`,
|
|
13810
|
-
...dependentParameterNames.map((dependentParameterName) => `${parameterNameToTaskName(dependentParameterName)}--"{${dependentParameterName}}"-->${parameterNameToTaskName(resultingParameterName)}`),
|
|
13811
|
-
])
|
|
13812
|
-
.join('\n');
|
|
13813
|
-
const outputParametersMermaid = pipelineJson.parameters
|
|
13814
|
-
.filter(({ isOutput }) => isOutput)
|
|
13815
|
-
.map(({ name }) => `${parameterNameToTaskName(name)}--"{${name}}"-->${MERMAID_OUTPUT_NAME}`)
|
|
13816
|
-
.join('\n');
|
|
13817
|
-
const linksMermaid = pipelineJson.tasks
|
|
13818
|
-
.map((task) => {
|
|
13819
|
-
const link = linkTask(task);
|
|
13820
|
-
if (link === null) {
|
|
13821
|
-
return '';
|
|
13822
|
-
}
|
|
13823
|
-
const { href, title } = link;
|
|
13824
|
-
const taskName = parameterNameToTaskName(task.resultingParameterName);
|
|
13825
|
-
return `click ${taskName} href "${href}" "${title}";`;
|
|
13826
|
-
})
|
|
13827
|
-
.filter((line) => line !== '')
|
|
13828
|
-
.join('\n');
|
|
13829
|
-
const interactionPointsMermaid = Object.entries({
|
|
13830
|
-
[MERMAID_INPUT_NAME]: 'Input',
|
|
13831
|
-
[MERMAID_OUTPUT_NAME]: 'Output',
|
|
13832
|
-
[MERMAID_RESERVED_NAME]: 'Other',
|
|
13833
|
-
[MERMAID_KNOWLEDGE_NAME]: 'Knowledge',
|
|
13834
|
-
})
|
|
13835
|
-
.filter(([MERMAID_NAME]) => (inputAndIntermediateParametersMermaid + outputParametersMermaid).includes(MERMAID_NAME))
|
|
13836
|
-
.map(([MERMAID_NAME, title]) => `${MERMAID_NAME}((${title})):::${MERMAID_NAME}`)
|
|
13837
|
-
.join('\n');
|
|
13838
|
-
const promptbookMermaid = spaceTrim$2((block) => `
|
|
13839
|
-
|
|
13840
|
-
%% 🔮 Tip: Open this on GitHub or in the VSCode website to see the Mermaid graph visually
|
|
13841
|
-
|
|
13842
|
-
flowchart LR
|
|
13843
|
-
subgraph "${pipelineJson.title}"
|
|
13844
|
-
|
|
13845
|
-
%% Basic configuration
|
|
13846
|
-
direction TB
|
|
13847
|
-
|
|
13848
|
-
%% Interaction points from pipeline to outside
|
|
13849
|
-
${block(interactionPointsMermaid)}
|
|
13850
|
-
|
|
13851
|
-
%% Input and intermediate parameters
|
|
13852
|
-
${block(inputAndIntermediateParametersMermaid)}
|
|
13853
|
-
|
|
13854
|
-
|
|
13855
|
-
%% Output parameters
|
|
13856
|
-
${block(outputParametersMermaid)}
|
|
13857
|
-
|
|
13858
|
-
%% Links
|
|
13859
|
-
${block(linksMermaid)}
|
|
13860
|
-
|
|
13861
|
-
%% Styles
|
|
13862
|
-
classDef ${MERMAID_INPUT_NAME} color: grey;
|
|
13863
|
-
classDef ${MERMAID_OUTPUT_NAME} color: grey;
|
|
13864
|
-
classDef ${MERMAID_RESERVED_NAME} color: grey;
|
|
13865
|
-
classDef ${MERMAID_KNOWLEDGE_NAME} color: grey;
|
|
13866
|
-
|
|
13867
|
-
end;
|
|
13868
|
-
|
|
13869
|
-
`);
|
|
13870
|
-
return promptbookMermaid;
|
|
13871
|
-
}
|
|
13872
|
-
/**
|
|
13873
|
-
* TODO: [🧠] FOREACH in mermaid graph
|
|
13874
|
-
* TODO: [🧠] Knowledge in mermaid graph
|
|
13875
|
-
* TODO: [🧠] Personas in mermaid graph
|
|
13876
|
-
* TODO: Maybe use some Mermaid package instead of string templating
|
|
13877
|
-
* TODO: [🕌] When more than 2 functionalities, split into separate functions
|
|
13878
|
-
*/
|
|
13879
|
-
|
|
13880
14039
|
/**
|
|
13881
14040
|
* Prettyfies Promptbook string and adds Mermaid graph
|
|
13882
14041
|
*
|
|
@@ -14437,64 +14596,6 @@ const $llmToolsRegister = new $Register('llm_execution_tools_constructors');
|
|
|
14437
14596
|
* TODO: [®] DRY Register logic
|
|
14438
14597
|
*/
|
|
14439
14598
|
|
|
14440
|
-
/**
|
|
14441
|
-
* Detects if the code is running in a browser environment in main thread (Not in a web worker)
|
|
14442
|
-
*
|
|
14443
|
-
* Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
|
|
14444
|
-
*
|
|
14445
|
-
* @public exported from `@promptbook/utils`
|
|
14446
|
-
*/
|
|
14447
|
-
const $isRunningInBrowser = new Function(`
|
|
14448
|
-
try {
|
|
14449
|
-
return this === window;
|
|
14450
|
-
} catch (e) {
|
|
14451
|
-
return false;
|
|
14452
|
-
}
|
|
14453
|
-
`);
|
|
14454
|
-
/**
|
|
14455
|
-
* TODO: [🎺]
|
|
14456
|
-
*/
|
|
14457
|
-
|
|
14458
|
-
/**
|
|
14459
|
-
* Detects if the code is running in a Node.js environment
|
|
14460
|
-
*
|
|
14461
|
-
* Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
|
|
14462
|
-
*
|
|
14463
|
-
* @public exported from `@promptbook/utils`
|
|
14464
|
-
*/
|
|
14465
|
-
const $isRunningInNode = new Function(`
|
|
14466
|
-
try {
|
|
14467
|
-
return this === global;
|
|
14468
|
-
} catch (e) {
|
|
14469
|
-
return false;
|
|
14470
|
-
}
|
|
14471
|
-
`);
|
|
14472
|
-
/**
|
|
14473
|
-
* TODO: [🎺]
|
|
14474
|
-
*/
|
|
14475
|
-
|
|
14476
|
-
/**
|
|
14477
|
-
* Detects if the code is running in a web worker
|
|
14478
|
-
*
|
|
14479
|
-
* Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
|
|
14480
|
-
*
|
|
14481
|
-
* @public exported from `@promptbook/utils`
|
|
14482
|
-
*/
|
|
14483
|
-
const $isRunningInWebWorker = new Function(`
|
|
14484
|
-
try {
|
|
14485
|
-
if (typeof WorkerGlobalScope !== 'undefined' && self instanceof WorkerGlobalScope) {
|
|
14486
|
-
return true;
|
|
14487
|
-
} else {
|
|
14488
|
-
return false;
|
|
14489
|
-
}
|
|
14490
|
-
} catch (e) {
|
|
14491
|
-
return false;
|
|
14492
|
-
}
|
|
14493
|
-
`);
|
|
14494
|
-
/**
|
|
14495
|
-
* TODO: [🎺]
|
|
14496
|
-
*/
|
|
14497
|
-
|
|
14498
14599
|
/**
|
|
14499
14600
|
* Creates a message with all registered LLM tools
|
|
14500
14601
|
*
|
|
@@ -14728,18 +14829,6 @@ class MemoryStorage {
|
|
|
14728
14829
|
}
|
|
14729
14830
|
}
|
|
14730
14831
|
|
|
14731
|
-
/**
|
|
14732
|
-
* Simple wrapper `new Date().toISOString()`
|
|
14733
|
-
*
|
|
14734
|
-
* Note: `$` is used to indicate that this function is not a pure function - it is not deterministic because it depends on the current time
|
|
14735
|
-
*
|
|
14736
|
-
* @returns string_date branded type
|
|
14737
|
-
* @public exported from `@promptbook/utils`
|
|
14738
|
-
*/
|
|
14739
|
-
function $getCurrentDate() {
|
|
14740
|
-
return new Date().toISOString();
|
|
14741
|
-
}
|
|
14742
|
-
|
|
14743
14832
|
/**
|
|
14744
14833
|
* Intercepts LLM tools and counts total usage of the tools
|
|
14745
14834
|
*
|
|
@@ -15366,17 +15455,17 @@ const OPENAI_MODELS = exportJson({
  },
  /**/
  /*/
-
-
-
-
-
+ {
+ modelTitle: 'tts-1-hd-1106',
+ modelName: 'tts-1-hd-1106',
+ },
+ /**/
  /*/
-
-
-
-
-
+ {
+ modelTitle: 'tts-1-hd',
+ modelName: 'tts-1-hd',
+ },
+ /**/
  /**/
  {
  modelVariant: 'CHAT',
@@ -16562,7 +16651,7 @@ class OpenAiExecutionTools extends OpenAiCompatibleExecutionTools {
  *
  * This is useful for calling OpenAI API with a single assistant, for more wide usage use `OpenAiExecutionTools`.
  *
- * Note: [🦖] There are several different things in Promptbook:
+ * !!! Note: [🦖] There are several different things in Promptbook:
  * - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
  * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
  * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
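For orientation, a sketch of how the layers listed in the note above compose (illustrative only; the option names follow usages visible later in this diff, not a documented constructor signature):

    // Any LlmExecutionTools implementation can sit underneath, e.g. OpenAiAssistantExecutionTools
    const agentLlmTools = new AgentLlmExecutionTools({
        isVerbose: true,
        llmTools: underlyingLlmExecutionTools, // the wrapped LlmExecutionTools
        agentSource,                           // the agent's Book source string
    });
    const chatResult = await agentLlmTools.callChatModel(chatPrompt); // applies the agent-specific system prompt, then delegates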
@@ -16668,17 +16757,21 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
|
|
|
16668
16757
|
console.info('connect', stream.currentEvent);
|
|
16669
16758
|
}
|
|
16670
16759
|
});
|
|
16760
|
+
/*
|
|
16671
16761
|
stream.on('messageDelta', (messageDelta) => {
|
|
16672
|
-
|
|
16673
|
-
|
|
16762
|
+
if (
|
|
16763
|
+
this.options.isVerbose &&
|
|
16674
16764
|
messageDelta &&
|
|
16675
16765
|
messageDelta.content &&
|
|
16676
16766
|
messageDelta.content[0] &&
|
|
16677
|
-
messageDelta.content[0].type === 'text'
|
|
16678
|
-
|
|
16767
|
+
messageDelta.content[0].type === 'text'
|
|
16768
|
+
) {
|
|
16769
|
+
console.info('messageDelta', messageDelta.content[0].text?.value);
|
|
16679
16770
|
}
|
|
16771
|
+
|
|
16680
16772
|
// <- TODO: [🐚] Make streaming and running tasks working
|
|
16681
16773
|
});
|
|
16774
|
+
*/
|
|
16682
16775
|
stream.on('messageCreated', (message) => {
|
|
16683
16776
|
if (this.options.isVerbose) {
|
|
16684
16777
|
console.info('messageCreated', message);
|
|
@@ -16733,15 +16826,19 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
|
|
|
16733
16826
|
},
|
|
16734
16827
|
});
|
|
16735
16828
|
}
|
|
16736
|
-
|
|
16829
|
+
/*
|
|
16830
|
+
public async playground() {
|
|
16737
16831
|
const client = await this.getClient();
|
|
16832
|
+
|
|
16738
16833
|
// List all assistants
|
|
16739
16834
|
const assistants = await client.beta.assistants.list();
|
|
16740
16835
|
console.log('!!! Assistants:', assistants);
|
|
16836
|
+
|
|
16741
16837
|
// Get details of a specific assistant
|
|
16742
16838
|
const assistantId = 'asst_MO8fhZf4dGloCfXSHeLcIik0';
|
|
16743
16839
|
const assistant = await client.beta.assistants.retrieve(assistantId);
|
|
16744
16840
|
console.log('!!! Assistant Details:', assistant);
|
|
16841
|
+
|
|
16745
16842
|
// Update an assistant
|
|
16746
16843
|
const updatedAssistant = await client.beta.assistants.update(assistantId, {
|
|
16747
16844
|
name: assistant.name + '(M)',
|
|
@@ -16751,75 +16848,196 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
|
|
|
16751
16848
|
},
|
|
16752
16849
|
});
|
|
16753
16850
|
console.log('!!! Updated Assistant:', updatedAssistant);
|
|
16851
|
+
|
|
16754
16852
|
await forEver();
|
|
16755
16853
|
}
|
|
16854
|
+
*/
|
|
16855
|
+
/**
|
|
16856
|
+
* Get an existing assistant tool wrapper
|
|
16857
|
+
*/
|
|
16858
|
+
getAssistant(assistantId) {
|
|
16859
|
+
return new OpenAiAssistantExecutionTools({
|
|
16860
|
+
...this.options,
|
|
16861
|
+
assistantId,
|
|
16862
|
+
});
|
|
16863
|
+
}
|
|
16756
16864
|
async createNewAssistant(options) {
|
|
16757
16865
|
if (!this.isCreatingNewAssistantsAllowed) {
|
|
16758
16866
|
throw new NotAllowed(`Creating new assistants is not allowed. Set \`isCreatingNewAssistantsAllowed: true\` in options to enable this feature.`);
|
|
16759
16867
|
}
|
|
16760
16868
|
// await this.playground();
|
|
16761
|
-
const { name, instructions } = options;
|
|
16869
|
+
const { name, instructions, knowledgeSources } = options;
|
|
16762
16870
|
const client = await this.getClient();
|
|
16763
|
-
|
|
16764
|
-
//
|
|
16765
|
-
|
|
16766
|
-
|
|
16767
|
-
|
|
16768
|
-
|
|
16769
|
-
|
|
16770
|
-
|
|
16771
|
-
|
|
16772
|
-
if (!res.ok) throw new Error(`Download error: ${url}`);
|
|
16773
|
-
const buffer = await res.arrayBuffer();
|
|
16774
|
-
fs.writeFileSync(filepath, Buffer.from(buffer));
|
|
16775
|
-
console.log(`📥 File downloaded: ${filename}`);
|
|
16776
|
-
|
|
16777
|
-
return filepath;
|
|
16778
|
-
}
|
|
16779
|
-
|
|
16780
|
-
async function uploadFileToOpenAI(filepath: string) {
|
|
16781
|
-
const file = await client.files.create({
|
|
16782
|
-
file: fs.createReadStream(filepath),
|
|
16783
|
-
purpose: 'assistants',
|
|
16871
|
+
let vectorStoreId;
|
|
16872
|
+
// If knowledge sources are provided, create a vector store with them
|
|
16873
|
+
if (knowledgeSources && knowledgeSources.length > 0) {
|
|
16874
|
+
if (this.options.isVerbose) {
|
|
16875
|
+
console.info(`📚 Creating vector store with ${knowledgeSources.length} knowledge sources...`);
|
|
16876
|
+
}
|
|
16877
|
+
// Create a vector store
|
|
16878
|
+
const vectorStore = await client.beta.vectorStores.create({
|
|
16879
|
+
name: `${name} Knowledge Base`,
|
|
16784
16880
|
});
|
|
16785
|
-
|
|
16786
|
-
|
|
16881
|
+
vectorStoreId = vectorStore.id;
|
|
16882
|
+
if (this.options.isVerbose) {
|
|
16883
|
+
console.info(`✅ Vector store created: ${vectorStoreId}`);
|
|
16884
|
+
}
|
|
16885
|
+
// Upload files from knowledge sources to the vector store
|
|
16886
|
+
const fileStreams = [];
|
|
16887
|
+
for (const source of knowledgeSources) {
|
|
16888
|
+
try {
|
|
16889
|
+
// Check if it's a URL
|
|
16890
|
+
if (source.startsWith('http://') || source.startsWith('https://')) {
|
|
16891
|
+
// Download the file
|
|
16892
|
+
const response = await fetch(source);
|
|
16893
|
+
if (!response.ok) {
|
|
16894
|
+
console.error(`Failed to download ${source}: ${response.statusText}`);
|
|
16895
|
+
continue;
|
|
16896
|
+
}
|
|
16897
|
+
const buffer = await response.arrayBuffer();
|
|
16898
|
+
const filename = source.split('/').pop() || 'downloaded-file';
|
|
16899
|
+
const blob = new Blob([buffer]);
|
|
16900
|
+
const file = new File([blob], filename);
|
|
16901
|
+
fileStreams.push(file);
|
|
16902
|
+
}
|
|
16903
|
+
else {
|
|
16904
|
+
// Assume it's a local file path
|
|
16905
|
+
// Note: This will work in Node.js environment
|
|
16906
|
+
// For browser environments, this would need different handling
|
|
16907
|
+
const fs = await import('fs');
|
|
16908
|
+
const fileStream = fs.createReadStream(source);
|
|
16909
|
+
fileStreams.push(fileStream);
|
|
16910
|
+
}
|
|
16911
|
+
}
|
|
16912
|
+
catch (error) {
|
|
16913
|
+
console.error(`Error processing knowledge source ${source}:`, error);
|
|
16914
|
+
}
|
|
16915
|
+
}
|
|
16916
|
+
// Batch upload files to the vector store
|
|
16917
|
+
if (fileStreams.length > 0) {
|
|
16918
|
+
try {
|
|
16919
|
+
await client.beta.vectorStores.fileBatches.uploadAndPoll(vectorStoreId, {
|
|
16920
|
+
files: fileStreams,
|
|
16921
|
+
});
|
|
16922
|
+
if (this.options.isVerbose) {
|
|
16923
|
+
console.info(`✅ Uploaded ${fileStreams.length} files to vector store`);
|
|
16924
|
+
}
|
|
16925
|
+
}
|
|
16926
|
+
catch (error) {
|
|
16927
|
+
console.error('Error uploading files to vector store:', error);
|
|
16928
|
+
}
|
|
16929
|
+
}
|
|
16787
16930
|
}
|
|
16788
|
-
|
|
16789
|
-
|
|
16790
|
-
|
|
16791
|
-
'
|
|
16792
|
-
'
|
|
16793
|
-
|
|
16794
|
-
|
|
16795
|
-
|
|
16796
|
-
|
|
16797
|
-
|
|
16798
|
-
|
|
16799
|
-
|
|
16931
|
+
// Create assistant with vector store attached
|
|
16932
|
+
const assistantConfig = {
|
|
16933
|
+
name,
|
|
16934
|
+
description: 'Assistant created via Promptbook',
|
|
16935
|
+
model: 'gpt-4o',
|
|
16936
|
+
instructions,
|
|
16937
|
+
tools: [/* TODO: [🧠] Maybe add { type: 'code_interpreter' }, */ { type: 'file_search' }],
|
|
16938
|
+
};
|
|
16939
|
+
// Attach vector store if created
|
|
16940
|
+
if (vectorStoreId) {
|
|
16941
|
+
assistantConfig.tool_resources = {
|
|
16942
|
+
file_search: {
|
|
16943
|
+
vector_store_ids: [vectorStoreId],
|
|
16944
|
+
},
|
|
16945
|
+
};
|
|
16946
|
+
}
|
|
16947
|
+
const assistant = await client.beta.assistants.create(assistantConfig);
|
|
16948
|
+
console.log(`✅ Assistant created: ${assistant.id}`);
|
|
16949
|
+
// TODO: !!!! Try listing existing assistants
|
|
16950
|
+
// TODO: !!!! Try marking existing assistants by DISCRIMINANT
|
|
16951
|
+
// TODO: !!!! Allow to update and reconnect to existing assistants
|
|
16952
|
+
return new OpenAiAssistantExecutionTools({
|
|
16953
|
+
...this.options,
|
|
16954
|
+
isCreatingNewAssistantsAllowed: false,
|
|
16955
|
+
assistantId: assistant.id,
|
|
16956
|
+
});
|
|
16957
|
+
}
|
|
16958
|
+
async updateAssistant(options) {
|
|
16959
|
+
if (!this.isCreatingNewAssistantsAllowed) {
|
|
16960
|
+
throw new NotAllowed(`Updating assistants is not allowed. Set \`isCreatingNewAssistantsAllowed: true\` in options to enable this feature.`);
|
|
16800
16961
|
}
|
|
16801
|
-
|
|
16802
|
-
|
|
16803
|
-
|
|
16804
|
-
|
|
16805
|
-
|
|
16806
|
-
|
|
16962
|
+
const { assistantId, name, instructions, knowledgeSources } = options;
|
|
16963
|
+
const client = await this.getClient();
|
|
16964
|
+
let vectorStoreId;
|
|
16965
|
+
// If knowledge sources are provided, create a vector store with them
|
|
16966
|
+
// TODO: [🧠] Reuse vector store creation logic from createNewAssistant
|
|
16967
|
+
if (knowledgeSources && knowledgeSources.length > 0) {
|
|
16968
|
+
if (this.options.isVerbose) {
|
|
16969
|
+
console.info(`📚 Creating vector store for update with ${knowledgeSources.length} knowledge sources...`);
|
|
16970
|
+
}
|
|
16971
|
+
// Create a vector store
|
|
16972
|
+
const vectorStore = await client.beta.vectorStores.create({
|
|
16973
|
+
name: `${name} Knowledge Base`,
|
|
16974
|
+
});
|
|
16975
|
+
vectorStoreId = vectorStore.id;
|
|
16976
|
+
if (this.options.isVerbose) {
|
|
16977
|
+
console.info(`✅ Vector store created: ${vectorStoreId}`);
|
|
16978
|
+
}
|
|
16979
|
+
// Upload files from knowledge sources to the vector store
|
|
16980
|
+
const fileStreams = [];
|
|
16981
|
+
for (const source of knowledgeSources) {
|
|
16982
|
+
try {
|
|
16983
|
+
// Check if it's a URL
|
|
16984
|
+
if (source.startsWith('http://') || source.startsWith('https://')) {
|
|
16985
|
+
// Download the file
|
|
16986
|
+
const response = await fetch(source);
|
|
16987
|
+
if (!response.ok) {
|
|
16988
|
+
console.error(`Failed to download ${source}: ${response.statusText}`);
|
|
16989
|
+
continue;
|
|
16990
|
+
}
|
|
16991
|
+
const buffer = await response.arrayBuffer();
|
|
16992
|
+
const filename = source.split('/').pop() || 'downloaded-file';
|
|
16993
|
+
const blob = new Blob([buffer]);
|
|
16994
|
+
const file = new File([blob], filename);
|
|
16995
|
+
fileStreams.push(file);
|
|
16996
|
+
}
|
|
16997
|
+
else {
|
|
16998
|
+
// Assume it's a local file path
|
|
16999
|
+
// Note: This will work in Node.js environment
|
|
17000
|
+
// For browser environments, this would need different handling
|
|
17001
|
+
const fs = await import('fs');
|
|
17002
|
+
const fileStream = fs.createReadStream(source);
|
|
17003
|
+
fileStreams.push(fileStream);
|
|
17004
|
+
}
|
|
17005
|
+
}
|
|
17006
|
+
catch (error) {
|
|
17007
|
+
console.error(`Error processing knowledge source ${source}:`, error);
|
|
17008
|
+
}
|
|
17009
|
+
}
|
|
17010
|
+
// Batch upload files to the vector store
|
|
17011
|
+
if (fileStreams.length > 0) {
|
|
17012
|
+
try {
|
|
17013
|
+
await client.beta.vectorStores.fileBatches.uploadAndPoll(vectorStoreId, {
|
|
17014
|
+
files: fileStreams,
|
|
17015
|
+
});
|
|
17016
|
+
if (this.options.isVerbose) {
|
|
17017
|
+
console.info(`✅ Uploaded ${fileStreams.length} files to vector store`);
|
|
17018
|
+
}
|
|
17019
|
+
}
|
|
17020
|
+
catch (error) {
|
|
17021
|
+
console.error('Error uploading files to vector store:', error);
|
|
17022
|
+
}
|
|
17023
|
+
}
|
|
16807
17024
|
}
|
|
16808
|
-
|
|
16809
|
-
// alert('!!!! Creating new OpenAI assistant');
|
|
16810
|
-
// 3️⃣ Create assistant with uploaded files
|
|
16811
|
-
const assistant = await client.beta.assistants.create({
|
|
17025
|
+
const assistantUpdate = {
|
|
16812
17026
|
name,
|
|
16813
|
-
description: 'Assistant created via Promptbook',
|
|
16814
|
-
model: 'gpt-4o',
|
|
16815
17027
|
instructions,
|
|
16816
17028
|
tools: [/* TODO: [🧠] Maybe add { type: 'code_interpreter' }, */ { type: 'file_search' }],
|
|
16817
|
-
|
|
16818
|
-
|
|
16819
|
-
|
|
16820
|
-
|
|
16821
|
-
|
|
16822
|
-
|
|
17029
|
+
};
|
|
17030
|
+
if (vectorStoreId) {
|
|
17031
|
+
assistantUpdate.tool_resources = {
|
|
17032
|
+
file_search: {
|
|
17033
|
+
vector_store_ids: [vectorStoreId],
|
|
17034
|
+
},
|
|
17035
|
+
};
|
|
17036
|
+
}
|
|
17037
|
+
const assistant = await client.beta.assistants.update(assistantId, assistantUpdate);
|
|
17038
|
+
if (this.options.isVerbose) {
|
|
17039
|
+
console.log(`✅ Assistant updated: ${assistant.id}`);
|
|
17040
|
+
}
|
|
16823
17041
|
return new OpenAiAssistantExecutionTools({
|
|
16824
17042
|
...this.options,
|
|
16825
17043
|
isCreatingNewAssistantsAllowed: false,
|
|
@@ -16858,7 +17076,7 @@ const DISCRIMINANT = 'OPEN_AI_ASSISTANT_V1';
  * Execution Tools for calling LLM models with a predefined agent "soul"
  * This wraps underlying LLM execution tools and applies agent-specific system prompts and requirements
  *
- * Note: [🦖] There are several different things in Promptbook:
+ * !!! Note: [🦖] There are several different things in Promptbook:
  * - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
  * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
  * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
@@ -16967,26 +17185,58 @@ class AgentLlmExecutionTools {
|
|
|
16967
17185
|
const chatPrompt = prompt;
|
|
16968
17186
|
let underlyingLlmResult;
|
|
16969
17187
|
if (OpenAiAssistantExecutionTools.isOpenAiAssistantExecutionTools(this.options.llmTools)) {
|
|
16970
|
-
|
|
16971
|
-
|
|
17188
|
+
const requirementsHash = SHA256(JSON.stringify(modelRequirements)).toString();
|
|
17189
|
+
const cached = AgentLlmExecutionTools.assistantCache.get(this.title);
|
|
17190
|
+
let assistant;
|
|
17191
|
+
if (cached) {
|
|
17192
|
+
if (cached.requirementsHash === requirementsHash) {
|
|
17193
|
+
if (this.options.isVerbose) {
|
|
17194
|
+
console.log(`1️⃣ Using cached OpenAI Assistant for agent ${this.title}...`);
|
|
17195
|
+
}
|
|
17196
|
+
assistant = this.options.llmTools.getAssistant(cached.assistantId);
|
|
17197
|
+
}
|
|
17198
|
+
else {
|
|
17199
|
+
if (this.options.isVerbose) {
|
|
17200
|
+
console.log(`1️⃣ Updating OpenAI Assistant for agent ${this.title}...`);
|
|
17201
|
+
}
|
|
17202
|
+
assistant = await this.options.llmTools.updateAssistant({
|
|
17203
|
+
assistantId: cached.assistantId,
|
|
17204
|
+
name: this.title,
|
|
17205
|
+
instructions: modelRequirements.systemMessage,
|
|
17206
|
+
knowledgeSources: modelRequirements.knowledgeSources,
|
|
17207
|
+
});
|
|
17208
|
+
AgentLlmExecutionTools.assistantCache.set(this.title, {
|
|
17209
|
+
assistantId: assistant.assistantId,
|
|
17210
|
+
requirementsHash,
|
|
17211
|
+
});
|
|
17212
|
+
}
|
|
16972
17213
|
}
|
|
16973
|
-
|
|
16974
|
-
|
|
16975
|
-
|
|
16976
|
-
instructions: modelRequirements.systemMessage,
|
|
16977
|
-
/*
|
|
16978
|
-
!!!
|
|
16979
|
-
metadata: {
|
|
16980
|
-
agentModelName: this.modelName,
|
|
17214
|
+
else {
|
|
17215
|
+
if (this.options.isVerbose) {
|
|
17216
|
+
console.log(`1️⃣ Creating new OpenAI Assistant for agent ${this.title}...`);
|
|
16981
17217
|
}
|
|
16982
|
-
|
|
16983
|
-
|
|
16984
|
-
|
|
17218
|
+
// <- TODO: !!! Check also `isCreatingNewAssistantsAllowed` and warn about it
|
|
17219
|
+
assistant = await this.options.llmTools.createNewAssistant({
|
|
17220
|
+
name: this.title,
|
|
17221
|
+
instructions: modelRequirements.systemMessage,
|
|
17222
|
+
knowledgeSources: modelRequirements.knowledgeSources,
|
|
17223
|
+
/*
|
|
17224
|
+
!!!
|
|
17225
|
+
metadata: {
|
|
17226
|
+
agentModelName: this.modelName,
|
|
17227
|
+
}
|
|
17228
|
+
*/
|
|
17229
|
+
});
|
|
17230
|
+
AgentLlmExecutionTools.assistantCache.set(this.title, {
|
|
17231
|
+
assistantId: assistant.assistantId,
|
|
17232
|
+
requirementsHash,
|
|
17233
|
+
});
|
|
17234
|
+
}
|
|
16985
17235
|
underlyingLlmResult = await assistant.callChatModel(chatPrompt);
|
|
16986
17236
|
}
|
|
16987
17237
|
else {
|
|
16988
17238
|
if (this.options.isVerbose) {
|
|
16989
|
-
console.log(`Creating Assistant ${this.title} on generic LLM execution tools...`);
|
|
17239
|
+
console.log(`2️⃣ Creating Assistant ${this.title} on generic LLM execution tools...`);
|
|
16990
17240
|
}
|
|
16991
17241
|
// Create modified chat prompt with agent system message
|
|
16992
17242
|
const modifiedChatPrompt = {
|
|
@@ -17016,6 +17266,10 @@ class AgentLlmExecutionTools {
|
|
|
17016
17266
|
return agentResult;
|
|
17017
17267
|
}
|
|
17018
17268
|
}
|
|
17269
|
+
/**
|
|
17270
|
+
* Cache of OpenAI assistants to avoid creating duplicates
|
|
17271
|
+
*/
|
|
17272
|
+
AgentLlmExecutionTools.assistantCache = new Map();
|
|
17019
17273
|
/**
|
|
17020
17274
|
* TODO: [🍚] Implement Destroyable pattern to free resources
|
|
17021
17275
|
* TODO: [🧠] Adding parameter substitution support (here or should be responsibility of the underlying LLM Tools)
|
|
@@ -17024,7 +17278,7 @@ class AgentLlmExecutionTools {
  /**
  * Represents one AI Agent
  *
- * Note: [🦖] There are several different things in Promptbook:
+ * !!! Note: [🦖] There are several different things in Promptbook:
  * - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
  * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
  * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
@@ -17032,7 +17286,19 @@ class AgentLlmExecutionTools {
|
|
|
17032
17286
|
*
|
|
17033
17287
|
* @public exported from `@promptbook/core`
|
|
17034
17288
|
*/
|
|
17035
|
-
class Agent {
|
|
17289
|
+
class Agent extends AgentLlmExecutionTools {
|
|
17290
|
+
/**
|
|
17291
|
+
* Name of the agent
|
|
17292
|
+
*/
|
|
17293
|
+
get agentName() {
|
|
17294
|
+
return this._agentName || createDefaultAgentName(this.agentSource.value);
|
|
17295
|
+
}
|
|
17296
|
+
/**
|
|
17297
|
+
* Computed hash of the agent source for integrity verification
|
|
17298
|
+
*/
|
|
17299
|
+
get agentHash() {
|
|
17300
|
+
return computeAgentHash(this.agentSource.value);
|
|
17301
|
+
}
|
|
17036
17302
|
/**
|
|
17037
17303
|
* Not used in Agent, always returns empty array
|
|
17038
17304
|
*/
|
|
@@ -17042,11 +17308,13 @@ class Agent {
|
|
|
17042
17308
|
];
|
|
17043
17309
|
}
|
|
17044
17310
|
constructor(options) {
|
|
17045
|
-
|
|
17046
|
-
|
|
17047
|
-
|
|
17048
|
-
|
|
17049
|
-
|
|
17311
|
+
const agentSource = asUpdatableSubject(options.agentSource);
|
|
17312
|
+
super({
|
|
17313
|
+
isVerbose: options.isVerbose,
|
|
17314
|
+
llmTools: getSingleLlmExecutionTools(options.executionTools.llm),
|
|
17315
|
+
agentSource: agentSource.value, // <- TODO: !!!! Allow to pass BehaviorSubject<string_book> OR refresh llmExecutionTools.callChat on agentSource change
|
|
17316
|
+
});
|
|
17317
|
+
this._agentName = undefined;
|
|
17050
17318
|
/**
|
|
17051
17319
|
* Description of the agent
|
|
17052
17320
|
*/
|
|
@@ -17055,27 +17323,16 @@ class Agent {
|
|
|
17055
17323
|
* Metadata like image or color
|
|
17056
17324
|
*/
|
|
17057
17325
|
this.meta = {};
|
|
17058
|
-
|
|
17326
|
+
// TODO: !!!!! Add `Agent` simple "mocked" learning by appending to agent source
|
|
17327
|
+
// TODO: !!!!! Add `Agent` learning by promptbookAgent
|
|
17328
|
+
this.agentSource = agentSource;
|
|
17059
17329
|
this.agentSource.subscribe((source) => {
|
|
17060
17330
|
const { agentName, personaDescription, meta } = parseAgentSource(source);
|
|
17061
|
-
this.
|
|
17331
|
+
this._agentName = agentName;
|
|
17062
17332
|
this.personaDescription = personaDescription;
|
|
17063
17333
|
this.meta = { ...this.meta, ...meta };
|
|
17064
17334
|
});
|
|
17065
17335
|
}
|
|
17066
|
-
/**
|
|
17067
|
-
* Creates LlmExecutionTools which exposes the agent as a model
|
|
17068
|
-
*/
|
|
17069
|
-
getLlmExecutionTools() {
|
|
17070
|
-
const llmTools = new AgentLlmExecutionTools({
|
|
17071
|
-
isVerbose: this.options.isVerbose,
|
|
17072
|
-
llmTools: getSingleLlmExecutionTools(this.options.executionTools.llm),
|
|
17073
|
-
agentSource: this.agentSource.value, // <- TODO: !!!! Allow to pass BehaviorSubject<string_book> OR refresh llmExecutionTools.callChat on agentSource change
|
|
17074
|
-
});
|
|
17075
|
-
// TODO: !!!! Add `Agent` simple "mocked" learning by appending to agent source
|
|
17076
|
-
// TODO: !!!! Add `Agent` learning by promptbookAgent
|
|
17077
|
-
return llmTools;
|
|
17078
|
-
}
|
|
17079
17336
|
}
|
|
17080
17337
|
/**
|
|
17081
17338
|
* TODO: [🧠][😰]Agent is not working with the parameters, should it be?
|
|
@@ -17142,6 +17399,106 @@ const _AgentRegistration = $llmToolsRegister.register(createAgentLlmExecutionToo
|
|
|
17142
17399
|
* Note: [💞] Ignore a discrepancy between file name and entity name
|
|
17143
17400
|
*/
|
|
17144
17401
|
|
|
17402
|
+
/**
|
|
17403
|
+
* Represents one AI Agent
|
|
17404
|
+
*
|
|
17405
|
+
* !!!!!! Note: [🦖] There are several different things in Promptbook:
|
|
17406
|
+
* - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
|
|
17407
|
+
* !!!!!! `RemoteAgent`
|
|
17408
|
+
* - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
|
|
17409
|
+
* - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
|
|
17410
|
+
* - `OpenAiAssistantExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities, recommended for usage in `Agent` or `AgentLlmExecutionTools`
|
|
17411
|
+
*
|
|
17412
|
+
* @public exported from `@promptbook/core`
|
|
17413
|
+
*/
|
|
17414
|
+
class RemoteAgent extends Agent {
|
|
17415
|
+
static async connect(options) {
|
|
17416
|
+
console.log('!!!!!', `${options.agentUrl}/api/book`);
|
|
17417
|
+
const bookResponse = await fetch(`${options.agentUrl}/api/book`);
|
|
17418
|
+
// <- TODO: !!!! What about closed-source agents?
|
|
17419
|
+
// <- TODO: !!!! Maybe use promptbookFetch
|
|
17420
|
+
const agentSourceValue = (await bookResponse.text());
|
|
17421
|
+
const agentSource = new BehaviorSubject(agentSourceValue);
|
|
17422
|
+
// <- TODO: !!!! Support updating and self-updating
|
|
17423
|
+
return new RemoteAgent({
|
|
17424
|
+
...options,
|
|
17425
|
+
executionTools: {
|
|
17426
|
+
/* Note: These tools are not used */
|
|
17427
|
+
// ---------------------------------------
|
|
17428
|
+
/*
|
|
17429
|
+
TODO: !!! Get rid of
|
|
17430
|
+
|
|
17431
|
+
> You have not provided any `LlmExecutionTools`
|
|
17432
|
+
> This means that you won't be able to execute any prompts that require large language models like GPT-4 or Anthropic's Claude.
|
|
17433
|
+
>
|
|
17434
|
+
> Technically, it's not an error, but it's probably not what you want because it does not make sense to use Promptbook without language models.
|
|
17435
|
+
|
|
17436
|
+
*/
|
|
17437
|
+
},
|
|
17438
|
+
agentSource,
|
|
17439
|
+
});
|
|
17440
|
+
}
|
|
17441
|
+
constructor(options) {
|
|
17442
|
+
super(options);
|
|
17443
|
+
this.agentUrl = options.agentUrl;
|
|
17444
|
+
}
|
|
17445
|
+
/**
|
|
17446
|
+
* Calls the agent on agents remote server
|
|
17447
|
+
*/
|
|
17448
|
+
async callChatModel(prompt) {
|
|
17449
|
+
// Ensure we're working with a chat prompt
|
|
17450
|
+
if (prompt.modelRequirements.modelVariant !== 'CHAT') {
|
|
17451
|
+
throw new Error('Agents only supports chat prompts');
|
|
17452
|
+
}
|
|
17453
|
+
const bookResponse = await fetch(`${this.agentUrl}/api/chat?message=${encodeURIComponent(prompt.content)}`);
|
|
17454
|
+
// <- TODO: !!!! What about closed-source agents?
|
|
17455
|
+
// <- TODO: !!!! Maybe use promptbookFetch
|
|
17456
|
+
let content = '';
|
|
17457
|
+
if (!bookResponse.body) {
|
|
17458
|
+
content = await bookResponse.text();
|
|
17459
|
+
}
|
|
17460
|
+
else {
|
|
17461
|
+
// Note: [🐚] Problem with streaming is not here but it is not implemented on server
|
|
17462
|
+
const decoder = new TextDecoder();
|
|
17463
|
+
// Web ReadableStream is not async-iterable in many runtimes; use a reader.
|
|
17464
|
+
const reader = bookResponse.body.getReader();
|
|
17465
|
+
try {
|
|
17466
|
+
let doneReading = false;
|
|
17467
|
+
while (!doneReading) {
|
|
17468
|
+
const { done, value } = await reader.read();
|
|
17469
|
+
doneReading = !!done;
|
|
17470
|
+
if (value) {
|
|
17471
|
+
const textChunk = decoder.decode(value, { stream: true });
|
|
17472
|
+
// console.debug('RemoteAgent chunk:', textChunk);
|
|
17473
|
+
content += textChunk;
|
|
17474
|
+
}
|
|
17475
|
+
}
|
|
17476
|
+
// Flush any remaining decoder internal state
|
|
17477
|
+
content += decoder.decode();
|
|
17478
|
+
}
|
|
17479
|
+
finally {
|
|
17480
|
+
reader.releaseLock();
|
|
17481
|
+
}
|
|
17482
|
+
}
|
|
17483
|
+
// <- TODO: !!!! Transfer metadata
|
|
17484
|
+
const agentResult = {
|
|
17485
|
+
content,
|
|
17486
|
+
modelName: this.modelName,
|
|
17487
|
+
timing: {},
|
|
17488
|
+
usage: {},
|
|
17489
|
+
rawPromptContent: {},
|
|
17490
|
+
rawRequest: {},
|
|
17491
|
+
rawResponse: {},
|
|
17492
|
+
// <- TODO: !!!! Transfer and proxy the metadata
|
|
17493
|
+
};
|
|
17494
|
+
return agentResult;
|
|
17495
|
+
}
|
|
17496
|
+
}
|
|
17497
|
+
/**
|
|
17498
|
+
* TODO: [🧠][😰]Agent is not working with the parameters, should it be?
|
|
17499
|
+
* TODO: !!! Agent on remote server
|
|
17500
|
+
*/
|
|
17501
|
+
|
|
17145
17502
|
/**
|
|
17146
17503
|
* Registration of LLM provider metadata
|
|
17147
17504
|
*
|
|
@@ -17263,24 +17620,6 @@ const _AzureOpenAiMetadataRegistration = $llmToolsMetadataRegister.register({
|
|
|
17263
17620
|
* Note: [💞] Ignore a discrepancy between file name and entity name
|
|
17264
17621
|
*/
|
|
17265
17622
|
|
|
17266
|
-
/**
|
|
17267
|
-
* Detects if the code is running in jest environment
|
|
17268
|
-
*
|
|
17269
|
-
* Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
|
|
17270
|
-
*
|
|
17271
|
-
* @public exported from `@promptbook/utils`
|
|
17272
|
-
*/
|
|
17273
|
-
const $isRunningInJest = new Function(`
|
|
17274
|
-
try {
|
|
17275
|
-
return process.env.JEST_WORKER_ID !== undefined;
|
|
17276
|
-
} catch (e) {
|
|
17277
|
-
return false;
|
|
17278
|
-
}
|
|
17279
|
-
`);
|
|
17280
|
-
/**
|
|
17281
|
-
* TODO: [🎺]
|
|
17282
|
-
*/
|
|
17283
|
-
|
|
17284
17623
|
/**
|
|
17285
17624
|
* Registration of LLM provider metadata
|
|
17286
17625
|
*
|
|
@@ -17633,61 +17972,6 @@ function isValidPipelineString(pipelineString) {
|
|
|
17633
17972
|
* TODO: [🧠][🈴] Where is the best location for this file
|
|
17634
17973
|
*/
|
|
17635
17974
|
|
|
17636
|
-
/**
|
|
17637
|
-
* Tag function for notating a prompt as template literal
|
|
17638
|
-
*
|
|
17639
|
-
* Note: There are 3 similar functions:
|
|
17640
|
-
* 1) `prompt` for notating single prompt exported from `@promptbook/utils`
|
|
17641
|
-
* 2) `promptTemplate` alias for `prompt`
|
|
17642
|
-
* 3) `book` for notating and validating entire books exported from `@promptbook/utils`
|
|
17643
|
-
*
|
|
17644
|
-
* @param strings
|
|
17645
|
-
* @param values
|
|
17646
|
-
* @returns the prompt string
|
|
17647
|
-
* @public exported from `@promptbook/utils`
|
|
17648
|
-
*/
|
|
17649
|
-
function prompt(strings, ...values) {
|
|
17650
|
-
if (values.length === 0) {
|
|
17651
|
-
return spaceTrim$1(strings.join(''));
|
|
17652
|
-
}
|
|
17653
|
-
const stringsWithHiddenParameters = strings.map((stringsItem) =>
|
|
17654
|
-
// TODO: [0] DRY
|
|
17655
|
-
stringsItem.split('{').join(`${REPLACING_NONCE}beginbracket`).split('}').join(`${REPLACING_NONCE}endbracket`));
|
|
17656
|
-
const placeholderParameterNames = values.map((value, i) => `${REPLACING_NONCE}${i}`);
|
|
17657
|
-
const parameters = Object.fromEntries(values.map((value, i) => [placeholderParameterNames[i], value]));
|
|
17658
|
-
// Combine strings and values
|
|
17659
|
-
let pipelineString = stringsWithHiddenParameters.reduce((result, stringsItem, i) => placeholderParameterNames[i] === undefined
|
|
17660
|
-
? `${result}${stringsItem}`
|
|
17661
|
-
: `${result}${stringsItem}{${placeholderParameterNames[i]}}`, '');
|
|
17662
|
-
pipelineString = spaceTrim$1(pipelineString);
|
|
17663
|
-
try {
|
|
17664
|
-
pipelineString = templateParameters(pipelineString, parameters);
|
|
17665
|
-
}
|
|
17666
|
-
catch (error) {
|
|
17667
|
-
if (!(error instanceof PipelineExecutionError)) {
|
|
17668
|
-
throw error;
|
|
17669
|
-
}
|
|
17670
|
-
console.error({ pipelineString, parameters, placeholderParameterNames, error });
|
|
17671
|
-
throw new UnexpectedError(spaceTrim$1((block) => `
|
|
17672
|
-
Internal error in prompt template literal
|
|
17673
|
-
|
|
17674
|
-
${block(JSON.stringify({ strings, values }, null, 4))}}
|
|
17675
|
-
|
|
17676
|
-
`));
|
|
17677
|
-
}
|
|
17678
|
-
// TODO: [0] DRY
|
|
17679
|
-
pipelineString = pipelineString
|
|
17680
|
-
.split(`${REPLACING_NONCE}beginbracket`)
|
|
17681
|
-
.join('{')
|
|
17682
|
-
.split(`${REPLACING_NONCE}endbracket`)
|
|
17683
|
-
.join('}');
|
|
17684
|
-
return pipelineString;
|
|
17685
|
-
}
|
|
17686
|
-
/**
|
|
17687
|
-
* TODO: [🧠][🈴] Where is the best location for this file
|
|
17688
|
-
* Note: [💞] Ignore a discrepancy between file name and entity name
|
|
17689
|
-
*/
|
|
17690
|
-
|
|
17691
17975
|
/**
|
|
17692
17976
|
* Tag function for notating a pipeline with a book\`...\ notation as template literal
|
|
17693
17977
|
*
|
|
@@ -18223,7 +18507,7 @@ const OpenAiSdkTranspiler = {
|
|
|
18223
18507
|
});
|
|
18224
18508
|
|
|
18225
18509
|
const answer = response.choices[0].message.content;
|
|
18226
|
-
console.log('\\n🧠 ${agentName}:', answer, '\\n');
|
|
18510
|
+
console.log('\\n🧠 ${agentName /* <- TODO: [🕛] There should be `agentFullname` not `agentName` */}:', answer, '\\n');
|
|
18227
18511
|
|
|
18228
18512
|
chatHistory.push({ role: 'assistant', content: answer });
|
|
18229
18513
|
promptUser();
|
|
@@ -18242,7 +18526,7 @@ const OpenAiSdkTranspiler = {

 (async () => {
 await setupKnowledge();
-console.log("🤖 Chat with ${agentName} (type 'exit' to quit)\\n");
+console.log("🤖 Chat with ${agentName /* <- TODO: [🕛] There should be `agentFullname` not `agentName` */} (type 'exit' to quit)\\n");
 promptUser();
 })();
 `);
@@ -18289,7 +18573,7 @@ const OpenAiSdkTranspiler = {
 });

 const answer = response.choices[0].message.content;
-console.log('\\n🧠 ${agentName}:', answer, '\\n');
+console.log('\\n🧠 ${agentName /* <- TODO: [🕛] There should be `agentFullname` not `agentName` */}:', answer, '\\n');

 chatHistory.push({ role: 'assistant', content: answer });
 promptUser();
@@ -18306,7 +18590,7 @@ const OpenAiSdkTranspiler = {
 });
 }

-console.log("🤖 Chat with ${agentName} (type 'exit' to quit)\\n");
+console.log("🤖 Chat with ${agentName /* <- TODO: [🕛] There should be `agentFullname` not `agentName` */} (type 'exit' to quit)\\n");
 promptUser();

 `);
@@ -18314,25 +18598,6 @@ const OpenAiSdkTranspiler = {
 },
 };

-/**
- * Returns information about the current runtime environment
- *
- * Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environments
- *
- * @public exported from `@promptbook/utils`
- */
-function $detectRuntimeEnvironment() {
-    return {
-        isRunningInBrowser: $isRunningInBrowser(),
-        isRunningInJest: $isRunningInJest(),
-        isRunningInNode: $isRunningInNode(),
-        isRunningInWebWorker: $isRunningInWebWorker(),
-    };
-}
-/**
- * TODO: [🎺] Also detect and report node version here
- */
-
 /**
  * Provide information about Promptbook, engine version, book language version, servers, ...
  *
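For reference, the removed `$detectRuntimeEnvironment` helper only aggregated the individual `$isRunningIn*` checks. A small usage sketch, assuming the helper stays available from `@promptbook/utils` as its JSDoc states (this diff does not guarantee that):

    import { $detectRuntimeEnvironment } from '@promptbook/utils';

    const environment = $detectRuntimeEnvironment();
    if (environment.isRunningInNode) {
        // e.g. enable filesystem-backed caching only when running under Node.js
    }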
@@ -18360,8 +18625,7 @@ function aboutPromptbookInformation(options) {

 ## Servers

-${block(REMOTE_SERVER_URLS.map(({ title, urls,
-${isAnonymousModeAllowed ? '🐱💻 ' : ''} ${urls.join(', ')}
+${block(REMOTE_SERVER_URLS.map(({ title, urls, description }, index) => `${index + 1}. ${title} ${description} ${urls.join(', ')}
 `).join('\n'))}
 `);
 fullInfoPieces.push(serversInfo);
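With this change, each entry of `REMOTE_SERVER_URLS` is rendered as a numbered line combining its title, description and URLs, and the previous `isAnonymousModeAllowed` emoji prefix is dropped. Schematically (actual titles, descriptions and URLs come from `REMOTE_SERVER_URLS` and are not shown in this diff):

    ## Servers

    1. <title> <description> <url1>, <url2>
    2. <title> <description> <url1>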
@@ -18404,6 +18668,30 @@ function $randomItem(...items) {
  * TODO: [🤶] Maybe export through `@promptbook/utils` or `@promptbook/random` package
  */

+const PERSONALITIES = [
+    'Friendly and helpful AI agent.',
+    'Professional and efficient virtual assistant.',
+    'Creative and imaginative digital companion.',
+    'Knowledgeable and informative AI guide.',
+    'Empathetic and understanding support bot.',
+    'Energetic and enthusiastic conversational partner.',
+    'Calm and patient virtual helper.',
+    'Curious and inquisitive AI explorer.',
+    'Witty and humorous digital friend.',
+    'Serious and focused AI consultant.',
+];
+/**
+ * @@@@
+ *
+ * @private internal helper function
+ */
+function $randomAgentPersona() {
+    return $randomItem(...PERSONALITIES);
+}
+/**
+ * TODO: [🤶] Maybe export through `@promptbook/utils` or `@promptbook/random` package
+ */
+
 const FIRSTNAMES = [
     'Paul',
     'George',
@@ -18464,30 +18752,6 @@ function $randomFullnameWithColor() {
  * TODO: [🤶] Maybe export through `@promptbook/utils` or `@promptbook/random` package
  */

-const PERSONALITIES = [
-    'Friendly and helpful AI agent.',
-    'Professional and efficient virtual assistant.',
-    'Creative and imaginative digital companion.',
-    'Knowledgeable and informative AI guide.',
-    'Empathetic and understanding support bot.',
-    'Energetic and enthusiastic conversational partner.',
-    'Calm and patient virtual helper.',
-    'Curious and inquisitive AI explorer.',
-    'Witty and humorous digital friend.',
-    'Serious and focused AI consultant.',
-];
-/**
- * @@@@
- *
- * @private internal helper function
- */
-function $randomAgentPersona() {
-    return $randomItem(...PERSONALITIES);
-}
-/**
- * TODO: [🤶] Maybe export through `@promptbook/utils` or `@promptbook/random` package
- */
-
 /**
  * Generates boilerplate for a new agent book
  *
@@ -18512,7 +18776,7 @@ function $generateBookBoilerplate(options) {
 const agentSource = validateBook(spaceTrim$1((block) => `
 ${agentName}

-META COLOR ${color || '#3498db' /* <- TODO: !!!! Best default color */}
+META COLOR ${color || '#3498db' /* <- TODO: [🧠] !!!! Best default color */}
 PERSONA ${block(personaDescription)}
 `));
 return agentSource;
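A usage sketch of `$generateBookBoilerplate`, which this bundle exports; the option names `agentName` and `color` are inferred from the template above and may differ from the real options type:

    import { $generateBookBoilerplate } from '@promptbook/core';

    const agentSource = $generateBookBoilerplate({ agentName: 'Support Agent', color: '#3498db' });
    // Roughly produces a validated book source such as:
    //
    //     Support Agent
    //
    //     META COLOR #3498db
    //     PERSONA <persona description, e.g. one of the PERSONALITIES strings above>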
@@ -18521,5 +18785,5 @@ function $generateBookBoilerplate(options) {
  * TODO: [🤶] Maybe export through `@promptbook/utils` or `@promptbook/random` package
  */

-export { $bookTranspilersRegister, $generateBookBoilerplate, $llmToolsMetadataRegister, $llmToolsRegister, $scrapersMetadataRegister, $scrapersRegister, ADMIN_EMAIL, ADMIN_GITHUB_NAME, API_REQUEST_TIMEOUT, AbstractFormatError, Agent, AgentCollectionInSupabase, AgentLlmExecutionTools, AuthenticationError, BIG_DATASET_TRESHOLD, BOOK_LANGUAGE_VERSION, BlackholeStorage, BoilerplateError, BoilerplateFormfactorDefinition, CLAIM, CLI_APP_ID, CallbackInterfaceTools, ChatbotFormfactorDefinition, CollectionError, CompletionFormfactorDefinition, CsvFormatError, CsvFormatParser, DEFAULT_AGENTS_DIRNAME, DEFAULT_BOOK, DEFAULT_BOOKS_DIRNAME, DEFAULT_BOOK_OUTPUT_PARAMETER_NAME, DEFAULT_BOOK_TITLE, DEFAULT_CSV_SETTINGS, DEFAULT_DOWNLOAD_CACHE_DIRNAME, DEFAULT_EXECUTION_CACHE_DIRNAME, DEFAULT_GET_PIPELINE_COLLECTION_FUNCTION_NAME, DEFAULT_INTERMEDIATE_FILES_STRATEGY, DEFAULT_IS_AUTO_INSTALLED, DEFAULT_IS_VERBOSE, DEFAULT_MAX_EXECUTION_ATTEMPTS, DEFAULT_MAX_FILE_SIZE, DEFAULT_MAX_KNOWLEDGE_SOURCES_SCRAPING_DEPTH, DEFAULT_MAX_KNOWLEDGE_SOURCES_SCRAPING_TOTAL, DEFAULT_MAX_PARALLEL_COUNT, DEFAULT_MAX_REQUESTS_PER_MINUTE, DEFAULT_PIPELINE_COLLECTION_BASE_FILENAME, DEFAULT_PROMPT_TASK_TITLE, DEFAULT_REMOTE_SERVER_URL, DEFAULT_SCRAPE_CACHE_DIRNAME, DEFAULT_TASK_SIMULATED_DURATION_MS, DEFAULT_TASK_TITLE, DatabaseError, EXPECTATION_UNITS, EnvironmentMismatchError, ExecutionReportStringOptionsDefaults, ExpectError, FAILED_VALUE_PLACEHOLDER, FORMFACTOR_DEFINITIONS, FormattedBookInMarkdownTranspiler, GENERIC_PIPELINE_INTERFACE, GeneratorFormfactorDefinition, GenericFormfactorDefinition, ImageGeneratorFormfactorDefinition, KnowledgeScrapeError, LimitReachedError, MANDATORY_CSV_SETTINGS, MAX_FILENAME_LENGTH, MODEL_ORDERS, MODEL_TRUST_LEVELS, MODEL_VARIANTS, MatcherFormfactorDefinition, MemoryStorage, MissingToolsError, MultipleLlmExecutionTools, NAME, NonTaskSectionTypes, NotAllowed, NotFoundError, NotYetImplementedCommitmentDefinition, NotYetImplementedError, ORDER_OF_PIPELINE_JSON, OpenAiSdkTranspiler, PADDING_LINES, PENDING_VALUE_PLACEHOLDER, PLAYGROUND_APP_ID, PROMPTBOOK_CHAT_COLOR, PROMPTBOOK_COLOR, PROMPTBOOK_ENGINE_VERSION, PROMPTBOOK_ERRORS, PROMPTBOOK_LOGO_URL, PROMPTBOOK_SYNTAX_COLORS, ParseError, PipelineExecutionError, PipelineLogicError, PipelineUrlError, PrefixStorage, PromptbookFetchError, REMOTE_SERVER_URLS, RESERVED_PARAMETER_NAMES, SET_IS_VERBOSE, SectionTypes, SheetsFormfactorDefinition, TaskTypes, TextFormatParser, TranslatorFormfactorDefinition, UNCERTAIN_USAGE, UNCERTAIN_ZERO_VALUE, USER_CHAT_COLOR, UnexpectedError, WrappedError, ZERO_USAGE, ZERO_VALUE, _AgentMetadata, _AgentRegistration, _AnthropicClaudeMetadataRegistration, _AzureOpenAiMetadataRegistration, _BoilerplateScraperMetadataRegistration, _DeepseekMetadataRegistration, _DocumentScraperMetadataRegistration, _GoogleMetadataRegistration, _LegacyDocumentScraperMetadataRegistration, _MarkdownScraperMetadataRegistration, _MarkitdownScraperMetadataRegistration, _OllamaMetadataRegistration, _OpenAiAssistantMetadataRegistration, _OpenAiCompatibleMetadataRegistration, _OpenAiMetadataRegistration, _PdfScraperMetadataRegistration, _WebsiteScraperMetadataRegistration, aboutPromptbookInformation, addUsage, book, cacheLlmTools, compilePipeline, computeCosineSimilarity, countUsage, createAgentLlmExecutionTools, createAgentModelRequirements, createAgentModelRequirementsWithCommitments, createBasicAgentModelRequirements, createEmptyAgentModelRequirements, createLlmToolsFromConfiguration, createPipelineCollectionFromJson, createPipelineCollectionFromPromise, createPipelineCollectionFromUrl, createPipelineExecutor, createPipelineSubcollection, embeddingVectorToString, executionReportJsonToString, extractParameterNamesFromTask, filterModels, generatePlaceholderAgentProfileImageUrl, getAllCommitmentDefinitions, getAllCommitmentTypes, getCommitmentDefinition, getPipelineInterface, getSingleLlmExecutionTools, identificationToPromptbookToken, isCommitmentSupported, isPassingExpectations, isPipelineImplementingInterface, isPipelineInterfacesEqual, isPipelinePrepared, isValidBook, isValidPipelineString, joinLlmExecutionTools, limitTotalUsage, makeKnowledgeSourceHandler, migratePipeline, padBook, parseAgentSource, parseParameters, parsePipeline, pipelineCollectionToJson, pipelineJsonToString, prepareKnowledgePieces, preparePersona, preparePipeline, prettifyPipelineString, promptbookFetch, promptbookTokenToIdentification, unpreparePipeline, usageToHuman, usageToWorktime, validateBook, validatePipeline, validatePipelineString };
+export { $bookTranspilersRegister, $generateBookBoilerplate, $llmToolsMetadataRegister, $llmToolsRegister, $scrapersMetadataRegister, $scrapersRegister, ADMIN_EMAIL, ADMIN_GITHUB_NAME, API_REQUEST_TIMEOUT, AbstractFormatError, Agent, AgentCollectionInSupabase, AgentLlmExecutionTools, AuthenticationError, BIG_DATASET_TRESHOLD, BOOK_LANGUAGE_VERSION, BlackholeStorage, BoilerplateError, BoilerplateFormfactorDefinition, CLAIM, CLI_APP_ID, CallbackInterfaceTools, ChatbotFormfactorDefinition, CollectionError, CompletionFormfactorDefinition, CsvFormatError, CsvFormatParser, DEFAULT_AGENTS_DIRNAME, DEFAULT_BOOK, DEFAULT_BOOKS_DIRNAME, DEFAULT_BOOK_OUTPUT_PARAMETER_NAME, DEFAULT_BOOK_TITLE, DEFAULT_CSV_SETTINGS, DEFAULT_DOWNLOAD_CACHE_DIRNAME, DEFAULT_EXECUTION_CACHE_DIRNAME, DEFAULT_GET_PIPELINE_COLLECTION_FUNCTION_NAME, DEFAULT_INTERMEDIATE_FILES_STRATEGY, DEFAULT_IS_AUTO_INSTALLED, DEFAULT_IS_VERBOSE, DEFAULT_MAX_EXECUTION_ATTEMPTS, DEFAULT_MAX_FILE_SIZE, DEFAULT_MAX_KNOWLEDGE_SOURCES_SCRAPING_DEPTH, DEFAULT_MAX_KNOWLEDGE_SOURCES_SCRAPING_TOTAL, DEFAULT_MAX_PARALLEL_COUNT, DEFAULT_MAX_REQUESTS_PER_MINUTE, DEFAULT_PIPELINE_COLLECTION_BASE_FILENAME, DEFAULT_PROMPT_TASK_TITLE, DEFAULT_REMOTE_SERVER_URL, DEFAULT_SCRAPE_CACHE_DIRNAME, DEFAULT_TASK_SIMULATED_DURATION_MS, DEFAULT_TASK_TITLE, DatabaseError, EXPECTATION_UNITS, EnvironmentMismatchError, ExecutionReportStringOptionsDefaults, ExpectError, FAILED_VALUE_PLACEHOLDER, FORMFACTOR_DEFINITIONS, FormattedBookInMarkdownTranspiler, GENERIC_PIPELINE_INTERFACE, GeneratorFormfactorDefinition, GenericFormfactorDefinition, ImageGeneratorFormfactorDefinition, KnowledgeScrapeError, LimitReachedError, MANDATORY_CSV_SETTINGS, MAX_FILENAME_LENGTH, MODEL_ORDERS, MODEL_TRUST_LEVELS, MODEL_VARIANTS, MatcherFormfactorDefinition, MemoryStorage, MissingToolsError, MultipleLlmExecutionTools, NAME, NonTaskSectionTypes, NotAllowed, NotFoundError, NotYetImplementedCommitmentDefinition, NotYetImplementedError, ORDER_OF_PIPELINE_JSON, OpenAiSdkTranspiler, PADDING_LINES, PENDING_VALUE_PLACEHOLDER, PLAYGROUND_APP_ID, PROMPTBOOK_CHAT_COLOR, PROMPTBOOK_COLOR, PROMPTBOOK_ENGINE_VERSION, PROMPTBOOK_ERRORS, PROMPTBOOK_LOGO_URL, PROMPTBOOK_SYNTAX_COLORS, ParseError, PipelineExecutionError, PipelineLogicError, PipelineUrlError, PrefixStorage, PromptbookFetchError, REMOTE_SERVER_URLS, RESERVED_PARAMETER_NAMES, RemoteAgent, SET_IS_VERBOSE, SectionTypes, SheetsFormfactorDefinition, TaskTypes, TextFormatParser, TranslatorFormfactorDefinition, UNCERTAIN_USAGE, UNCERTAIN_ZERO_VALUE, USER_CHAT_COLOR, UnexpectedError, WrappedError, ZERO_USAGE, ZERO_VALUE, _AgentMetadata, _AgentRegistration, _AnthropicClaudeMetadataRegistration, _AzureOpenAiMetadataRegistration, _BoilerplateScraperMetadataRegistration, _DeepseekMetadataRegistration, _DocumentScraperMetadataRegistration, _GoogleMetadataRegistration, _LegacyDocumentScraperMetadataRegistration, _MarkdownScraperMetadataRegistration, _MarkitdownScraperMetadataRegistration, _OllamaMetadataRegistration, _OpenAiAssistantMetadataRegistration, _OpenAiCompatibleMetadataRegistration, _OpenAiMetadataRegistration, _PdfScraperMetadataRegistration, _WebsiteScraperMetadataRegistration, aboutPromptbookInformation, addUsage, book, cacheLlmTools, compilePipeline, computeAgentHash, computeCosineSimilarity, countUsage, createAgentLlmExecutionTools, createAgentModelRequirements, createAgentModelRequirementsWithCommitments, createBasicAgentModelRequirements, createDefaultAgentName, createEmptyAgentModelRequirements, createLlmToolsFromConfiguration, createPipelineCollectionFromJson, createPipelineCollectionFromPromise, createPipelineCollectionFromUrl, createPipelineExecutor, createPipelineSubcollection, embeddingVectorToString, executionReportJsonToString, extractParameterNamesFromTask, filterModels, generatePlaceholderAgentProfileImageUrl, getAllCommitmentDefinitions, getAllCommitmentTypes, getCommitmentDefinition, getPipelineInterface, getSingleLlmExecutionTools, identificationToPromptbookToken, isCommitmentSupported, isPassingExpectations, isPipelineImplementingInterface, isPipelineInterfacesEqual, isPipelinePrepared, isValidBook, isValidPipelineString, joinLlmExecutionTools, limitTotalUsage, makeKnowledgeSourceHandler, migratePipeline, normalizeAgentName, padBook, parseAgentSource, parseParameters, parsePipeline, pipelineCollectionToJson, pipelineJsonToString, prepareKnowledgePieces, preparePersona, preparePipeline, prettifyPipelineString, promptbookFetch, promptbookTokenToIdentification, unpreparePipeline, usageToHuman, usageToWorktime, validateBook, validatePipeline, validatePipelineString };
 //# sourceMappingURL=index.es.js.map
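The updated export list adds `RemoteAgent`, `computeAgentHash`, `createDefaultAgentName` and `normalizeAgentName` alongside the previously exported names. A minimal import sketch; call signatures are not visible in this diff, so none are assumed:

    import {
        RemoteAgent,
        computeAgentHash,
        createDefaultAgentName,
        normalizeAgentName,
    } from '@promptbook/core';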