@promptbook/core 0.103.0-47 → 0.103.0-48

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31)
  1. package/esm/index.es.js +796 -575
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/core.index.d.ts +6 -0
  4. package/esm/typings/src/_packages/types.index.d.ts +2 -0
  5. package/esm/typings/src/book-2.0/agent-source/AgentBasicInformation.d.ts +7 -3
  6. package/esm/typings/src/book-2.0/agent-source/AgentSourceParseResult.d.ts +2 -1
  7. package/esm/typings/src/book-2.0/agent-source/computeAgentHash.d.ts +8 -0
  8. package/esm/typings/src/book-2.0/agent-source/computeAgentHash.test.d.ts +1 -0
  9. package/esm/typings/src/book-2.0/agent-source/createDefaultAgentName.d.ts +8 -0
  10. package/esm/typings/src/book-2.0/agent-source/normalizeAgentName.d.ts +9 -0
  11. package/esm/typings/src/book-2.0/agent-source/normalizeAgentName.test.d.ts +1 -0
  12. package/esm/typings/src/book-2.0/agent-source/parseAgentSourceWithCommitments.d.ts +1 -1
  13. package/esm/typings/src/collection/agent-collection/constructors/agent-collection-in-supabase/AgentsDatabaseSchema.d.ts +57 -32
  14. package/esm/typings/src/llm-providers/_common/utils/assertUniqueModels.d.ts +12 -0
  15. package/esm/typings/src/llm-providers/agent/Agent.d.ts +7 -2
  16. package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.d.ts +4 -0
  17. package/esm/typings/src/llm-providers/agent/RemoteAgent.d.ts +2 -2
  18. package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionTools.d.ts +24 -3
  19. package/esm/typings/src/llm-providers/openai/openai-models.test.d.ts +4 -0
  20. package/esm/typings/src/remote-server/startAgentServer.d.ts +1 -1
  21. package/esm/typings/src/remote-server/startRemoteServer.d.ts +1 -2
  22. package/esm/typings/src/transpilers/openai-sdk/register.d.ts +1 -1
  23. package/esm/typings/src/types/typeAliases.d.ts +6 -0
  24. package/esm/typings/src/utils/normalization/normalize-to-kebab-case.d.ts +2 -0
  25. package/esm/typings/src/utils/normalization/normalizeTo_PascalCase.d.ts +3 -0
  26. package/esm/typings/src/utils/normalization/normalizeTo_camelCase.d.ts +2 -0
  27. package/esm/typings/src/utils/normalization/titleToName.d.ts +2 -0
  28. package/esm/typings/src/version.d.ts +1 -1
  29. package/package.json +1 -1
  30. package/umd/index.umd.js +803 -579
  31. package/umd/index.umd.js.map +1 -1
package/esm/index.es.js CHANGED
@@ -1,11 +1,11 @@
1
+ import { SHA256 } from 'crypto-js';
2
+ import hexEncoder from 'crypto-js/enc-hex';
1
3
  import spaceTrim$1, { spaceTrim as spaceTrim$2 } from 'spacetrim';
2
4
  import { randomBytes } from 'crypto';
3
5
  import { Subject, BehaviorSubject } from 'rxjs';
4
- import { forTime, forEver } from 'waitasecond';
5
- import hexEncoder from 'crypto-js/enc-hex';
6
+ import { forTime } from 'waitasecond';
6
7
  import sha256 from 'crypto-js/sha256';
7
8
  import { basename, join, dirname, isAbsolute } from 'path';
8
- import { SHA256 } from 'crypto-js';
9
9
  import { lookup, extension } from 'mime-types';
10
10
  import { parse, unparse } from 'papaparse';
11
11
  import moment from 'moment';
@@ -27,12 +27,21 @@ const BOOK_LANGUAGE_VERSION = '2.0.0';
27
27
  * @generated
28
28
  * @see https://github.com/webgptorg/promptbook
29
29
  */
30
- const PROMPTBOOK_ENGINE_VERSION = '0.103.0-47';
30
+ const PROMPTBOOK_ENGINE_VERSION = '0.103.0-48';
31
31
  /**
32
32
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
33
33
  * Note: [šŸ’ž] Ignore a discrepancy between file name and entity name
34
34
  */
35
35
 
36
/**
 * Computes SHA-256 hash of the agent source
 *
 * The source is hashed as a plain UTF-8 string and the digest is returned
 * as a lowercase hex-encoded string.
 *
 * @param agentSource The agent source text to fingerprint
 * @returns Hex-encoded SHA-256 digest of the agent source
 * @public exported from `@promptbook/core`
 */
function computeAgentHash(agentSource) {
    // Bug fix: the previous implementation called `hexEncoder.parse(agentSource)`,
    // which interprets the source as a HEX string — crypto-js `Hex.parse` silently
    // ignores non-hex characters, so most of the agent source was dropped and
    // different sources could hash to the same value.
    // `SHA256` accepts a UTF-8 string directly; `hexEncoder` belongs on the
    // *output* side as the encoding passed to `toString`.
    return SHA256(agentSource /* <- TODO: !!!!! spaceTrim */).toString(hexEncoder);
}
44
+
36
45
  var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [šŸ†] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> 
{knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [šŸ†] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write 
just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModels",description:"List of available model names together with their descriptions as JSON",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelsRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n```json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpful assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n```\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting 
models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n```json\n{availableModels}\n```\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelsRequirements",format:"JSON",dependentParameterNames:["availableModels","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModels}` List of available model names together with their descriptions as JSON\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelsRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n\\`\\`\\`json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpful assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are 
the available models:\n\n\\`\\`\\`json\n{availableModels}\n\\`\\`\\`\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelsRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"āœ Convert Knowledge-piece to title\" but \"āœ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## 
Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"āœ Convert Knowledge-piece to title\" but \"āœ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
37
46
 
38
47
  /**
@@ -4325,6 +4334,8 @@ function removeDiacritics(input) {
4325
4334
  /**
4326
4335
  * Converts a given text to kebab-case format.
4327
4336
  *
4337
+ * Note: [šŸ”‚] This function is idempotent.
4338
+ *
4328
4339
  * @param text The text to be converted.
4329
4340
  * @returns The kebab-case formatted string.
4330
4341
  * @example 'hello-world'
@@ -4480,6 +4491,8 @@ function removeEmojis(text) {
4480
4491
  /**
4481
4492
  * Converts a title string into a normalized name.
4482
4493
  *
4494
+ * Note: [šŸ”‚] This function is idempotent.
4495
+ *
4483
4496
  * @param value The title string to be converted to a name.
4484
4497
  * @returns A normalized name derived from the input title.
4485
4498
  * @example 'Hello World!' -> 'hello-world'
@@ -8759,6 +8772,7 @@ class PersonaCommitmentDefinition extends BaseCommitmentDefinition {
8759
8772
  // Keep everything after the PERSONA section
8760
8773
  cleanedMessage = lines.slice(personaEndIndex).join('\n').trim();
8761
8774
  }
8775
+ // TODO: [šŸ•›] There should be `agentFullname` not `agentName`
8762
8776
  // Create new system message with persona at the beginning
8763
8777
  // Format: "You are {agentName}\n{personaContent}"
8764
8778
  // The # PERSONA comment will be removed later by removeCommentsFromSystemMessage
@@ -9580,6 +9594,8 @@ async function createAgentModelRequirementsWithCommitments(agentSource, modelNam
9580
9594
  /**
9581
9595
  * Normalizes a given text to camelCase format.
9582
9596
  *
9597
+ * Note: [šŸ”‚] This function is idempotent.
9598
+ *
9583
9599
  * @param text The text to be normalized.
9584
9600
  * @param _isFirstLetterCapital Whether the first letter should be capitalized.
9585
9601
  * @returns The camelCase formatted string.
@@ -9668,132 +9684,543 @@ function generatePlaceholderAgentProfileImageUrl(agentName) {
9668
9684
  */
9669
9685
 
9670
9686
  /**
9671
- * Parses basic information from agent source
9687
+ * Creates a Mermaid graph based on the promptbook
9672
9688
  *
9673
- * There are 2 similar functions:
9674
- * - `parseAgentSource` which is a lightweight parser for agent source, it parses basic information and its purpose is to be quick and synchronous. The commitments there are hardcoded.
9675
- * - `createAgentModelRequirements` which is an asynchronous function that creates model requirements it applies each commitment one by one and works asynchronously.
9689
+ * Note: The result is not wrapped in a Markdown code block
9676
9690
  *
9677
- * @public exported from `@promptbook/core`
9691
+ * @public exported from `@promptbook/utils`
9678
9692
  */
9679
- function parseAgentSource(agentSource) {
9680
- const parseResult = parseAgentSourceWithCommitments(agentSource);
9681
- // Find PERSONA and META commitments
9682
- let personaDescription = null;
9683
- for (const commitment of parseResult.commitments) {
9684
- if (commitment.type !== 'PERSONA') {
9685
- continue;
9693
+ function renderPromptbookMermaid(pipelineJson, options) {
9694
+ const { linkTask = () => null } = options || {};
9695
+ const MERMAID_PREFIX = 'pipeline_';
9696
+ const MERMAID_KNOWLEDGE_NAME = MERMAID_PREFIX + 'knowledge';
9697
+ const MERMAID_RESERVED_NAME = MERMAID_PREFIX + 'reserved';
9698
+ const MERMAID_INPUT_NAME = MERMAID_PREFIX + 'input';
9699
+ const MERMAID_OUTPUT_NAME = MERMAID_PREFIX + 'output';
9700
+ const parameterNameToTaskName = (parameterName) => {
9701
+ if (parameterName === 'knowledge') {
9702
+ return MERMAID_KNOWLEDGE_NAME;
9686
9703
  }
9687
- if (personaDescription === null) {
9688
- personaDescription = '';
9704
+ else if (RESERVED_PARAMETER_NAMES.includes(parameterName)) {
9705
+ return MERMAID_RESERVED_NAME;
9689
9706
  }
9690
- else {
9691
- personaDescription += `\n\n${personaDescription}`;
9707
+ const parameter = pipelineJson.parameters.find((parameter) => parameter.name === parameterName);
9708
+ if (!parameter) {
9709
+ throw new UnexpectedError(`Could not find {${parameterName}}`);
9710
+ // <- TODO: This causes problems when {knowledge} and other reserved parameters are used
9692
9711
  }
9693
- personaDescription += commitment.content;
9694
- }
9695
- const meta = {};
9696
- for (const commitment of parseResult.commitments) {
9697
- if (commitment.type !== 'META') {
9698
- continue;
9712
+ if (parameter.isInput) {
9713
+ return MERMAID_INPUT_NAME;
9699
9714
  }
9700
- // Parse META commitments - format is "META TYPE content"
9701
- const metaTypeRaw = commitment.content.split(' ')[0] || 'NONE';
9702
- const metaType = normalizeTo_camelCase(metaTypeRaw);
9703
- meta[metaType] = spaceTrim$1(commitment.content.substring(metaTypeRaw.length));
9704
- }
9705
- // Generate gravatar fallback if no meta image specified
9706
- if (!meta.image) {
9707
- meta.image = generatePlaceholderAgentProfileImageUrl(parseResult.agentName || '!!');
9708
- }
9709
- // Parse parameters using unified approach - both @Parameter and {parameter} notations
9710
- // are treated as the same syntax feature with unified representation
9711
- const parameters = parseParameters(agentSource);
9712
- return {
9713
- agentName: parseResult.agentName,
9714
- personaDescription,
9715
- meta,
9716
- parameters,
9715
+ const task = pipelineJson.tasks.find((task) => task.resultingParameterName === parameterName);
9716
+ if (!task) {
9717
+ throw new Error(`Could not find task for {${parameterName}}`);
9718
+ }
9719
+ return MERMAID_PREFIX + (task.name || normalizeTo_camelCase('task-' + titleToName(task.title)));
9717
9720
  };
9721
+ const inputAndIntermediateParametersMermaid = pipelineJson.tasks
9722
+ .flatMap(({ title, dependentParameterNames, resultingParameterName }) => [
9723
+ `${parameterNameToTaskName(resultingParameterName)}("${title}")`,
9724
+ ...dependentParameterNames.map((dependentParameterName) => `${parameterNameToTaskName(dependentParameterName)}--"{${dependentParameterName}}"-->${parameterNameToTaskName(resultingParameterName)}`),
9725
+ ])
9726
+ .join('\n');
9727
+ const outputParametersMermaid = pipelineJson.parameters
9728
+ .filter(({ isOutput }) => isOutput)
9729
+ .map(({ name }) => `${parameterNameToTaskName(name)}--"{${name}}"-->${MERMAID_OUTPUT_NAME}`)
9730
+ .join('\n');
9731
+ const linksMermaid = pipelineJson.tasks
9732
+ .map((task) => {
9733
+ const link = linkTask(task);
9734
+ if (link === null) {
9735
+ return '';
9736
+ }
9737
+ const { href, title } = link;
9738
+ const taskName = parameterNameToTaskName(task.resultingParameterName);
9739
+ return `click ${taskName} href "${href}" "${title}";`;
9740
+ })
9741
+ .filter((line) => line !== '')
9742
+ .join('\n');
9743
+ const interactionPointsMermaid = Object.entries({
9744
+ [MERMAID_INPUT_NAME]: 'Input',
9745
+ [MERMAID_OUTPUT_NAME]: 'Output',
9746
+ [MERMAID_RESERVED_NAME]: 'Other',
9747
+ [MERMAID_KNOWLEDGE_NAME]: 'Knowledge',
9748
+ })
9749
+ .filter(([MERMAID_NAME]) => (inputAndIntermediateParametersMermaid + outputParametersMermaid).includes(MERMAID_NAME))
9750
+ .map(([MERMAID_NAME, title]) => `${MERMAID_NAME}((${title})):::${MERMAID_NAME}`)
9751
+ .join('\n');
9752
+ const promptbookMermaid = spaceTrim$2((block) => `
9753
+
9754
+ %% šŸ”® Tip: Open this on GitHub or in the VSCode website to see the Mermaid graph visually
9755
+
9756
+ flowchart LR
9757
+ subgraph "${pipelineJson.title}"
9758
+
9759
+ %% Basic configuration
9760
+ direction TB
9761
+
9762
+ %% Interaction points from pipeline to outside
9763
+ ${block(interactionPointsMermaid)}
9764
+
9765
+ %% Input and intermediate parameters
9766
+ ${block(inputAndIntermediateParametersMermaid)}
9767
+
9768
+
9769
+ %% Output parameters
9770
+ ${block(outputParametersMermaid)}
9771
+
9772
+ %% Links
9773
+ ${block(linksMermaid)}
9774
+
9775
+ %% Styles
9776
+ classDef ${MERMAID_INPUT_NAME} color: grey;
9777
+ classDef ${MERMAID_OUTPUT_NAME} color: grey;
9778
+ classDef ${MERMAID_RESERVED_NAME} color: grey;
9779
+ classDef ${MERMAID_KNOWLEDGE_NAME} color: grey;
9780
+
9781
+ end;
9782
+
9783
+ `);
9784
+ return promptbookMermaid;
9718
9785
  }
9719
9786
  /**
9720
- * TODO: [šŸ•›] Unite `AgentBasicInformation`, `ChatParticipant`, `LlmExecutionTools` + `LlmToolsMetadata`
9787
+ * TODO: [🧠] FOREACH in mermaid graph
9788
+ * TODO: [🧠] Knowledge in mermaid graph
9789
+ * TODO: [🧠] Personas in mermaid graph
9790
+ * TODO: Maybe use some Mermaid package instead of string templating
9791
+ * TODO: [šŸ•Œ] When more than 2 functionalities, split into separate functions
9721
9792
  */
9722
9793
 
9723
9794
  /**
9724
- * Creates model requirements for an agent based on its source
9795
+ * Tag function for notating a prompt as template literal
9725
9796
  *
9726
- * There are 2 similar functions:
9727
- * - `parseAgentSource` which is a lightweight parser for agent source, it parses basic information and its purpose is to be quick and synchronous. The commitments there are hardcoded.
9728
- * - `createAgentModelRequirements` which is an asynchronous function that creates model requirements it applies each commitment one by one and works asynchronous.
9797
+ * Note: There are 3 similar functions:
9798
+ * 1) `prompt` for notating single prompt exported from `@promptbook/utils`
9799
+ * 2) `promptTemplate` alias for `prompt`
9800
+ * 3) `book` for notating and validating entire books exported from `@promptbook/utils`
9729
9801
  *
9730
- * @public exported from `@promptbook/core`
9802
+ * @param strings
9803
+ * @param values
9804
+ * @returns the prompt string
9805
+ * @public exported from `@promptbook/utils`
9731
9806
  */
9732
- async function createAgentModelRequirements(agentSource, modelName, availableModels, llmTools) {
9733
- // If availableModels are provided and no specific modelName is given,
9734
- // use preparePersona to select the best model
9735
- if (availableModels && !modelName && llmTools) {
9736
- const selectedModelName = await selectBestModelUsingPersona(agentSource, llmTools);
9737
- return createAgentModelRequirementsWithCommitments(agentSource, selectedModelName);
9807
+ function prompt(strings, ...values) {
9808
+ if (values.length === 0) {
9809
+ return spaceTrim$1(strings.join(''));
9738
9810
  }
9739
- // Use the new commitment-based system with provided or default model
9740
- return createAgentModelRequirementsWithCommitments(agentSource, modelName);
9741
- }
9742
- /**
9743
- * Selects the best model using the preparePersona function
9744
- * This directly uses preparePersona to ensure DRY principle
9745
- *
9746
- * @param agentSource The agent source to derive persona description from
9747
- * @param llmTools LLM tools for preparing persona
9748
- * @returns The name of the best selected model
9749
- * @private function of `createAgentModelRequirements`
9750
- */
9751
- async function selectBestModelUsingPersona(agentSource, llmTools) {
9752
- var _a;
9753
- // Parse agent source to get persona description
9754
- const { agentName, personaDescription } = parseAgentSource(agentSource);
9755
- // Use agent name as fallback if no persona description is available
9756
- const description = personaDescription || agentName || 'AI Agent';
9811
+ const stringsWithHiddenParameters = strings.map((stringsItem) =>
9812
+ // TODO: [0] DRY
9813
+ stringsItem.split('{').join(`${REPLACING_NONCE}beginbracket`).split('}').join(`${REPLACING_NONCE}endbracket`));
9814
+ const placeholderParameterNames = values.map((value, i) => `${REPLACING_NONCE}${i}`);
9815
+ const parameters = Object.fromEntries(values.map((value, i) => [placeholderParameterNames[i], value]));
9816
+ // Combine strings and values
9817
+ let pipelineString = stringsWithHiddenParameters.reduce((result, stringsItem, i) => placeholderParameterNames[i] === undefined
9818
+ ? `${result}${stringsItem}`
9819
+ : `${result}${stringsItem}{${placeholderParameterNames[i]}}`, '');
9820
+ pipelineString = spaceTrim$1(pipelineString);
9757
9821
  try {
9758
- // Use preparePersona directly
9759
- const { modelsRequirements } = await preparePersona(description, { llm: llmTools }, { isVerbose: false });
9760
- // Extract the first model name from the requirements
9761
- if (modelsRequirements.length > 0 && ((_a = modelsRequirements[0]) === null || _a === void 0 ? void 0 : _a.modelName)) {
9762
- return modelsRequirements[0].modelName;
9763
- }
9764
- // Fallback: get available models and return the first CHAT model
9765
- const availableModels = await llmTools.listModels();
9766
- const chatModels = availableModels.filter(({ modelVariant }) => modelVariant === 'CHAT');
9767
- if (chatModels.length === 0) {
9768
- throw new Error('No CHAT models available for agent model selection');
9769
- }
9770
- return chatModels[0].modelName;
9822
+ pipelineString = templateParameters(pipelineString, parameters);
9771
9823
  }
9772
9824
  catch (error) {
9773
- console.warn('Failed to use preparePersona for model selection, falling back to first available model:', error);
9774
- // Fallback: get available models and return the first CHAT model
9775
- const availableModels = await llmTools.listModels();
9776
- const chatModels = availableModels.filter(({ modelVariant }) => modelVariant === 'CHAT');
9777
- if (chatModels.length === 0) {
9778
- throw new Error('No CHAT models available for agent model selection');
9825
+ if (!(error instanceof PipelineExecutionError)) {
9826
+ throw error;
9779
9827
  }
9780
- return chatModels[0].modelName;
9828
+ console.error({ pipelineString, parameters, placeholderParameterNames, error });
9829
+ throw new UnexpectedError(spaceTrim$1((block) => `
9830
+ Internal error in prompt template literal
9831
+
9832
+ ${block(JSON.stringify({ strings, values }, null, 4))}}
9833
+
9834
+ `));
9781
9835
  }
9836
+ // TODO: [0] DRY
9837
+ pipelineString = pipelineString
9838
+ .split(`${REPLACING_NONCE}beginbracket`)
9839
+ .join('{')
9840
+ .split(`${REPLACING_NONCE}endbracket`)
9841
+ .join('}');
9842
+ return pipelineString;
9782
9843
  }
9783
9844
  /**
9784
- * Extracts MCP servers from agent source
9845
+ * TODO: [🧠][🈓] Where is the best location for this file
9846
+ * Note: [šŸ’ž] Ignore a discrepancy between file name and entity name
9847
+ */
9848
+
9849
+ /**
9850
+ * Detects if the code is running in a browser environment in main thread (Not in a web worker)
9785
9851
  *
9786
- * @param agentSource The agent source string that may contain MCP lines
9787
- * @returns Array of MCP server identifiers
9852
+ * Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
9788
9853
  *
9789
- * @private TODO: [🧠] Maybe should be public
9854
+ * @public exported from `@promptbook/utils`
9790
9855
  */
9791
- function extractMcpServers(agentSource) {
9792
- if (!agentSource) {
9793
- return [];
9794
- }
9795
- const lines = agentSource.split('\n');
9796
- const mcpRegex = /^\s*MCP\s+(.+)$/i;
9856
/**
 * Detects if the code is running in a browser environment in main thread (Not in a web worker)
 *
 * Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
 * Note: `new Function` is used deliberately so that bundlers cannot statically resolve `window`;
 *       referencing `window` where it does not exist throws, which the catch turns into `false`.
 *
 * @public exported from `@promptbook/utils`
 */
const $isRunningInBrowser = new Function(`
    try {
        return this === window;
    } catch (e) {
        return false;
    }
`);
/**
 * TODO: [šŸŽŗ]
 */

/**
 * Detects if the code is running in jest environment
 *
 * Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
 * Note: Jest sets `JEST_WORKER_ID` on `process.env`; in environments without `process` the
 *       ReferenceError is caught and `false` is returned.
 *
 * @public exported from `@promptbook/utils`
 */
const $isRunningInJest = new Function(`
    try {
        return process.env.JEST_WORKER_ID !== undefined;
    } catch (e) {
        return false;
    }
`);
/**
 * TODO: [šŸŽŗ]
 */

/**
 * Detects if the code is running in a Node.js environment
 *
 * Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
 * Note: inside a sloppy-mode `Function`, `this` is the global object, so `this === global`
 *       is only `true` under Node.js (where `global` is defined).
 *
 * @public exported from `@promptbook/utils`
 */
const $isRunningInNode = new Function(`
    try {
        return this === global;
    } catch (e) {
        return false;
    }
`);
/**
 * TODO: [šŸŽŗ]
 */

/**
 * Detects if the code is running in a web worker
 *
 * Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
 * Note: `WorkerGlobalScope` exists only inside workers; `self instanceof WorkerGlobalScope`
 *       distinguishes a worker from a browser main thread (where `self` is `window`).
 *
 * @public exported from `@promptbook/utils`
 */
const $isRunningInWebWorker = new Function(`
    try {
        if (typeof WorkerGlobalScope !== 'undefined' && self instanceof WorkerGlobalScope) {
            return true;
        } else {
            return false;
        }
    } catch (e) {
        return false;
    }
`);
/**
 * TODO: [šŸŽŗ]
 */

/**
 * Returns information about the current runtime environment
 *
 * Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environments
 * Note: The four flags are not mutually exclusive (e.g. Jest runs under Node.js, so both
 *       `isRunningInJest` and `isRunningInNode` may be `true` at once).
 *
 * @returns object with one boolean flag per detected environment
 * @public exported from `@promptbook/utils`
 */
function $detectRuntimeEnvironment() {
    return {
        isRunningInBrowser: $isRunningInBrowser(),
        isRunningInJest: $isRunningInJest(),
        isRunningInNode: $isRunningInNode(),
        isRunningInWebWorker: $isRunningInWebWorker(),
    };
}
/**
 * TODO: [šŸŽŗ] Also detect and report node version here
 */
9940
+ /**
9941
+ * TODO: [šŸŽŗ] Also detect and report node version here
9942
+ */
9943
+
9944
/**
 * Simple wrapper `new Date().toISOString()`
 *
 * Note: `$` is used to indicate that this function is not a pure function - it is not deterministic because it depends on the current time
 *
 * @returns string_date branded type (ISO 8601 timestamp in UTC)
 * @public exported from `@promptbook/utils`
 */
function $getCurrentDate() {
    const now = new Date();
    return now.toISOString();
}
9955
+
9956
/**
 * Function parseNumber will parse number from string
 *
 * Note: [šŸ”‚] This function is idempotent.
 * Unlike Number.parseInt, Number.parseFloat it will never ever result in NaN
 * Note: it also works only with decimal numbers
 *
 * Handles signs, comma decimal separators, infinity spellings, simple fractions
 * ("1/2"), "nullish" words ("NaN", "none", ...) and scientific notation ("2E3").
 *
 * @returns parsed number
 * @throws {ParseError} if the value is not a number
 *
 * @public exported from `@promptbook/utils`
 */
function parseNumber(value) {
    const originalValue = value;

    if (typeof value === 'number') {
        value = String(value); // <- TODO: Maybe more efficient way to do this
    }
    if (typeof value !== 'string') {
        // Non-string, non-number inputs (null, undefined, objects, ...) parse as 0
        return 0;
    }

    value = value.trim();

    // Signs are peeled off and the unsigned remainder is parsed recursively
    if (value.startsWith('+')) {
        return parseNumber(value.slice(1));
    }
    if (value.startsWith('-')) {
        const magnitude = parseNumber(value.slice(1));
        return magnitude === 0 ? 0 /* <- Note: To prevent -0 */ : -magnitude;
    }

    // Normalize: comma decimal separator and case-insensitive keywords
    value = value.replace(/,/g, '.').toUpperCase();

    if (value === '') {
        return 0;
    }
    if (value === '♾' || value.startsWith('INF')) {
        return Infinity;
    }
    if (value.includes('/')) {
        // Simple fraction like "1/2" - both sides parsed recursively
        const [rawNumerator, rawDenominator] = value.split('/');
        const numerator = parseNumber(rawNumerator);
        const denominator = parseNumber(rawDenominator);
        if (denominator === 0) {
            throw new ParseError(`Unable to parse number from "${originalValue}" because denominator is zero`);
        }
        return numerator / denominator;
    }
    if (/^(NAN|NULL|NONE|UNDEFINED|ZERO|NO.*)$/.test(value)) {
        // "Nothing-like" words all collapse to 0
        return 0;
    }
    if (value.includes('E')) {
        // Scientific notation: significand and exponent parsed recursively
        const [significand, exponent] = value.split('E');
        return parseNumber(significand) * 10 ** parseNumber(exponent);
    }
    if (!/^[0-9.]+$/.test(value) || value.split('.').length > 2) {
        throw new ParseError(`Unable to parse number from "${originalValue}"`);
    }
    const parsed = parseFloat(value);
    if (isNaN(parsed)) {
        throw new ParseError(`Unexpected NaN when parsing number from "${originalValue}"`);
    }
    return parsed;
}
/**
 * TODO: Maybe use sth. like safe-eval in fraction/calculation case @see https://www.npmjs.com/package/safe-eval
 * TODO: [🧠][🌻] Maybe export through `@promptbook/markdown-utils` not `@promptbook/utils`
 */
10020
+ /**
10021
+ * TODO: Maybe use sth. like safe-eval in fraction/calculation case @see https://www.npmjs.com/package/safe-eval
10022
+ * TODO: [🧠][🌻] Maybe export through `@promptbook/markdown-utils` not `@promptbook/utils`
10023
+ */
10024
+
10025
/**
 * Removes quotes from a string
 *
 * Note: [šŸ”‚] This function is idempotent.
 * Tip: This is very useful for post-processing of the result of the LLM model
 * Note: This function removes only the same quotes from the beginning and the end of the string
 * Note: There are two similar functions:
 * - `removeQuotes` which removes only bounding quotes
 * - `unwrapResult` which removes whole introduce sentence
 *
 * @param text optionally quoted text
 * @returns text without quotes
 * @public exported from `@promptbook/utils`
 */
function removeQuotes(text) {
    // Only a matching pair of the SAME quote character is stripped
    for (const quote of ['"', "'"]) {
        if (text.startsWith(quote) && text.endsWith(quote)) {
            return text.slice(1, -1);
        }
    }
    return text;
}
10048
+
10049
+ /**
10050
+ * Trims string from all 4 sides
10051
+ *
10052
+ * Note: This is a re-exported function from the `spacetrim` package which is
10053
+ * Developed by same author @hejny as this package
10054
+ *
10055
+ * @public exported from `@promptbook/utils`
10056
+ * @see https://github.com/hejny/spacetrim#usage
10057
+ */
10058
+ const spaceTrim = spaceTrim$2;
10059
+
10060
/**
 * Checks if the given value is a valid JavaScript identifier name.
 *
 * Note: Accepts ASCII identifiers only (letters, digits, `_`, `$`; not starting with a digit).
 *
 * @param javascriptName The value to check for JavaScript identifier validity.
 * @returns `true` if the value is a valid JavaScript name, false otherwise.
 * @public exported from `@promptbook/utils`
 */
function isValidJavascriptName(javascriptName) {
    const IDENTIFIER_PATTERN = /^[a-zA-Z_$][0-9a-zA-Z_$]*$/i;
    return typeof javascriptName === 'string' && IDENTIFIER_PATTERN.test(javascriptName);
}
10073
+
10074
/**
 * Normalizes agent name from arbitrary string to valid agent name
 *
 * Note: [šŸ”‚] This function is idempotent.
 *
 * @param rawAgentName arbitrary user-provided agent name
 * @returns normalized agent name
 * @public exported from `@promptbook/core`
 */
function normalizeAgentName(rawAgentName) {
    const trimmedAgentName = spaceTrim$1(rawAgentName);
    return titleToName(trimmedAgentName);
}
10084
+
10085
/**
 * Creates temporary default agent name based on agent source hash
 *
 * @param agentSource source of the agent to derive the name from
 * @returns normalized agent name like `agent-a1b2c3` derived from the first 6 hash characters
 * @public exported from `@promptbook/core`
 */
function createDefaultAgentName(agentSource) {
    const shortHash = computeAgentHash(agentSource).substring(0, 6);
    return normalizeAgentName(`Agent ${shortHash}`);
}
10094
+
10095
/**
 * Parses basic information from agent source
 *
 * There are 2 similar functions:
 * - `parseAgentSource` which is a lightweight parser for agent source, it parses basic information and its purpose is to be quick and synchronous. The commitments there are hardcoded.
 * - `createAgentModelRequirements` which is an asynchronous function that creates model requirements it applies each commitment one by one and works asynchronously.
 *
 * @param agentSource the agent source string (book format)
 * @returns basic agent information: name, hash, persona description, meta map and parameters
 * @public exported from `@promptbook/core`
 */
function parseAgentSource(agentSource) {
    const parseResult = parseAgentSourceWithCommitments(agentSource);
    // Concatenate all PERSONA commitments, separated by blank lines
    let personaDescription = null;
    for (const commitment of parseResult.commitments) {
        if (commitment.type !== 'PERSONA') {
            continue;
        }
        if (personaDescription === null) {
            personaDescription = '';
        }
        else {
            // Note: Bugfix - previously `personaDescription += `\n\n${personaDescription}``
            //       duplicated the already-accumulated text instead of just separating entries
            personaDescription += '\n\n';
        }
        personaDescription += commitment.content;
    }
    const meta = {};
    for (const commitment of parseResult.commitments) {
        if (commitment.type !== 'META') {
            continue;
        }
        // Parse META commitments - format is "META TYPE content"
        const metaTypeRaw = commitment.content.split(' ')[0] || 'NONE';
        const metaType = normalizeTo_camelCase(metaTypeRaw);
        meta[metaType] = spaceTrim$1(commitment.content.substring(metaTypeRaw.length));
    }
    // Generate gravatar fallback if no meta image specified
    if (!meta.image) {
        meta.image = generatePlaceholderAgentProfileImageUrl(parseResult.agentName || '!!');
    }
    // Parse parameters using unified approach - both @Parameter and {parameter} notations
    // are treated as the same syntax feature with unified representation
    const parameters = parseParameters(agentSource);
    const agentHash = computeAgentHash(agentSource);
    return {
        agentName: normalizeAgentName(parseResult.agentName || createDefaultAgentName(agentSource)),
        agentHash,
        personaDescription,
        meta,
        parameters,
    };
}
/**
 * TODO: [šŸ•›] Unite `AgentBasicInformation`, `ChatParticipant`, `LlmExecutionTools` + `LlmToolsMetadata`
 */
10146
+ /**
10147
+ * TODO: [šŸ•›] Unite `AgentBasicInformation`, `ChatParticipant`, `LlmExecutionTools` + `LlmToolsMetadata`
10148
+ */
10149
+
10150
/**
 * Creates model requirements for an agent based on its source
 *
 * There are 2 similar functions:
 * - `parseAgentSource` which is a lightweight parser for agent source, it parses basic information and its purpose is to be quick and synchronous. The commitments there are hardcoded.
 * - `createAgentModelRequirements` which is an asynchronous function that creates model requirements it applies each commitment one by one and works asynchronous.
 *
 * @param agentSource the agent source string (book format)
 * @param modelName optional explicit model to use; skips auto-selection when provided
 * @param availableModels optional list of available models; acts as an opt-in flag for auto-selection
 * @param llmTools LLM tools used for persona-based model auto-selection
 * @public exported from `@promptbook/core`
 */
async function createAgentModelRequirements(agentSource, modelName, availableModels, llmTools) {
    // Auto-select a model via preparePersona only when models are offered,
    // no explicit model was requested, and LLM tools are available
    const shouldAutoSelectModel = Boolean(availableModels) && !modelName && Boolean(llmTools);
    if (!shouldAutoSelectModel) {
        // Use the new commitment-based system with provided or default model
        return createAgentModelRequirementsWithCommitments(agentSource, modelName);
    }
    const selectedModelName = await selectBestModelUsingPersona(agentSource, llmTools);
    return createAgentModelRequirementsWithCommitments(agentSource, selectedModelName);
}
10169
/**
 * Selects the best model using the preparePersona function
 * This directly uses preparePersona to ensure DRY principle
 *
 * @param agentSource The agent source to derive persona description from
 * @param llmTools LLM tools for preparing persona
 * @returns The name of the best selected model
 * @throws {Error} when no CHAT models are available for the fallback path
 * @private function of `createAgentModelRequirements`
 */
async function selectBestModelUsingPersona(agentSource, llmTools) {
    var _a;
    // Parse agent source to get persona description
    const { agentName, personaDescription } = parseAgentSource(agentSource);
    // Use agent name as fallback if no persona description is available
    const description = personaDescription || agentName || 'AI Agent';
    // Shared fallback: first available CHAT model
    // Note: [0] DRY - previously this logic was copy-pasted in both the try and catch paths
    const pickFirstChatModel = async () => {
        const availableModels = await llmTools.listModels();
        const chatModels = availableModels.filter(({ modelVariant }) => modelVariant === 'CHAT');
        if (chatModels.length === 0) {
            throw new Error('No CHAT models available for agent model selection');
        }
        return chatModels[0].modelName;
    };
    try {
        // Use preparePersona directly
        const { modelsRequirements } = await preparePersona(description, { llm: llmTools }, { isVerbose: false });
        // Extract the first model name from the requirements
        if (modelsRequirements.length > 0 && ((_a = modelsRequirements[0]) === null || _a === void 0 ? void 0 : _a.modelName)) {
            return modelsRequirements[0].modelName;
        }
        return pickFirstChatModel();
    }
    catch (error) {
        // Deliberate best-effort: log and fall back rather than failing the whole agent setup
        console.warn('Failed to use preparePersona for model selection, falling back to first available model:', error);
        return pickFirstChatModel();
    }
}
10210
+ /**
10211
+ * Extracts MCP servers from agent source
10212
+ *
10213
+ * @param agentSource The agent source string that may contain MCP lines
10214
+ * @returns Array of MCP server identifiers
10215
+ *
10216
+ * @private TODO: [🧠] Maybe should be public
10217
+ */
10218
+ function extractMcpServers(agentSource) {
10219
+ if (!agentSource) {
10220
+ return [];
10221
+ }
10222
+ const lines = agentSource.split('\n');
10223
+ const mcpRegex = /^\s*MCP\s+(.+)$/i;
9797
10224
  const mcpServers = [];
9798
10225
  // Look for MCP lines
9799
10226
  for (const line of lines) {
@@ -9887,17 +10314,6 @@ const DEFAULT_BOOK = padBook(validateBook(spaceTrim$1(`
9887
10314
  // <- !!! Buttons into genesis book
9888
10315
  // <- TODO: !!! generateBookBoilerplate and deprecate `DEFAULT_BOOK`
9889
10316
 
9890
- /**
9891
- * Trims string from all 4 sides
9892
- *
9893
- * Note: This is a re-exported function from the `spacetrim` package which is
9894
- * Developed by same author @hejny as this package
9895
- *
9896
- * @public exported from `@promptbook/utils`
9897
- * @see https://github.com/hejny/spacetrim#usage
9898
- */
9899
- const spaceTrim = spaceTrim$2;
9900
-
9901
10317
  /**
9902
10318
  * Agent collection stored in Supabase table
9903
10319
  *
@@ -9906,7 +10322,7 @@ const spaceTrim = spaceTrim$2;
9906
10322
  * @public exported from `@promptbook/core`
9907
10323
  * <- TODO: !!! Move to `@promptbook/supabase` package
9908
10324
  */
9909
- class AgentCollectionInSupabase /* TODO: !!!! implements AgentCollection */ {
10325
+ class AgentCollectionInSupabase /* TODO: !!!!!! implements Agent */ {
9910
10326
  /**
9911
10327
  * @param rootPath - path to the directory with agents
9912
10328
  * @param tools - Execution tools to be used in !!! `Agent` itself and listing the agents
@@ -9927,9 +10343,7 @@ class AgentCollectionInSupabase /* TODO: !!!! implements AgentCollection */ {
9927
10343
  */
9928
10344
  async listAgents( /* TODO: [🧠] Allow to pass some condition here */) {
9929
10345
  const { isVerbose = DEFAULT_IS_VERBOSE } = this.options || {};
9930
- const selectResult = await this.supabaseClient
9931
- .from('AgentCollection' /* <- TODO: !!!! Change to `Agent` */)
9932
- .select('agentProfile');
10346
+ const selectResult = await this.supabaseClient.from('Agent').select('agentName,agentProfile');
9933
10347
  if (selectResult.error) {
9934
10348
  throw new DatabaseError(spaceTrim((block) => `
9935
10349
 
@@ -9941,14 +10355,27 @@ class AgentCollectionInSupabase /* TODO: !!!! implements AgentCollection */ {
9941
10355
  if (isVerbose) {
9942
10356
  console.info(`Found ${selectResult.data.length} agents in directory`);
9943
10357
  }
9944
- return selectResult.data.map((row) => row.agentProfile);
10358
+ return selectResult.data.map(({ agentName, agentProfile }) => {
10359
+ if (isVerbose && agentProfile.agentName !== agentName) {
10360
+ console.warn(spaceTrim(`
10361
+ Agent name mismatch for agent "${agentName}". Using name from database.
10362
+
10363
+ agentName: "${agentName}"
10364
+ agentProfile.agentName: "${agentProfile.agentName}"
10365
+ `));
10366
+ }
10367
+ return {
10368
+ ...agentProfile,
10369
+ agentName,
10370
+ };
10371
+ });
9945
10372
  }
9946
10373
  /**
9947
10374
  * !!!@@@
9948
10375
  */
9949
10376
  async getAgentSource(agentName) {
9950
10377
  const selectResult = await this.supabaseClient
9951
- .from('AgentCollection' /* <- TODO: !!!! Change to `Agent` */)
10378
+ .from('Agent')
9952
10379
  .select('agentSource')
9953
10380
  .eq('agentName', agentName)
9954
10381
  .single();
@@ -9976,65 +10403,88 @@ class AgentCollectionInSupabase /* TODO: !!!! implements AgentCollection */ {
9976
10403
  async createAgent(agentSource) {
9977
10404
  const agentProfile = parseAgentSource(agentSource);
9978
10405
  // <- TODO: [šŸ•›]
9979
- const selectResult = await this.supabaseClient
9980
- .from('AgentCollection' /* <- TODO: !!!! Change to `Agent` */)
9981
- .insert({
9982
- agentName: agentProfile.agentName || '!!!!!' /* <- TODO: !!!! Remove */,
10406
+ const { agentName, agentHash } = agentProfile;
10407
+ const insertAgentResult = await this.supabaseClient.from('Agent').insert({
10408
+ agentName,
10409
+ agentHash,
9983
10410
  agentProfile,
9984
10411
  createdAt: new Date().toISOString(),
9985
10412
  updatedAt: null,
9986
- agentVersion: 0,
9987
10413
  promptbookEngineVersion: PROMPTBOOK_ENGINE_VERSION,
9988
10414
  usage: ZERO_USAGE,
9989
10415
  agentSource: agentSource,
9990
10416
  });
9991
- if (selectResult.error) {
10417
+ if (insertAgentResult.error) {
9992
10418
  throw new DatabaseError(spaceTrim((block) => `
9993
10419
  Error creating agent "${agentProfile.agentName}" in Supabase:
9994
10420
 
9995
- ${block(selectResult.error.message)}
10421
+ ${block(insertAgentResult.error.message)}
9996
10422
  `));
9997
10423
  }
10424
+ await this.supabaseClient.from('AgentHistory').insert({
10425
+ createdAt: new Date().toISOString(),
10426
+ agentName,
10427
+ agentHash,
10428
+ previousAgentHash: null,
10429
+ agentSource,
10430
+ promptbookEngineVersion: PROMPTBOOK_ENGINE_VERSION,
10431
+ });
10432
+ // <- TODO: [🧠] What to do with `insertAgentHistoryResult.error`, ignore? wait?
9998
10433
  return agentProfile;
9999
10434
  }
10000
10435
  /**
10001
10436
  * Updates an existing agent in the collection
10002
10437
  */
10003
10438
  async updateAgentSource(agentName, agentSource) {
10004
- const selectResult = await this.supabaseClient
10005
- .from('AgentCollection' /* <- TODO: !!!! Change to `Agent` */)
10006
- .select('agentVersion')
10439
+ const selectPreviousAgentResult = await this.supabaseClient
10440
+ .from('Agent')
10441
+ .select('agentHash,agentName')
10007
10442
  .eq('agentName', agentName)
10008
10443
  .single();
10009
- if (!selectResult.data) {
10010
- throw new NotFoundError(`Agent "${agentName}" not found`);
10444
+ if (selectPreviousAgentResult.error) {
10445
+ throw new DatabaseError(spaceTrim((block) => `
10446
+
10447
+ Error fetching agent "${agentName}" from Supabase:
10448
+
10449
+ ${block(selectPreviousAgentResult.error.message)}
10450
+ `));
10451
+ // <- TODO: !!! First check if the error is "not found" and throw `NotFoundError` instead then throw `DatabaseError`
10011
10452
  }
10453
+ selectPreviousAgentResult.data.agentName;
10454
+ const previousAgentHash = selectPreviousAgentResult.data.agentHash;
10012
10455
  const agentProfile = parseAgentSource(agentSource);
10013
- // TODO: !!!!!! What about agentName change
10014
- console.log('!!! agentName', agentName);
10015
- const oldAgentSource = await this.getAgentSource(agentName);
10016
- const updateResult = await this.supabaseClient
10017
- .from('AgentCollection' /* <- TODO: !!!! Change to `Agent` */)
10456
+ // <- TODO: [šŸ•›]
10457
+ const { agentHash } = agentProfile;
10458
+ const updateAgentResult = await this.supabaseClient
10459
+ .from('Agent')
10018
10460
  .update({
10019
10461
  // TODO: !!!! Compare not update> agentName: agentProfile.agentName || '!!!!!' /* <- TODO: !!!! Remove */,
10020
10462
  agentProfile,
10021
10463
  updatedAt: new Date().toISOString(),
10022
- agentVersion: selectResult.data.agentVersion + 1,
10464
+ agentHash: agentProfile.agentHash,
10023
10465
  agentSource,
10024
10466
  promptbookEngineVersion: PROMPTBOOK_ENGINE_VERSION,
10025
10467
  })
10026
10468
  .eq('agentName', agentName);
10027
- const newAgentSource = await this.getAgentSource(agentName);
10028
- console.log('!!! updateAgent', updateResult);
10029
- console.log('!!! old', oldAgentSource);
10030
- console.log('!!! new', newAgentSource);
10031
- if (updateResult.error) {
10469
+ // console.log('!!! updateAgent', updateResult);
10470
+ // console.log('!!! old', oldAgentSource);
10471
+ // console.log('!!! new', newAgentSource);
10472
+ if (updateAgentResult.error) {
10032
10473
  throw new DatabaseError(spaceTrim((block) => `
10033
10474
  Error updating agent "${agentName}" in Supabase:
10034
10475
 
10035
- ${block(updateResult.error.message)}
10476
+ ${block(updateAgentResult.error.message)}
10036
10477
  `));
10037
10478
  }
10479
+ await this.supabaseClient.from('AgentHistory').insert({
10480
+ createdAt: new Date().toISOString(),
10481
+ agentName,
10482
+ agentHash,
10483
+ previousAgentHash,
10484
+ agentSource,
10485
+ promptbookEngineVersion: PROMPTBOOK_ENGINE_VERSION,
10486
+ });
10487
+ // <- TODO: [🧠] What to do with `insertAgentHistoryResult.error`, ignore? wait?
10038
10488
  }
10039
10489
  // TODO: !!!! public async getAgentSourceSubject(agentName: string_agent_name): Promise<BehaviorSubject<string_book>>
10040
10490
  // Use Supabase realtime logic
@@ -10680,83 +11130,14 @@ const bookVersionCommandParser = {
10680
11130
  };
10681
11131
 
10682
11132
  /**
10683
- * Units of text measurement
10684
- *
10685
- * @see https://github.com/webgptorg/promptbook/discussions/30
10686
- * @public exported from `@promptbook/core`
10687
- */
10688
- const EXPECTATION_UNITS = ['CHARACTERS', 'WORDS', 'SENTENCES', 'LINES', 'PARAGRAPHS', 'PAGES'];
10689
- /**
10690
- * TODO: [šŸ’] Unite object for expecting amount and format - remove format
10691
- */
10692
-
10693
- /**
10694
- * Function parseNumber will parse number from string
10695
- *
10696
- * Note: [šŸ”‚] This function is idempotent.
10697
- * Unlike Number.parseInt, Number.parseFloat it will never ever result in NaN
10698
- * Note: it also works only with decimal numbers
10699
- *
10700
- * @returns parsed number
10701
- * @throws {ParseError} if the value is not a number
11133
+ * Units of text measurement
10702
11134
  *
10703
- * @public exported from `@promptbook/utils`
11135
+ * @see https://github.com/webgptorg/promptbook/discussions/30
11136
+ * @public exported from `@promptbook/core`
10704
11137
  */
10705
- function parseNumber(value) {
10706
- const originalValue = value;
10707
- if (typeof value === 'number') {
10708
- value = value.toString(); // <- TODO: Maybe more efficient way to do this
10709
- }
10710
- if (typeof value !== 'string') {
10711
- return 0;
10712
- }
10713
- value = value.trim();
10714
- if (value.startsWith('+')) {
10715
- return parseNumber(value.substring(1));
10716
- }
10717
- if (value.startsWith('-')) {
10718
- const number = parseNumber(value.substring(1));
10719
- if (number === 0) {
10720
- return 0; // <- Note: To prevent -0
10721
- }
10722
- return -number;
10723
- }
10724
- value = value.replace(/,/g, '.');
10725
- value = value.toUpperCase();
10726
- if (value === '') {
10727
- return 0;
10728
- }
10729
- if (value === '♾' || value.startsWith('INF')) {
10730
- return Infinity;
10731
- }
10732
- if (value.includes('/')) {
10733
- const [numerator_, denominator_] = value.split('/');
10734
- const numerator = parseNumber(numerator_);
10735
- const denominator = parseNumber(denominator_);
10736
- if (denominator === 0) {
10737
- throw new ParseError(`Unable to parse number from "${originalValue}" because denominator is zero`);
10738
- }
10739
- return numerator / denominator;
10740
- }
10741
- if (/^(NAN|NULL|NONE|UNDEFINED|ZERO|NO.*)$/.test(value)) {
10742
- return 0;
10743
- }
10744
- if (value.includes('E')) {
10745
- const [significand, exponent] = value.split('E');
10746
- return parseNumber(significand) * 10 ** parseNumber(exponent);
10747
- }
10748
- if (!/^[0-9.]+$/.test(value) || value.split('.').length > 2) {
10749
- throw new ParseError(`Unable to parse number from "${originalValue}"`);
10750
- }
10751
- const num = parseFloat(value);
10752
- if (isNaN(num)) {
10753
- throw new ParseError(`Unexpected NaN when parsing number from "${originalValue}"`);
10754
- }
10755
- return num;
10756
- }
11138
+ const EXPECTATION_UNITS = ['CHARACTERS', 'WORDS', 'SENTENCES', 'LINES', 'PARAGRAPHS', 'PAGES'];
10757
11139
  /**
10758
- * TODO: Maybe use sth. like safe-eval in fraction/calculation case @see https://www.npmjs.com/package/safe-eval
10759
- * TODO: [🧠][🌻] Maybe export through `@promptbook/markdown-utils` not `@promptbook/utils`
11140
+ * TODO: [šŸ’] Unite object for expecting amount and format - remove format
10760
11141
  */
10761
11142
 
10762
11143
  /**
@@ -10901,30 +11282,6 @@ const expectCommandParser = {
10901
11282
  },
10902
11283
  };
10903
11284
 
10904
- /**
10905
- * Removes quotes from a string
10906
- *
10907
- * Note: [šŸ”‚] This function is idempotent.
10908
- * Tip: This is very useful for post-processing of the result of the LLM model
10909
- * Note: This function removes only the same quotes from the beginning and the end of the string
10910
- * Note: There are two similar functions:
10911
- * - `removeQuotes` which removes only bounding quotes
10912
- * - `unwrapResult` which removes whole introduce sentence
10913
- *
10914
- * @param text optionally quoted text
10915
- * @returns text without quotes
10916
- * @public exported from `@promptbook/utils`
10917
- */
10918
- function removeQuotes(text) {
10919
- if (text.startsWith('"') && text.endsWith('"')) {
10920
- return text.slice(1, -1);
10921
- }
10922
- if (text.startsWith("'") && text.endsWith("'")) {
10923
- return text.slice(1, -1);
10924
- }
10925
- return text;
10926
- }
10927
-
10928
11285
  /**
10929
11286
  * Function `validateParameterName` will normalize and validate a parameter name for use in pipelines.
10930
11287
  * It removes diacritics, emojis, and quotes, normalizes to camelCase, and checks for reserved names and invalid characters.
@@ -12111,20 +12468,6 @@ function $applyToTaskJson(command, $taskJson, $pipelineJson) {
12111
12468
  persona.description += spaceTrim$1('\n\n' + personaDescription);
12112
12469
  }
12113
12470
 
12114
- /**
12115
- * Checks if the given value is a valid JavaScript identifier name.
12116
- *
12117
- * @param javascriptName The value to check for JavaScript identifier validity.
12118
- * @returns `true` if the value is a valid JavaScript name, false otherwise.
12119
- * @public exported from `@promptbook/utils`
12120
- */
12121
- function isValidJavascriptName(javascriptName) {
12122
- if (typeof javascriptName !== 'string') {
12123
- return false;
12124
- }
12125
- return /^[a-zA-Z_$][0-9a-zA-Z_$]*$/i.test(javascriptName);
12126
- }
12127
-
12128
12471
  /**
12129
12472
  * Parses the postprocess command
12130
12473
  *
@@ -13693,114 +14036,6 @@ function addAutoGeneratedSection(content, options) {
13693
14036
  * TODO: [šŸ›] This can be part of markdown builder
13694
14037
  */
13695
14038
 
13696
- /**
13697
- * Creates a Mermaid graph based on the promptbook
13698
- *
13699
- * Note: The result is not wrapped in a Markdown code block
13700
- *
13701
- * @public exported from `@promptbook/utils`
13702
- */
13703
- function renderPromptbookMermaid(pipelineJson, options) {
13704
- const { linkTask = () => null } = options || {};
13705
- const MERMAID_PREFIX = 'pipeline_';
13706
- const MERMAID_KNOWLEDGE_NAME = MERMAID_PREFIX + 'knowledge';
13707
- const MERMAID_RESERVED_NAME = MERMAID_PREFIX + 'reserved';
13708
- const MERMAID_INPUT_NAME = MERMAID_PREFIX + 'input';
13709
- const MERMAID_OUTPUT_NAME = MERMAID_PREFIX + 'output';
13710
- const parameterNameToTaskName = (parameterName) => {
13711
- if (parameterName === 'knowledge') {
13712
- return MERMAID_KNOWLEDGE_NAME;
13713
- }
13714
- else if (RESERVED_PARAMETER_NAMES.includes(parameterName)) {
13715
- return MERMAID_RESERVED_NAME;
13716
- }
13717
- const parameter = pipelineJson.parameters.find((parameter) => parameter.name === parameterName);
13718
- if (!parameter) {
13719
- throw new UnexpectedError(`Could not find {${parameterName}}`);
13720
- // <- TODO: This causes problems when {knowledge} and other reserved parameters are used
13721
- }
13722
- if (parameter.isInput) {
13723
- return MERMAID_INPUT_NAME;
13724
- }
13725
- const task = pipelineJson.tasks.find((task) => task.resultingParameterName === parameterName);
13726
- if (!task) {
13727
- throw new Error(`Could not find task for {${parameterName}}`);
13728
- }
13729
- return MERMAID_PREFIX + (task.name || normalizeTo_camelCase('task-' + titleToName(task.title)));
13730
- };
13731
- const inputAndIntermediateParametersMermaid = pipelineJson.tasks
13732
- .flatMap(({ title, dependentParameterNames, resultingParameterName }) => [
13733
- `${parameterNameToTaskName(resultingParameterName)}("${title}")`,
13734
- ...dependentParameterNames.map((dependentParameterName) => `${parameterNameToTaskName(dependentParameterName)}--"{${dependentParameterName}}"-->${parameterNameToTaskName(resultingParameterName)}`),
13735
- ])
13736
- .join('\n');
13737
- const outputParametersMermaid = pipelineJson.parameters
13738
- .filter(({ isOutput }) => isOutput)
13739
- .map(({ name }) => `${parameterNameToTaskName(name)}--"{${name}}"-->${MERMAID_OUTPUT_NAME}`)
13740
- .join('\n');
13741
- const linksMermaid = pipelineJson.tasks
13742
- .map((task) => {
13743
- const link = linkTask(task);
13744
- if (link === null) {
13745
- return '';
13746
- }
13747
- const { href, title } = link;
13748
- const taskName = parameterNameToTaskName(task.resultingParameterName);
13749
- return `click ${taskName} href "${href}" "${title}";`;
13750
- })
13751
- .filter((line) => line !== '')
13752
- .join('\n');
13753
- const interactionPointsMermaid = Object.entries({
13754
- [MERMAID_INPUT_NAME]: 'Input',
13755
- [MERMAID_OUTPUT_NAME]: 'Output',
13756
- [MERMAID_RESERVED_NAME]: 'Other',
13757
- [MERMAID_KNOWLEDGE_NAME]: 'Knowledge',
13758
- })
13759
- .filter(([MERMAID_NAME]) => (inputAndIntermediateParametersMermaid + outputParametersMermaid).includes(MERMAID_NAME))
13760
- .map(([MERMAID_NAME, title]) => `${MERMAID_NAME}((${title})):::${MERMAID_NAME}`)
13761
- .join('\n');
13762
- const promptbookMermaid = spaceTrim$2((block) => `
13763
-
13764
- %% šŸ”® Tip: Open this on GitHub or in the VSCode website to see the Mermaid graph visually
13765
-
13766
- flowchart LR
13767
- subgraph "${pipelineJson.title}"
13768
-
13769
- %% Basic configuration
13770
- direction TB
13771
-
13772
- %% Interaction points from pipeline to outside
13773
- ${block(interactionPointsMermaid)}
13774
-
13775
- %% Input and intermediate parameters
13776
- ${block(inputAndIntermediateParametersMermaid)}
13777
-
13778
-
13779
- %% Output parameters
13780
- ${block(outputParametersMermaid)}
13781
-
13782
- %% Links
13783
- ${block(linksMermaid)}
13784
-
13785
- %% Styles
13786
- classDef ${MERMAID_INPUT_NAME} color: grey;
13787
- classDef ${MERMAID_OUTPUT_NAME} color: grey;
13788
- classDef ${MERMAID_RESERVED_NAME} color: grey;
13789
- classDef ${MERMAID_KNOWLEDGE_NAME} color: grey;
13790
-
13791
- end;
13792
-
13793
- `);
13794
- return promptbookMermaid;
13795
- }
13796
- /**
13797
- * TODO: [🧠] FOREACH in mermaid graph
13798
- * TODO: [🧠] Knowledge in mermaid graph
13799
- * TODO: [🧠] Personas in mermaid graph
13800
- * TODO: Maybe use some Mermaid package instead of string templating
13801
- * TODO: [šŸ•Œ] When more than 2 functionalities, split into separate functions
13802
- */
13803
-
13804
14039
  /**
13805
14040
  * Prettyfies Promptbook string and adds Mermaid graph
13806
14041
  *
@@ -14352,71 +14587,13 @@ const $llmToolsMetadataRegister = new $Register('llm_tools_metadata');
14352
14587
  /**
14353
14588
  * Register for LLM tools.
14354
14589
  *
14355
- * Note: `$` is used to indicate that this interacts with the global scope
14356
- * @singleton Only one instance of each register is created per build, but there can be more instances across different builds or environments.
14357
- * @public exported from `@promptbook/core`
14358
- */
14359
- const $llmToolsRegister = new $Register('llm_execution_tools_constructors');
14360
- /**
14361
- * TODO: [Ā®] DRY Register logic
14362
- */
14363
-
14364
- /**
14365
- * Detects if the code is running in a browser environment in main thread (Not in a web worker)
14366
- *
14367
- * Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
14368
- *
14369
- * @public exported from `@promptbook/utils`
14370
- */
14371
- const $isRunningInBrowser = new Function(`
14372
- try {
14373
- return this === window;
14374
- } catch (e) {
14375
- return false;
14376
- }
14377
- `);
14378
- /**
14379
- * TODO: [šŸŽŗ]
14380
- */
14381
-
14382
- /**
14383
- * Detects if the code is running in a Node.js environment
14384
- *
14385
- * Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
14386
- *
14387
- * @public exported from `@promptbook/utils`
14388
- */
14389
- const $isRunningInNode = new Function(`
14390
- try {
14391
- return this === global;
14392
- } catch (e) {
14393
- return false;
14394
- }
14395
- `);
14396
- /**
14397
- * TODO: [šŸŽŗ]
14398
- */
14399
-
14400
- /**
14401
- * Detects if the code is running in a web worker
14402
- *
14403
- * Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
14404
- *
14405
- * @public exported from `@promptbook/utils`
14590
+ * Note: `$` is used to indicate that this interacts with the global scope
14591
+ * @singleton Only one instance of each register is created per build, but there can be more instances across different builds or environments.
14592
+ * @public exported from `@promptbook/core`
14406
14593
  */
14407
- const $isRunningInWebWorker = new Function(`
14408
- try {
14409
- if (typeof WorkerGlobalScope !== 'undefined' && self instanceof WorkerGlobalScope) {
14410
- return true;
14411
- } else {
14412
- return false;
14413
- }
14414
- } catch (e) {
14415
- return false;
14416
- }
14417
- `);
14594
+ const $llmToolsRegister = new $Register('llm_execution_tools_constructors');
14418
14595
  /**
14419
- * TODO: [šŸŽŗ]
14596
+ * TODO: [Ā®] DRY Register logic
14420
14597
  */
14421
14598
 
14422
14599
  /**
@@ -14652,18 +14829,6 @@ class MemoryStorage {
14652
14829
  }
14653
14830
  }
14654
14831
 
14655
- /**
14656
- * Simple wrapper `new Date().toISOString()`
14657
- *
14658
- * Note: `$` is used to indicate that this function is not a pure function - it is not deterministic because it depends on the current time
14659
- *
14660
- * @returns string_date branded type
14661
- * @public exported from `@promptbook/utils`
14662
- */
14663
- function $getCurrentDate() {
14664
- return new Date().toISOString();
14665
- }
14666
-
14667
14832
  /**
14668
14833
  * Intercepts LLM tools and counts total usage of the tools
14669
14834
  *
@@ -15290,17 +15455,17 @@ const OPENAI_MODELS = exportJson({
15290
15455
  },
15291
15456
  /**/
15292
15457
  /*/
15293
- {
15294
- modelTitle: 'tts-1-hd-1106',
15295
- modelName: 'tts-1-hd-1106',
15296
- },
15297
- /**/
15458
+ {
15459
+ modelTitle: 'tts-1-hd-1106',
15460
+ modelName: 'tts-1-hd-1106',
15461
+ },
15462
+ /**/
15298
15463
  /*/
15299
- {
15300
- modelTitle: 'tts-1-hd',
15301
- modelName: 'tts-1-hd',
15302
- },
15303
- /**/
15464
+ {
15465
+ modelTitle: 'tts-1-hd',
15466
+ modelName: 'tts-1-hd',
15467
+ },
15468
+ /**/
15304
15469
  /**/
15305
15470
  {
15306
15471
  modelVariant: 'CHAT',
@@ -16661,15 +16826,19 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
16661
16826
  },
16662
16827
  });
16663
16828
  }
16664
- async playground() {
16829
+ /*
16830
+ public async playground() {
16665
16831
  const client = await this.getClient();
16832
+
16666
16833
  // List all assistants
16667
16834
  const assistants = await client.beta.assistants.list();
16668
16835
  console.log('!!! Assistants:', assistants);
16836
+
16669
16837
  // Get details of a specific assistant
16670
16838
  const assistantId = 'asst_MO8fhZf4dGloCfXSHeLcIik0';
16671
16839
  const assistant = await client.beta.assistants.retrieve(assistantId);
16672
16840
  console.log('!!! Assistant Details:', assistant);
16841
+
16673
16842
  // Update an assistant
16674
16843
  const updatedAssistant = await client.beta.assistants.update(assistantId, {
16675
16844
  name: assistant.name + '(M)',
@@ -16679,8 +16848,19 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
16679
16848
  },
16680
16849
  });
16681
16850
  console.log('!!! Updated Assistant:', updatedAssistant);
16851
+
16682
16852
  await forEver();
16683
16853
  }
16854
+ */
16855
+ /**
16856
+ * Get an existing assistant tool wrapper
16857
+ */
16858
+ getAssistant(assistantId) {
16859
+ return new OpenAiAssistantExecutionTools({
16860
+ ...this.options,
16861
+ assistantId,
16862
+ });
16863
+ }
16684
16864
  async createNewAssistant(options) {
16685
16865
  if (!this.isCreatingNewAssistantsAllowed) {
16686
16866
  throw new NotAllowed(`Creating new assistants is not allowed. Set \`isCreatingNewAssistantsAllowed: true\` in options to enable this feature.`);
@@ -16775,6 +16955,95 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
16775
16955
  assistantId: assistant.id,
16776
16956
  });
16777
16957
  }
16958
+ async updateAssistant(options) {
16959
+ if (!this.isCreatingNewAssistantsAllowed) {
16960
+ throw new NotAllowed(`Updating assistants is not allowed. Set \`isCreatingNewAssistantsAllowed: true\` in options to enable this feature.`);
16961
+ }
16962
+ const { assistantId, name, instructions, knowledgeSources } = options;
16963
+ const client = await this.getClient();
16964
+ let vectorStoreId;
16965
+ // If knowledge sources are provided, create a vector store with them
16966
+ // TODO: [🧠] Reuse vector store creation logic from createNewAssistant
16967
+ if (knowledgeSources && knowledgeSources.length > 0) {
16968
+ if (this.options.isVerbose) {
16969
+ console.info(`šŸ“š Creating vector store for update with ${knowledgeSources.length} knowledge sources...`);
16970
+ }
16971
+ // Create a vector store
16972
+ const vectorStore = await client.beta.vectorStores.create({
16973
+ name: `${name} Knowledge Base`,
16974
+ });
16975
+ vectorStoreId = vectorStore.id;
16976
+ if (this.options.isVerbose) {
16977
+ console.info(`āœ… Vector store created: ${vectorStoreId}`);
16978
+ }
16979
+ // Upload files from knowledge sources to the vector store
16980
+ const fileStreams = [];
16981
+ for (const source of knowledgeSources) {
16982
+ try {
16983
+ // Check if it's a URL
16984
+ if (source.startsWith('http://') || source.startsWith('https://')) {
16985
+ // Download the file
16986
+ const response = await fetch(source);
16987
+ if (!response.ok) {
16988
+ console.error(`Failed to download ${source}: ${response.statusText}`);
16989
+ continue;
16990
+ }
16991
+ const buffer = await response.arrayBuffer();
16992
+ const filename = source.split('/').pop() || 'downloaded-file';
16993
+ const blob = new Blob([buffer]);
16994
+ const file = new File([blob], filename);
16995
+ fileStreams.push(file);
16996
+ }
16997
+ else {
16998
+ // Assume it's a local file path
16999
+ // Note: This will work in Node.js environment
17000
+ // For browser environments, this would need different handling
17001
+ const fs = await import('fs');
17002
+ const fileStream = fs.createReadStream(source);
17003
+ fileStreams.push(fileStream);
17004
+ }
17005
+ }
17006
+ catch (error) {
17007
+ console.error(`Error processing knowledge source ${source}:`, error);
17008
+ }
17009
+ }
17010
+ // Batch upload files to the vector store
17011
+ if (fileStreams.length > 0) {
17012
+ try {
17013
+ await client.beta.vectorStores.fileBatches.uploadAndPoll(vectorStoreId, {
17014
+ files: fileStreams,
17015
+ });
17016
+ if (this.options.isVerbose) {
17017
+ console.info(`āœ… Uploaded ${fileStreams.length} files to vector store`);
17018
+ }
17019
+ }
17020
+ catch (error) {
17021
+ console.error('Error uploading files to vector store:', error);
17022
+ }
17023
+ }
17024
+ }
17025
+ const assistantUpdate = {
17026
+ name,
17027
+ instructions,
17028
+ tools: [/* TODO: [🧠] Maybe add { type: 'code_interpreter' }, */ { type: 'file_search' }],
17029
+ };
17030
+ if (vectorStoreId) {
17031
+ assistantUpdate.tool_resources = {
17032
+ file_search: {
17033
+ vector_store_ids: [vectorStoreId],
17034
+ },
17035
+ };
17036
+ }
17037
+ const assistant = await client.beta.assistants.update(assistantId, assistantUpdate);
17038
+ if (this.options.isVerbose) {
17039
+ console.log(`āœ… Assistant updated: ${assistant.id}`);
17040
+ }
17041
+ return new OpenAiAssistantExecutionTools({
17042
+ ...this.options,
17043
+ isCreatingNewAssistantsAllowed: false,
17044
+ assistantId: assistant.id,
17045
+ });
17046
+ }
16778
17047
  /**
16779
17048
  * Discriminant for type guards
16780
17049
  */
@@ -16916,27 +17185,58 @@ class AgentLlmExecutionTools {
16916
17185
  const chatPrompt = prompt;
16917
17186
  let underlyingLlmResult;
16918
17187
  if (OpenAiAssistantExecutionTools.isOpenAiAssistantExecutionTools(this.options.llmTools)) {
16919
- if (this.options.isVerbose) {
16920
- console.log(`Creating new OpenAI Assistant for agent ${this.title}...`);
17188
+ const requirementsHash = SHA256(JSON.stringify(modelRequirements)).toString();
17189
+ const cached = AgentLlmExecutionTools.assistantCache.get(this.title);
17190
+ let assistant;
17191
+ if (cached) {
17192
+ if (cached.requirementsHash === requirementsHash) {
17193
+ if (this.options.isVerbose) {
17194
+ console.log(`1ļøāƒ£ Using cached OpenAI Assistant for agent ${this.title}...`);
17195
+ }
17196
+ assistant = this.options.llmTools.getAssistant(cached.assistantId);
17197
+ }
17198
+ else {
17199
+ if (this.options.isVerbose) {
17200
+ console.log(`1ļøāƒ£ Updating OpenAI Assistant for agent ${this.title}...`);
17201
+ }
17202
+ assistant = await this.options.llmTools.updateAssistant({
17203
+ assistantId: cached.assistantId,
17204
+ name: this.title,
17205
+ instructions: modelRequirements.systemMessage,
17206
+ knowledgeSources: modelRequirements.knowledgeSources,
17207
+ });
17208
+ AgentLlmExecutionTools.assistantCache.set(this.title, {
17209
+ assistantId: assistant.assistantId,
17210
+ requirementsHash,
17211
+ });
17212
+ }
16921
17213
  }
16922
- // <- TODO: !!! Check also `isCreatingNewAssistantsAllowed` and warn about it
16923
- const assistant = await this.options.llmTools.createNewAssistant({
16924
- name: this.title,
16925
- instructions: modelRequirements.systemMessage,
16926
- knowledgeSources: modelRequirements.knowledgeSources,
16927
- /*
16928
- !!!
16929
- metadata: {
16930
- agentModelName: this.modelName,
17214
+ else {
17215
+ if (this.options.isVerbose) {
17216
+ console.log(`1ļøāƒ£ Creating new OpenAI Assistant for agent ${this.title}...`);
16931
17217
  }
16932
- */
16933
- });
16934
- // <- TODO: !!! Cache the assistant in prepareCache
17218
+ // <- TODO: !!! Check also `isCreatingNewAssistantsAllowed` and warn about it
17219
+ assistant = await this.options.llmTools.createNewAssistant({
17220
+ name: this.title,
17221
+ instructions: modelRequirements.systemMessage,
17222
+ knowledgeSources: modelRequirements.knowledgeSources,
17223
+ /*
17224
+ !!!
17225
+ metadata: {
17226
+ agentModelName: this.modelName,
17227
+ }
17228
+ */
17229
+ });
17230
+ AgentLlmExecutionTools.assistantCache.set(this.title, {
17231
+ assistantId: assistant.assistantId,
17232
+ requirementsHash,
17233
+ });
17234
+ }
16935
17235
  underlyingLlmResult = await assistant.callChatModel(chatPrompt);
16936
17236
  }
16937
17237
  else {
16938
17238
  if (this.options.isVerbose) {
16939
- console.log(`Creating Assistant ${this.title} on generic LLM execution tools...`);
17239
+ console.log(`2ļøāƒ£ Creating Assistant ${this.title} on generic LLM execution tools...`);
16940
17240
  }
16941
17241
  // Create modified chat prompt with agent system message
16942
17242
  const modifiedChatPrompt = {
@@ -16966,6 +17266,10 @@ class AgentLlmExecutionTools {
16966
17266
  return agentResult;
16967
17267
  }
16968
17268
  }
17269
+ /**
17270
+ * Cache of OpenAI assistants to avoid creating duplicates
17271
+ */
17272
+ AgentLlmExecutionTools.assistantCache = new Map();
16969
17273
  /**
16970
17274
  * TODO: [šŸš] Implement Destroyable pattern to free resources
16971
17275
  * TODO: [🧠] Adding parameter substitution support (here or should be responsibility of the underlying LLM Tools)
@@ -16983,6 +17287,18 @@ class AgentLlmExecutionTools {
16983
17287
  * @public exported from `@promptbook/core`
16984
17288
  */
16985
17289
  class Agent extends AgentLlmExecutionTools {
17290
+ /**
17291
+ * Name of the agent
17292
+ */
17293
+ get agentName() {
17294
+ return this._agentName || createDefaultAgentName(this.agentSource.value);
17295
+ }
17296
+ /**
17297
+ * Computed hash of the agent source for integrity verification
17298
+ */
17299
+ get agentHash() {
17300
+ return computeAgentHash(this.agentSource.value);
17301
+ }
16986
17302
  /**
16987
17303
  * Not used in Agent, always returns empty array
16988
17304
  */
@@ -16998,10 +17314,7 @@ class Agent extends AgentLlmExecutionTools {
16998
17314
  llmTools: getSingleLlmExecutionTools(options.executionTools.llm),
16999
17315
  agentSource: agentSource.value, // <- TODO: !!!! Allow to pass BehaviorSubject<string_book> OR refresh llmExecutionTools.callChat on agentSource change
17000
17316
  });
17001
- /**
17002
- * Name of the agent
17003
- */
17004
- this.agentName = null;
17317
+ this._agentName = undefined;
17005
17318
  /**
17006
17319
  * Description of the agent
17007
17320
  */
@@ -17010,12 +17323,12 @@ class Agent extends AgentLlmExecutionTools {
17010
17323
  * Metadata like image or color
17011
17324
  */
17012
17325
  this.meta = {};
17013
- // TODO: !!!! Add `Agent` simple "mocked" learning by appending to agent source
17014
- // TODO: !!!! Add `Agent` learning by promptbookAgent
17326
+ // TODO: !!!!! Add `Agent` simple "mocked" learning by appending to agent source
17327
+ // TODO: !!!!! Add `Agent` learning by promptbookAgent
17015
17328
  this.agentSource = agentSource;
17016
17329
  this.agentSource.subscribe((source) => {
17017
17330
  const { agentName, personaDescription, meta } = parseAgentSource(source);
17018
- this.agentName = agentName;
17331
+ this._agentName = agentName;
17019
17332
  this.personaDescription = personaDescription;
17020
17333
  this.meta = { ...this.meta, ...meta };
17021
17334
  });
@@ -17089,9 +17402,9 @@ const _AgentRegistration = $llmToolsRegister.register(createAgentLlmExecutionToo
17089
17402
  /**
17090
17403
  * Represents one AI Agent
17091
17404
  *
17092
- * !!! Note: [šŸ¦–] There are several different things in Promptbook:
17405
+ * !!!!!! Note: [šŸ¦–] There are several different things in Promptbook:
17093
17406
  * - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
17094
- * !!!! `RemoteAgent`
17407
+ * !!!!!! `RemoteAgent`
17095
17408
  * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
17096
17409
  * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
17097
17410
  * - `OpenAiAssistantExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities, recommended for usage in `Agent` or `AgentLlmExecutionTools`
@@ -17106,7 +17419,7 @@ class RemoteAgent extends Agent {
17106
17419
  // <- TODO: !!!! Maybe use promptbookFetch
17107
17420
  const agentSourceValue = (await bookResponse.text());
17108
17421
  const agentSource = new BehaviorSubject(agentSourceValue);
17109
- // <- TODO: !!!!!! Support updating
17422
+ // <- TODO: !!!! Support updating and self-updating
17110
17423
  return new RemoteAgent({
17111
17424
  ...options,
17112
17425
  executionTools: {
@@ -17167,7 +17480,7 @@ class RemoteAgent extends Agent {
17167
17480
  reader.releaseLock();
17168
17481
  }
17169
17482
  }
17170
- // <- TODO: !!!!!!!! Transfer metadata
17483
+ // <- TODO: !!!! Transfer metadata
17171
17484
  const agentResult = {
17172
17485
  content,
17173
17486
  modelName: this.modelName,
@@ -17176,7 +17489,7 @@ class RemoteAgent extends Agent {
17176
17489
  rawPromptContent: {},
17177
17490
  rawRequest: {},
17178
17491
  rawResponse: {},
17179
- // <- TODO: !!!!!!!! Transfer and proxy the metadata
17492
+ // <- TODO: !!!! Transfer and proxy the metadata
17180
17493
  };
17181
17494
  return agentResult;
17182
17495
  }
@@ -17307,24 +17620,6 @@ const _AzureOpenAiMetadataRegistration = $llmToolsMetadataRegister.register({
17307
17620
  * Note: [šŸ’ž] Ignore a discrepancy between file name and entity name
17308
17621
  */
17309
17622
 
17310
- /**
17311
- * Detects if the code is running in jest environment
17312
- *
17313
- * Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
17314
- *
17315
- * @public exported from `@promptbook/utils`
17316
- */
17317
- const $isRunningInJest = new Function(`
17318
- try {
17319
- return process.env.JEST_WORKER_ID !== undefined;
17320
- } catch (e) {
17321
- return false;
17322
- }
17323
- `);
17324
- /**
17325
- * TODO: [šŸŽŗ]
17326
- */
17327
-
17328
17623
  /**
17329
17624
  * Registration of LLM provider metadata
17330
17625
  *
@@ -17677,61 +17972,6 @@ function isValidPipelineString(pipelineString) {
17677
17972
  * TODO: [🧠][🈓] Where is the best location for this file
17678
17973
  */
17679
17974
 
17680
- /**
17681
- * Tag function for notating a prompt as template literal
17682
- *
17683
- * Note: There are 3 similar functions:
17684
- * 1) `prompt` for notating single prompt exported from `@promptbook/utils`
17685
- * 2) `promptTemplate` alias for `prompt`
17686
- * 3) `book` for notating and validating entire books exported from `@promptbook/utils`
17687
- *
17688
- * @param strings
17689
- * @param values
17690
- * @returns the prompt string
17691
- * @public exported from `@promptbook/utils`
17692
- */
17693
- function prompt(strings, ...values) {
17694
- if (values.length === 0) {
17695
- return spaceTrim$1(strings.join(''));
17696
- }
17697
- const stringsWithHiddenParameters = strings.map((stringsItem) =>
17698
- // TODO: [0] DRY
17699
- stringsItem.split('{').join(`${REPLACING_NONCE}beginbracket`).split('}').join(`${REPLACING_NONCE}endbracket`));
17700
- const placeholderParameterNames = values.map((value, i) => `${REPLACING_NONCE}${i}`);
17701
- const parameters = Object.fromEntries(values.map((value, i) => [placeholderParameterNames[i], value]));
17702
- // Combine strings and values
17703
- let pipelineString = stringsWithHiddenParameters.reduce((result, stringsItem, i) => placeholderParameterNames[i] === undefined
17704
- ? `${result}${stringsItem}`
17705
- : `${result}${stringsItem}{${placeholderParameterNames[i]}}`, '');
17706
- pipelineString = spaceTrim$1(pipelineString);
17707
- try {
17708
- pipelineString = templateParameters(pipelineString, parameters);
17709
- }
17710
- catch (error) {
17711
- if (!(error instanceof PipelineExecutionError)) {
17712
- throw error;
17713
- }
17714
- console.error({ pipelineString, parameters, placeholderParameterNames, error });
17715
- throw new UnexpectedError(spaceTrim$1((block) => `
17716
- Internal error in prompt template literal
17717
-
17718
- ${block(JSON.stringify({ strings, values }, null, 4))}}
17719
-
17720
- `));
17721
- }
17722
- // TODO: [0] DRY
17723
- pipelineString = pipelineString
17724
- .split(`${REPLACING_NONCE}beginbracket`)
17725
- .join('{')
17726
- .split(`${REPLACING_NONCE}endbracket`)
17727
- .join('}');
17728
- return pipelineString;
17729
- }
17730
- /**
17731
- * TODO: [🧠][🈓] Where is the best location for this file
17732
- * Note: [šŸ’ž] Ignore a discrepancy between file name and entity name
17733
- */
17734
-
17735
17975
  /**
17736
17976
  * Tag function for notating a pipeline with a book\`...\ notation as template literal
17737
17977
  *
@@ -18267,7 +18507,7 @@ const OpenAiSdkTranspiler = {
18267
18507
  });
18268
18508
 
18269
18509
  const answer = response.choices[0].message.content;
18270
- console.log('\\n🧠 ${agentName}:', answer, '\\n');
18510
+ console.log('\\n🧠 ${agentName /* <- TODO: [šŸ•›] There should be `agentFullname` not `agentName` */}:', answer, '\\n');
18271
18511
 
18272
18512
  chatHistory.push({ role: 'assistant', content: answer });
18273
18513
  promptUser();
@@ -18286,7 +18526,7 @@ const OpenAiSdkTranspiler = {
18286
18526
 
18287
18527
  (async () => {
18288
18528
  await setupKnowledge();
18289
- console.log("šŸ¤– Chat with ${agentName} (type 'exit' to quit)\\n");
18529
+ console.log("šŸ¤– Chat with ${agentName /* <- TODO: [šŸ•›] There should be `agentFullname` not `agentName` */} (type 'exit' to quit)\\n");
18290
18530
  promptUser();
18291
18531
  })();
18292
18532
  `);
@@ -18333,7 +18573,7 @@ const OpenAiSdkTranspiler = {
18333
18573
  });
18334
18574
 
18335
18575
  const answer = response.choices[0].message.content;
18336
- console.log('\\n🧠 ${agentName}:', answer, '\\n');
18576
+ console.log('\\n🧠 ${agentName /* <- TODO: [šŸ•›] There should be `agentFullname` not `agentName` */}:', answer, '\\n');
18337
18577
 
18338
18578
  chatHistory.push({ role: 'assistant', content: answer });
18339
18579
  promptUser();
@@ -18350,7 +18590,7 @@ const OpenAiSdkTranspiler = {
18350
18590
  });
18351
18591
  }
18352
18592
 
18353
- console.log("šŸ¤– Chat with ${agentName} (type 'exit' to quit)\\n");
18593
+ console.log("šŸ¤– Chat with ${agentName /* <- TODO: [šŸ•›] There should be `agentFullname` not `agentName` */} (type 'exit' to quit)\\n");
18354
18594
  promptUser();
18355
18595
 
18356
18596
  `);
@@ -18358,25 +18598,6 @@ const OpenAiSdkTranspiler = {
18358
18598
  },
18359
18599
  };
18360
18600
 
18361
- /**
18362
- * Returns information about the current runtime environment
18363
- *
18364
- * Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environments
18365
- *
18366
- * @public exported from `@promptbook/utils`
18367
- */
18368
- function $detectRuntimeEnvironment() {
18369
- return {
18370
- isRunningInBrowser: $isRunningInBrowser(),
18371
- isRunningInJest: $isRunningInJest(),
18372
- isRunningInNode: $isRunningInNode(),
18373
- isRunningInWebWorker: $isRunningInWebWorker(),
18374
- };
18375
- }
18376
- /**
18377
- * TODO: [šŸŽŗ] Also detect and report node version here
18378
- */
18379
-
18380
18601
  /**
18381
18602
  * Provide information about Promptbook, engine version, book language version, servers, ...
18382
18603
  *
@@ -18555,7 +18776,7 @@ function $generateBookBoilerplate(options) {
18555
18776
  const agentSource = validateBook(spaceTrim$1((block) => `
18556
18777
  ${agentName}
18557
18778
 
18558
- META COLOR ${color || '#3498db' /* <- TODO: !!!! Best default color */}
18779
+ META COLOR ${color || '#3498db' /* <- TODO: [🧠] !!!! Best default color */}
18559
18780
  PERSONA ${block(personaDescription)}
18560
18781
  `));
18561
18782
  return agentSource;
@@ -18564,5 +18785,5 @@ function $generateBookBoilerplate(options) {
18564
18785
  * TODO: [🤶] Maybe export through `@promptbook/utils` or `@promptbook/random` package
18565
18786
  */
18566
18787
 
18567
- export { $bookTranspilersRegister, $generateBookBoilerplate, $llmToolsMetadataRegister, $llmToolsRegister, $scrapersMetadataRegister, $scrapersRegister, ADMIN_EMAIL, ADMIN_GITHUB_NAME, API_REQUEST_TIMEOUT, AbstractFormatError, Agent, AgentCollectionInSupabase, AgentLlmExecutionTools, AuthenticationError, BIG_DATASET_TRESHOLD, BOOK_LANGUAGE_VERSION, BlackholeStorage, BoilerplateError, BoilerplateFormfactorDefinition, CLAIM, CLI_APP_ID, CallbackInterfaceTools, ChatbotFormfactorDefinition, CollectionError, CompletionFormfactorDefinition, CsvFormatError, CsvFormatParser, DEFAULT_AGENTS_DIRNAME, DEFAULT_BOOK, DEFAULT_BOOKS_DIRNAME, DEFAULT_BOOK_OUTPUT_PARAMETER_NAME, DEFAULT_BOOK_TITLE, DEFAULT_CSV_SETTINGS, DEFAULT_DOWNLOAD_CACHE_DIRNAME, DEFAULT_EXECUTION_CACHE_DIRNAME, DEFAULT_GET_PIPELINE_COLLECTION_FUNCTION_NAME, DEFAULT_INTERMEDIATE_FILES_STRATEGY, DEFAULT_IS_AUTO_INSTALLED, DEFAULT_IS_VERBOSE, DEFAULT_MAX_EXECUTION_ATTEMPTS, DEFAULT_MAX_FILE_SIZE, DEFAULT_MAX_KNOWLEDGE_SOURCES_SCRAPING_DEPTH, DEFAULT_MAX_KNOWLEDGE_SOURCES_SCRAPING_TOTAL, DEFAULT_MAX_PARALLEL_COUNT, DEFAULT_MAX_REQUESTS_PER_MINUTE, DEFAULT_PIPELINE_COLLECTION_BASE_FILENAME, DEFAULT_PROMPT_TASK_TITLE, DEFAULT_REMOTE_SERVER_URL, DEFAULT_SCRAPE_CACHE_DIRNAME, DEFAULT_TASK_SIMULATED_DURATION_MS, DEFAULT_TASK_TITLE, DatabaseError, EXPECTATION_UNITS, EnvironmentMismatchError, ExecutionReportStringOptionsDefaults, ExpectError, FAILED_VALUE_PLACEHOLDER, FORMFACTOR_DEFINITIONS, FormattedBookInMarkdownTranspiler, GENERIC_PIPELINE_INTERFACE, GeneratorFormfactorDefinition, GenericFormfactorDefinition, ImageGeneratorFormfactorDefinition, KnowledgeScrapeError, LimitReachedError, MANDATORY_CSV_SETTINGS, MAX_FILENAME_LENGTH, MODEL_ORDERS, MODEL_TRUST_LEVELS, MODEL_VARIANTS, MatcherFormfactorDefinition, MemoryStorage, MissingToolsError, MultipleLlmExecutionTools, NAME, NonTaskSectionTypes, NotAllowed, NotFoundError, NotYetImplementedCommitmentDefinition, NotYetImplementedError, ORDER_OF_PIPELINE_JSON, 
OpenAiSdkTranspiler, PADDING_LINES, PENDING_VALUE_PLACEHOLDER, PLAYGROUND_APP_ID, PROMPTBOOK_CHAT_COLOR, PROMPTBOOK_COLOR, PROMPTBOOK_ENGINE_VERSION, PROMPTBOOK_ERRORS, PROMPTBOOK_LOGO_URL, PROMPTBOOK_SYNTAX_COLORS, ParseError, PipelineExecutionError, PipelineLogicError, PipelineUrlError, PrefixStorage, PromptbookFetchError, REMOTE_SERVER_URLS, RESERVED_PARAMETER_NAMES, RemoteAgent, SET_IS_VERBOSE, SectionTypes, SheetsFormfactorDefinition, TaskTypes, TextFormatParser, TranslatorFormfactorDefinition, UNCERTAIN_USAGE, UNCERTAIN_ZERO_VALUE, USER_CHAT_COLOR, UnexpectedError, WrappedError, ZERO_USAGE, ZERO_VALUE, _AgentMetadata, _AgentRegistration, _AnthropicClaudeMetadataRegistration, _AzureOpenAiMetadataRegistration, _BoilerplateScraperMetadataRegistration, _DeepseekMetadataRegistration, _DocumentScraperMetadataRegistration, _GoogleMetadataRegistration, _LegacyDocumentScraperMetadataRegistration, _MarkdownScraperMetadataRegistration, _MarkitdownScraperMetadataRegistration, _OllamaMetadataRegistration, _OpenAiAssistantMetadataRegistration, _OpenAiCompatibleMetadataRegistration, _OpenAiMetadataRegistration, _PdfScraperMetadataRegistration, _WebsiteScraperMetadataRegistration, aboutPromptbookInformation, addUsage, book, cacheLlmTools, compilePipeline, computeCosineSimilarity, countUsage, createAgentLlmExecutionTools, createAgentModelRequirements, createAgentModelRequirementsWithCommitments, createBasicAgentModelRequirements, createEmptyAgentModelRequirements, createLlmToolsFromConfiguration, createPipelineCollectionFromJson, createPipelineCollectionFromPromise, createPipelineCollectionFromUrl, createPipelineExecutor, createPipelineSubcollection, embeddingVectorToString, executionReportJsonToString, extractParameterNamesFromTask, filterModels, generatePlaceholderAgentProfileImageUrl, getAllCommitmentDefinitions, getAllCommitmentTypes, getCommitmentDefinition, getPipelineInterface, getSingleLlmExecutionTools, identificationToPromptbookToken, isCommitmentSupported, 
isPassingExpectations, isPipelineImplementingInterface, isPipelineInterfacesEqual, isPipelinePrepared, isValidBook, isValidPipelineString, joinLlmExecutionTools, limitTotalUsage, makeKnowledgeSourceHandler, migratePipeline, padBook, parseAgentSource, parseParameters, parsePipeline, pipelineCollectionToJson, pipelineJsonToString, prepareKnowledgePieces, preparePersona, preparePipeline, prettifyPipelineString, promptbookFetch, promptbookTokenToIdentification, unpreparePipeline, usageToHuman, usageToWorktime, validateBook, validatePipeline, validatePipelineString };
18788
+ export { $bookTranspilersRegister, $generateBookBoilerplate, $llmToolsMetadataRegister, $llmToolsRegister, $scrapersMetadataRegister, $scrapersRegister, ADMIN_EMAIL, ADMIN_GITHUB_NAME, API_REQUEST_TIMEOUT, AbstractFormatError, Agent, AgentCollectionInSupabase, AgentLlmExecutionTools, AuthenticationError, BIG_DATASET_TRESHOLD, BOOK_LANGUAGE_VERSION, BlackholeStorage, BoilerplateError, BoilerplateFormfactorDefinition, CLAIM, CLI_APP_ID, CallbackInterfaceTools, ChatbotFormfactorDefinition, CollectionError, CompletionFormfactorDefinition, CsvFormatError, CsvFormatParser, DEFAULT_AGENTS_DIRNAME, DEFAULT_BOOK, DEFAULT_BOOKS_DIRNAME, DEFAULT_BOOK_OUTPUT_PARAMETER_NAME, DEFAULT_BOOK_TITLE, DEFAULT_CSV_SETTINGS, DEFAULT_DOWNLOAD_CACHE_DIRNAME, DEFAULT_EXECUTION_CACHE_DIRNAME, DEFAULT_GET_PIPELINE_COLLECTION_FUNCTION_NAME, DEFAULT_INTERMEDIATE_FILES_STRATEGY, DEFAULT_IS_AUTO_INSTALLED, DEFAULT_IS_VERBOSE, DEFAULT_MAX_EXECUTION_ATTEMPTS, DEFAULT_MAX_FILE_SIZE, DEFAULT_MAX_KNOWLEDGE_SOURCES_SCRAPING_DEPTH, DEFAULT_MAX_KNOWLEDGE_SOURCES_SCRAPING_TOTAL, DEFAULT_MAX_PARALLEL_COUNT, DEFAULT_MAX_REQUESTS_PER_MINUTE, DEFAULT_PIPELINE_COLLECTION_BASE_FILENAME, DEFAULT_PROMPT_TASK_TITLE, DEFAULT_REMOTE_SERVER_URL, DEFAULT_SCRAPE_CACHE_DIRNAME, DEFAULT_TASK_SIMULATED_DURATION_MS, DEFAULT_TASK_TITLE, DatabaseError, EXPECTATION_UNITS, EnvironmentMismatchError, ExecutionReportStringOptionsDefaults, ExpectError, FAILED_VALUE_PLACEHOLDER, FORMFACTOR_DEFINITIONS, FormattedBookInMarkdownTranspiler, GENERIC_PIPELINE_INTERFACE, GeneratorFormfactorDefinition, GenericFormfactorDefinition, ImageGeneratorFormfactorDefinition, KnowledgeScrapeError, LimitReachedError, MANDATORY_CSV_SETTINGS, MAX_FILENAME_LENGTH, MODEL_ORDERS, MODEL_TRUST_LEVELS, MODEL_VARIANTS, MatcherFormfactorDefinition, MemoryStorage, MissingToolsError, MultipleLlmExecutionTools, NAME, NonTaskSectionTypes, NotAllowed, NotFoundError, NotYetImplementedCommitmentDefinition, NotYetImplementedError, ORDER_OF_PIPELINE_JSON, 
OpenAiSdkTranspiler, PADDING_LINES, PENDING_VALUE_PLACEHOLDER, PLAYGROUND_APP_ID, PROMPTBOOK_CHAT_COLOR, PROMPTBOOK_COLOR, PROMPTBOOK_ENGINE_VERSION, PROMPTBOOK_ERRORS, PROMPTBOOK_LOGO_URL, PROMPTBOOK_SYNTAX_COLORS, ParseError, PipelineExecutionError, PipelineLogicError, PipelineUrlError, PrefixStorage, PromptbookFetchError, REMOTE_SERVER_URLS, RESERVED_PARAMETER_NAMES, RemoteAgent, SET_IS_VERBOSE, SectionTypes, SheetsFormfactorDefinition, TaskTypes, TextFormatParser, TranslatorFormfactorDefinition, UNCERTAIN_USAGE, UNCERTAIN_ZERO_VALUE, USER_CHAT_COLOR, UnexpectedError, WrappedError, ZERO_USAGE, ZERO_VALUE, _AgentMetadata, _AgentRegistration, _AnthropicClaudeMetadataRegistration, _AzureOpenAiMetadataRegistration, _BoilerplateScraperMetadataRegistration, _DeepseekMetadataRegistration, _DocumentScraperMetadataRegistration, _GoogleMetadataRegistration, _LegacyDocumentScraperMetadataRegistration, _MarkdownScraperMetadataRegistration, _MarkitdownScraperMetadataRegistration, _OllamaMetadataRegistration, _OpenAiAssistantMetadataRegistration, _OpenAiCompatibleMetadataRegistration, _OpenAiMetadataRegistration, _PdfScraperMetadataRegistration, _WebsiteScraperMetadataRegistration, aboutPromptbookInformation, addUsage, book, cacheLlmTools, compilePipeline, computeAgentHash, computeCosineSimilarity, countUsage, createAgentLlmExecutionTools, createAgentModelRequirements, createAgentModelRequirementsWithCommitments, createBasicAgentModelRequirements, createDefaultAgentName, createEmptyAgentModelRequirements, createLlmToolsFromConfiguration, createPipelineCollectionFromJson, createPipelineCollectionFromPromise, createPipelineCollectionFromUrl, createPipelineExecutor, createPipelineSubcollection, embeddingVectorToString, executionReportJsonToString, extractParameterNamesFromTask, filterModels, generatePlaceholderAgentProfileImageUrl, getAllCommitmentDefinitions, getAllCommitmentTypes, getCommitmentDefinition, getPipelineInterface, getSingleLlmExecutionTools, 
identificationToPromptbookToken, isCommitmentSupported, isPassingExpectations, isPipelineImplementingInterface, isPipelineInterfacesEqual, isPipelinePrepared, isValidBook, isValidPipelineString, joinLlmExecutionTools, limitTotalUsage, makeKnowledgeSourceHandler, migratePipeline, normalizeAgentName, padBook, parseAgentSource, parseParameters, parsePipeline, pipelineCollectionToJson, pipelineJsonToString, prepareKnowledgePieces, preparePersona, preparePipeline, prettifyPipelineString, promptbookFetch, promptbookTokenToIdentification, unpreparePipeline, usageToHuman, usageToWorktime, validateBook, validatePipeline, validatePipelineString };
18568
18789
  //# sourceMappingURL=index.es.js.map