@promptbook/core 0.103.0-46 → 0.103.0-48

This diff shows the contents of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
Files changed (70)
  1. package/esm/index.es.js +1043 -779
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/servers.d.ts +1 -7
  4. package/esm/typings/src/_packages/components.index.d.ts +4 -0
  5. package/esm/typings/src/_packages/core.index.d.ts +22 -14
  6. package/esm/typings/src/_packages/types.index.d.ts +14 -6
  7. package/esm/typings/src/book-2.0/agent-source/AgentBasicInformation.d.ts +7 -3
  8. package/esm/typings/src/book-2.0/agent-source/AgentModelRequirements.d.ts +6 -1
  9. package/esm/typings/src/book-2.0/agent-source/AgentSourceParseResult.d.ts +3 -2
  10. package/esm/typings/src/book-2.0/agent-source/computeAgentHash.d.ts +8 -0
  11. package/esm/typings/src/book-2.0/agent-source/computeAgentHash.test.d.ts +1 -0
  12. package/esm/typings/src/book-2.0/agent-source/createCommitmentRegex.d.ts +1 -1
  13. package/esm/typings/src/book-2.0/agent-source/createDefaultAgentName.d.ts +8 -0
  14. package/esm/typings/src/book-2.0/agent-source/normalizeAgentName.d.ts +9 -0
  15. package/esm/typings/src/book-2.0/agent-source/normalizeAgentName.test.d.ts +1 -0
  16. package/esm/typings/src/book-2.0/agent-source/parseAgentSourceWithCommitments.d.ts +1 -1
  17. package/esm/typings/src/book-components/Chat/AgentChat/AgentChat.d.ts +14 -0
  18. package/esm/typings/src/book-components/Chat/AgentChat/AgentChat.test.d.ts +1 -0
  19. package/esm/typings/src/book-components/Chat/AgentChat/AgentChatProps.d.ts +13 -0
  20. package/esm/typings/src/collection/agent-collection/constructors/agent-collection-in-supabase/AgentCollectionInSupabase.d.ts +1 -60
  21. package/esm/typings/src/collection/agent-collection/constructors/agent-collection-in-supabase/AgentsDatabaseSchema.d.ts +57 -32
  22. package/esm/typings/src/{book-2.0/commitments → commitments}/ACTION/ACTION.d.ts +1 -1
  23. package/esm/typings/src/{book-2.0/commitments → commitments}/DELETE/DELETE.d.ts +1 -1
  24. package/esm/typings/src/{book-2.0/commitments → commitments}/FORMAT/FORMAT.d.ts +1 -1
  25. package/esm/typings/src/{book-2.0/commitments → commitments}/GOAL/GOAL.d.ts +1 -1
  26. package/esm/typings/src/{book-2.0/commitments → commitments}/KNOWLEDGE/KNOWLEDGE.d.ts +1 -5
  27. package/esm/typings/src/{book-2.0/commitments → commitments}/MEMORY/MEMORY.d.ts +1 -1
  28. package/esm/typings/src/{book-2.0/commitments → commitments}/MESSAGE/MESSAGE.d.ts +1 -1
  29. package/esm/typings/src/{book-2.0/commitments → commitments}/META/META.d.ts +1 -1
  30. package/esm/typings/src/{book-2.0/commitments → commitments}/META_IMAGE/META_IMAGE.d.ts +1 -1
  31. package/esm/typings/src/{book-2.0/commitments → commitments}/META_LINK/META_LINK.d.ts +1 -1
  32. package/esm/typings/src/{book-2.0/commitments → commitments}/MODEL/MODEL.d.ts +1 -1
  33. package/esm/typings/src/{book-2.0/commitments → commitments}/NOTE/NOTE.d.ts +1 -1
  34. package/esm/typings/src/{book-2.0/commitments → commitments}/PERSONA/PERSONA.d.ts +1 -1
  35. package/esm/typings/src/{book-2.0/commitments → commitments}/RULE/RULE.d.ts +1 -1
  36. package/esm/typings/src/{book-2.0/commitments → commitments}/SAMPLE/SAMPLE.d.ts +1 -1
  37. package/esm/typings/src/{book-2.0/commitments → commitments}/SCENARIO/SCENARIO.d.ts +1 -1
  38. package/esm/typings/src/{book-2.0/commitments → commitments}/STYLE/STYLE.d.ts +1 -1
  39. package/esm/typings/src/{book-2.0/commitments → commitments}/_base/BaseCommitmentDefinition.d.ts +1 -1
  40. package/esm/typings/src/{book-2.0/commitments → commitments}/_base/CommitmentDefinition.d.ts +1 -1
  41. package/esm/typings/src/{book-2.0/commitments → commitments}/_base/NotYetImplementedCommitmentDefinition.d.ts +1 -1
  42. package/esm/typings/src/{book-2.0/commitments → commitments}/_base/createEmptyAgentModelRequirements.d.ts +1 -1
  43. package/esm/typings/src/execution/LlmExecutionTools.d.ts +1 -1
  44. package/esm/typings/src/llm-providers/_common/utils/assertUniqueModels.d.ts +12 -0
  45. package/esm/typings/src/llm-providers/agent/Agent.d.ts +10 -9
  46. package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.d.ts +5 -1
  47. package/esm/typings/src/llm-providers/agent/CreateAgentLlmExecutionToolsOptions.d.ts +1 -1
  48. package/esm/typings/src/llm-providers/agent/RemoteAgent.d.ts +32 -0
  49. package/esm/typings/src/llm-providers/agent/RemoteAgentOptions.d.ts +11 -0
  50. package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionTools.d.ts +29 -4
  51. package/esm/typings/src/llm-providers/openai/openai-models.test.d.ts +4 -0
  52. package/esm/typings/src/remote-server/startAgentServer.d.ts +1 -1
  53. package/esm/typings/src/remote-server/startRemoteServer.d.ts +1 -2
  54. package/esm/typings/src/storage/_common/PromptbookStorage.d.ts +1 -0
  55. package/esm/typings/src/transpilers/openai-sdk/register.d.ts +1 -1
  56. package/esm/typings/src/types/typeAliases.d.ts +12 -0
  57. package/esm/typings/src/utils/color/internal-utils/checkChannelValue.d.ts +0 -3
  58. package/esm/typings/src/utils/normalization/normalize-to-kebab-case.d.ts +2 -0
  59. package/esm/typings/src/utils/normalization/normalizeTo_PascalCase.d.ts +3 -0
  60. package/esm/typings/src/utils/normalization/normalizeTo_camelCase.d.ts +2 -0
  61. package/esm/typings/src/utils/normalization/titleToName.d.ts +2 -0
  62. package/esm/typings/src/utils/random/$generateBookBoilerplate.d.ts +2 -2
  63. package/esm/typings/src/utils/random/$randomFullnameWithColor.d.ts +1 -1
  64. package/esm/typings/src/version.d.ts +1 -1
  65. package/package.json +1 -1
  66. package/umd/index.umd.js +1051 -783
  67. package/umd/index.umd.js.map +1 -1
  68. /package/esm/typings/src/{book-2.0/commitments → commitments}/_base/BookCommitment.d.ts +0 -0
  69. /package/esm/typings/src/{book-2.0/commitments → commitments}/_base/ParsedCommitment.d.ts +0 -0
  70. /package/esm/typings/src/{book-2.0/commitments → commitments}/index.d.ts +0 -0
package/umd/index.umd.js CHANGED
@@ -1,13 +1,13 @@
1
1
  (function (global, factory) {
2
- typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('spacetrim'), require('crypto'), require('rxjs'), require('waitasecond'), require('crypto-js/enc-hex'), require('crypto-js/sha256'), require('path'), require('crypto-js'), require('mime-types'), require('papaparse'), require('moment'), require('colors'), require('bottleneck'), require('openai')) :
3
- typeof define === 'function' && define.amd ? define(['exports', 'spacetrim', 'crypto', 'rxjs', 'waitasecond', 'crypto-js/enc-hex', 'crypto-js/sha256', 'path', 'crypto-js', 'mime-types', 'papaparse', 'moment', 'colors', 'bottleneck', 'openai'], factory) :
4
- (global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global["promptbook-core"] = {}, global.spaceTrim$1, global.crypto, global.rxjs, global.waitasecond, global.hexEncoder, global.sha256, global.path, global.cryptoJs, global.mimeTypes, global.papaparse, global.moment, global.colors, global.Bottleneck, global.OpenAI));
5
- })(this, (function (exports, spaceTrim$1, crypto, rxjs, waitasecond, hexEncoder, sha256, path, cryptoJs, mimeTypes, papaparse, moment, colors, Bottleneck, OpenAI) { 'use strict';
2
+ typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('crypto-js'), require('crypto-js/enc-hex'), require('spacetrim'), require('crypto'), require('rxjs'), require('waitasecond'), require('crypto-js/sha256'), require('path'), require('mime-types'), require('papaparse'), require('moment'), require('colors'), require('bottleneck'), require('openai')) :
3
+ typeof define === 'function' && define.amd ? define(['exports', 'crypto-js', 'crypto-js/enc-hex', 'spacetrim', 'crypto', 'rxjs', 'waitasecond', 'crypto-js/sha256', 'path', 'mime-types', 'papaparse', 'moment', 'colors', 'bottleneck', 'openai'], factory) :
4
+ (global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global["promptbook-core"] = {}, global.cryptoJs, global.hexEncoder, global.spaceTrim$1, global.crypto, global.rxjs, global.waitasecond, global.sha256, global.path, global.mimeTypes, global.papaparse, global.moment, global.colors, global.Bottleneck, global.OpenAI));
5
+ })(this, (function (exports, cryptoJs, hexEncoder, spaceTrim$1, crypto, rxjs, waitasecond, sha256, path, mimeTypes, papaparse, moment, colors, Bottleneck, OpenAI) { 'use strict';
6
6
 
7
7
  function _interopDefaultLegacy (e) { return e && typeof e === 'object' && 'default' in e ? e : { 'default': e }; }
8
8
 
9
- var spaceTrim__default = /*#__PURE__*/_interopDefaultLegacy(spaceTrim$1);
10
9
  var hexEncoder__default = /*#__PURE__*/_interopDefaultLegacy(hexEncoder);
10
+ var spaceTrim__default = /*#__PURE__*/_interopDefaultLegacy(spaceTrim$1);
11
11
  var sha256__default = /*#__PURE__*/_interopDefaultLegacy(sha256);
12
12
  var moment__default = /*#__PURE__*/_interopDefaultLegacy(moment);
13
13
  var colors__default = /*#__PURE__*/_interopDefaultLegacy(colors);
@@ -28,12 +28,21 @@
28
28
  * @generated
29
29
  * @see https://github.com/webgptorg/promptbook
30
30
  */
31
- const PROMPTBOOK_ENGINE_VERSION = '0.103.0-46';
31
+ const PROMPTBOOK_ENGINE_VERSION = '0.103.0-48';
32
32
  /**
33
33
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
34
34
  * Note: [💞] Ignore a discrepancy between file name and entity name
35
35
  */
36
36
 
37
+ /**
38
+ * Computes SHA-256 hash of the agent source
39
+ *
40
+ * @public exported from `@promptbook/core`
41
+ */
42
+ function computeAgentHash(agentSource) {
43
+ return cryptoJs.SHA256(hexEncoder__default["default"].parse(agentSource /* <- TODO: !!!!! spaceTrim */)).toString( /* hex */);
44
+ }
45
+
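A minimal usage sketch of the `computeAgentHash` helper added above, assuming it is imported from `@promptbook/core` as its `@public` annotation states; the sample source string is made up:

```js
import { computeAgentHash } from '@promptbook/core';

// Hypothetical agent source; any book source string works here
const agentSource = 'PERSONA Joe, a friendly assistant';

const agentHash = computeAgentHash(agentSource);
// -> a SHA-256 based hash string that stays the same for identical sources
```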
37
46
  var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModels",description:"List of available model names together with their descriptions as JSON",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelsRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n```json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpful assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n```\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n```json\n{availableModels}\n```\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. 
By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelsRequirements",format:"JSON",dependentParameterNames:["availableModels","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModels}` List of available model names together with their descriptions as JSON\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelsRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n\\`\\`\\`json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpful assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n\\`\\`\\`json\n{availableModels}\n\\`\\`\\`\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelsRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
38
47
 
39
48
  /**
@@ -160,15 +169,20 @@
160
169
  */
161
170
  const REMOTE_SERVER_URLS = [
162
171
  {
163
- title: 'Promptbook',
164
- description: `Servers of Promptbook.studio`,
172
+ title: 'Promptbook.Studio',
173
+ description: `Server of Promptbook.studio`,
165
174
  owner: 'AI Web, LLC <legal@ptbk.io> (https://www.ptbk.io/)',
166
- isAnonymousModeAllowed: true,
167
175
  urls: [
168
176
  'https://promptbook.s5.ptbk.io/',
169
177
  // Note: Servers 1-4 are not running
170
178
  ],
171
179
  },
180
+ {
181
+ title: 'Testing Agents',
182
+ description: `Testing Agents server on Vercel`,
183
+ owner: 'AI Web, LLC <legal@ptbk.io> (https://www.ptbk.io/)',
184
+ urls: ['https://s6.ptbk.io/'],
185
+ },
172
186
  /*
173
187
  Note: Working on older version of Promptbook and not supported anymore
174
188
  {
@@ -413,9 +427,6 @@
413
427
  throw new Error(`${channelName} channel is greater than 255, it is ${value}`);
414
428
  }
415
429
  }
416
- /**
417
- * TODO: [🧠][🚓] Is/which combination it better to use asserts/check, validate or is utility function?
418
- */
419
430
 
420
431
  /**
421
432
  * Color object represents an RGB color with alpha channel
@@ -4324,6 +4335,8 @@
4324
4335
  /**
4325
4336
  * Converts a given text to kebab-case format.
4326
4337
  *
4338
+ * Note: [🔂] This function is idempotent.
4339
+ *
4327
4340
  * @param text The text to be converted.
4328
4341
  * @returns The kebab-case formatted string.
4329
4342
  * @example 'hello-world'
@@ -4479,6 +4492,8 @@
4479
4492
  /**
4480
4493
  * Converts a title string into a normalized name.
4481
4494
  *
4495
+ * Note: [🔂] This function is idempotent.
4496
+ *
4482
4497
  * @param value The title string to be converted to a name.
4483
4498
  * @returns A normalized name derived from the input title.
4484
4499
  * @example 'Hello World!' -> 'hello-world'
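The idempotence note added in this hunk can be illustrated with a small sketch, assuming `titleToName` is exported from `@promptbook/utils` like the other normalization helpers:

```js
import { titleToName } from '@promptbook/utils';

titleToName('Hello World!');              // -> 'hello-world'
titleToName(titleToName('Hello World!')); // -> 'hello-world' (running it again changes nothing)
```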
@@ -7391,40 +7406,6 @@
7391
7406
  * TODO: [🏢] Check validity of `temperature` in pipeline
7392
7407
  */
7393
7408
 
7394
- /**
7395
- * Creates an empty/basic agent model requirements object
7396
- * This serves as the starting point for the reduce-like pattern
7397
- * where each commitment applies its changes to build the final requirements
7398
- *
7399
- * @public exported from `@promptbook/core`
7400
- */
7401
- function createEmptyAgentModelRequirements() {
7402
- return {
7403
- systemMessage: '',
7404
- // modelName: 'gpt-5',
7405
- modelName: 'gemini-2.5-flash-lite',
7406
- temperature: 0.7,
7407
- topP: 0.9,
7408
- topK: 50,
7409
- };
7410
- }
7411
- /**
7412
- * Creates a basic agent model requirements with just the agent name
7413
- * This is used when we have an agent name but no commitments
7414
- *
7415
- * @public exported from `@promptbook/core`
7416
- */
7417
- function createBasicAgentModelRequirements(agentName) {
7418
- const empty = createEmptyAgentModelRequirements();
7419
- return {
7420
- ...empty,
7421
- systemMessage: `You are ${agentName || 'AI Agent'}`,
7422
- };
7423
- }
7424
- /**
7425
- * TODO: [🐤] Deduplicate `AgentModelRequirements` and `ModelRequirements` model requirements
7426
- */
7427
-
7428
7409
  /**
7429
7410
  * Generates a regex pattern to match a specific commitment
7430
7411
  *
@@ -7958,23 +7939,19 @@
7958
7939
  `);
7959
7940
  }
7960
7941
  applyToAgentModelRequirements(requirements, content) {
7961
- var _a;
7962
7942
  const trimmedContent = content.trim();
7963
7943
  if (!trimmedContent) {
7964
7944
  return requirements;
7965
7945
  }
7966
7946
  // Check if content is a URL (external knowledge source)
7967
- if (this.isUrl(trimmedContent)) {
7947
+ if (isValidUrl(trimmedContent)) {
7968
7948
  // Store the URL for later async processing
7969
7949
  const updatedRequirements = {
7970
7950
  ...requirements,
7971
- metadata: {
7972
- ...requirements.metadata,
7973
- knowledgeSources: [
7974
- ...(((_a = requirements.metadata) === null || _a === void 0 ? void 0 : _a.knowledgeSources) || []),
7975
- trimmedContent,
7976
- ],
7977
- },
7951
+ knowledgeSources: [
7952
+ ...(requirements.knowledgeSources || []),
7953
+ trimmedContent,
7954
+ ],
7978
7955
  };
7979
7956
  // Add placeholder information about knowledge sources to system message
7980
7957
  const knowledgeInfo = `Knowledge Source URL: ${trimmedContent} (will be processed for retrieval during chat)`;
@@ -7986,18 +7963,6 @@
7986
7963
  return this.appendToSystemMessage(requirements, knowledgeSection, '\n\n');
7987
7964
  }
7988
7965
  }
7989
- /**
7990
- * Check if content is a URL
7991
- */
7992
- isUrl(content) {
7993
- try {
7994
- new URL(content);
7995
- return true;
7996
- }
7997
- catch (_a) {
7998
- return false;
7999
- }
8000
- }
8001
7966
  }
8002
7967
  /**
8003
7968
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -8808,6 +8773,7 @@
8808
8773
  // Keep everything after the PERSONA section
8809
8774
  cleanedMessage = lines.slice(personaEndIndex).join('\n').trim();
8810
8775
  }
8776
+ // TODO: [🕛] There should be `agentFullname` not `agentName`
8811
8777
  // Create new system message with persona at the beginning
8812
8778
  // Format: "You are {agentName}\n{personaContent}"
8813
8779
  // The # PERSONA comment will be removed later by removeCommentsFromSystemMessage
@@ -9323,6 +9289,40 @@
9323
9289
  * Note: [💞] Ignore a discrepancy between file name and entity name
9324
9290
  */
9325
9291
 
9292
+ /**
9293
+ * Creates an empty/basic agent model requirements object
9294
+ * This serves as the starting point for the reduce-like pattern
9295
+ * where each commitment applies its changes to build the final requirements
9296
+ *
9297
+ * @public exported from `@promptbook/core`
9298
+ */
9299
+ function createEmptyAgentModelRequirements() {
9300
+ return {
9301
+ systemMessage: '',
9302
+ // modelName: 'gpt-5',
9303
+ modelName: 'gemini-2.5-flash-lite',
9304
+ temperature: 0.7,
9305
+ topP: 0.9,
9306
+ topK: 50,
9307
+ };
9308
+ }
9309
+ /**
9310
+ * Creates a basic agent model requirements with just the agent name
9311
+ * This is used when we have an agent name but no commitments
9312
+ *
9313
+ * @public exported from `@promptbook/core`
9314
+ */
9315
+ function createBasicAgentModelRequirements(agentName) {
9316
+ const empty = createEmptyAgentModelRequirements();
9317
+ return {
9318
+ ...empty,
9319
+ systemMessage: `You are ${agentName || 'AI Agent'}`,
9320
+ };
9321
+ }
9322
+ /**
9323
+ * TODO: [🐤] Deduplicate `AgentModelRequirements` and `ModelRequirements` model requirements
9324
+ */
9325
+
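A short sketch of how the two relocated helpers above compose, based directly on the bodies shown in this hunk (import path per their `@public` annotations):

```js
import { createBasicAgentModelRequirements, createEmptyAgentModelRequirements } from '@promptbook/core';

const empty = createEmptyAgentModelRequirements();
// { systemMessage: '', modelName: 'gemini-2.5-flash-lite', temperature: 0.7, topP: 0.9, topK: 50 }

const named = createBasicAgentModelRequirements('Joe');
// same defaults, but systemMessage === 'You are Joe'
```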
9326
9326
  /**
9327
9327
  * Parses agent source using the new commitment system with multiline support
9328
9328
  * This function replaces the hardcoded commitment parsing in the original parseAgentSource
@@ -9413,29 +9413,6 @@
9413
9413
  };
9414
9414
  }
9415
9415
 
9416
- /**
9417
- * Removes comment lines (lines starting with #) from a system message
9418
- * This is used to clean up the final system message before sending it to the AI model
9419
- * while preserving the original content with comments in metadata
9420
- *
9421
- * @param systemMessage The system message that may contain comment lines
9422
- * @returns The system message with comment lines removed
9423
- *
9424
- * @private - TODO: [🧠] Maybe should be public?
9425
- */
9426
- function removeCommentsFromSystemMessage(systemMessage) {
9427
- if (!systemMessage) {
9428
- return systemMessage;
9429
- }
9430
- const lines = systemMessage.split('\n');
9431
- const filteredLines = lines.filter((line) => {
9432
- const trimmedLine = line.trim();
9433
- // Remove lines that start with # (comments)
9434
- return !trimmedLine.startsWith('#');
9435
- });
9436
- return filteredLines.join('\n').trim();
9437
- }
9438
-
9439
9416
  /**
9440
9417
  * Parses parameters from text using both supported notations:
9441
9418
  * 1. @Parameter - single word parameter starting with @
@@ -9494,6 +9471,29 @@
9494
9471
  return uniqueParameters;
9495
9472
  }
9496
9473
 
9474
+ /**
9475
+ * Removes comment lines (lines starting with #) from a system message
9476
+ * This is used to clean up the final system message before sending it to the AI model
9477
+ * while preserving the original content with comments in metadata
9478
+ *
9479
+ * @param systemMessage The system message that may contain comment lines
9480
+ * @returns The system message with comment lines removed
9481
+ *
9482
+ * @private - TODO: [🧠] Maybe should be public?
9483
+ */
9484
+ function removeCommentsFromSystemMessage(systemMessage) {
9485
+ if (!systemMessage) {
9486
+ return systemMessage;
9487
+ }
9488
+ const lines = systemMessage.split('\n');
9489
+ const filteredLines = lines.filter((line) => {
9490
+ const trimmedLine = line.trim();
9491
+ // Remove lines that start with # (comments)
9492
+ return !trimmedLine.startsWith('#');
9493
+ });
9494
+ return filteredLines.join('\n').trim();
9495
+ }
9496
+
9497
9497
  /**
9498
9498
  * Creates agent model requirements using the new commitment system
9499
9499
  * This function uses a reduce-like pattern where each commitment applies its changes
@@ -9595,6 +9595,8 @@
9595
9595
  /**
9596
9596
  * Normalizes a given text to camelCase format.
9597
9597
  *
9598
+ * Note: [🔂] This function is idempotent.
9599
+ *
9598
9600
  * @param text The text to be normalized.
9599
9601
  * @param _isFirstLetterCapital Whether the first letter should be capitalized.
9600
9602
  * @returns The camelCase formatted string.
@@ -9683,68 +9685,479 @@
9683
9685
  */
9684
9686
 
9685
9687
  /**
9686
- * Parses basic information from agent source
9688
+ * Creates a Mermaid graph based on the promptbook
9687
9689
  *
9688
- * There are 2 similar functions:
9689
- * - `parseAgentSource` which is a lightweight parser for agent source, it parses basic information and its purpose is to be quick and synchronous. The commitments there are hardcoded.
9690
- * - `createAgentModelRequirements` which is an asynchronous function that creates model requirements it applies each commitment one by one and works asynchronously.
9690
+ * Note: The result is not wrapped in a Markdown code block
9691
9691
  *
9692
- * @public exported from `@promptbook/core`
9692
+ * @public exported from `@promptbook/utils`
9693
9693
  */
9694
- function parseAgentSource(agentSource) {
9695
- const parseResult = parseAgentSourceWithCommitments(agentSource);
9696
- // Find PERSONA and META commitments
9697
- let personaDescription = null;
9698
- for (const commitment of parseResult.commitments) {
9699
- if (commitment.type !== 'PERSONA') {
9700
- continue;
9694
+ function renderPromptbookMermaid(pipelineJson, options) {
9695
+ const { linkTask = () => null } = options || {};
9696
+ const MERMAID_PREFIX = 'pipeline_';
9697
+ const MERMAID_KNOWLEDGE_NAME = MERMAID_PREFIX + 'knowledge';
9698
+ const MERMAID_RESERVED_NAME = MERMAID_PREFIX + 'reserved';
9699
+ const MERMAID_INPUT_NAME = MERMAID_PREFIX + 'input';
9700
+ const MERMAID_OUTPUT_NAME = MERMAID_PREFIX + 'output';
9701
+ const parameterNameToTaskName = (parameterName) => {
9702
+ if (parameterName === 'knowledge') {
9703
+ return MERMAID_KNOWLEDGE_NAME;
9701
9704
  }
9702
- if (personaDescription === null) {
9703
- personaDescription = '';
9705
+ else if (RESERVED_PARAMETER_NAMES.includes(parameterName)) {
9706
+ return MERMAID_RESERVED_NAME;
9704
9707
  }
9705
- else {
9706
- personaDescription += `\n\n${personaDescription}`;
9708
+ const parameter = pipelineJson.parameters.find((parameter) => parameter.name === parameterName);
9709
+ if (!parameter) {
9710
+ throw new UnexpectedError(`Could not find {${parameterName}}`);
9711
+ // <- TODO: This causes problems when {knowledge} and other reserved parameters are used
9707
9712
  }
9708
- personaDescription += commitment.content;
9709
- }
9710
- const meta = {};
9711
- for (const commitment of parseResult.commitments) {
9712
- if (commitment.type !== 'META') {
9713
- continue;
9713
+ if (parameter.isInput) {
9714
+ return MERMAID_INPUT_NAME;
9714
9715
  }
9715
- // Parse META commitments - format is "META TYPE content"
9716
- const metaTypeRaw = commitment.content.split(' ')[0] || 'NONE';
9717
- const metaType = normalizeTo_camelCase(metaTypeRaw);
9718
- meta[metaType] = spaceTrim__default["default"](commitment.content.substring(metaTypeRaw.length));
9719
- }
9720
- // Generate gravatar fallback if no meta image specified
9721
- if (!meta.image) {
9722
- meta.image = generatePlaceholderAgentProfileImageUrl(parseResult.agentName || '!!');
9723
- }
9724
- // Parse parameters using unified approach - both @Parameter and {parameter} notations
9725
- // are treated as the same syntax feature with unified representation
9726
- const parameters = parseParameters(agentSource);
9727
- return {
9728
- agentName: parseResult.agentName,
9729
- personaDescription,
9730
- meta,
9731
- parameters,
9716
+ const task = pipelineJson.tasks.find((task) => task.resultingParameterName === parameterName);
9717
+ if (!task) {
9718
+ throw new Error(`Could not find task for {${parameterName}}`);
9719
+ }
9720
+ return MERMAID_PREFIX + (task.name || normalizeTo_camelCase('task-' + titleToName(task.title)));
9732
9721
  };
9733
- }
9734
- /**
9735
- * TODO: [🕛] Unite `AgentBasicInformation`, `ChatParticipant`, `LlmExecutionTools` + `LlmToolsMetadata`
9736
- */
9722
+ const inputAndIntermediateParametersMermaid = pipelineJson.tasks
9723
+ .flatMap(({ title, dependentParameterNames, resultingParameterName }) => [
9724
+ `${parameterNameToTaskName(resultingParameterName)}("${title}")`,
9725
+ ...dependentParameterNames.map((dependentParameterName) => `${parameterNameToTaskName(dependentParameterName)}--"{${dependentParameterName}}"-->${parameterNameToTaskName(resultingParameterName)}`),
9726
+ ])
9727
+ .join('\n');
9728
+ const outputParametersMermaid = pipelineJson.parameters
9729
+ .filter(({ isOutput }) => isOutput)
9730
+ .map(({ name }) => `${parameterNameToTaskName(name)}--"{${name}}"-->${MERMAID_OUTPUT_NAME}`)
9731
+ .join('\n');
9732
+ const linksMermaid = pipelineJson.tasks
9733
+ .map((task) => {
9734
+ const link = linkTask(task);
9735
+ if (link === null) {
9736
+ return '';
9737
+ }
9738
+ const { href, title } = link;
9739
+ const taskName = parameterNameToTaskName(task.resultingParameterName);
9740
+ return `click ${taskName} href "${href}" "${title}";`;
9741
+ })
9742
+ .filter((line) => line !== '')
9743
+ .join('\n');
9744
+ const interactionPointsMermaid = Object.entries({
9745
+ [MERMAID_INPUT_NAME]: 'Input',
9746
+ [MERMAID_OUTPUT_NAME]: 'Output',
9747
+ [MERMAID_RESERVED_NAME]: 'Other',
9748
+ [MERMAID_KNOWLEDGE_NAME]: 'Knowledge',
9749
+ })
9750
+ .filter(([MERMAID_NAME]) => (inputAndIntermediateParametersMermaid + outputParametersMermaid).includes(MERMAID_NAME))
9751
+ .map(([MERMAID_NAME, title]) => `${MERMAID_NAME}((${title})):::${MERMAID_NAME}`)
9752
+ .join('\n');
9753
+ const promptbookMermaid = spaceTrim$1.spaceTrim((block) => `
9737
9754
 
9738
- /**
9739
- * Creates model requirements for an agent based on its source
9740
- *
9741
- * There are 2 similar functions:
9742
- * - `parseAgentSource` which is a lightweight parser for agent source, it parses basic information and its purpose is to be quick and synchronous. The commitments there are hardcoded.
9743
- * - `createAgentModelRequirements` which is an asynchronous function that creates model requirements it applies each commitment one by one and works asynchronous.
9744
- *
9745
- * @public exported from `@promptbook/core`
9746
- */
9747
- async function createAgentModelRequirements(agentSource, modelName, availableModels, llmTools) {
9755
+ %% 🔮 Tip: Open this on GitHub or in the VSCode website to see the Mermaid graph visually
9756
+
9757
+ flowchart LR
9758
+ subgraph "${pipelineJson.title}"
9759
+
9760
+ %% Basic configuration
9761
+ direction TB
9762
+
9763
+ %% Interaction points from pipeline to outside
9764
+ ${block(interactionPointsMermaid)}
9765
+
9766
+ %% Input and intermediate parameters
9767
+ ${block(inputAndIntermediateParametersMermaid)}
9768
+
9769
+
9770
+ %% Output parameters
9771
+ ${block(outputParametersMermaid)}
9772
+
9773
+ %% Links
9774
+ ${block(linksMermaid)}
9775
+
9776
+ %% Styles
9777
+ classDef ${MERMAID_INPUT_NAME} color: grey;
9778
+ classDef ${MERMAID_OUTPUT_NAME} color: grey;
9779
+ classDef ${MERMAID_RESERVED_NAME} color: grey;
9780
+ classDef ${MERMAID_KNOWLEDGE_NAME} color: grey;
9781
+
9782
+ end;
9783
+
9784
+ `);
9785
+ return promptbookMermaid;
9786
+ }
9787
+ /**
9788
+ * TODO: [🧠] FOREACH in mermaid graph
9789
+ * TODO: [🧠] Knowledge in mermaid graph
9790
+ * TODO: [🧠] Personas in mermaid graph
9791
+ * TODO: Maybe use some Mermaid package instead of string templating
9792
+ * TODO: [🕌] When more than 2 functionalities, split into separate functions
9793
+ */
9794
+
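A reduced usage sketch for `renderPromptbookMermaid` above; the pipeline object is stripped down to only the fields this renderer reads (a real `PipelineJson` carries more), and the import path follows the `@public exported from @promptbook/utils` annotation:

```js
import { renderPromptbookMermaid } from '@promptbook/utils';

const pipelineJson = {
    title: 'Prepare Title',
    parameters: [
        { name: 'book', isInput: true, isOutput: false },
        { name: 'title', isInput: false, isOutput: true },
    ],
    tasks: [
        {
            name: 'make-title',
            title: 'Make title',
            resultingParameterName: 'title',
            dependentParameterNames: ['book'],
        },
    ],
};

const mermaid = renderPromptbookMermaid(pipelineJson, {
    linkTask: () => null, // no click links in this sketch
});
// -> a `flowchart LR` definition, not wrapped in a Markdown code block
```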
9795
+ /**
9796
+ * Tag function for notating a prompt as template literal
9797
+ *
9798
+ * Note: There are 3 similar functions:
9799
+ * 1) `prompt` for notating single prompt exported from `@promptbook/utils`
9800
+ * 2) `promptTemplate` alias for `prompt`
9801
+ * 3) `book` for notating and validating entire books exported from `@promptbook/utils`
9802
+ *
9803
+ * @param strings
9804
+ * @param values
9805
+ * @returns the prompt string
9806
+ * @public exported from `@promptbook/utils`
9807
+ */
9808
+ function prompt(strings, ...values) {
9809
+ if (values.length === 0) {
9810
+ return spaceTrim__default["default"](strings.join(''));
9811
+ }
9812
+ const stringsWithHiddenParameters = strings.map((stringsItem) =>
9813
+ // TODO: [0] DRY
9814
+ stringsItem.split('{').join(`${REPLACING_NONCE}beginbracket`).split('}').join(`${REPLACING_NONCE}endbracket`));
9815
+ const placeholderParameterNames = values.map((value, i) => `${REPLACING_NONCE}${i}`);
9816
+ const parameters = Object.fromEntries(values.map((value, i) => [placeholderParameterNames[i], value]));
9817
+ // Combine strings and values
9818
+ let pipelineString = stringsWithHiddenParameters.reduce((result, stringsItem, i) => placeholderParameterNames[i] === undefined
9819
+ ? `${result}${stringsItem}`
9820
+ : `${result}${stringsItem}{${placeholderParameterNames[i]}}`, '');
9821
+ pipelineString = spaceTrim__default["default"](pipelineString);
9822
+ try {
9823
+ pipelineString = templateParameters(pipelineString, parameters);
9824
+ }
9825
+ catch (error) {
9826
+ if (!(error instanceof PipelineExecutionError)) {
9827
+ throw error;
9828
+ }
9829
+ console.error({ pipelineString, parameters, placeholderParameterNames, error });
9830
+ throw new UnexpectedError(spaceTrim__default["default"]((block) => `
9831
+ Internal error in prompt template literal
9832
+
9833
+ ${block(JSON.stringify({ strings, values }, null, 4))}}
9834
+
9835
+ `));
9836
+ }
9837
+ // TODO: [0] DRY
9838
+ pipelineString = pipelineString
9839
+ .split(`${REPLACING_NONCE}beginbracket`)
9840
+ .join('{')
9841
+ .split(`${REPLACING_NONCE}endbracket`)
9842
+ .join('}');
9843
+ return pipelineString;
9844
+ }
9845
+ /**
9846
+ * TODO: [🧠][🈴] Where is the best location for this file
9847
+ * Note: [💞] Ignore a discrepancy between file name and entity name
9848
+ */
9849
+
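A small sketch of the `prompt` tag above: interpolated values are substituted as parameters and the result is space-trimmed:

```js
import { prompt } from '@promptbook/utils';

const customerName = 'Paul';
const poemPrompt = prompt`
    Write a short poem that greets ${customerName}.
`;
// -> 'Write a short poem that greets Paul.'
```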
9850
+ /**
9851
+ * Detects if the code is running in a browser environment in main thread (Not in a web worker)
9852
+ *
9853
+ * Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
9854
+ *
9855
+ * @public exported from `@promptbook/utils`
9856
+ */
9857
+ const $isRunningInBrowser = new Function(`
9858
+ try {
9859
+ return this === window;
9860
+ } catch (e) {
9861
+ return false;
9862
+ }
9863
+ `);
9864
+ /**
9865
+ * TODO: [🎺]
9866
+ */
9867
+
9868
+ /**
9869
+ * Detects if the code is running in jest environment
9870
+ *
9871
+ * Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
9872
+ *
9873
+ * @public exported from `@promptbook/utils`
9874
+ */
9875
+ const $isRunningInJest = new Function(`
9876
+ try {
9877
+ return process.env.JEST_WORKER_ID !== undefined;
9878
+ } catch (e) {
9879
+ return false;
9880
+ }
9881
+ `);
9882
+ /**
9883
+ * TODO: [🎺]
9884
+ */
9885
+
9886
+ /**
9887
+ * Detects if the code is running in a Node.js environment
9888
+ *
9889
+ * Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
9890
+ *
9891
+ * @public exported from `@promptbook/utils`
9892
+ */
9893
+ const $isRunningInNode = new Function(`
9894
+ try {
9895
+ return this === global;
9896
+ } catch (e) {
9897
+ return false;
9898
+ }
9899
+ `);
9900
+ /**
9901
+ * TODO: [🎺]
9902
+ */
9903
+
9904
+ /**
9905
+ * Detects if the code is running in a web worker
9906
+ *
9907
+ * Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
9908
+ *
9909
+ * @public exported from `@promptbook/utils`
9910
+ */
9911
+ const $isRunningInWebWorker = new Function(`
9912
+ try {
9913
+ if (typeof WorkerGlobalScope !== 'undefined' && self instanceof WorkerGlobalScope) {
9914
+ return true;
9915
+ } else {
9916
+ return false;
9917
+ }
9918
+ } catch (e) {
9919
+ return false;
9920
+ }
9921
+ `);
9922
+ /**
9923
+ * TODO: [🎺]
9924
+ */
9925
+
9926
+ /**
9927
+ * Returns information about the current runtime environment
9928
+ *
9929
+ * Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environments
9930
+ *
9931
+ * @public exported from `@promptbook/utils`
9932
+ */
9933
+ function $detectRuntimeEnvironment() {
9934
+ return {
9935
+ isRunningInBrowser: $isRunningInBrowser(),
9936
+ isRunningInJest: $isRunningInJest(),
9937
+ isRunningInNode: $isRunningInNode(),
9938
+ isRunningInWebWorker: $isRunningInWebWorker(),
9939
+ };
9940
+ }
9941
+ /**
9942
+ * TODO: [🎺] Also detect and report node version here
9943
+ */
9944
+
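A trivial usage sketch for the runtime-detection helpers above:

```js
import { $detectRuntimeEnvironment } from '@promptbook/utils';

const runtime = $detectRuntimeEnvironment();
// e.g. { isRunningInBrowser: false, isRunningInJest: false, isRunningInNode: true, isRunningInWebWorker: false }
```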
9945
+ /**
9946
+ * Simple wrapper `new Date().toISOString()`
9947
+ *
9948
+ * Note: `$` is used to indicate that this function is not a pure function - it is not deterministic because it depends on the current time
9949
+ *
9950
+ * @returns string_date branded type
9951
+ * @public exported from `@promptbook/utils`
9952
+ */
9953
+ function $getCurrentDate() {
9954
+ return new Date().toISOString();
9955
+ }
9956
+
9957
+ /**
9958
+ * Function parseNumber will parse number from string
9959
+ *
9960
+ * Note: [🔂] This function is idempotent.
9961
+ * Unlike Number.parseInt, Number.parseFloat it will never ever result in NaN
9962
+ * Note: it also works only with decimal numbers
9963
+ *
9964
+ * @returns parsed number
9965
+ * @throws {ParseError} if the value is not a number
9966
+ *
9967
+ * @public exported from `@promptbook/utils`
9968
+ */
9969
+ function parseNumber(value) {
9970
+ const originalValue = value;
9971
+ if (typeof value === 'number') {
9972
+ value = value.toString(); // <- TODO: Maybe more efficient way to do this
9973
+ }
9974
+ if (typeof value !== 'string') {
9975
+ return 0;
9976
+ }
9977
+ value = value.trim();
9978
+ if (value.startsWith('+')) {
9979
+ return parseNumber(value.substring(1));
9980
+ }
9981
+ if (value.startsWith('-')) {
9982
+ const number = parseNumber(value.substring(1));
9983
+ if (number === 0) {
9984
+ return 0; // <- Note: To prevent -0
9985
+ }
9986
+ return -number;
9987
+ }
9988
+ value = value.replace(/,/g, '.');
9989
+ value = value.toUpperCase();
9990
+ if (value === '') {
9991
+ return 0;
9992
+ }
9993
+ if (value === '♾' || value.startsWith('INF')) {
9994
+ return Infinity;
9995
+ }
9996
+ if (value.includes('/')) {
9997
+ const [numerator_, denominator_] = value.split('/');
9998
+ const numerator = parseNumber(numerator_);
9999
+ const denominator = parseNumber(denominator_);
10000
+ if (denominator === 0) {
10001
+ throw new ParseError(`Unable to parse number from "${originalValue}" because denominator is zero`);
10002
+ }
10003
+ return numerator / denominator;
10004
+ }
10005
+ if (/^(NAN|NULL|NONE|UNDEFINED|ZERO|NO.*)$/.test(value)) {
10006
+ return 0;
10007
+ }
10008
+ if (value.includes('E')) {
10009
+ const [significand, exponent] = value.split('E');
10010
+ return parseNumber(significand) * 10 ** parseNumber(exponent);
10011
+ }
10012
+ if (!/^[0-9.]+$/.test(value) || value.split('.').length > 2) {
10013
+ throw new ParseError(`Unable to parse number from "${originalValue}"`);
10014
+ }
10015
+ const num = parseFloat(value);
10016
+ if (isNaN(num)) {
10017
+ throw new ParseError(`Unexpected NaN when parsing number from "${originalValue}"`);
10018
+ }
10019
+ return num;
10020
+ }
10021
+ /**
10022
+ * TODO: Maybe use sth. like safe-eval in fraction/calculation case @see https://www.npmjs.com/package/safe-eval
10023
+ * TODO: [🧠][🌻] Maybe export through `@promptbook/markdown-utils` not `@promptbook/utils`
10024
+ */
10025
+
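A few example calls of `parseNumber` above, derived from the branches visible in this hunk:

```js
import { parseNumber } from '@promptbook/utils';

parseNumber('  42 ');    // -> 42
parseNumber('1,5');      // -> 1.5 (decimal comma is normalized to a dot)
parseNumber('2/4');      // -> 0.5 (simple fractions are divided)
parseNumber('infinity'); // -> Infinity
parseNumber('three');    // throws ParseError
```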
10026
+ /**
10027
+ * Removes quotes from a string
10028
+ *
10029
+ * Note: [🔂] This function is idempotent.
10030
+ * Tip: This is very useful for post-processing of the result of the LLM model
10031
+ * Note: This function removes only the same quotes from the beginning and the end of the string
10032
+ * Note: There are two similar functions:
10033
+ * - `removeQuotes` which removes only bounding quotes
10034
+ * - `unwrapResult` which removes whole introduce sentence
10035
+ *
10036
+ * @param text optionally quoted text
10037
+ * @returns text without quotes
10038
+ * @public exported from `@promptbook/utils`
10039
+ */
10040
+ function removeQuotes(text) {
10041
+ if (text.startsWith('"') && text.endsWith('"')) {
10042
+ return text.slice(1, -1);
10043
+ }
10044
+ if (text.startsWith("'") && text.endsWith("'")) {
10045
+ return text.slice(1, -1);
10046
+ }
10047
+ return text;
10048
+ }
10049
+
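And the matching behaviour of `removeQuotes` above:

```js
import { removeQuotes } from '@promptbook/utils';

removeQuotes('"Hello"'); // -> 'Hello'
removeQuotes("'Hello'"); // -> 'Hello'
removeQuotes('"Hello');  // -> '"Hello' (only matching bounding quotes are stripped)
```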
10050
+ /**
10051
+ * Trims string from all 4 sides
10052
+ *
10053
+ * Note: This is a re-exported function from the `spacetrim` package which is
10054
+ * Developed by same author @hejny as this package
10055
+ *
10056
+ * @public exported from `@promptbook/utils`
10057
+ * @see https://github.com/hejny/spacetrim#usage
10058
+ */
10059
+ const spaceTrim = spaceTrim$1.spaceTrim;
10060
+
10061
+ /**
10062
+ * Checks if the given value is a valid JavaScript identifier name.
10063
+ *
10064
+ * @param javascriptName The value to check for JavaScript identifier validity.
10065
+ * @returns `true` if the value is a valid JavaScript name, false otherwise.
10066
+ * @public exported from `@promptbook/utils`
10067
+ */
10068
+ function isValidJavascriptName(javascriptName) {
10069
+ if (typeof javascriptName !== 'string') {
10070
+ return false;
10071
+ }
10072
+ return /^[a-zA-Z_$][0-9a-zA-Z_$]*$/i.test(javascriptName);
10073
+ }
10074
+
10075
+ /**
10076
+ * Normalizes agent name from arbitrary string to valid agent name
10077
+ *
10078
+ * Note: [🔂] This function is idempotent.
10079
+ *
10080
+ * @public exported from `@promptbook/core`
10081
+ */
10082
+ function normalizeAgentName(rawAgentName) {
10083
+ return titleToName(spaceTrim__default["default"](rawAgentName));
10084
+ }
10085
+
10086
+ /**
10087
+ * Creates temporary default agent name based on agent source hash
10088
+ *
10089
+ * @public exported from `@promptbook/core`
10090
+ */
10091
+ function createDefaultAgentName(agentSource) {
10092
+ const agentHash = computeAgentHash(agentSource);
10093
+ return normalizeAgentName(`Agent ${agentHash.substring(0, 6)}`);
10094
+ }
10095
+
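A sketch of the two agent-name helpers above, with a made-up source string; the exact normalized output depends on `titleToName`:

```js
import { createDefaultAgentName, normalizeAgentName } from '@promptbook/core';

normalizeAgentName('  Ms. Jane Doe  '); // -> something like 'ms-jane-doe'

// Hypothetical agent source without an explicit name
const agentSource = 'PERSONA A friendly assistant';
createDefaultAgentName(agentSource);
// -> e.g. 'agent-3b5d1a' (first 6 characters of computeAgentHash(agentSource))
```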
10096
+ /**
10097
+ * Parses basic information from agent source
10098
+ *
10099
+ * There are 2 similar functions:
10100
+ * - `parseAgentSource` which is a lightweight parser for agent source, it parses basic information and its purpose is to be quick and synchronous. The commitments there are hardcoded.
10101
+ * - `createAgentModelRequirements` which is an asynchronous function that creates model requirements it applies each commitment one by one and works asynchronously.
10102
+ *
10103
+ * @public exported from `@promptbook/core`
10104
+ */
10105
+ function parseAgentSource(agentSource) {
10106
+ const parseResult = parseAgentSourceWithCommitments(agentSource);
10107
+ // Find PERSONA and META commitments
10108
+ let personaDescription = null;
10109
+ for (const commitment of parseResult.commitments) {
10110
+ if (commitment.type !== 'PERSONA') {
10111
+ continue;
10112
+ }
10113
+ if (personaDescription === null) {
10114
+ personaDescription = '';
10115
+ }
10116
+ else {
10117
+ personaDescription += `\n\n${personaDescription}`;
10118
+ }
10119
+ personaDescription += commitment.content;
10120
+ }
10121
+ const meta = {};
10122
+ for (const commitment of parseResult.commitments) {
10123
+ if (commitment.type !== 'META') {
10124
+ continue;
10125
+ }
10126
+ // Parse META commitments - format is "META TYPE content"
10127
+ const metaTypeRaw = commitment.content.split(' ')[0] || 'NONE';
10128
+ const metaType = normalizeTo_camelCase(metaTypeRaw);
10129
+ meta[metaType] = spaceTrim__default["default"](commitment.content.substring(metaTypeRaw.length));
10130
+ }
10131
+ // Generate gravatar fallback if no meta image specified
10132
+ if (!meta.image) {
10133
+ meta.image = generatePlaceholderAgentProfileImageUrl(parseResult.agentName || '!!');
10134
+ }
10135
+ // Parse parameters using unified approach - both @Parameter and {parameter} notations
10136
+ // are treated as the same syntax feature with unified representation
10137
+ const parameters = parseParameters(agentSource);
10138
+ const agentHash = computeAgentHash(agentSource);
10139
+ return {
10140
+ agentName: normalizeAgentName(parseResult.agentName || createDefaultAgentName(agentSource)),
10141
+ agentHash,
10142
+ personaDescription,
10143
+ meta,
10144
+ parameters,
10145
+ };
10146
+ }
10147
+ /**
10148
+ * TODO: [🕛] Unite `AgentBasicInformation`, `ChatParticipant`, `LlmExecutionTools` + `LlmToolsMetadata`
10149
+ */
10150
+
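A hedged sketch of the reworked `parseAgentSource` above; the sample source is hypothetical and only meant to show the returned shape, which now includes `agentHash` and an always-normalized `agentName`:

```js
import { parseAgentSource } from '@promptbook/core';

// Hypothetical book source; the accepted syntax is whatever parseAgentSourceWithCommitments parses
const agentSource = `Joe
PERSONA A friendly customer-support assistant`;

const { agentName, agentHash, personaDescription, meta, parameters } = parseAgentSource(agentSource);
// agentName  -> normalized name, or a hash-derived default when the source has none
// meta.image -> falls back to a generated placeholder avatar when no META IMAGE is given
```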
10151
+ /**
10152
+ * Creates model requirements for an agent based on its source
10153
+ *
10154
+ * There are 2 similar functions:
10155
+ * - `parseAgentSource` which is a lightweight parser for agent source, it parses basic information and its purpose is to be quick and synchronous. The commitments there are hardcoded.
10156
+ * - `createAgentModelRequirements` which is an asynchronous function that creates model requirements it applies each commitment one by one and works asynchronous.
10157
+ *
10158
+ * @public exported from `@promptbook/core`
10159
+ */
10160
+ async function createAgentModelRequirements(agentSource, modelName, availableModels, llmTools) {
9748
10161
  // If availableModels are provided and no specific modelName is given,
9749
10162
  // use preparePersona to select the best model
9750
10163
  if (availableModels && !modelName && llmTools) {
@@ -9902,17 +10315,6 @@
9902
10315
  // <- !!! Buttons into genesis book
9903
10316
  // <- TODO: !!! generateBookBoilerplate and deprecate `DEFAULT_BOOK`
9904
10317
 
9905
- /**
9906
- * Trims string from all 4 sides
9907
- *
9908
- * Note: This is a re-exported function from the `spacetrim` package which is
9909
- * Developed by same author @hejny as this package
9910
- *
9911
- * @public exported from `@promptbook/utils`
9912
- * @see https://github.com/hejny/spacetrim#usage
9913
- */
9914
- const spaceTrim = spaceTrim$1.spaceTrim;
9915
-
9916
10318
  /**
9917
10319
  * Agent collection stored in Supabase table
9918
10320
  *
@@ -9921,7 +10323,7 @@
9921
10323
  * @public exported from `@promptbook/core`
9922
10324
  * <- TODO: !!! Move to `@promptbook/supabase` package
9923
10325
  */
9924
- class AgentCollectionInSupabase /* TODO: !!!! implements AgentCollection */ {
10326
+ class AgentCollectionInSupabase /* TODO: !!!!!! implements Agent */ {
9925
10327
  /**
9926
10328
  * @param rootPath - path to the directory with agents
9927
10329
  * @param tools - Execution tools to be used in !!! `Agent` itself and listing the agents
@@ -9937,125 +10339,62 @@
9937
10339
  console.info(`Creating pipeline collection from supabase...`);
9938
10340
  }
9939
10341
  }
9940
- /**
9941
- * Cached defined execution tools
9942
- */
9943
- // !!! private _definedTools: ExecutionTools | null = null;
9944
- /*
9945
- TODO: !!! Use or remove
9946
- /**
9947
- * Gets or creates execution tools for the collection
9948
- * /
9949
- private async getTools(): Promise<ExecutionTools> {
9950
- if (this._definedTools !== null) {
9951
- return this._definedTools;
9952
- }
9953
-
9954
- this._definedTools = {
9955
- ...(this.tools === undefined || this.tools.fs === undefined ? await $provideExecutionToolsForNode() : {}),
9956
- ...this.tools,
9957
- };
9958
- return this._definedTools;
9959
- }
9960
- // <- TODO: [👪] Maybe create some common abstraction *(or parent abstract class)*
9961
- */
9962
10342
  /**
9963
10343
  * Gets all agents in the collection
9964
10344
  */
9965
10345
  async listAgents( /* TODO: [🧠] Allow to pass some condition here */) {
9966
10346
  const { isVerbose = exports.DEFAULT_IS_VERBOSE } = this.options || {};
9967
- const result = await this.supabaseClient
9968
- .from('AgentCollection' /* <- TODO: !!!! Change to `Agent` */)
9969
- .select('agentProfile');
9970
- if (result.error) {
10347
+ const selectResult = await this.supabaseClient.from('Agent').select('agentName,agentProfile');
10348
+ if (selectResult.error) {
9971
10349
  throw new DatabaseError(spaceTrim((block) => `
9972
10350
 
9973
10351
  Error fetching agents from Supabase:
9974
10352
 
9975
- ${block(result.error.message)}
10353
+ ${block(selectResult.error.message)}
9976
10354
  `));
9977
10355
  }
9978
10356
  if (isVerbose) {
9979
- console.info(`Found ${result.data.length} agents in directory`);
10357
+ console.info(`Found ${selectResult.data.length} agents in directory`);
9980
10358
  }
9981
- return result.data.map((row) => row.agentProfile);
9982
- }
9983
- /**
9984
- * !!!
9985
- * /
9986
- public async spawnAgent(agentName: string_agent_name): Promise<Agent> {
9987
-
9988
- // <- TODO: !!! ENOENT: no such file or directory, open 'C:\Users\me\work\ai\promptbook\agents\examples\Asistent pro LŠVP.book
9989
- const { isVerbose = DEFAULT_IS_VERBOSE } = this.options || {};
9990
- const tools = await this.getTools();
10359
+ return selectResult.data.map(({ agentName, agentProfile }) => {
10360
+ if (isVerbose && agentProfile.agentName !== agentName) {
10361
+ console.warn(spaceTrim(`
10362
+ Agent name mismatch for agent "${agentName}". Using name from database.
9991
10363
 
9992
- const agentSourceValue = validateBook(await tools.fs!.readFile(agentSourcePath, 'utf-8'));
9993
- const agentSource = new BehaviorSubject(agentSourceValue);
9994
-
9995
- // Note: Write file whenever agent source changes
9996
- agentSource.subscribe(async (newSource) => {
9997
- if (isVerbose) {
9998
- console.info(colors.cyan(`Writing agent source to file ${agentSourcePath}`));
9999
- }
10000
- await forTime(500); // <- TODO: [🙌] !!! Remove
10001
- await tools.fs!.writeFile(agentSourcePath, newSource, 'utf-8');
10002
- });
10003
-
10004
- // Note: Watch file for external changes
10005
- for await (const event of tools.fs!.watch(agentSourcePath)) {
10006
- // <- TODO: !!!! Solve the memory freeing when the watching is no longer needed
10007
-
10008
- if (event.eventType !== 'change') {
10009
- continue;
10010
- }
10011
-
10012
- if (isVerbose) {
10013
- console.info(
10014
- colors.cyan(`Detected external change in agent source file ${agentSourcePath}, reloading`),
10015
- );
10364
+ agentName: "${agentName}"
10365
+ agentProfile.agentName: "${agentProfile.agentName}"
10366
+ `));
10016
10367
  }
10017
- await forTime(500); // <- TODO: [🙌] !!! Remove
10018
- const newSource = validateBook(await tools.fs!.readFile(agentSourcePath, 'utf-8'));
10019
- agentSource.next(newSource);
10020
- }
10021
-
10022
- // TODO: [🙌] !!!! Debug the infinite loop when file is changed externally and agent source is updated which causes file to be written again
10023
-
10024
- const agent = new Agent({
10025
- ...this.options,
10026
- agentSource,
10027
- executionTools: this.tools || {},
10368
+ return {
10369
+ ...agentProfile,
10370
+ agentName,
10371
+ };
10028
10372
  });
10029
-
10030
- if (isVerbose) {
10031
- console.info(colors.cyan(`Created agent "${agent.agentName}" from source file ${agentSourcePath}`));
10032
- }
10033
-
10034
- return agent;
10035
- * /
10036
10373
  }
10037
- */
10038
10374
  /**
10039
10375
  * !!!@@@
10040
10376
  */
10041
10377
  async getAgentSource(agentName) {
10042
- const result = await this.supabaseClient
10043
- .from('AgentCollection' /* <- TODO: !!!! Change to `Agent` */)
10378
+ const selectResult = await this.supabaseClient
10379
+ .from('Agent')
10044
10380
  .select('agentSource')
10045
10381
  .eq('agentName', agentName)
10046
10382
  .single();
10047
- if (result.error) {
10383
+ /*
10384
+ if (selectResult.data===null) {
10385
+ throw new NotFoundError(`Agent "${agentName}" not found`);
10386
+ }
10387
+ */
10388
+ if (selectResult.error) {
10048
10389
  throw new DatabaseError(spaceTrim((block) => `
10049
10390
 
10050
10391
  Error fetching agent "${agentName}" from Supabase:
10051
10392
 
10052
- ${block(result.error.message)}
10393
+ ${block(selectResult.error.message)}
10053
10394
  `));
10054
10395
  // <- TODO: !!! First check if the error is "not found" and throw `NotFoundError` instead then throw `DatabaseError`
10055
10396
  }
10056
- const agentSource = new rxjs.BehaviorSubject(result.data.agentSource);
10057
- // <- TODO: !!!! Dynamic updates
10058
- return agentSource;
10397
+ return selectResult.data.agentSource;
10059
10398
  }
10060
10399
  /**
10061
10400
  * Creates a new agent in the collection
@@ -10065,56 +10404,91 @@
10065
10404
  async createAgent(agentSource) {
10066
10405
  const agentProfile = parseAgentSource(agentSource);
10067
10406
  // <- TODO: [🕛]
10068
- const result = await this.supabaseClient.from('AgentCollection' /* <- TODO: !!!! Change to `Agent` */).insert({
10069
- agentName: agentProfile.agentName || '!!!!!' /* <- TODO: !!!! Remove */,
10407
+ const { agentName, agentHash } = agentProfile;
10408
+ const insertAgentResult = await this.supabaseClient.from('Agent').insert({
10409
+ agentName,
10410
+ agentHash,
10070
10411
  agentProfile,
10071
10412
  createdAt: new Date().toISOString(),
10072
10413
  updatedAt: null,
10073
- agentVersion: 0,
10074
10414
  promptbookEngineVersion: PROMPTBOOK_ENGINE_VERSION,
10075
10415
  usage: ZERO_USAGE,
10076
10416
  agentSource: agentSource,
10077
10417
  });
10078
- if (result.error) {
10418
+ if (insertAgentResult.error) {
10079
10419
  throw new DatabaseError(spaceTrim((block) => `
10080
10420
  Error creating agent "${agentProfile.agentName}" in Supabase:
10081
10421
 
10082
- ${block(result.error.message)}
10422
+ ${block(insertAgentResult.error.message)}
10083
10423
  `));
10084
10424
  }
10425
+ await this.supabaseClient.from('AgentHistory').insert({
10426
+ createdAt: new Date().toISOString(),
10427
+ agentName,
10428
+ agentHash,
10429
+ previousAgentHash: null,
10430
+ agentSource,
10431
+ promptbookEngineVersion: PROMPTBOOK_ENGINE_VERSION,
10432
+ });
10433
+ // <- TODO: [🧠] What to do with `insertAgentHistoryResult.error`, ignore? wait?
10085
10434
  return agentProfile;
10086
10435
  }
10087
10436
  /**
10088
10437
  * Updates an existing agent in the collection
10089
10438
  */
10090
10439
  async updateAgentSource(agentName, agentSource) {
10440
+ const selectPreviousAgentResult = await this.supabaseClient
10441
+ .from('Agent')
10442
+ .select('agentHash,agentName')
10443
+ .eq('agentName', agentName)
10444
+ .single();
10445
+ if (selectPreviousAgentResult.error) {
10446
+ throw new DatabaseError(spaceTrim((block) => `
10447
+
10448
+ Error fetching agent "${agentName}" from Supabase:
10449
+
10450
+ ${block(selectPreviousAgentResult.error.message)}
10451
+ `));
10452
+ // <- TODO: !!! First check if the error is "not found" and throw `NotFoundError` instead then throw `DatabaseError`
10453
+ }
10454
+ selectPreviousAgentResult.data.agentName;
10455
+ const previousAgentHash = selectPreviousAgentResult.data.agentHash;
10091
10456
  const agentProfile = parseAgentSource(agentSource);
10092
- // TODO: !!!!!! What about agentName change
10093
- console.log('!!! agentName', agentName);
10094
- const oldAgentSource = await this.getAgentSource(agentName);
10095
- const result = await this.supabaseClient
10096
- .from('AgentCollection' /* <- TODO: !!!! Change to `Agent` */)
10457
+ // <- TODO: [🕛]
10458
+ const { agentHash } = agentProfile;
10459
+ const updateAgentResult = await this.supabaseClient
10460
+ .from('Agent')
10097
10461
  .update({
10098
10462
  // TODO: !!!! Compare not update> agentName: agentProfile.agentName || '!!!!!' /* <- TODO: !!!! Remove */,
10099
10463
  agentProfile,
10100
10464
  updatedAt: new Date().toISOString(),
10101
- agentVersion: 0,
10102
- agentSource: agentSource,
10465
+ agentHash: agentProfile.agentHash,
10466
+ agentSource,
10467
+ promptbookEngineVersion: PROMPTBOOK_ENGINE_VERSION,
10103
10468
  })
10104
10469
  .eq('agentName', agentName);
10105
- const newAgentSource = await this.getAgentSource(agentName);
10106
- console.log('!!! updateAgent', result);
10107
- console.log('!!! old', oldAgentSource);
10108
- console.log('!!! new', newAgentSource);
10109
- if (result.error) {
10470
+ // console.log('!!! updateAgent', updateResult);
10471
+ // console.log('!!! old', oldAgentSource);
10472
+ // console.log('!!! new', newAgentSource);
10473
+ if (updateAgentResult.error) {
10110
10474
  throw new DatabaseError(spaceTrim((block) => `
10111
10475
  Error updating agent "${agentName}" in Supabase:
10112
10476
 
10113
- ${block(result.error.message)}
10477
+ ${block(updateAgentResult.error.message)}
10114
10478
  `));
10115
10479
  }
10480
+ await this.supabaseClient.from('AgentHistory').insert({
10481
+ createdAt: new Date().toISOString(),
10482
+ agentName,
10483
+ agentHash,
10484
+ previousAgentHash,
10485
+ agentSource,
10486
+ promptbookEngineVersion: PROMPTBOOK_ENGINE_VERSION,
10487
+ });
10488
+ // <- TODO: [🧠] What to do with `insertAgentHistoryResult.error`, ignore? wait?
10116
10489
  }
10117
- // TODO: !!!! getAgentSourceSubject
10490
+ // TODO: !!!! public async getAgentSourceSubject(agentName: string_agent_name): Promise<BehaviorSubject<string_book>>
10491
+ // Use Supabase realtime logic
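A short usage sketch for this collection (hedged: construction of the `collection` instance and its Supabase client is not shown in this hunk; only the methods above are):

    const profile = await collection.createAgent(agentSource);
    // -> inserts a row into the `Agent` table and appends an `AgentHistory` row with previousAgentHash = null

    await collection.updateAgentSource(profile.agentName, newAgentSource);
    // -> updates the `Agent` row and appends an `AgentHistory` row whose previousAgentHash
    //    is the hash stored before the update, so the history forms a hash-linked chain

    const source = await collection.getAgentSource(profile.agentName); // now a plain string, no longer a BehaviorSubject
    const agents = await collection.listAgents(); // profiles with agentName taken from the database column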
10118
10492
  /**
10119
10493
  * Deletes an agent from the collection
10120
10494
  */
@@ -10767,75 +11141,6 @@
10767
11141
  * TODO: [💝] Unite object for expecting amount and format - remove format
10768
11142
  */
10769
11143
 
10770
- /**
10771
- * Function parseNumber will parse number from string
10772
- *
10773
- * Note: [🔂] This function is idempotent.
10774
- * Unlike Number.parseInt, Number.parseFloat it will never ever result in NaN
10775
- * Note: it also works only with decimal numbers
10776
- *
10777
- * @returns parsed number
10778
- * @throws {ParseError} if the value is not a number
10779
- *
10780
- * @public exported from `@promptbook/utils`
10781
- */
10782
- function parseNumber(value) {
10783
- const originalValue = value;
10784
- if (typeof value === 'number') {
10785
- value = value.toString(); // <- TODO: Maybe more efficient way to do this
10786
- }
10787
- if (typeof value !== 'string') {
10788
- return 0;
10789
- }
10790
- value = value.trim();
10791
- if (value.startsWith('+')) {
10792
- return parseNumber(value.substring(1));
10793
- }
10794
- if (value.startsWith('-')) {
10795
- const number = parseNumber(value.substring(1));
10796
- if (number === 0) {
10797
- return 0; // <- Note: To prevent -0
10798
- }
10799
- return -number;
10800
- }
10801
- value = value.replace(/,/g, '.');
10802
- value = value.toUpperCase();
10803
- if (value === '') {
10804
- return 0;
10805
- }
10806
- if (value === '♾' || value.startsWith('INF')) {
10807
- return Infinity;
10808
- }
10809
- if (value.includes('/')) {
10810
- const [numerator_, denominator_] = value.split('/');
10811
- const numerator = parseNumber(numerator_);
10812
- const denominator = parseNumber(denominator_);
10813
- if (denominator === 0) {
10814
- throw new ParseError(`Unable to parse number from "${originalValue}" because denominator is zero`);
10815
- }
10816
- return numerator / denominator;
10817
- }
10818
- if (/^(NAN|NULL|NONE|UNDEFINED|ZERO|NO.*)$/.test(value)) {
10819
- return 0;
10820
- }
10821
- if (value.includes('E')) {
10822
- const [significand, exponent] = value.split('E');
10823
- return parseNumber(significand) * 10 ** parseNumber(exponent);
10824
- }
10825
- if (!/^[0-9.]+$/.test(value) || value.split('.').length > 2) {
10826
- throw new ParseError(`Unable to parse number from "${originalValue}"`);
10827
- }
10828
- const num = parseFloat(value);
10829
- if (isNaN(num)) {
10830
- throw new ParseError(`Unexpected NaN when parsing number from "${originalValue}"`);
10831
- }
10832
- return num;
10833
- }
10834
- /**
10835
- * TODO: Maybe use sth. like safe-eval in fraction/calculation case @see https://www.npmjs.com/package/safe-eval
10836
- * TODO: [🧠][🌻] Maybe export through `@promptbook/markdown-utils` not `@promptbook/utils`
10837
- */
10838
-
10839
11144
  /**
10840
11145
  import { WrappedError } from '../../errors/WrappedError';
10841
11146
  import { assertsError } from '../../errors/assertsError';
@@ -10978,30 +11283,6 @@
10978
11283
  },
10979
11284
  };
10980
11285
 
10981
- /**
10982
- * Removes quotes from a string
10983
- *
10984
- * Note: [🔂] This function is idempotent.
10985
- * Tip: This is very useful for post-processing of the result of the LLM model
10986
- * Note: This function removes only the same quotes from the beginning and the end of the string
10987
- * Note: There are two similar functions:
10988
- * - `removeQuotes` which removes only bounding quotes
10989
- * - `unwrapResult` which removes whole introduce sentence
10990
- *
10991
- * @param text optionally quoted text
10992
- * @returns text without quotes
10993
- * @public exported from `@promptbook/utils`
10994
- */
10995
- function removeQuotes(text) {
10996
- if (text.startsWith('"') && text.endsWith('"')) {
10997
- return text.slice(1, -1);
10998
- }
10999
- if (text.startsWith("'") && text.endsWith("'")) {
11000
- return text.slice(1, -1);
11001
- }
11002
- return text;
11003
- }
11004
-
11005
11286
  /**
11006
11287
  * Function `validateParameterName` will normalize and validate a parameter name for use in pipelines.
11007
11288
  * It removes diacritics, emojis, and quotes, normalizes to camelCase, and checks for reserved names and invalid characters.
@@ -12181,25 +12462,11 @@
12181
12462
  First definition:
12182
12463
  ${persona.description}
12183
12464
 
12184
- Second definition:
12185
- ${personaDescription}
12186
-
12187
- `));
12188
- persona.description += spaceTrim__default["default"]('\n\n' + personaDescription);
12189
- }
12190
-
12191
- /**
12192
- * Checks if the given value is a valid JavaScript identifier name.
12193
- *
12194
- * @param javascriptName The value to check for JavaScript identifier validity.
12195
- * @returns `true` if the value is a valid JavaScript name, false otherwise.
12196
- * @public exported from `@promptbook/utils`
12197
- */
12198
- function isValidJavascriptName(javascriptName) {
12199
- if (typeof javascriptName !== 'string') {
12200
- return false;
12201
- }
12202
- return /^[a-zA-Z_$][0-9a-zA-Z_$]*$/i.test(javascriptName);
12465
+ Second definition:
12466
+ ${personaDescription}
12467
+
12468
+ `));
12469
+ persona.description += spaceTrim__default["default"]('\n\n' + personaDescription);
12203
12470
  }
12204
12471
 
12205
12472
  /**
@@ -13770,114 +14037,6 @@
13770
14037
  * TODO: [🏛] This can be part of markdown builder
13771
14038
  */
13772
14039
 
13773
- /**
13774
- * Creates a Mermaid graph based on the promptbook
13775
- *
13776
- * Note: The result is not wrapped in a Markdown code block
13777
- *
13778
- * @public exported from `@promptbook/utils`
13779
- */
13780
- function renderPromptbookMermaid(pipelineJson, options) {
13781
- const { linkTask = () => null } = options || {};
13782
- const MERMAID_PREFIX = 'pipeline_';
13783
- const MERMAID_KNOWLEDGE_NAME = MERMAID_PREFIX + 'knowledge';
13784
- const MERMAID_RESERVED_NAME = MERMAID_PREFIX + 'reserved';
13785
- const MERMAID_INPUT_NAME = MERMAID_PREFIX + 'input';
13786
- const MERMAID_OUTPUT_NAME = MERMAID_PREFIX + 'output';
13787
- const parameterNameToTaskName = (parameterName) => {
13788
- if (parameterName === 'knowledge') {
13789
- return MERMAID_KNOWLEDGE_NAME;
13790
- }
13791
- else if (RESERVED_PARAMETER_NAMES.includes(parameterName)) {
13792
- return MERMAID_RESERVED_NAME;
13793
- }
13794
- const parameter = pipelineJson.parameters.find((parameter) => parameter.name === parameterName);
13795
- if (!parameter) {
13796
- throw new UnexpectedError(`Could not find {${parameterName}}`);
13797
- // <- TODO: This causes problems when {knowledge} and other reserved parameters are used
13798
- }
13799
- if (parameter.isInput) {
13800
- return MERMAID_INPUT_NAME;
13801
- }
13802
- const task = pipelineJson.tasks.find((task) => task.resultingParameterName === parameterName);
13803
- if (!task) {
13804
- throw new Error(`Could not find task for {${parameterName}}`);
13805
- }
13806
- return MERMAID_PREFIX + (task.name || normalizeTo_camelCase('task-' + titleToName(task.title)));
13807
- };
13808
- const inputAndIntermediateParametersMermaid = pipelineJson.tasks
13809
- .flatMap(({ title, dependentParameterNames, resultingParameterName }) => [
13810
- `${parameterNameToTaskName(resultingParameterName)}("${title}")`,
13811
- ...dependentParameterNames.map((dependentParameterName) => `${parameterNameToTaskName(dependentParameterName)}--"{${dependentParameterName}}"-->${parameterNameToTaskName(resultingParameterName)}`),
13812
- ])
13813
- .join('\n');
13814
- const outputParametersMermaid = pipelineJson.parameters
13815
- .filter(({ isOutput }) => isOutput)
13816
- .map(({ name }) => `${parameterNameToTaskName(name)}--"{${name}}"-->${MERMAID_OUTPUT_NAME}`)
13817
- .join('\n');
13818
- const linksMermaid = pipelineJson.tasks
13819
- .map((task) => {
13820
- const link = linkTask(task);
13821
- if (link === null) {
13822
- return '';
13823
- }
13824
- const { href, title } = link;
13825
- const taskName = parameterNameToTaskName(task.resultingParameterName);
13826
- return `click ${taskName} href "${href}" "${title}";`;
13827
- })
13828
- .filter((line) => line !== '')
13829
- .join('\n');
13830
- const interactionPointsMermaid = Object.entries({
13831
- [MERMAID_INPUT_NAME]: 'Input',
13832
- [MERMAID_OUTPUT_NAME]: 'Output',
13833
- [MERMAID_RESERVED_NAME]: 'Other',
13834
- [MERMAID_KNOWLEDGE_NAME]: 'Knowledge',
13835
- })
13836
- .filter(([MERMAID_NAME]) => (inputAndIntermediateParametersMermaid + outputParametersMermaid).includes(MERMAID_NAME))
13837
- .map(([MERMAID_NAME, title]) => `${MERMAID_NAME}((${title})):::${MERMAID_NAME}`)
13838
- .join('\n');
13839
- const promptbookMermaid = spaceTrim$1.spaceTrim((block) => `
13840
-
13841
- %% 🔮 Tip: Open this on GitHub or in the VSCode website to see the Mermaid graph visually
13842
-
13843
- flowchart LR
13844
- subgraph "${pipelineJson.title}"
13845
-
13846
- %% Basic configuration
13847
- direction TB
13848
-
13849
- %% Interaction points from pipeline to outside
13850
- ${block(interactionPointsMermaid)}
13851
-
13852
- %% Input and intermediate parameters
13853
- ${block(inputAndIntermediateParametersMermaid)}
13854
-
13855
-
13856
- %% Output parameters
13857
- ${block(outputParametersMermaid)}
13858
-
13859
- %% Links
13860
- ${block(linksMermaid)}
13861
-
13862
- %% Styles
13863
- classDef ${MERMAID_INPUT_NAME} color: grey;
13864
- classDef ${MERMAID_OUTPUT_NAME} color: grey;
13865
- classDef ${MERMAID_RESERVED_NAME} color: grey;
13866
- classDef ${MERMAID_KNOWLEDGE_NAME} color: grey;
13867
-
13868
- end;
13869
-
13870
- `);
13871
- return promptbookMermaid;
13872
- }
13873
- /**
13874
- * TODO: [🧠] FOREACH in mermaid graph
13875
- * TODO: [🧠] Knowledge in mermaid graph
13876
- * TODO: [🧠] Personas in mermaid graph
13877
- * TODO: Maybe use some Mermaid package instead of string templating
13878
- * TODO: [🕌] When more than 2 functionalities, split into separate functions
13879
- */
13880
-
13881
14040
  /**
13882
14041
  * Prettyfies Promptbook string and adds Mermaid graph
13883
14042
  *
@@ -14438,64 +14597,6 @@
14438
14597
  * TODO: [®] DRY Register logic
14439
14598
  */
14440
14599
 
14441
- /**
14442
- * Detects if the code is running in a browser environment in main thread (Not in a web worker)
14443
- *
14444
- * Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
14445
- *
14446
- * @public exported from `@promptbook/utils`
14447
- */
14448
- const $isRunningInBrowser = new Function(`
14449
- try {
14450
- return this === window;
14451
- } catch (e) {
14452
- return false;
14453
- }
14454
- `);
14455
- /**
14456
- * TODO: [🎺]
14457
- */
14458
-
14459
- /**
14460
- * Detects if the code is running in a Node.js environment
14461
- *
14462
- * Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
14463
- *
14464
- * @public exported from `@promptbook/utils`
14465
- */
14466
- const $isRunningInNode = new Function(`
14467
- try {
14468
- return this === global;
14469
- } catch (e) {
14470
- return false;
14471
- }
14472
- `);
14473
- /**
14474
- * TODO: [🎺]
14475
- */
14476
-
14477
- /**
14478
- * Detects if the code is running in a web worker
14479
- *
14480
- * Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
14481
- *
14482
- * @public exported from `@promptbook/utils`
14483
- */
14484
- const $isRunningInWebWorker = new Function(`
14485
- try {
14486
- if (typeof WorkerGlobalScope !== 'undefined' && self instanceof WorkerGlobalScope) {
14487
- return true;
14488
- } else {
14489
- return false;
14490
- }
14491
- } catch (e) {
14492
- return false;
14493
- }
14494
- `);
14495
- /**
14496
- * TODO: [🎺]
14497
- */
14498
-
14499
14600
  /**
14500
14601
  * Creates a message with all registered LLM tools
14501
14602
  *
@@ -14729,18 +14830,6 @@
14729
14830
  }
14730
14831
  }
14731
14832
 
14732
- /**
14733
- * Simple wrapper `new Date().toISOString()`
14734
- *
14735
- * Note: `$` is used to indicate that this function is not a pure function - it is not deterministic because it depends on the current time
14736
- *
14737
- * @returns string_date branded type
14738
- * @public exported from `@promptbook/utils`
14739
- */
14740
- function $getCurrentDate() {
14741
- return new Date().toISOString();
14742
- }
14743
-
14744
14833
  /**
14745
14834
  * Intercepts LLM tools and counts total usage of the tools
14746
14835
  *
@@ -15367,17 +15456,17 @@
15367
15456
  },
15368
15457
  /**/
15369
15458
  /*/
15370
- {
15371
- modelTitle: 'tts-1-hd-1106',
15372
- modelName: 'tts-1-hd-1106',
15373
- },
15374
- /**/
15459
+ {
15460
+ modelTitle: 'tts-1-hd-1106',
15461
+ modelName: 'tts-1-hd-1106',
15462
+ },
15463
+ /**/
15375
15464
  /*/
15376
- {
15377
- modelTitle: 'tts-1-hd',
15378
- modelName: 'tts-1-hd',
15379
- },
15380
- /**/
15465
+ {
15466
+ modelTitle: 'tts-1-hd',
15467
+ modelName: 'tts-1-hd',
15468
+ },
15469
+ /**/
15381
15470
  /**/
15382
15471
  {
15383
15472
  modelVariant: 'CHAT',
@@ -16563,7 +16652,7 @@
16563
16652
  *
16564
16653
  * This is useful for calling OpenAI API with a single assistant, for more wide usage use `OpenAiExecutionTools`.
16565
16654
  *
16566
- * Note: [🦖] There are several different things in Promptbook:
16655
+ * !!! Note: [🦖] There are several different things in Promptbook:
16567
16656
  * - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
16568
16657
  * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
16569
16658
  * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
@@ -16669,17 +16758,21 @@
16669
16758
  console.info('connect', stream.currentEvent);
16670
16759
  }
16671
16760
  });
16761
+ /*
16672
16762
  stream.on('messageDelta', (messageDelta) => {
16673
- var _a;
16674
- if (this.options.isVerbose &&
16763
+ if (
16764
+ this.options.isVerbose &&
16675
16765
  messageDelta &&
16676
16766
  messageDelta.content &&
16677
16767
  messageDelta.content[0] &&
16678
- messageDelta.content[0].type === 'text') {
16679
- console.info('messageDelta', (_a = messageDelta.content[0].text) === null || _a === void 0 ? void 0 : _a.value);
16768
+ messageDelta.content[0].type === 'text'
16769
+ ) {
16770
+ console.info('messageDelta', messageDelta.content[0].text?.value);
16680
16771
  }
16772
+
16681
16773
  // <- TODO: [🐚] Make streaming and running tasks working
16682
16774
  });
16775
+ */
16683
16776
  stream.on('messageCreated', (message) => {
16684
16777
  if (this.options.isVerbose) {
16685
16778
  console.info('messageCreated', message);
@@ -16734,15 +16827,19 @@
16734
16827
  },
16735
16828
  });
16736
16829
  }
16737
- async playground() {
16830
+ /*
16831
+ public async playground() {
16738
16832
  const client = await this.getClient();
16833
+
16739
16834
  // List all assistants
16740
16835
  const assistants = await client.beta.assistants.list();
16741
16836
  console.log('!!! Assistants:', assistants);
16837
+
16742
16838
  // Get details of a specific assistant
16743
16839
  const assistantId = 'asst_MO8fhZf4dGloCfXSHeLcIik0';
16744
16840
  const assistant = await client.beta.assistants.retrieve(assistantId);
16745
16841
  console.log('!!! Assistant Details:', assistant);
16842
+
16746
16843
  // Update an assistant
16747
16844
  const updatedAssistant = await client.beta.assistants.update(assistantId, {
16748
16845
  name: assistant.name + '(M)',
@@ -16752,75 +16849,196 @@
16752
16849
  },
16753
16850
  });
16754
16851
  console.log('!!! Updated Assistant:', updatedAssistant);
16755
- await waitasecond.forEver();
16852
+
16853
+ await forEver();
16854
+ }
16855
+ */
16856
+ /**
16857
+ * Get an existing assistant tool wrapper
16858
+ */
16859
+ getAssistant(assistantId) {
16860
+ return new OpenAiAssistantExecutionTools({
16861
+ ...this.options,
16862
+ assistantId,
16863
+ });
16756
16864
  }
16757
16865
  async createNewAssistant(options) {
16758
16866
  if (!this.isCreatingNewAssistantsAllowed) {
16759
16867
  throw new NotAllowed(`Creating new assistants is not allowed. Set \`isCreatingNewAssistantsAllowed: true\` in options to enable this feature.`);
16760
16868
  }
16761
16869
  // await this.playground();
16762
- const { name, instructions } = options;
16870
+ const { name, instructions, knowledgeSources } = options;
16763
16871
  const client = await this.getClient();
16764
- /*/
16765
- //TODO: !!!
16766
- async function downloadFile(url: string, folder = './tmp'): Promise<string> {
16767
- const filename = path.basename(url.split('?')[0]);
16768
- const filepath = path.join(folder, filename);
16769
-
16770
- if (!fs.existsSync(folder)) fs.mkdirSync(folder);
16771
-
16772
- const res = await fetch(url);
16773
- if (!res.ok) throw new Error(`Download error: ${url}`);
16774
- const buffer = await res.arrayBuffer();
16775
- fs.writeFileSync(filepath, Buffer.from(buffer));
16776
- console.log(`📥 File downloaded: ${filename}`);
16777
-
16778
- return filepath;
16779
- }
16780
-
16781
- async function uploadFileToOpenAI(filepath: string) {
16782
- const file = await client.files.create({
16783
- file: fs.createReadStream(filepath),
16784
- purpose: 'assistants',
16872
+ let vectorStoreId;
16873
+ // If knowledge sources are provided, create a vector store with them
16874
+ if (knowledgeSources && knowledgeSources.length > 0) {
16875
+ if (this.options.isVerbose) {
16876
+ console.info(`📚 Creating vector store with ${knowledgeSources.length} knowledge sources...`);
16877
+ }
16878
+ // Create a vector store
16879
+ const vectorStore = await client.beta.vectorStores.create({
16880
+ name: `${name} Knowledge Base`,
16785
16881
  });
16786
- console.log(`⬆️ File uploaded to OpenAI: ${file.filename} (${file.id})`);
16787
- return file;
16882
+ vectorStoreId = vectorStore.id;
16883
+ if (this.options.isVerbose) {
16884
+ console.info(`✅ Vector store created: ${vectorStoreId}`);
16885
+ }
16886
+ // Upload files from knowledge sources to the vector store
16887
+ const fileStreams = [];
16888
+ for (const source of knowledgeSources) {
16889
+ try {
16890
+ // Check if it's a URL
16891
+ if (source.startsWith('http://') || source.startsWith('https://')) {
16892
+ // Download the file
16893
+ const response = await fetch(source);
16894
+ if (!response.ok) {
16895
+ console.error(`Failed to download ${source}: ${response.statusText}`);
16896
+ continue;
16897
+ }
16898
+ const buffer = await response.arrayBuffer();
16899
+ const filename = source.split('/').pop() || 'downloaded-file';
16900
+ const blob = new Blob([buffer]);
16901
+ const file = new File([blob], filename);
16902
+ fileStreams.push(file);
16903
+ }
16904
+ else {
16905
+ // Assume it's a local file path
16906
+ // Note: This works in a Node.js environment
16907
+ // For browser environments, this would need different handling
16908
+ const fs = await import('fs');
16909
+ const fileStream = fs.createReadStream(source);
16910
+ fileStreams.push(fileStream);
16911
+ }
16912
+ }
16913
+ catch (error) {
16914
+ console.error(`Error processing knowledge source ${source}:`, error);
16915
+ }
16916
+ }
16917
+ // Batch upload files to the vector store
16918
+ if (fileStreams.length > 0) {
16919
+ try {
16920
+ await client.beta.vectorStores.fileBatches.uploadAndPoll(vectorStoreId, {
16921
+ files: fileStreams,
16922
+ });
16923
+ if (this.options.isVerbose) {
16924
+ console.info(`✅ Uploaded ${fileStreams.length} files to vector store`);
16925
+ }
16926
+ }
16927
+ catch (error) {
16928
+ console.error('Error uploading files to vector store:', error);
16929
+ }
16930
+ }
16788
16931
  }
16789
-
16790
- // 🌐 URL addresses of files to upload
16791
- const fileUrls = [
16792
- 'https://raw.githubusercontent.com/vercel/next.js/canary/packages/next/README.md',
16793
- 'https://raw.githubusercontent.com/openai/openai-cookbook/main/examples/How_to_call_the_Assistants_API_with_Node.js.ipynb',
16794
- ];
16795
-
16796
- // 1️⃣ Download files from URL
16797
- const localFiles = [];
16798
- for (const url of fileUrls) {
16799
- const filepath = await downloadFile(url);
16800
- localFiles.push(filepath);
16932
+ // Create assistant with vector store attached
16933
+ const assistantConfig = {
16934
+ name,
16935
+ description: 'Assistant created via Promptbook',
16936
+ model: 'gpt-4o',
16937
+ instructions,
16938
+ tools: [/* TODO: [🧠] Maybe add { type: 'code_interpreter' }, */ { type: 'file_search' }],
16939
+ };
16940
+ // Attach vector store if created
16941
+ if (vectorStoreId) {
16942
+ assistantConfig.tool_resources = {
16943
+ file_search: {
16944
+ vector_store_ids: [vectorStoreId],
16945
+ },
16946
+ };
16947
+ }
16948
+ const assistant = await client.beta.assistants.create(assistantConfig);
16949
+ console.log(`✅ Assistant created: ${assistant.id}`);
16950
+ // TODO: !!!! Try listing existing assistants
16951
+ // TODO: !!!! Try marking existing assistants by DISCRIMINANT
16952
+ // TODO: !!!! Allow to update and reconnect to existing assistants
16953
+ return new OpenAiAssistantExecutionTools({
16954
+ ...this.options,
16955
+ isCreatingNewAssistantsAllowed: false,
16956
+ assistantId: assistant.id,
16957
+ });
16958
+ }
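A short sketch of how `createNewAssistant` is meant to be called (hedged: how the `llmTools` instance itself is constructed is not shown in this diff; it stands for an already-created instance of these execution tools with `isCreatingNewAssistantsAllowed: true`):

    const assistantTools = await llmTools.createNewAssistant({
        name: 'Support agent',
        instructions: 'Answer questions about the product.',
        knowledgeSources: [
            'https://example.com/handbook.md', // URLs are downloaded and pushed into a vector store
            './knowledge/faq.md', // local paths are read via fs.createReadStream (Node.js only)
        ],
    });

    // The returned tools are bound to the freshly created assistant; later runs can
    // reconnect to it without creating a duplicate:
    const reconnected = llmTools.getAssistant(assistantTools.assistantId);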
16959
+ async updateAssistant(options) {
16960
+ if (!this.isCreatingNewAssistantsAllowed) {
16961
+ throw new NotAllowed(`Updating assistants is not allowed. Set \`isCreatingNewAssistantsAllowed: true\` in options to enable this feature.`);
16801
16962
  }
16802
-
16803
- // 2️⃣ Upload files to OpenAI
16804
- const uploadedFiles = [];
16805
- for (const filepath of localFiles) {
16806
- const file = await uploadFileToOpenAI(filepath);
16807
- uploadedFiles.push(file.id);
16963
+ const { assistantId, name, instructions, knowledgeSources } = options;
16964
+ const client = await this.getClient();
16965
+ let vectorStoreId;
16966
+ // If knowledge sources are provided, create a vector store with them
16967
+ // TODO: [🧠] Reuse vector store creation logic from createNewAssistant
16968
+ if (knowledgeSources && knowledgeSources.length > 0) {
16969
+ if (this.options.isVerbose) {
16970
+ console.info(`📚 Creating vector store for update with ${knowledgeSources.length} knowledge sources...`);
16971
+ }
16972
+ // Create a vector store
16973
+ const vectorStore = await client.beta.vectorStores.create({
16974
+ name: `${name} Knowledge Base`,
16975
+ });
16976
+ vectorStoreId = vectorStore.id;
16977
+ if (this.options.isVerbose) {
16978
+ console.info(`✅ Vector store created: ${vectorStoreId}`);
16979
+ }
16980
+ // Upload files from knowledge sources to the vector store
16981
+ const fileStreams = [];
16982
+ for (const source of knowledgeSources) {
16983
+ try {
16984
+ // Check if it's a URL
16985
+ if (source.startsWith('http://') || source.startsWith('https://')) {
16986
+ // Download the file
16987
+ const response = await fetch(source);
16988
+ if (!response.ok) {
16989
+ console.error(`Failed to download ${source}: ${response.statusText}`);
16990
+ continue;
16991
+ }
16992
+ const buffer = await response.arrayBuffer();
16993
+ const filename = source.split('/').pop() || 'downloaded-file';
16994
+ const blob = new Blob([buffer]);
16995
+ const file = new File([blob], filename);
16996
+ fileStreams.push(file);
16997
+ }
16998
+ else {
16999
+ // Assume it's a local file path
17000
+ // Note: This works in a Node.js environment
17001
+ // For browser environments, this would need different handling
17002
+ const fs = await import('fs');
17003
+ const fileStream = fs.createReadStream(source);
17004
+ fileStreams.push(fileStream);
17005
+ }
17006
+ }
17007
+ catch (error) {
17008
+ console.error(`Error processing knowledge source ${source}:`, error);
17009
+ }
17010
+ }
17011
+ // Batch upload files to the vector store
17012
+ if (fileStreams.length > 0) {
17013
+ try {
17014
+ await client.beta.vectorStores.fileBatches.uploadAndPoll(vectorStoreId, {
17015
+ files: fileStreams,
17016
+ });
17017
+ if (this.options.isVerbose) {
17018
+ console.info(`✅ Uploaded ${fileStreams.length} files to vector store`);
17019
+ }
17020
+ }
17021
+ catch (error) {
17022
+ console.error('Error uploading files to vector store:', error);
17023
+ }
17024
+ }
16808
17025
  }
16809
- /**/
16810
- // alert('!!!! Creating new OpenAI assistant');
16811
- // 3️⃣ Create assistant with uploaded files
16812
- const assistant = await client.beta.assistants.create({
17026
+ const assistantUpdate = {
16813
17027
  name,
16814
- description: 'Assistant created via Promptbook',
16815
- model: 'gpt-4o',
16816
17028
  instructions,
16817
17029
  tools: [/* TODO: [🧠] Maybe add { type: 'code_interpreter' }, */ { type: 'file_search' }],
16818
- // !!!! file_ids: uploadedFiles,
16819
- });
16820
- console.log(`✅ Assistant created: ${assistant.id}`);
16821
- // TODO: !!!! Try listing existing assistants
16822
- // TODO: !!!! Try marking existing assistants by DISCRIMINANT
16823
- // TODO: !!!! Allow to update and reconnect to existing assistants
17030
+ };
17031
+ if (vectorStoreId) {
17032
+ assistantUpdate.tool_resources = {
17033
+ file_search: {
17034
+ vector_store_ids: [vectorStoreId],
17035
+ },
17036
+ };
17037
+ }
17038
+ const assistant = await client.beta.assistants.update(assistantId, assistantUpdate);
17039
+ if (this.options.isVerbose) {
17040
+ console.log(`✅ Assistant updated: ${assistant.id}`);
17041
+ }
16824
17042
  return new OpenAiAssistantExecutionTools({
16825
17043
  ...this.options,
16826
17044
  isCreatingNewAssistantsAllowed: false,
@@ -16859,7 +17077,7 @@
16859
17077
  * Execution Tools for calling LLM models with a predefined agent "soul"
16860
17078
  * This wraps underlying LLM execution tools and applies agent-specific system prompts and requirements
16861
17079
  *
16862
- * Note: [🦖] There are several different things in Promptbook:
17080
+ * !!! Note: [🦖] There are several different things in Promptbook:
16863
17081
  * - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
16864
17082
  * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
16865
17083
  * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
@@ -16968,26 +17186,58 @@
16968
17186
  const chatPrompt = prompt;
16969
17187
  let underlyingLlmResult;
16970
17188
  if (OpenAiAssistantExecutionTools.isOpenAiAssistantExecutionTools(this.options.llmTools)) {
16971
- if (this.options.isVerbose) {
16972
- console.log(`Creating new OpenAI Assistant for agent ${this.title}...`);
17189
+ const requirementsHash = cryptoJs.SHA256(JSON.stringify(modelRequirements)).toString();
17190
+ const cached = AgentLlmExecutionTools.assistantCache.get(this.title);
17191
+ let assistant;
17192
+ if (cached) {
17193
+ if (cached.requirementsHash === requirementsHash) {
17194
+ if (this.options.isVerbose) {
17195
+ console.log(`1️⃣ Using cached OpenAI Assistant for agent ${this.title}...`);
17196
+ }
17197
+ assistant = this.options.llmTools.getAssistant(cached.assistantId);
17198
+ }
17199
+ else {
17200
+ if (this.options.isVerbose) {
17201
+ console.log(`1️⃣ Updating OpenAI Assistant for agent ${this.title}...`);
17202
+ }
17203
+ assistant = await this.options.llmTools.updateAssistant({
17204
+ assistantId: cached.assistantId,
17205
+ name: this.title,
17206
+ instructions: modelRequirements.systemMessage,
17207
+ knowledgeSources: modelRequirements.knowledgeSources,
17208
+ });
17209
+ AgentLlmExecutionTools.assistantCache.set(this.title, {
17210
+ assistantId: assistant.assistantId,
17211
+ requirementsHash,
17212
+ });
17213
+ }
16973
17214
  }
16974
- // <- TODO: !!! Check also `isCreatingNewAssistantsAllowed` and warn about it
16975
- const assistant = await this.options.llmTools.createNewAssistant({
16976
- name: this.title,
16977
- instructions: modelRequirements.systemMessage,
16978
- /*
16979
- !!!
16980
- metadata: {
16981
- agentModelName: this.modelName,
17215
+ else {
17216
+ if (this.options.isVerbose) {
17217
+ console.log(`1️⃣ Creating new OpenAI Assistant for agent ${this.title}...`);
16982
17218
  }
16983
- */
16984
- });
16985
- // <- TODO: !!! Cache the assistant in prepareCache
17219
+ // <- TODO: !!! Check also `isCreatingNewAssistantsAllowed` and warn about it
17220
+ assistant = await this.options.llmTools.createNewAssistant({
17221
+ name: this.title,
17222
+ instructions: modelRequirements.systemMessage,
17223
+ knowledgeSources: modelRequirements.knowledgeSources,
17224
+ /*
17225
+ !!!
17226
+ metadata: {
17227
+ agentModelName: this.modelName,
17228
+ }
17229
+ */
17230
+ });
17231
+ AgentLlmExecutionTools.assistantCache.set(this.title, {
17232
+ assistantId: assistant.assistantId,
17233
+ requirementsHash,
17234
+ });
17235
+ }
16986
17236
  underlyingLlmResult = await assistant.callChatModel(chatPrompt);
16987
17237
  }
16988
17238
  else {
16989
17239
  if (this.options.isVerbose) {
16990
- console.log(`Creating Assistant ${this.title} on generic LLM execution tools...`);
17240
+ console.log(`2️⃣ Creating Assistant ${this.title} on generic LLM execution tools...`);
16991
17241
  }
16992
17242
  // Create modified chat prompt with agent system message
16993
17243
  const modifiedChatPrompt = {
@@ -17017,6 +17267,10 @@
17017
17267
  return agentResult;
17018
17268
  }
17019
17269
  }
17270
+ /**
17271
+ * Cache of OpenAI assistants to avoid creating duplicates
17272
+ */
17273
+ AgentLlmExecutionTools.assistantCache = new Map();
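A condensed sketch of the caching decision implemented above; the cache is a per-process, in-memory Map keyed by the agent title (`agentTitle` stands in for `this.title`), and the hash is computed with the `crypto-js` SHA256 already used in this file:

    const requirementsHash = cryptoJs.SHA256(JSON.stringify(modelRequirements)).toString();
    const cached = AgentLlmExecutionTools.assistantCache.get(agentTitle);

    if (!cached) {
        // First call for this agent -> create a new OpenAI Assistant and remember its id + hash
    } else if (cached.requirementsHash === requirementsHash) {
        // Same system message / knowledge sources -> reuse cached.assistantId as-is
    } else {
        // Requirements changed -> update the existing assistant instead of creating a duplicate
    }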
17020
17274
  /**
17021
17275
  * TODO: [🍚] Implement Destroyable pattern to free resources
17022
17276
  * TODO: [🧠] Adding parameter substitution support (here or should be responsibility of the underlying LLM Tools)
@@ -17025,7 +17279,7 @@
17025
17279
  /**
17026
17280
  * Represents one AI Agent
17027
17281
  *
17028
- * Note: [🦖] There are several different things in Promptbook:
17282
+ * !!! Note: [🦖] There are several different things in Promptbook:
17029
17283
  * - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
17030
17284
  * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
17031
17285
  * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
@@ -17033,7 +17287,19 @@
17033
17287
  *
17034
17288
  * @public exported from `@promptbook/core`
17035
17289
  */
17036
- class Agent {
17290
+ class Agent extends AgentLlmExecutionTools {
17291
+ /**
17292
+ * Name of the agent
17293
+ */
17294
+ get agentName() {
17295
+ return this._agentName || createDefaultAgentName(this.agentSource.value);
17296
+ }
17297
+ /**
17298
+ * Computed hash of the agent source for integrity verification
17299
+ */
17300
+ get agentHash() {
17301
+ return computeAgentHash(this.agentSource.value);
17302
+ }
17037
17303
  /**
17038
17304
  * Not used in Agent, always returns empty array
17039
17305
  */
@@ -17043,11 +17309,13 @@
17043
17309
  ];
17044
17310
  }
17045
17311
  constructor(options) {
17046
- this.options = options;
17047
- /**
17048
- * Name of the agent
17049
- */
17050
- this.agentName = null;
17312
+ const agentSource = asUpdatableSubject(options.agentSource);
17313
+ super({
17314
+ isVerbose: options.isVerbose,
17315
+ llmTools: getSingleLlmExecutionTools(options.executionTools.llm),
17316
+ agentSource: agentSource.value, // <- TODO: !!!! Allow to pass BehaviorSubject<string_book> OR refresh llmExecutionTools.callChat on agentSource change
17317
+ });
17318
+ this._agentName = undefined;
17051
17319
  /**
17052
17320
  * Description of the agent
17053
17321
  */
@@ -17056,27 +17324,16 @@
17056
17324
  * Metadata like image or color
17057
17325
  */
17058
17326
  this.meta = {};
17059
- this.agentSource = asUpdatableSubject(options.agentSource);
17327
+ // TODO: !!!!! Add `Agent` simple "mocked" learning by appending to agent source
17328
+ // TODO: !!!!! Add `Agent` learning by promptbookAgent
17329
+ this.agentSource = agentSource;
17060
17330
  this.agentSource.subscribe((source) => {
17061
17331
  const { agentName, personaDescription, meta } = parseAgentSource(source);
17062
- this.agentName = agentName;
17332
+ this._agentName = agentName;
17063
17333
  this.personaDescription = personaDescription;
17064
17334
  this.meta = { ...this.meta, ...meta };
17065
17335
  });
17066
17336
  }
17067
- /**
17068
- * Creates LlmExecutionTools which exposes the agent as a model
17069
- */
17070
- getLlmExecutionTools() {
17071
- const llmTools = new AgentLlmExecutionTools({
17072
- isVerbose: this.options.isVerbose,
17073
- llmTools: getSingleLlmExecutionTools(this.options.executionTools.llm),
17074
- agentSource: this.agentSource.value, // <- TODO: !!!! Allow to pass BehaviorSubject<string_book> OR refresh llmExecutionTools.callChat on agentSource change
17075
- });
17076
- // TODO: !!!! Add `Agent` simple "mocked" learning by appending to agent source
17077
- // TODO: !!!! Add `Agent` learning by promptbookAgent
17078
- return llmTools;
17079
- }
17080
17337
  }
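A minimal sketch of the reactive wiring above (hedged: `llm` stands for any already-created `LlmExecutionTools` instance; exact name normalization is handled by `parseAgentSource`):

    import { BehaviorSubject } from 'rxjs';
    import { Agent, book } from '@promptbook/core';

    const agentSource = new BehaviorSubject(book`
        Jane

        PERSONA A friendly documentation assistant
    `);

    const agent = new Agent({
        agentSource, // BehaviorSubject<string_book>, wrapped internally via asUpdatableSubject
        executionTools: { llm },
        isVerbose: true,
    });

    console.info(agent.agentName); // parsed from the source, or createDefaultAgentName(...) as a fallback
    console.info(agent.agentHash); // computeAgentHash(agentSource.value)

    // Pushing a new source re-parses the name, persona description and meta:
    agentSource.next(book`
        Jane Doe

        PERSONA A more formal documentation assistant
    `);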
17081
17338
  /**
17082
17339
  * TODO: [🧠][😰]Agent is not working with the parameters, should it be?
@@ -17143,6 +17400,106 @@
17143
17400
  * Note: [💞] Ignore a discrepancy between file name and entity name
17144
17401
  */
17145
17402
 
17403
+ /**
17404
+ * Represents one AI Agent
17405
+ *
17406
+ * !!!!!! Note: [🦖] There are several different things in Promptbook:
17407
+ * - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
17408
+ * !!!!!! `RemoteAgent`
17409
+ * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
17410
+ * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
17411
+ * - `OpenAiAssistantExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities, recommended for usage in `Agent` or `AgentLlmExecutionTools`
17412
+ *
17413
+ * @public exported from `@promptbook/core`
17414
+ */
17415
+ class RemoteAgent extends Agent {
17416
+ static async connect(options) {
17417
+ console.log('!!!!!', `${options.agentUrl}/api/book`);
17418
+ const bookResponse = await fetch(`${options.agentUrl}/api/book`);
17419
+ // <- TODO: !!!! What about closed-source agents?
17420
+ // <- TODO: !!!! Maybe use promptbookFetch
17421
+ const agentSourceValue = (await bookResponse.text());
17422
+ const agentSource = new rxjs.BehaviorSubject(agentSourceValue);
17423
+ // <- TODO: !!!! Support updating and self-updating
17424
+ return new RemoteAgent({
17425
+ ...options,
17426
+ executionTools: {
17427
+ /* Note: These tools are not used */
17428
+ // ---------------------------------------
17429
+ /*
17430
+ TODO: !!! Get rid of
17431
+
17432
+ > You have not provided any `LlmExecutionTools`
17433
+ > This means that you won't be able to execute any prompts that require large language models like GPT-4 or Anthropic's Claude.
17434
+ >
17435
+ > Technically, it's not an error, but it's probably not what you want because it does not make sense to use Promptbook without language models.
17436
+
17437
+ */
17438
+ },
17439
+ agentSource,
17440
+ });
17441
+ }
17442
+ constructor(options) {
17443
+ super(options);
17444
+ this.agentUrl = options.agentUrl;
17445
+ }
17446
+ /**
17447
+ * Calls the agent on its remote server
17448
+ */
17449
+ async callChatModel(prompt) {
17450
+ // Ensure we're working with a chat prompt
17451
+ if (prompt.modelRequirements.modelVariant !== 'CHAT') {
17452
+ throw new Error('Agents only supports chat prompts');
17453
+ }
17454
+ const bookResponse = await fetch(`${this.agentUrl}/api/chat?message=${encodeURIComponent(prompt.content)}`);
17455
+ // <- TODO: !!!! What about closed-source agents?
17456
+ // <- TODO: !!!! Maybe use promptbookFetch
17457
+ let content = '';
17458
+ if (!bookResponse.body) {
17459
+ content = await bookResponse.text();
17460
+ }
17461
+ else {
17462
+ // Note: [🐚] Problem with streaming is not here but it is not implemented on server
17463
+ const decoder = new TextDecoder();
17464
+ // Web ReadableStream is not async-iterable in many runtimes; use a reader.
17465
+ const reader = bookResponse.body.getReader();
17466
+ try {
17467
+ let doneReading = false;
17468
+ while (!doneReading) {
17469
+ const { done, value } = await reader.read();
17470
+ doneReading = !!done;
17471
+ if (value) {
17472
+ const textChunk = decoder.decode(value, { stream: true });
17473
+ // console.debug('RemoteAgent chunk:', textChunk);
17474
+ content += textChunk;
17475
+ }
17476
+ }
17477
+ // Flush any remaining decoder internal state
17478
+ content += decoder.decode();
17479
+ }
17480
+ finally {
17481
+ reader.releaseLock();
17482
+ }
17483
+ }
17484
+ // <- TODO: !!!! Transfer metadata
17485
+ const agentResult = {
17486
+ content,
17487
+ modelName: this.modelName,
17488
+ timing: {},
17489
+ usage: {},
17490
+ rawPromptContent: {},
17491
+ rawRequest: {},
17492
+ rawResponse: {},
17493
+ // <- TODO: !!!! Transfer and proxy the metadata
17494
+ };
17495
+ return agentResult;
17496
+ }
17497
+ }
17498
+ /**
17499
+ * TODO: [🧠][😰]Agent is not working with the parameters, should it be?
17500
+ * TODO: !!! Agent on remote server
17501
+ */
17502
+
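A minimal sketch of talking to a remote agent server with `RemoteAgent` (the URL is a placeholder; only the `content` and `modelVariant` fields checked in `callChatModel` above are filled in):

    import { RemoteAgent } from '@promptbook/core';

    const agent = await RemoteAgent.connect({
        agentUrl: 'https://agents.example.com/my-agent', // <- placeholder server exposing /api/book and /api/chat
    });

    const result = await agent.callChatModel({
        content: 'Hello, who are you?',
        modelRequirements: { modelVariant: 'CHAT' },
    });

    console.info(result.content); // reply accumulated chunk by chunk from the streamed response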
17146
17503
  /**
17147
17504
  * Registration of LLM provider metadata
17148
17505
  *
@@ -17264,24 +17621,6 @@
17264
17621
  * Note: [💞] Ignore a discrepancy between file name and entity name
17265
17622
  */
17266
17623
 
17267
- /**
17268
- * Detects if the code is running in jest environment
17269
- *
17270
- * Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
17271
- *
17272
- * @public exported from `@promptbook/utils`
17273
- */
17274
- const $isRunningInJest = new Function(`
17275
- try {
17276
- return process.env.JEST_WORKER_ID !== undefined;
17277
- } catch (e) {
17278
- return false;
17279
- }
17280
- `);
17281
- /**
17282
- * TODO: [🎺]
17283
- */
17284
-
17285
17624
  /**
17286
17625
  * Registration of LLM provider metadata
17287
17626
  *
@@ -17634,61 +17973,6 @@
17634
17973
  * TODO: [🧠][🈴] Where is the best location for this file
17635
17974
  */
17636
17975
 
17637
- /**
17638
- * Tag function for notating a prompt as template literal
17639
- *
17640
- * Note: There are 3 similar functions:
17641
- * 1) `prompt` for notating single prompt exported from `@promptbook/utils`
17642
- * 2) `promptTemplate` alias for `prompt`
17643
- * 3) `book` for notating and validating entire books exported from `@promptbook/utils`
17644
- *
17645
- * @param strings
17646
- * @param values
17647
- * @returns the prompt string
17648
- * @public exported from `@promptbook/utils`
17649
- */
17650
- function prompt(strings, ...values) {
17651
- if (values.length === 0) {
17652
- return spaceTrim__default["default"](strings.join(''));
17653
- }
17654
- const stringsWithHiddenParameters = strings.map((stringsItem) =>
17655
- // TODO: [0] DRY
17656
- stringsItem.split('{').join(`${REPLACING_NONCE}beginbracket`).split('}').join(`${REPLACING_NONCE}endbracket`));
17657
- const placeholderParameterNames = values.map((value, i) => `${REPLACING_NONCE}${i}`);
17658
- const parameters = Object.fromEntries(values.map((value, i) => [placeholderParameterNames[i], value]));
17659
- // Combine strings and values
17660
- let pipelineString = stringsWithHiddenParameters.reduce((result, stringsItem, i) => placeholderParameterNames[i] === undefined
17661
- ? `${result}${stringsItem}`
17662
- : `${result}${stringsItem}{${placeholderParameterNames[i]}}`, '');
17663
- pipelineString = spaceTrim__default["default"](pipelineString);
17664
- try {
17665
- pipelineString = templateParameters(pipelineString, parameters);
17666
- }
17667
- catch (error) {
17668
- if (!(error instanceof PipelineExecutionError)) {
17669
- throw error;
17670
- }
17671
- console.error({ pipelineString, parameters, placeholderParameterNames, error });
17672
- throw new UnexpectedError(spaceTrim__default["default"]((block) => `
17673
- Internal error in prompt template literal
17674
-
17675
- ${block(JSON.stringify({ strings, values }, null, 4))}}
17676
-
17677
- `));
17678
- }
17679
- // TODO: [0] DRY
17680
- pipelineString = pipelineString
17681
- .split(`${REPLACING_NONCE}beginbracket`)
17682
- .join('{')
17683
- .split(`${REPLACING_NONCE}endbracket`)
17684
- .join('}');
17685
- return pipelineString;
17686
- }
17687
- /**
17688
- * TODO: [🧠][🈴] Where is the best location for this file
17689
- * Note: [💞] Ignore a discrepancy between file name and entity name
17690
- */
17691
-
17692
17976
  /**
17693
17977
  * Tag function for notating a pipeline with a book\`...\ notation as template literal
17694
17978
  *
@@ -18224,7 +18508,7 @@
18224
18508
  });
18225
18509
 
18226
18510
  const answer = response.choices[0].message.content;
18227
- console.log('\\n🧠 ${agentName}:', answer, '\\n');
18511
+ console.log('\\n🧠 ${agentName /* <- TODO: [🕛] There should be `agentFullname` not `agentName` */}:', answer, '\\n');
18228
18512
 
18229
18513
  chatHistory.push({ role: 'assistant', content: answer });
18230
18514
  promptUser();
@@ -18243,7 +18527,7 @@
18243
18527
 
18244
18528
  (async () => {
18245
18529
  await setupKnowledge();
18246
- console.log("🤖 Chat with ${agentName} (type 'exit' to quit)\\n");
18530
+ console.log("🤖 Chat with ${agentName /* <- TODO: [🕛] There should be `agentFullname` not `agentName` */} (type 'exit' to quit)\\n");
18247
18531
  promptUser();
18248
18532
  })();
18249
18533
  `);
@@ -18290,7 +18574,7 @@
18290
18574
  });
18291
18575
 
18292
18576
  const answer = response.choices[0].message.content;
18293
- console.log('\\n🧠 ${agentName}:', answer, '\\n');
18577
+ console.log('\\n🧠 ${agentName /* <- TODO: [🕛] There should be `agentFullname` not `agentName` */}:', answer, '\\n');
18294
18578
 
18295
18579
  chatHistory.push({ role: 'assistant', content: answer });
18296
18580
  promptUser();
@@ -18307,7 +18591,7 @@
18307
18591
  });
18308
18592
  }
18309
18593
 
18310
- console.log("🤖 Chat with ${agentName} (type 'exit' to quit)\\n");
18594
+ console.log("🤖 Chat with ${agentName /* <- TODO: [🕛] There should be `agentFullname` not `agentName` */} (type 'exit' to quit)\\n");
18311
18595
  promptUser();
18312
18596
 
18313
18597
  `);
@@ -18315,25 +18599,6 @@
18315
18599
  },
18316
18600
  };
18317
18601
 
18318
- /**
18319
- * Returns information about the current runtime environment
18320
- *
18321
- * Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environments
18322
- *
18323
- * @public exported from `@promptbook/utils`
18324
- */
18325
- function $detectRuntimeEnvironment() {
18326
- return {
18327
- isRunningInBrowser: $isRunningInBrowser(),
18328
- isRunningInJest: $isRunningInJest(),
18329
- isRunningInNode: $isRunningInNode(),
18330
- isRunningInWebWorker: $isRunningInWebWorker(),
18331
- };
18332
- }
18333
- /**
18334
- * TODO: [🎺] Also detect and report node version here
18335
- */
18336
-
18337
18602
  /**
18338
18603
  * Provide information about Promptbook, engine version, book language version, servers, ...
18339
18604
  *
@@ -18361,8 +18626,7 @@
18361
18626
 
18362
18627
  ## Servers
18363
18628
 
18364
- ${block(REMOTE_SERVER_URLS.map(({ title, urls, isAnonymousModeAllowed, description }, index) => `${index + 1}. ${title} ${description}
18365
- ${isAnonymousModeAllowed ? '🐱‍💻 ' : ''} ${urls.join(', ')}
18629
+ ${block(REMOTE_SERVER_URLS.map(({ title, urls, description }, index) => `${index + 1}. ${title} ${description} ${urls.join(', ')}
18366
18630
  `).join('\n'))}
18367
18631
  `);
18368
18632
  fullInfoPieces.push(serversInfo);
@@ -18405,6 +18669,30 @@
18405
18669
  * TODO: [🤶] Maybe export through `@promptbook/utils` or `@promptbook/random` package
18406
18670
  */
18407
18671
 
18672
+ const PERSONALITIES = [
18673
+ 'Friendly and helpful AI agent.',
18674
+ 'Professional and efficient virtual assistant.',
18675
+ 'Creative and imaginative digital companion.',
18676
+ 'Knowledgeable and informative AI guide.',
18677
+ 'Empathetic and understanding support bot.',
18678
+ 'Energetic and enthusiastic conversational partner.',
18679
+ 'Calm and patient virtual helper.',
18680
+ 'Curious and inquisitive AI explorer.',
18681
+ 'Witty and humorous digital friend.',
18682
+ 'Serious and focused AI consultant.',
18683
+ ];
18684
+ /**
18685
+ * @@@@
18686
+ *
18687
+ * @private internal helper function
18688
+ */
18689
+ function $randomAgentPersona() {
18690
+ return $randomItem(...PERSONALITIES);
18691
+ }
18692
+ /**
18693
+ * TODO: [🤶] Maybe export through `@promptbook/utils` or `@promptbook/random` package
18694
+ */
18695
+
18408
18696
  const FIRSTNAMES = [
18409
18697
  'Paul',
18410
18698
  'George',
@@ -18465,30 +18753,6 @@
18465
18753
  * TODO: [🤶] Maybe export through `@promptbook/utils` or `@promptbook/random` package
18466
18754
  */
18467
18755
 
18468
- const PERSONALITIES = [
18469
- 'Friendly and helpful AI agent.',
18470
- 'Professional and efficient virtual assistant.',
18471
- 'Creative and imaginative digital companion.',
18472
- 'Knowledgeable and informative AI guide.',
18473
- 'Empathetic and understanding support bot.',
18474
- 'Energetic and enthusiastic conversational partner.',
18475
- 'Calm and patient virtual helper.',
18476
- 'Curious and inquisitive AI explorer.',
18477
- 'Witty and humorous digital friend.',
18478
- 'Serious and focused AI consultant.',
18479
- ];
18480
- /**
18481
- * @@@@
18482
- *
18483
- * @private internal helper function
18484
- */
18485
- function $randomAgentPersona() {
18486
- return $randomItem(...PERSONALITIES);
18487
- }
18488
- /**
18489
- * TODO: [🤶] Maybe export through `@promptbook/utils` or `@promptbook/random` package
18490
- */
18491
-
18492
18756
  /**
18493
18757
  * Generates boilerplate for a new agent book
18494
18758
  *
@@ -18513,7 +18777,7 @@
18513
18777
  const agentSource = validateBook(spaceTrim__default["default"]((block) => `
18514
18778
  ${agentName}
18515
18779
 
18516
- META COLOR ${color || '#3498db' /* <- TODO: !!!! Best default color */}
18780
+ META COLOR ${color || '#3498db' /* <- TODO: [🧠] !!!! Best default color */}
18517
18781
  PERSONA ${block(personaDescription)}
18518
18782
  `));
18519
18783
  return agentSource;
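For example, with `agentName` resolving to 'Ema', no explicit color, and one of the random personas above, the generated boilerplate book reads:

    Ema

    META COLOR #3498db
    PERSONA Friendly and helpful AI agent.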
@@ -18620,6 +18884,7 @@
18620
18884
  exports.PromptbookFetchError = PromptbookFetchError;
18621
18885
  exports.REMOTE_SERVER_URLS = REMOTE_SERVER_URLS;
18622
18886
  exports.RESERVED_PARAMETER_NAMES = RESERVED_PARAMETER_NAMES;
18887
+ exports.RemoteAgent = RemoteAgent;
18623
18888
  exports.SET_IS_VERBOSE = SET_IS_VERBOSE;
18624
18889
  exports.SectionTypes = SectionTypes;
18625
18890
  exports.SheetsFormfactorDefinition = SheetsFormfactorDefinition;
@@ -18655,12 +18920,14 @@
18655
18920
  exports.book = book;
18656
18921
  exports.cacheLlmTools = cacheLlmTools;
18657
18922
  exports.compilePipeline = compilePipeline;
18923
+ exports.computeAgentHash = computeAgentHash;
18658
18924
  exports.computeCosineSimilarity = computeCosineSimilarity;
18659
18925
  exports.countUsage = countUsage;
18660
18926
  exports.createAgentLlmExecutionTools = createAgentLlmExecutionTools;
18661
18927
  exports.createAgentModelRequirements = createAgentModelRequirements;
18662
18928
  exports.createAgentModelRequirementsWithCommitments = createAgentModelRequirementsWithCommitments;
18663
18929
  exports.createBasicAgentModelRequirements = createBasicAgentModelRequirements;
18930
+ exports.createDefaultAgentName = createDefaultAgentName;
18664
18931
  exports.createEmptyAgentModelRequirements = createEmptyAgentModelRequirements;
18665
18932
  exports.createLlmToolsFromConfiguration = createLlmToolsFromConfiguration;
18666
18933
  exports.createPipelineCollectionFromJson = createPipelineCollectionFromJson;
@@ -18690,6 +18957,7 @@
18690
18957
  exports.limitTotalUsage = limitTotalUsage;
18691
18958
  exports.makeKnowledgeSourceHandler = makeKnowledgeSourceHandler;
18692
18959
  exports.migratePipeline = migratePipeline;
18960
+ exports.normalizeAgentName = normalizeAgentName;
18693
18961
  exports.padBook = padBook;
18694
18962
  exports.parseAgentSource = parseAgentSource;
18695
18963
  exports.parseParameters = parseParameters;