@promptbook/core 0.103.0-47 → 0.103.0-49

This diff shows the content of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (55)
  1. package/esm/index.es.js +1287 -876
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/servers.d.ts +1 -0
  4. package/esm/typings/src/_packages/core.index.d.ts +6 -0
  5. package/esm/typings/src/_packages/types.index.d.ts +4 -0
  6. package/esm/typings/src/_packages/utils.index.d.ts +2 -0
  7. package/esm/typings/src/book-2.0/agent-source/AgentBasicInformation.d.ts +17 -3
  8. package/esm/typings/src/book-2.0/agent-source/AgentSourceParseResult.d.ts +2 -1
  9. package/esm/typings/src/book-2.0/agent-source/computeAgentHash.d.ts +8 -0
  10. package/esm/typings/src/book-2.0/agent-source/computeAgentHash.test.d.ts +1 -0
  11. package/esm/typings/src/book-2.0/agent-source/createDefaultAgentName.d.ts +8 -0
  12. package/esm/typings/src/book-2.0/agent-source/normalizeAgentName.d.ts +9 -0
  13. package/esm/typings/src/book-2.0/agent-source/normalizeAgentName.test.d.ts +1 -0
  14. package/esm/typings/src/book-2.0/agent-source/parseAgentSourceWithCommitments.d.ts +1 -1
  15. package/esm/typings/src/collection/agent-collection/constructors/agent-collection-in-supabase/AgentCollectionInSupabase.d.ts +14 -8
  16. package/esm/typings/src/collection/agent-collection/constructors/agent-collection-in-supabase/AgentCollectionInSupabaseOptions.d.ts +10 -0
  17. package/esm/typings/src/collection/agent-collection/constructors/agent-collection-in-supabase/AgentsDatabaseSchema.d.ts +57 -32
  18. package/esm/typings/src/commitments/MESSAGE/InitialMessageCommitmentDefinition.d.ts +28 -0
  19. package/esm/typings/src/commitments/index.d.ts +2 -1
  20. package/esm/typings/src/config.d.ts +1 -0
  21. package/esm/typings/src/errors/DatabaseError.d.ts +2 -2
  22. package/esm/typings/src/errors/WrappedError.d.ts +2 -2
  23. package/esm/typings/src/execution/ExecutionTask.d.ts +2 -2
  24. package/esm/typings/src/execution/LlmExecutionTools.d.ts +6 -1
  25. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForWizardOrCli.d.ts +2 -2
  26. package/esm/typings/src/llm-providers/_common/utils/assertUniqueModels.d.ts +12 -0
  27. package/esm/typings/src/llm-providers/agent/Agent.d.ts +17 -4
  28. package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.d.ts +10 -1
  29. package/esm/typings/src/llm-providers/agent/RemoteAgent.d.ts +6 -2
  30. package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionTools.d.ts +30 -4
  31. package/esm/typings/src/llm-providers/openai/openai-models.test.d.ts +4 -0
  32. package/esm/typings/src/remote-server/startAgentServer.d.ts +2 -2
  33. package/esm/typings/src/remote-server/startRemoteServer.d.ts +1 -2
  34. package/esm/typings/src/transpilers/openai-sdk/register.d.ts +1 -1
  35. package/esm/typings/src/types/typeAliases.d.ts +6 -0
  36. package/esm/typings/src/utils/color/Color.d.ts +7 -0
  37. package/esm/typings/src/utils/color/Color.test.d.ts +1 -0
  38. package/esm/typings/src/utils/environment/$getGlobalScope.d.ts +2 -2
  39. package/esm/typings/src/utils/misc/computeHash.d.ts +11 -0
  40. package/esm/typings/src/utils/misc/computeHash.test.d.ts +1 -0
  41. package/esm/typings/src/utils/normalization/normalize-to-kebab-case.d.ts +2 -0
  42. package/esm/typings/src/utils/normalization/normalizeTo_PascalCase.d.ts +3 -0
  43. package/esm/typings/src/utils/normalization/normalizeTo_camelCase.d.ts +2 -0
  44. package/esm/typings/src/utils/normalization/titleToName.d.ts +2 -0
  45. package/esm/typings/src/utils/organization/$sideEffect.d.ts +2 -2
  46. package/esm/typings/src/utils/organization/$side_effect.d.ts +2 -2
  47. package/esm/typings/src/utils/organization/TODO_USE.d.ts +2 -2
  48. package/esm/typings/src/utils/organization/keepUnused.d.ts +2 -2
  49. package/esm/typings/src/utils/organization/preserve.d.ts +3 -3
  50. package/esm/typings/src/utils/organization/really_any.d.ts +7 -0
  51. package/esm/typings/src/utils/serialization/asSerializable.d.ts +2 -2
  52. package/esm/typings/src/version.d.ts +1 -1
  53. package/package.json +1 -1
  54. package/umd/index.umd.js +1291 -877
  55. package/umd/index.umd.js.map +1 -1
package/esm/index.es.js CHANGED
@@ -1,11 +1,11 @@
1
+ import { SHA256 } from 'crypto-js';
2
+ import hexEncoder from 'crypto-js/enc-hex';
1
3
  import spaceTrim$1, { spaceTrim as spaceTrim$2 } from 'spacetrim';
2
4
  import { randomBytes } from 'crypto';
3
5
  import { Subject, BehaviorSubject } from 'rxjs';
4
- import { forTime, forEver } from 'waitasecond';
5
- import hexEncoder from 'crypto-js/enc-hex';
6
+ import { forTime } from 'waitasecond';
6
7
  import sha256 from 'crypto-js/sha256';
7
8
  import { basename, join, dirname, isAbsolute } from 'path';
8
- import { SHA256 } from 'crypto-js';
9
9
  import { lookup, extension } from 'mime-types';
10
10
  import { parse, unparse } from 'papaparse';
11
11
  import moment from 'moment';
@@ -27,131 +27,12 @@ const BOOK_LANGUAGE_VERSION = '2.0.0';
27
27
  * @generated
28
28
  * @see https://github.com/webgptorg/promptbook
29
29
  */
30
- const PROMPTBOOK_ENGINE_VERSION = '0.103.0-47';
30
+ const PROMPTBOOK_ENGINE_VERSION = '0.103.0-49';
31
31
  /**
32
32
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
33
33
  * Note: [šŸ’ž] Ignore a discrepancy between file name and entity name
34
34
  */
35
35
 
36
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [šŸ†] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [šŸ†] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModels",description:"List of available model names together with their descriptions as JSON",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelsRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n```json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpful assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n```\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n```json\n{availableModels}\n```\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. 
By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelsRequirements",format:"JSON",dependentParameterNames:["availableModels","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModels}` List of available model names together with their descriptions as JSON\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelsRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n\\`\\`\\`json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpful assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n\\`\\`\\`json\n{availableModels}\n\\`\\`\\`\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelsRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"āœ Convert Knowledge-piece to title\" but \"āœ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"āœ Convert Knowledge-piece to title\" but \"āœ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
37
-
38
- /**
39
- * Checks if value is valid email
40
- *
41
- * @public exported from `@promptbook/utils`
42
- */
43
- function isValidEmail(email) {
44
- if (typeof email !== 'string') {
45
- return false;
46
- }
47
- if (email.split('\n').length > 1) {
48
- return false;
49
- }
50
- return /^.+@.+\..+$/.test(email);
51
- }
52
-
53
- /**
54
- * Tests if given string is valid file path.
55
- *
56
- * Note: This does not check if the file exists only if the path is valid
57
- * @public exported from `@promptbook/utils`
58
- */
59
- function isValidFilePath(filename) {
60
- if (typeof filename !== 'string') {
61
- return false;
62
- }
63
- if (filename.split('\n').length > 1) {
64
- return false;
65
- }
66
- // Normalize slashes early so heuristics can detect path-like inputs
67
- const filenameSlashes = filename.replace(/\\/g, '/');
68
- // Reject strings that look like sentences (informational text)
69
- // Heuristic: contains multiple spaces and ends with a period, or contains typical sentence punctuation
70
- // But skip this heuristic if the string looks like a path (contains '/' or starts with a drive letter)
71
- if (filename.trim().length > 60 && // long enough to be a sentence
72
- /[.!?]/.test(filename) && // contains sentence punctuation
73
- filename.split(' ').length > 8 && // has many words
74
- !/\/|^[A-Z]:/i.test(filenameSlashes) // do NOT treat as sentence if looks like a path
75
- ) {
76
- return false;
77
- }
78
- // Absolute Unix path: /hello.txt
79
- if (/^(\/)/i.test(filenameSlashes)) {
80
- // console.log(filename, 'Absolute Unix path: /hello.txt');
81
- return true;
82
- }
83
- // Absolute Windows path: C:/ or C:\ (allow spaces and multiple dots in filename)
84
- if (/^[A-Z]:\/.+$/i.test(filenameSlashes)) {
85
- // console.log(filename, 'Absolute Windows path: /hello.txt');
86
- return true;
87
- }
88
- // Relative path: ./hello.txt
89
- if (/^(\.\.?\/)+/i.test(filenameSlashes)) {
90
- // console.log(filename, 'Relative path: ./hello.txt');
91
- return true;
92
- }
93
- // Allow paths like foo/hello
94
- if (/^[^/]+\/[^/]+/i.test(filenameSlashes)) {
95
- // console.log(filename, 'Allow paths like foo/hello');
96
- return true;
97
- }
98
- // Allow paths like hello.book
99
- if (/^[^/]+\.[^/]+$/i.test(filenameSlashes)) {
100
- // console.log(filename, 'Allow paths like hello.book');
101
- return true;
102
- }
103
- return false;
104
- }
105
- /**
106
- * TODO: [šŸ] Implement for MacOs
107
- */
108
-
109
- /**
110
- * Tests if given string is valid URL.
111
- *
112
- * Note: [šŸ”‚] This function is idempotent.
113
- * Note: Dataurl are considered perfectly valid.
114
- * Note: There are two similar functions:
115
- * - `isValidUrl` which tests any URL
116
- * - `isValidPipelineUrl` *(this one)* which tests just promptbook URL
117
- *
118
- * @public exported from `@promptbook/utils`
119
- */
120
- function isValidUrl(url) {
121
- if (typeof url !== 'string') {
122
- return false;
123
- }
124
- try {
125
- if (url.startsWith('blob:')) {
126
- url = url.replace(/^blob:/, '');
127
- }
128
- const urlObject = new URL(url /* because fail is handled */);
129
- if (!['http:', 'https:', 'data:'].includes(urlObject.protocol)) {
130
- return false;
131
- }
132
- return true;
133
- }
134
- catch (error) {
135
- return false;
136
- }
137
- }
138
-
139
- /**
140
- * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
141
- *
142
- * @public exported from `@promptbook/core`
143
- */
144
- class ParseError extends Error {
145
- constructor(message) {
146
- super(message);
147
- this.name = 'ParseError';
148
- Object.setPrototypeOf(this, ParseError.prototype);
149
- }
150
- }
151
- /**
152
- * TODO: Maybe split `ParseError` and `ApplyError`
153
- */
154
-
155
36
  /**
156
37
  * Available remote servers for the Promptbook
157
38
  *
@@ -185,6 +66,7 @@ const REMOTE_SERVER_URLS = [
185
66
  */
186
67
  ];
187
68
  /**
69
+ * TODO: [šŸ±ā€šŸš€] Auto-federated server from url in here
188
70
  * Note: [šŸ’ž] Ignore a discrepancy between file name and entity name
189
71
  */
190
72
 
@@ -518,6 +400,9 @@ class Color {
518
400
  if (hex.length === 3) {
519
401
  return Color.fromHex3(hex);
520
402
  }
403
+ if (hex.length === 4) {
404
+ return Color.fromHex4(hex);
405
+ }
521
406
  if (hex.length === 6) {
522
407
  return Color.fromHex6(hex);
523
408
  }
@@ -538,6 +423,19 @@ class Color {
538
423
  const b = parseInt(hex.substr(2, 1), 16) * 16;
539
424
  return take(new Color(r, g, b));
540
425
  }
426
+ /**
427
+ * Creates a new Color instance from color in hex format with 4 digits (with alpha channel)
428
+ *
429
+ * @param color in hex for example `09df`
430
+ * @returns Color object
431
+ */
432
+ static fromHex4(hex) {
433
+ const r = parseInt(hex.substr(0, 1), 16) * 16;
434
+ const g = parseInt(hex.substr(1, 1), 16) * 16;
435
+ const b = parseInt(hex.substr(2, 1), 16) * 16;
436
+ const a = parseInt(hex.substr(3, 1), 16) * 16;
437
+ return take(new Color(r, g, b, a));
438
+ }
541
439
  /**
542
440
  * Creates a new Color instance from color in hex format with 6 color digits (without alpha channel)
543
441
  *
@@ -728,7 +626,8 @@ class Color {
728
626
  * @returns true if the value is a valid hex color string (e.g., `#009edd`, `#fff`, etc.)
729
627
  */
730
628
  static isHexColorString(value) {
731
- return typeof value === 'string' && /^#(?:[0-9a-fA-F]{3}){1,2}$/.test(value);
629
+ return (typeof value === 'string' &&
630
+ /^#(?:[0-9a-fA-F]{3}|[0-9a-fA-F]{4}|[0-9a-fA-F]{6}|[0-9a-fA-F]{8})$/.test(value));
732
631
  }
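The two hunks above extend the package's hex-color parsing to 4-digit (`#RGBA`) and 8-digit (`#RRGGBBAA`) strings. A minimal usage sketch (illustrative only, not part of the diff; it assumes `Color` is publicly exported from `@promptbook/utils`, where its typings live under `utils/color`):

```ts
import { Color } from '@promptbook/utils'; // <- assumption: public export, suggested by the typings location

// `isHexColorString` now also accepts 4-digit and 8-digit hex strings:
Color.isHexColorString('#09df');     // -> true (rejected before this change)
Color.isHexColorString('#79EAFD88'); // -> true
Color.isHexColorString('#009edd');   // -> true (unchanged behaviour)

// A 4-digit value is dispatched to the new `fromHex4`, which also reads the alpha nibble:
const translucent = Color.fromHex('#09df');
```

Note that only the 3-, 4- and 6-digit branches of `fromHex` are visible in these hunks; whether an 8-digit value is also dispatched is not shown here.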
733
632
  /**
734
633
  * Creates new Color object
@@ -1069,6 +968,7 @@ const PROMPTBOOK_COLOR = Color.fromHex('#79EAFD');
1069
968
  const PROMPTBOOK_SYNTAX_COLORS = {
1070
969
  TITLE: Color.fromHex('#244EA8'),
1071
970
  LINE: Color.fromHex('#eeeeee'),
971
+ SEPARATOR: Color.fromHex('#cccccc'),
1072
972
  COMMITMENT: Color.fromHex('#DA0F78'),
1073
973
  PARAMETER: Color.fromHex('#8e44ad'),
1074
974
  };
@@ -1519,86 +1419,307 @@ function assertsError(whatWasThrown) {
1519
1419
  }
1520
1420
 
1521
1421
  /**
1522
- * Function isValidJsonString will tell you if the string is valid JSON or not
1523
- *
1524
- * @param value The string to check
1525
- * @returns `true` if the string is a valid JSON string, false otherwise
1422
+ * Format either small or big number
1526
1423
  *
1527
1424
  * @public exported from `@promptbook/utils`
1528
1425
  */
1529
- function isValidJsonString(value /* <- [šŸ‘Øā€āš–ļø] */) {
1530
- try {
1531
- JSON.parse(value);
1532
- return true;
1426
+ function numberToString(value) {
1427
+ if (value === 0) {
1428
+ return '0';
1533
1429
  }
1534
- catch (error) {
1535
- assertsError(error);
1536
- if (error.message.includes('Unexpected token')) {
1537
- return false;
1430
+ else if (Number.isNaN(value)) {
1431
+ return VALUE_STRINGS.nan;
1432
+ }
1433
+ else if (value === Infinity) {
1434
+ return VALUE_STRINGS.infinity;
1435
+ }
1436
+ else if (value === -Infinity) {
1437
+ return VALUE_STRINGS.negativeInfinity;
1438
+ }
1439
+ for (let exponent = 0; exponent < 15; exponent++) {
1440
+ const factor = 10 ** exponent;
1441
+ const valueRounded = Math.round(value * factor) / factor;
1442
+ if (Math.abs(value - valueRounded) / value < SMALL_NUMBER) {
1443
+ return valueRounded.toFixed(exponent);
1538
1444
  }
1539
- return false;
1540
1445
  }
1446
+ return value.toString();
1541
1447
  }
1542
1448
 
1543
1449
  /**
1544
- * Function `validatePipelineString` will validate the if the string is a valid pipeline string
1545
- * It does not check if the string is fully logically correct, but if it is a string that can be a pipeline string or the string looks completely different.
1450
+ * Function `valueToString` will convert the given value to string
1451
+ * This is useful and used in the `templateParameters` function
1546
1452
  *
1547
- * Note: [šŸ”‚] This function is idempotent.
1453
+ * Note: This function is not just calling `toString` method
1454
+ * It's more complex and can handle this conversion specifically for LLM models
1455
+ * See `VALUE_STRINGS`
1548
1456
  *
1549
- * @param {string} pipelineString the candidate for a pipeline string
1550
- * @returns {PipelineString} the same string as input, but validated as valid
1551
- * @throws {ParseError} if the string is not a valid pipeline string
1552
- * @public exported from `@promptbook/core`
1457
+ * Note: There are 2 similar functions
1458
+ * - `valueToString` converts value to string for LLM models as human-readable string
1459
+ * - `asSerializable` converts value to string to preserve full information to be able to convert it back
1460
+ *
1461
+ * @public exported from `@promptbook/utils`
1553
1462
  */
1554
- function validatePipelineString(pipelineString) {
1555
- if (isValidJsonString(pipelineString)) {
1556
- throw new ParseError('Expected a book, but got a JSON string');
1557
- }
1558
- else if (isValidUrl(pipelineString)) {
1559
- throw new ParseError(`Expected a book, but got just the URL "${pipelineString}"`);
1560
- }
1561
- else if (isValidFilePath(pipelineString)) {
1562
- throw new ParseError(`Expected a book, but got just the file path "${pipelineString}"`);
1463
+ function valueToString(value) {
1464
+ try {
1465
+ if (value === '') {
1466
+ return VALUE_STRINGS.empty;
1467
+ }
1468
+ else if (value === null) {
1469
+ return VALUE_STRINGS.null;
1470
+ }
1471
+ else if (value === undefined) {
1472
+ return VALUE_STRINGS.undefined;
1473
+ }
1474
+ else if (typeof value === 'string') {
1475
+ return value;
1476
+ }
1477
+ else if (typeof value === 'number') {
1478
+ return numberToString(value);
1479
+ }
1480
+ else if (value instanceof Date) {
1481
+ return value.toISOString();
1482
+ }
1483
+ else {
1484
+ try {
1485
+ return JSON.stringify(value);
1486
+ }
1487
+ catch (error) {
1488
+ if (error instanceof TypeError && error.message.includes('circular structure')) {
1489
+ return VALUE_STRINGS.circular;
1490
+ }
1491
+ throw error;
1492
+ }
1493
+ }
1563
1494
  }
1564
- else if (isValidEmail(pipelineString)) {
1565
- throw new ParseError(`Expected a book, but got just the email "${pipelineString}"`);
1495
+ catch (error) {
1496
+ assertsError(error);
1497
+ console.error(error);
1498
+ return VALUE_STRINGS.unserializable;
1566
1499
  }
1567
- // <- TODO: Implement the validation + add tests when the pipeline logic considered as invalid
1568
- return pipelineString;
1569
1500
  }
1570
- /**
1571
- * TODO: [🧠][🈓] Where is the best location for this file
1572
- */
1573
1501
 
1574
1502
  /**
1575
- * Prettify the html code
1503
+ * Computes SHA-256 hash of the given object
1576
1504
  *
1577
- * @param content raw html code
1578
- * @returns formatted html code
1579
- * @private withing the package because of HUGE size of prettier dependency
1580
- * @deprecated Prettier removed from Promptbook due to package size
1505
+ * @public exported from `@promptbook/utils`
1581
1506
  */
1582
- function prettifyMarkdown(content) {
1583
- return (content + `\n\n<!-- Note: Prettier removed from Promptbook -->`);
1507
+ function computeHash(value) {
1508
+ return SHA256(hexEncoder.parse(spaceTrim$1(valueToString(value)))).toString( /* hex */);
1584
1509
  }
1510
+ /**
1511
+ * TODO: [🄬][🄬] Use this ACRY
1512
+ */
1585
1513
 
1586
1514
  /**
1587
- * Makes first letter of a string uppercase
1515
+ * Computes SHA-256 hash of the agent source
1588
1516
  *
1589
- * Note: [šŸ”‚] This function is idempotent.
1590
- *
1591
- * @public exported from `@promptbook/utils`
1517
+ * @public exported from `@promptbook/core`
1592
1518
  */
1593
- function capitalize(word) {
1594
- return word.substring(0, 1).toUpperCase() + word.substring(1);
1519
+ function computeAgentHash(agentSource) {
1520
+ return computeHash(agentSource);
1595
1521
  }
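`computeHash` and its thin wrapper `computeAgentHash` are new in this release. A minimal usage sketch (illustrative only; the import paths follow the `@public exported from ...` annotations above, and the argument shapes are assumptions made for the example):

```ts
import { computeHash } from '@promptbook/utils';
import { computeAgentHash } from '@promptbook/core';

// `computeHash` runs the value through `valueToString`, trims it, parses the result
// with crypto-js' hex encoder and hashes it with SHA-256, returning a 64-character hex digest:
const knowledgeHash = computeHash({ title: 'Prepare Title', version: 2 });

// `computeAgentHash` hashes an agent source string the same way:
const agentHash = computeAgentHash('PERSONA You are a helpful support agent.');

console.log(knowledgeHash.length, agentHash.length); // 64 64
```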
1596
1522
 
1523
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [šŸ†] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [šŸ†] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModels",description:"List of available model names together with their descriptions as JSON",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelsRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n```json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpful assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n```\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n```json\n{availableModels}\n```\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. 
By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelsRequirements",format:"JSON",dependentParameterNames:["availableModels","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModels}` List of available model names together with their descriptions as JSON\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelsRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n\\`\\`\\`json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpful assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n\\`\\`\\`json\n{availableModels}\n\\`\\`\\`\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelsRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"āœ Convert Knowledge-piece to title\" but \"āœ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"āœ Convert Knowledge-piece to title\" but \"āœ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
1524
+
1597
1525
  /**
1598
- * Converts promptbook in JSON format to string format
1526
+ * Checks if value is valid email
1599
1527
  *
1600
- * @deprecated TODO: [šŸ„][🧠] Backup original files in `PipelineJson` same as in Promptbook.studio
1601
- * @param pipelineJson Promptbook in JSON format (.bookc)
1528
+ * @public exported from `@promptbook/utils`
1529
+ */
1530
+ function isValidEmail(email) {
1531
+ if (typeof email !== 'string') {
1532
+ return false;
1533
+ }
1534
+ if (email.split('\n').length > 1) {
1535
+ return false;
1536
+ }
1537
+ return /^.+@.+\..+$/.test(email);
1538
+ }
1539
+
1540
+ /**
1541
+ * Tests if given string is valid file path.
1542
+ *
1543
+ * Note: This does not check if the file exists only if the path is valid
1544
+ * @public exported from `@promptbook/utils`
1545
+ */
1546
+ function isValidFilePath(filename) {
1547
+ if (typeof filename !== 'string') {
1548
+ return false;
1549
+ }
1550
+ if (filename.split('\n').length > 1) {
1551
+ return false;
1552
+ }
1553
+ // Normalize slashes early so heuristics can detect path-like inputs
1554
+ const filenameSlashes = filename.replace(/\\/g, '/');
1555
+ // Reject strings that look like sentences (informational text)
1556
+ // Heuristic: contains multiple spaces and ends with a period, or contains typical sentence punctuation
1557
+ // But skip this heuristic if the string looks like a path (contains '/' or starts with a drive letter)
1558
+ if (filename.trim().length > 60 && // long enough to be a sentence
1559
+ /[.!?]/.test(filename) && // contains sentence punctuation
1560
+ filename.split(' ').length > 8 && // has many words
1561
+ !/\/|^[A-Z]:/i.test(filenameSlashes) // do NOT treat as sentence if looks like a path
1562
+ ) {
1563
+ return false;
1564
+ }
1565
+ // Absolute Unix path: /hello.txt
1566
+ if (/^(\/)/i.test(filenameSlashes)) {
1567
+ // console.log(filename, 'Absolute Unix path: /hello.txt');
1568
+ return true;
1569
+ }
1570
+ // Absolute Windows path: C:/ or C:\ (allow spaces and multiple dots in filename)
1571
+ if (/^[A-Z]:\/.+$/i.test(filenameSlashes)) {
1572
+ // console.log(filename, 'Absolute Windows path: /hello.txt');
1573
+ return true;
1574
+ }
1575
+ // Relative path: ./hello.txt
1576
+ if (/^(\.\.?\/)+/i.test(filenameSlashes)) {
1577
+ // console.log(filename, 'Relative path: ./hello.txt');
1578
+ return true;
1579
+ }
1580
+ // Allow paths like foo/hello
1581
+ if (/^[^/]+\/[^/]+/i.test(filenameSlashes)) {
1582
+ // console.log(filename, 'Allow paths like foo/hello');
1583
+ return true;
1584
+ }
1585
+ // Allow paths like hello.book
1586
+ if (/^[^/]+\.[^/]+$/i.test(filenameSlashes)) {
1587
+ // console.log(filename, 'Allow paths like hello.book');
1588
+ return true;
1589
+ }
1590
+ return false;
1591
+ }
1592
+ /**
1593
+ * TODO: [šŸ] Implement for MacOs
1594
+ */
1595
+
1596
+ /**
1597
+ * Tests if given string is valid URL.
1598
+ *
1599
+ * Note: [šŸ”‚] This function is idempotent.
1600
+ * Note: Dataurl are considered perfectly valid.
1601
+ * Note: There are two similar functions:
1602
+ * - `isValidUrl` which tests any URL
1603
+ * - `isValidPipelineUrl` *(this one)* which tests just promptbook URL
1604
+ *
1605
+ * @public exported from `@promptbook/utils`
1606
+ */
1607
+ function isValidUrl(url) {
1608
+ if (typeof url !== 'string') {
1609
+ return false;
1610
+ }
1611
+ try {
1612
+ if (url.startsWith('blob:')) {
1613
+ url = url.replace(/^blob:/, '');
1614
+ }
1615
+ const urlObject = new URL(url /* because fail is handled */);
1616
+ if (!['http:', 'https:', 'data:'].includes(urlObject.protocol)) {
1617
+ return false;
1618
+ }
1619
+ return true;
1620
+ }
1621
+ catch (error) {
1622
+ return false;
1623
+ }
1624
+ }
1625
+
1626
+ /**
1627
+ * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
1628
+ *
1629
+ * @public exported from `@promptbook/core`
1630
+ */
1631
+ class ParseError extends Error {
1632
+ constructor(message) {
1633
+ super(message);
1634
+ this.name = 'ParseError';
1635
+ Object.setPrototypeOf(this, ParseError.prototype);
1636
+ }
1637
+ }
1638
+ /**
1639
+ * TODO: Maybe split `ParseError` and `ApplyError`
1640
+ */
1641
+
1642
+ /**
1643
+ * Function isValidJsonString will tell you if the string is valid JSON or not
1644
+ *
1645
+ * @param value The string to check
1646
+ * @returns `true` if the string is a valid JSON string, false otherwise
1647
+ *
1648
+ * @public exported from `@promptbook/utils`
1649
+ */
1650
+ function isValidJsonString(value /* <- [šŸ‘Øā€āš–ļø] */) {
1651
+ try {
1652
+ JSON.parse(value);
1653
+ return true;
1654
+ }
1655
+ catch (error) {
1656
+ assertsError(error);
1657
+ if (error.message.includes('Unexpected token')) {
1658
+ return false;
1659
+ }
1660
+ return false;
1661
+ }
1662
+ }
1663
+
1664
+ /**
1665
+ * Function `validatePipelineString` will validate the if the string is a valid pipeline string
1666
+ * It does not check if the string is fully logically correct, but if it is a string that can be a pipeline string or the string looks completely different.
1667
+ *
1668
+ * Note: [šŸ”‚] This function is idempotent.
1669
+ *
1670
+ * @param {string} pipelineString the candidate for a pipeline string
1671
+ * @returns {PipelineString} the same string as input, but validated as valid
1672
+ * @throws {ParseError} if the string is not a valid pipeline string
1673
+ * @public exported from `@promptbook/core`
1674
+ */
1675
+ function validatePipelineString(pipelineString) {
1676
+ if (isValidJsonString(pipelineString)) {
1677
+ throw new ParseError('Expected a book, but got a JSON string');
1678
+ }
1679
+ else if (isValidUrl(pipelineString)) {
1680
+ throw new ParseError(`Expected a book, but got just the URL "${pipelineString}"`);
1681
+ }
1682
+ else if (isValidFilePath(pipelineString)) {
1683
+ throw new ParseError(`Expected a book, but got just the file path "${pipelineString}"`);
1684
+ }
1685
+ else if (isValidEmail(pipelineString)) {
1686
+ throw new ParseError(`Expected a book, but got just the email "${pipelineString}"`);
1687
+ }
1688
+ // <- TODO: Implement the validation + add tests when the pipeline logic considered as invalid
1689
+ return pipelineString;
1690
+ }
1691
+ /**
1692
+ * TODO: [🧠][🈓] Where is the best location for this file
1693
+ */
1694
+
1695
+ /**
1696
+ * Prettify the html code
1697
+ *
1698
+ * @param content raw html code
1699
+ * @returns formatted html code
1700
+ * @private withing the package because of HUGE size of prettier dependency
1701
+ * @deprecated Prettier removed from Promptbook due to package size
1702
+ */
1703
+ function prettifyMarkdown(content) {
1704
+ return (content + `\n\n<!-- Note: Prettier removed from Promptbook -->`);
1705
+ }
1706
+
1707
+ /**
1708
+ * Makes first letter of a string uppercase
1709
+ *
1710
+ * Note: [šŸ”‚] This function is idempotent.
1711
+ *
1712
+ * @public exported from `@promptbook/utils`
1713
+ */
1714
+ function capitalize(word) {
1715
+ return word.substring(0, 1).toUpperCase() + word.substring(1);
1716
+ }
1717
+
1718
+ /**
1719
+ * Converts promptbook in JSON format to string format
1720
+ *
1721
+ * @deprecated TODO: [šŸ„][🧠] Backup original files in `PipelineJson` same as in Promptbook.studio
1722
+ * @param pipelineJson Promptbook in JSON format (.bookc)
1602
1723
  * @returns Promptbook in string format (.book.md)
1603
1724
  * @public exported from `@promptbook/core`
1604
1725
  */
@@ -1930,7 +2051,7 @@ function deepClone(objectValue) {
1930
2051
  TODO: [🧠] Is there a better implementation?
1931
2052
  > const propertyNames = Object.getOwnPropertyNames(objectValue);
1932
2053
  > for (const propertyName of propertyNames) {
1933
- > const value = (objectValue as really_any)[propertyName];
2054
+ > const value = (objectValue as chococake)[propertyName];
1934
2055
  > if (value && typeof value === 'object') {
1935
2056
  > deepClone(value);
1936
2057
  > }
@@ -2783,7 +2904,7 @@ class DatabaseError extends Error {
2783
2904
  }
2784
2905
  }
2785
2906
  /**
2786
- * TODO: !!!! Explain that NotFoundError (!!! and other specific errors) has priority over DatabaseError in some contexts
2907
+ * TODO: [šŸ±ā€šŸš€] Explain that NotFoundError ([šŸ±ā€šŸš€] and other specific errors) has priority over DatabaseError in some contexts
2787
2908
  */
2788
2909
 
2789
2910
  /**
@@ -4325,6 +4446,8 @@ function removeDiacritics(input) {
4325
4446
  /**
4326
4447
  * Converts a given text to kebab-case format.
4327
4448
  *
4449
+ * Note: [šŸ”‚] This function is idempotent.
4450
+ *
4328
4451
  * @param text The text to be converted.
4329
4452
  * @returns The kebab-case formatted string.
4330
4453
  * @example 'hello-world'
@@ -4480,6 +4603,8 @@ function removeEmojis(text) {
4480
4603
  /**
4481
4604
  * Converts a title string into a normalized name.
4482
4605
  *
4606
+ * Note: [šŸ”‚] This function is idempotent.
4607
+ *
4483
4608
  * @param value The title string to be converted to a name.
4484
4609
  * @returns A normalized name derived from the input title.
4485
4610
  * @example 'Hello World!' -> 'hello-world'
@@ -5049,98 +5174,17 @@ async function preparePipeline(pipeline, tools, options) {
5049
5174
  */
5050
5175
 
5051
5176
  /**
5052
- * Format either small or big number
5053
- *
5054
- * @public exported from `@promptbook/utils`
5055
- */
5056
- function numberToString(value) {
5057
- if (value === 0) {
5058
- return '0';
5059
- }
5060
- else if (Number.isNaN(value)) {
5061
- return VALUE_STRINGS.nan;
5062
- }
5063
- else if (value === Infinity) {
5064
- return VALUE_STRINGS.infinity;
5065
- }
5066
- else if (value === -Infinity) {
5067
- return VALUE_STRINGS.negativeInfinity;
5068
- }
5069
- for (let exponent = 0; exponent < 15; exponent++) {
5070
- const factor = 10 ** exponent;
5071
- const valueRounded = Math.round(value * factor) / factor;
5072
- if (Math.abs(value - valueRounded) / value < SMALL_NUMBER) {
5073
- return valueRounded.toFixed(exponent);
5074
- }
5075
- }
5076
- return value.toString();
5077
- }
5078
-
5079
- /**
5080
- * Function `valueToString` will convert the given value to string
5081
- * This is useful and used in the `templateParameters` function
5082
- *
5083
- * Note: This function is not just calling `toString` method
5084
- * It's more complex and can handle this conversion specifically for LLM models
5085
- * See `VALUE_STRINGS`
5086
- *
5087
- * Note: There are 2 similar functions
5088
- * - `valueToString` converts value to string for LLM models as human-readable string
5089
- * - `asSerializable` converts value to string to preserve full information to be able to convert it back
5177
+ * Parses the given script and returns the list of all used variables that are not defined in the script
5090
5178
  *
5091
- * @public exported from `@promptbook/utils`
5179
+ * @param script from which to extract the variables
5180
+ * @returns the list of variable names
5181
+ * @throws {ParseError} if the script is invalid
5182
+ * @public exported from `@promptbook/javascript`
5092
5183
  */
5093
- function valueToString(value) {
5094
- try {
5095
- if (value === '') {
5096
- return VALUE_STRINGS.empty;
5097
- }
5098
- else if (value === null) {
5099
- return VALUE_STRINGS.null;
5100
- }
5101
- else if (value === undefined) {
5102
- return VALUE_STRINGS.undefined;
5103
- }
5104
- else if (typeof value === 'string') {
5105
- return value;
5106
- }
5107
- else if (typeof value === 'number') {
5108
- return numberToString(value);
5109
- }
5110
- else if (value instanceof Date) {
5111
- return value.toISOString();
5112
- }
5113
- else {
5114
- try {
5115
- return JSON.stringify(value);
5116
- }
5117
- catch (error) {
5118
- if (error instanceof TypeError && error.message.includes('circular structure')) {
5119
- return VALUE_STRINGS.circular;
5120
- }
5121
- throw error;
5122
- }
5123
- }
5124
- }
5125
- catch (error) {
5126
- assertsError(error);
5127
- console.error(error);
5128
- return VALUE_STRINGS.unserializable;
5129
- }
5130
- }
5131
-
5132
- /**
5133
- * Parses the given script and returns the list of all used variables that are not defined in the script
5134
- *
5135
- * @param script from which to extract the variables
5136
- * @returns the list of variable names
5137
- * @throws {ParseError} if the script is invalid
5138
- * @public exported from `@promptbook/javascript`
5139
- */
5140
- function extractVariablesFromJavascript(script) {
5141
- const variables = new Set();
5142
- const originalScript = script;
5143
- script = `(()=>{${script}})()`;
5184
+ function extractVariablesFromJavascript(script) {
5185
+ const variables = new Set();
5186
+ const originalScript = script;
5187
+ script = `(()=>{${script}})()`;
5144
5188
  try {
5145
5189
  for (let i = 0; i < LOOP_LIMIT; i++)
5146
5190
  try {
@@ -8055,6 +8099,60 @@ class MemoryCommitmentDefinition extends BaseCommitmentDefinition {
8055
8099
  * Note: [šŸ’ž] Ignore a discrepancy between file name and entity name
8056
8100
  */
8057
8101
 
8102
+ /**
8103
+ * INITIAL MESSAGE commitment definition
8104
+ *
8105
+ * The INITIAL MESSAGE commitment defines the first message that the user sees when opening the chat.
8106
+ * It is used to greet the user and set the tone of the conversation.
8107
+ *
8108
+ * Example usage in agent source:
8109
+ *
8110
+ * ```book
8111
+ * INITIAL MESSAGE Hello! I am ready to help you with your tasks.
8112
+ * ```
8113
+ *
8114
+ * @private [šŸŖ”] Maybe export the commitments through some package
8115
+ */
8116
+ class InitialMessageCommitmentDefinition extends BaseCommitmentDefinition {
8117
+ constructor() {
8118
+ super('INITIAL MESSAGE');
8119
+ }
8120
+ /**
8121
+ * Short one-line description of INITIAL MESSAGE.
8122
+ */
8123
+ get description() {
8124
+ return 'Defines the **initial message** shown to the user when the chat starts.';
8125
+ }
8126
+ /**
8127
+ * Markdown documentation for INITIAL MESSAGE commitment.
8128
+ */
8129
+ get documentation() {
8130
+ return spaceTrim$2(`
8131
+ # ${this.type}
8132
+
8133
+ Defines the first message that the user sees when opening the chat. This message is purely for display purposes in the UI and does not inherently become part of the LLM's system prompt context (unless also included via other means).
8134
+
8135
+ ## Key aspects
8136
+
8137
+ - Used to greet the user.
8138
+ - Sets the tone of the conversation.
8139
+ - Displayed immediately when the chat interface loads.
8140
+
8141
+ ## Examples
8142
+
8143
+ \`\`\`book
8144
+ Support Agent
8145
+
8146
+ PERSONA You are a helpful support agent.
8147
+ INITIAL MESSAGE Hi there! How can I assist you today?
8148
+ \`\`\`
8149
+ `);
8150
+ }
8151
+ applyToAgentModelRequirements(requirements, content) {
8152
+ return requirements;
8153
+ }
8154
+ }
8155
+
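For illustration, a minimal sketch of how the INITIAL MESSAGE commitment surfaces when an agent source is parsed (import path taken from the `@public exported from @promptbook/core` annotation on `parseAgentSource`; exact whitespace handling of the extracted content is an assumption):

```js
import { parseAgentSource } from '@promptbook/core';

// Hypothetical agent source used only for illustration
const agentSource = `
Support Agent

PERSONA You are a helpful support agent.
INITIAL MESSAGE Hi there! How can I assist you today?
`;

const { initialMessage } = parseAgentSource(agentSource);

console.log(initialMessage);
// -> 'Hi there! How can I assist you today?'
// Note: The commitment is display-only; `applyToAgentModelRequirements` returns the requirements unchanged.
```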
8058
8156
  /**
8059
8157
  * MESSAGE commitment definition
8060
8158
  *
@@ -8759,6 +8857,7 @@ class PersonaCommitmentDefinition extends BaseCommitmentDefinition {
8759
8857
  // Keep everything after the PERSONA section
8760
8858
  cleanedMessage = lines.slice(personaEndIndex).join('\n').trim();
8761
8859
  }
8860
+ // TODO: [šŸ•›] There should be `agentFullname`, not `agentName`
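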
8762
8861
  // Create new system message with persona at the beginning
8763
8862
  // Format: "You are {agentName}\n{personaContent}"
8764
8863
  // The # PERSONA comment will be removed later by removeCommentsFromSystemMessage
@@ -9215,6 +9314,7 @@ const COMMITMENT_REGISTRY = [
9215
9314
  new NoteCommitmentDefinition('NONCE'),
9216
9315
  new GoalCommitmentDefinition('GOAL'),
9217
9316
  new GoalCommitmentDefinition('GOALS'),
9317
+ new InitialMessageCommitmentDefinition(),
9218
9318
  new MessageCommitmentDefinition('MESSAGE'),
9219
9319
  new MessageCommitmentDefinition('MESSAGES'),
9220
9320
  new ScenarioCommitmentDefinition('SCENARIO'),
@@ -9580,6 +9680,8 @@ async function createAgentModelRequirementsWithCommitments(agentSource, modelNam
9580
9680
  /**
9581
9681
  * Normalizes a given text to camelCase format.
9582
9682
  *
9683
+ * Note: [šŸ”‚] This function is idempotent.
9684
+ *
9583
9685
  * @param text The text to be normalized.
9584
9686
  * @param _isFirstLetterCapital Whether the first letter should be capitalized.
9585
9687
  * @returns The camelCase formatted string.
@@ -9668,91 +9770,522 @@ function generatePlaceholderAgentProfileImageUrl(agentName) {
9668
9770
  */
9669
9771
 
9670
9772
  /**
9671
- * Parses basic information from agent source
9773
+ * Creates a Mermaid graph based on the promptbook
9672
9774
  *
9673
- * There are 2 similar functions:
9674
- * - `parseAgentSource` which is a lightweight parser for agent source, it parses basic information and its purpose is to be quick and synchronous. The commitments there are hardcoded.
9675
- * - `createAgentModelRequirements` which is an asynchronous function that creates model requirements it applies each commitment one by one and works asynchronously.
9775
+ * Note: The result is not wrapped in a Markdown code block
9676
9776
  *
9677
- * @public exported from `@promptbook/core`
9777
+ * @public exported from `@promptbook/utils`
9678
9778
  */
9679
- function parseAgentSource(agentSource) {
9680
- const parseResult = parseAgentSourceWithCommitments(agentSource);
9681
- // Find PERSONA and META commitments
9682
- let personaDescription = null;
9683
- for (const commitment of parseResult.commitments) {
9684
- if (commitment.type !== 'PERSONA') {
9685
- continue;
9779
+ function renderPromptbookMermaid(pipelineJson, options) {
9780
+ const { linkTask = () => null } = options || {};
9781
+ const MERMAID_PREFIX = 'pipeline_';
9782
+ const MERMAID_KNOWLEDGE_NAME = MERMAID_PREFIX + 'knowledge';
9783
+ const MERMAID_RESERVED_NAME = MERMAID_PREFIX + 'reserved';
9784
+ const MERMAID_INPUT_NAME = MERMAID_PREFIX + 'input';
9785
+ const MERMAID_OUTPUT_NAME = MERMAID_PREFIX + 'output';
9786
+ const parameterNameToTaskName = (parameterName) => {
9787
+ if (parameterName === 'knowledge') {
9788
+ return MERMAID_KNOWLEDGE_NAME;
9686
9789
  }
9687
- if (personaDescription === null) {
9688
- personaDescription = '';
9790
+ else if (RESERVED_PARAMETER_NAMES.includes(parameterName)) {
9791
+ return MERMAID_RESERVED_NAME;
9689
9792
  }
9690
- else {
9691
- personaDescription += `\n\n${personaDescription}`;
9793
+ const parameter = pipelineJson.parameters.find((parameter) => parameter.name === parameterName);
9794
+ if (!parameter) {
9795
+ throw new UnexpectedError(`Could not find {${parameterName}}`);
9796
+ // <- TODO: This causes problems when {knowledge} and other reserved parameters are used
9692
9797
  }
9693
- personaDescription += commitment.content;
9694
- }
9695
- const meta = {};
9696
- for (const commitment of parseResult.commitments) {
9697
- if (commitment.type !== 'META') {
9698
- continue;
9798
+ if (parameter.isInput) {
9799
+ return MERMAID_INPUT_NAME;
9699
9800
  }
9700
- // Parse META commitments - format is "META TYPE content"
9701
- const metaTypeRaw = commitment.content.split(' ')[0] || 'NONE';
9702
- const metaType = normalizeTo_camelCase(metaTypeRaw);
9703
- meta[metaType] = spaceTrim$1(commitment.content.substring(metaTypeRaw.length));
9704
- }
9705
- // Generate gravatar fallback if no meta image specified
9706
- if (!meta.image) {
9707
- meta.image = generatePlaceholderAgentProfileImageUrl(parseResult.agentName || '!!');
9708
- }
9709
- // Parse parameters using unified approach - both @Parameter and {parameter} notations
9710
- // are treated as the same syntax feature with unified representation
9711
- const parameters = parseParameters(agentSource);
9712
- return {
9713
- agentName: parseResult.agentName,
9714
- personaDescription,
9715
- meta,
9716
- parameters,
9801
+ const task = pipelineJson.tasks.find((task) => task.resultingParameterName === parameterName);
9802
+ if (!task) {
9803
+ throw new Error(`Could not find task for {${parameterName}}`);
9804
+ }
9805
+ return MERMAID_PREFIX + (task.name || normalizeTo_camelCase('task-' + titleToName(task.title)));
9717
9806
  };
9807
+ const inputAndIntermediateParametersMermaid = pipelineJson.tasks
9808
+ .flatMap(({ title, dependentParameterNames, resultingParameterName }) => [
9809
+ `${parameterNameToTaskName(resultingParameterName)}("${title}")`,
9810
+ ...dependentParameterNames.map((dependentParameterName) => `${parameterNameToTaskName(dependentParameterName)}--"{${dependentParameterName}}"-->${parameterNameToTaskName(resultingParameterName)}`),
9811
+ ])
9812
+ .join('\n');
9813
+ const outputParametersMermaid = pipelineJson.parameters
9814
+ .filter(({ isOutput }) => isOutput)
9815
+ .map(({ name }) => `${parameterNameToTaskName(name)}--"{${name}}"-->${MERMAID_OUTPUT_NAME}`)
9816
+ .join('\n');
9817
+ const linksMermaid = pipelineJson.tasks
9818
+ .map((task) => {
9819
+ const link = linkTask(task);
9820
+ if (link === null) {
9821
+ return '';
9822
+ }
9823
+ const { href, title } = link;
9824
+ const taskName = parameterNameToTaskName(task.resultingParameterName);
9825
+ return `click ${taskName} href "${href}" "${title}";`;
9826
+ })
9827
+ .filter((line) => line !== '')
9828
+ .join('\n');
9829
+ const interactionPointsMermaid = Object.entries({
9830
+ [MERMAID_INPUT_NAME]: 'Input',
9831
+ [MERMAID_OUTPUT_NAME]: 'Output',
9832
+ [MERMAID_RESERVED_NAME]: 'Other',
9833
+ [MERMAID_KNOWLEDGE_NAME]: 'Knowledge',
9834
+ })
9835
+ .filter(([MERMAID_NAME]) => (inputAndIntermediateParametersMermaid + outputParametersMermaid).includes(MERMAID_NAME))
9836
+ .map(([MERMAID_NAME, title]) => `${MERMAID_NAME}((${title})):::${MERMAID_NAME}`)
9837
+ .join('\n');
9838
+ const promptbookMermaid = spaceTrim$2((block) => `
9839
+
9840
+ %% šŸ”® Tip: Open this on GitHub or in the VSCode website to see the Mermaid graph visually
9841
+
9842
+ flowchart LR
9843
+ subgraph "${pipelineJson.title}"
9844
+
9845
+ %% Basic configuration
9846
+ direction TB
9847
+
9848
+ %% Interaction points from pipeline to outside
9849
+ ${block(interactionPointsMermaid)}
9850
+
9851
+ %% Input and intermediate parameters
9852
+ ${block(inputAndIntermediateParametersMermaid)}
9853
+
9854
+
9855
+ %% Output parameters
9856
+ ${block(outputParametersMermaid)}
9857
+
9858
+ %% Links
9859
+ ${block(linksMermaid)}
9860
+
9861
+ %% Styles
9862
+ classDef ${MERMAID_INPUT_NAME} color: grey;
9863
+ classDef ${MERMAID_OUTPUT_NAME} color: grey;
9864
+ classDef ${MERMAID_RESERVED_NAME} color: grey;
9865
+ classDef ${MERMAID_KNOWLEDGE_NAME} color: grey;
9866
+
9867
+ end;
9868
+
9869
+ `);
9870
+ return promptbookMermaid;
9718
9871
  }
9719
9872
  /**
9720
- * TODO: [šŸ•›] Unite `AgentBasicInformation`, `ChatParticipant`, `LlmExecutionTools` + `LlmToolsMetadata`
9873
+ * TODO: [🧠] FOREACH in mermaid graph
9874
+ * TODO: [🧠] Knowledge in mermaid graph
9875
+ * TODO: [🧠] Personas in mermaid graph
9876
+ * TODO: Maybe use some Mermaid package instead of string templating
9877
+ * TODO: [šŸ•Œ] When more than 2 functionalities, split into separate functions
9721
9878
  */
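A rough sketch of calling `renderPromptbookMermaid` with a minimal pipeline. The pipeline object below is an illustrative assumption that fills in only the properties the function reads (`title`, `parameters`, `tasks`):

```js
import { renderPromptbookMermaid } from '@promptbook/utils';

// Minimal, hypothetical pipeline JSON
const pipelineJson = {
    title: 'Greeting pipeline',
    parameters: [
        { name: 'name', isInput: true, isOutput: false },
        { name: 'greeting', isInput: false, isOutput: true },
    ],
    tasks: [
        {
            name: 'makeGreeting',
            title: 'Make greeting',
            dependentParameterNames: ['name'],
            resultingParameterName: 'greeting',
        },
    ],
};

const mermaid = renderPromptbookMermaid(pipelineJson, {
    // Optional: turn task nodes into clickable links in the rendered graph
    linkTask: (task) => ({ href: `#${task.resultingParameterName}`, title: task.title }),
});

console.log(mermaid); // <- Note: Not wrapped in a Markdown code block
```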
9722
9879
 
9723
9880
  /**
9724
- * Creates model requirements for an agent based on its source
9881
+ * Tag function for notating a prompt as a template literal
9725
9882
  *
9726
- * There are 2 similar functions:
9727
- * - `parseAgentSource` which is a lightweight parser for agent source, it parses basic information and its purpose is to be quick and synchronous. The commitments there are hardcoded.
9728
- * - `createAgentModelRequirements` which is an asynchronous function that creates model requirements it applies each commitment one by one and works asynchronous.
9883
+ * Note: There are 3 similar functions:
9884
+ * 1) `prompt` for notating a single prompt, exported from `@promptbook/utils`
9885
+ * 2) `promptTemplate`, an alias for `prompt`
9886
+ * 3) `book` for notating and validating entire books, exported from `@promptbook/utils`
9729
9887
  *
9730
- * @public exported from `@promptbook/core`
9888
+ * @param strings
9889
+ * @param values
9890
+ * @returns the prompt string
9891
+ * @public exported from `@promptbook/utils`
9731
9892
  */
9732
- async function createAgentModelRequirements(agentSource, modelName, availableModels, llmTools) {
9733
- // If availableModels are provided and no specific modelName is given,
9734
- // use preparePersona to select the best model
9735
- if (availableModels && !modelName && llmTools) {
9736
- const selectedModelName = await selectBestModelUsingPersona(agentSource, llmTools);
9737
- return createAgentModelRequirementsWithCommitments(agentSource, selectedModelName);
9893
+ function prompt(strings, ...values) {
9894
+ if (values.length === 0) {
9895
+ return spaceTrim$1(strings.join(''));
9738
9896
  }
9739
- // Use the new commitment-based system with provided or default model
9740
- return createAgentModelRequirementsWithCommitments(agentSource, modelName);
9741
- }
9742
- /**
9743
- * Selects the best model using the preparePersona function
9744
- * This directly uses preparePersona to ensure DRY principle
9745
- *
9746
- * @param agentSource The agent source to derive persona description from
9747
- * @param llmTools LLM tools for preparing persona
9748
- * @returns The name of the best selected model
9749
- * @private function of `createAgentModelRequirements`
9750
- */
9751
- async function selectBestModelUsingPersona(agentSource, llmTools) {
9752
- var _a;
9753
- // Parse agent source to get persona description
9754
- const { agentName, personaDescription } = parseAgentSource(agentSource);
9755
- // Use agent name as fallback if no persona description is available
9897
+ const stringsWithHiddenParameters = strings.map((stringsItem) =>
9898
+ // TODO: [0] DRY
9899
+ stringsItem.split('{').join(`${REPLACING_NONCE}beginbracket`).split('}').join(`${REPLACING_NONCE}endbracket`));
9900
+ const placeholderParameterNames = values.map((value, i) => `${REPLACING_NONCE}${i}`);
9901
+ const parameters = Object.fromEntries(values.map((value, i) => [placeholderParameterNames[i], value]));
9902
+ // Combine strings and values
9903
+ let pipelineString = stringsWithHiddenParameters.reduce((result, stringsItem, i) => placeholderParameterNames[i] === undefined
9904
+ ? `${result}${stringsItem}`
9905
+ : `${result}${stringsItem}{${placeholderParameterNames[i]}}`, '');
9906
+ pipelineString = spaceTrim$1(pipelineString);
9907
+ try {
9908
+ pipelineString = templateParameters(pipelineString, parameters);
9909
+ }
9910
+ catch (error) {
9911
+ if (!(error instanceof PipelineExecutionError)) {
9912
+ throw error;
9913
+ }
9914
+ console.error({ pipelineString, parameters, placeholderParameterNames, error });
9915
+ throw new UnexpectedError(spaceTrim$1((block) => `
9916
+ Internal error in prompt template literal
9917
+
9918
+ ${block(JSON.stringify({ strings, values }, null, 4))}
9919
+
9920
+ `));
9921
+ }
9922
+ // TODO: [0] DRY
9923
+ pipelineString = pipelineString
9924
+ .split(`${REPLACING_NONCE}beginbracket`)
9925
+ .join('{')
9926
+ .split(`${REPLACING_NONCE}endbracket`)
9927
+ .join('}');
9928
+ return pipelineString;
9929
+ }
9930
+ /**
9931
+ * TODO: [🧠][🈓] Where is the best location for this file
9932
+ * Note: [šŸ’ž] Ignore a discrepancy between file name and entity name
9933
+ */
9934
+
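A short usage sketch of the `prompt` tag function (import path taken from the `@public exported from @promptbook/utils` annotation above; the interpolated value is a hypothetical example):

```js
import { prompt } from '@promptbook/utils';

const customerName = 'Alice'; // <- Hypothetical value

const promptString = prompt`
    Write a short welcome message for ${customerName}.
    Keep the {tone} placeholder literally in the text.
`;
// -> space-trimmed string with the value substituted via `templateParameters`;
//    literal braces in the template text are escaped and restored, so `{tone}` survives as-is.
```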
9935
+ /**
9936
+ * Detects if the code is running in a browser environment in the main thread (not in a web worker)
9937
+ *
9938
+ * Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
9939
+ *
9940
+ * @public exported from `@promptbook/utils`
9941
+ */
9942
+ const $isRunningInBrowser = new Function(`
9943
+ try {
9944
+ return this === window;
9945
+ } catch (e) {
9946
+ return false;
9947
+ }
9948
+ `);
9949
+ /**
9950
+ * TODO: [šŸŽŗ]
9951
+ */
9952
+
9953
+ /**
9954
+ * Detects if the code is running in a Jest environment
9955
+ *
9956
+ * Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
9957
+ *
9958
+ * @public exported from `@promptbook/utils`
9959
+ */
9960
+ const $isRunningInJest = new Function(`
9961
+ try {
9962
+ return process.env.JEST_WORKER_ID !== undefined;
9963
+ } catch (e) {
9964
+ return false;
9965
+ }
9966
+ `);
9967
+ /**
9968
+ * TODO: [šŸŽŗ]
9969
+ */
9970
+
9971
+ /**
9972
+ * Detects if the code is running in a Node.js environment
9973
+ *
9974
+ * Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
9975
+ *
9976
+ * @public exported from `@promptbook/utils`
9977
+ */
9978
+ const $isRunningInNode = new Function(`
9979
+ try {
9980
+ return this === global;
9981
+ } catch (e) {
9982
+ return false;
9983
+ }
9984
+ `);
9985
+ /**
9986
+ * TODO: [šŸŽŗ]
9987
+ */
9988
+
9989
+ /**
9990
+ * Detects if the code is running in a web worker
9991
+ *
9992
+ * Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
9993
+ *
9994
+ * @public exported from `@promptbook/utils`
9995
+ */
9996
+ const $isRunningInWebWorker = new Function(`
9997
+ try {
9998
+ if (typeof WorkerGlobalScope !== 'undefined' && self instanceof WorkerGlobalScope) {
9999
+ return true;
10000
+ } else {
10001
+ return false;
10002
+ }
10003
+ } catch (e) {
10004
+ return false;
10005
+ }
10006
+ `);
10007
+ /**
10008
+ * TODO: [šŸŽŗ]
10009
+ */
10010
+
10011
+ /**
10012
+ * Returns information about the current runtime environment
10013
+ *
10014
+ * Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environments
10015
+ *
10016
+ * @public exported from `@promptbook/utils`
10017
+ */
10018
+ function $detectRuntimeEnvironment() {
10019
+ return {
10020
+ isRunningInBrowser: $isRunningInBrowser(),
10021
+ isRunningInJest: $isRunningInJest(),
10022
+ isRunningInNode: $isRunningInNode(),
10023
+ isRunningInWebWorker: $isRunningInWebWorker(),
10024
+ };
10025
+ }
10026
+ /**
10027
+ * TODO: [šŸŽŗ] Also detect and report node version here
10028
+ */
10029
+
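For example, a small sketch of using these detectors together (the flag values shown are what one would expect in a plain Node.js process, not guaranteed output):

```js
import { $detectRuntimeEnvironment, $isRunningInNode } from '@promptbook/utils';

const environment = $detectRuntimeEnvironment();
// -> { isRunningInBrowser: false, isRunningInJest: false, isRunningInNode: true, isRunningInWebWorker: false }

if ($isRunningInNode()) {
    // Safe to use Node.js-only APIs such as `fs` here
}
```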
10030
+ /**
10031
+ * Simple wrapper around `new Date().toISOString()`
10032
+ *
10033
+ * Note: `$` is used to indicate that this function is not a pure function - it is not deterministic because it depends on the current time
10034
+ *
10035
+ * @returns string_date branded type
10036
+ * @public exported from `@promptbook/utils`
10037
+ */
10038
+ function $getCurrentDate() {
10039
+ return new Date().toISOString();
10040
+ }
10041
+
10042
+ /**
10043
+ * Function `parseNumber` will parse a number from a string
10044
+ *
10045
+ * Note: [šŸ”‚] This function is idempotent.
10046
+ * Unlike `Number.parseInt` and `Number.parseFloat`, it will never result in `NaN`
10047
+ * Note: It works only with decimal numbers
10048
+ *
10049
+ * @returns parsed number
10050
+ * @throws {ParseError} if the value is not a number
10051
+ *
10052
+ * @public exported from `@promptbook/utils`
10053
+ */
10054
+ function parseNumber(value) {
10055
+ const originalValue = value;
10056
+ if (typeof value === 'number') {
10057
+ value = value.toString(); // <- TODO: Maybe more efficient way to do this
10058
+ }
10059
+ if (typeof value !== 'string') {
10060
+ return 0;
10061
+ }
10062
+ value = value.trim();
10063
+ if (value.startsWith('+')) {
10064
+ return parseNumber(value.substring(1));
10065
+ }
10066
+ if (value.startsWith('-')) {
10067
+ const number = parseNumber(value.substring(1));
10068
+ if (number === 0) {
10069
+ return 0; // <- Note: To prevent -0
10070
+ }
10071
+ return -number;
10072
+ }
10073
+ value = value.replace(/,/g, '.');
10074
+ value = value.toUpperCase();
10075
+ if (value === '') {
10076
+ return 0;
10077
+ }
10078
+ if (value === '♾' || value.startsWith('INF')) {
10079
+ return Infinity;
10080
+ }
10081
+ if (value.includes('/')) {
10082
+ const [numerator_, denominator_] = value.split('/');
10083
+ const numerator = parseNumber(numerator_);
10084
+ const denominator = parseNumber(denominator_);
10085
+ if (denominator === 0) {
10086
+ throw new ParseError(`Unable to parse number from "${originalValue}" because denominator is zero`);
10087
+ }
10088
+ return numerator / denominator;
10089
+ }
10090
+ if (/^(NAN|NULL|NONE|UNDEFINED|ZERO|NO.*)$/.test(value)) {
10091
+ return 0;
10092
+ }
10093
+ if (value.includes('E')) {
10094
+ const [significand, exponent] = value.split('E');
10095
+ return parseNumber(significand) * 10 ** parseNumber(exponent);
10096
+ }
10097
+ if (!/^[0-9.]+$/.test(value) || value.split('.').length > 2) {
10098
+ throw new ParseError(`Unable to parse number from "${originalValue}"`);
10099
+ }
10100
+ const num = parseFloat(value);
10101
+ if (isNaN(num)) {
10102
+ throw new ParseError(`Unexpected NaN when parsing number from "${originalValue}"`);
10103
+ }
10104
+ return num;
10105
+ }
10106
+ /**
10107
+ * TODO: Maybe use sth. like safe-eval in fraction/calculation case @see https://www.npmjs.com/package/safe-eval
10108
+ * TODO: [🧠][🌻] Maybe export through `@promptbook/markdown-utils` not `@promptbook/utils`
10109
+ */
10110
+
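A few illustrative calls that follow directly from the branches above:

```js
import { parseNumber } from '@promptbook/utils';

parseNumber('  42  '); // -> 42 (whitespace is trimmed)
parseNumber('3,14');   // -> 3.14 (decimal comma is converted to a dot)
parseNumber('1/4');    // -> 0.25 (fractions are supported)
parseNumber('2e3');    // -> 2000 (scientific notation)
parseNumber('none');   // -> 0 (nan/null/none/undefined/zero/no… map to 0)
parseNumber('INF');    // -> Infinity
parseNumber('abc');    // -> throws ParseError
```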
10111
+ /**
10112
+ * Removes quotes from a string
10113
+ *
10114
+ * Note: [šŸ”‚] This function is idempotent.
10115
+ * Tip: This is very useful for post-processing the result of an LLM model
10116
+ * Note: This function removes only the same quotes from the beginning and the end of the string
10117
+ * Note: There are two similar functions:
10118
+ * - `removeQuotes` which removes only bounding quotes
10119
+ * - `unwrapResult` which removes the whole introductory sentence
10120
+ *
10121
+ * @param text optionally quoted text
10122
+ * @returns text without quotes
10123
+ * @public exported from `@promptbook/utils`
10124
+ */
10125
+ function removeQuotes(text) {
10126
+ if (text.startsWith('"') && text.endsWith('"')) {
10127
+ return text.slice(1, -1);
10128
+ }
10129
+ if (text.startsWith("'") && text.endsWith("'")) {
10130
+ return text.slice(1, -1);
10131
+ }
10132
+ return text;
10133
+ }
10134
+
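For example:

```js
import { removeQuotes } from '@promptbook/utils';

removeQuotes('"Hello"');      // -> 'Hello'
removeQuotes("'Hello'");      // -> 'Hello'
removeQuotes('"Hello\'');     // -> '"Hello\'' (mismatched quotes stay untouched)
removeQuotes('Say "hi" now'); // -> 'Say "hi" now' (only bounding quotes are removed)
```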
10135
+ /**
10136
+ * Trims string from all 4 sides
10137
+ *
10138
+ * Note: This is a re-exported function from the `spacetrim` package which is
10139
+ * developed by the same author (@hejny) as this package
10140
+ *
10141
+ * @public exported from `@promptbook/utils`
10142
+ * @see https://github.com/hejny/spacetrim#usage
10143
+ */
10144
+ const spaceTrim = spaceTrim$2;
10145
+
10146
+ /**
10147
+ * Checks if the given value is a valid JavaScript identifier name.
10148
+ *
10149
+ * @param javascriptName The value to check for JavaScript identifier validity.
10150
+ * @returns `true` if the value is a valid JavaScript name, `false` otherwise.
10151
+ * @public exported from `@promptbook/utils`
10152
+ */
10153
+ function isValidJavascriptName(javascriptName) {
10154
+ if (typeof javascriptName !== 'string') {
10155
+ return false;
10156
+ }
10157
+ return /^[a-zA-Z_$][0-9a-zA-Z_$]*$/i.test(javascriptName);
10158
+ }
10159
+
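For example, a few cases that follow from the identifier regex above:

```js
import { isValidJavascriptName } from '@promptbook/utils';

isValidJavascriptName('myVariable');  // -> true
isValidJavascriptName('_private$1');  // -> true
isValidJavascriptName('2ndValue');    // -> false (cannot start with a digit)
isValidJavascriptName('my-variable'); // -> false (hyphen is not allowed)
```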
10160
+ /**
10161
+ * Normalizes an agent name from an arbitrary string to a valid agent name
10162
+ *
10163
+ * Note: [šŸ”‚] This function is idempotent.
10164
+ *
10165
+ * @public exported from `@promptbook/core`
10166
+ */
10167
+ function normalizeAgentName(rawAgentName) {
10168
+ return titleToName(spaceTrim$1(rawAgentName));
10169
+ }
10170
+
10171
+ /**
10172
+ * Creates a temporary default agent name based on the agent source hash
10173
+ *
10174
+ * @public exported from `@promptbook/core`
10175
+ */
10176
+ function createDefaultAgentName(agentSource) {
10177
+ const agentHash = computeAgentHash(agentSource);
10178
+ return normalizeAgentName(`Agent ${agentHash.substring(0, 6)}`);
10179
+ }
10180
+
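A sketch of the intended flow (the exact normalized output depends on `titleToName` and on the agent-source hash, so the shape shown is only indicative):

```js
import { createDefaultAgentName, normalizeAgentName } from '@promptbook/core';

// Hypothetical agent source that declares no explicit name
const agentSource = 'PERSONA A nameless helper agent';

const defaultName = createDefaultAgentName(agentSource);
// -> roughly `Agent <first 6 chars of the agent hash>` passed through `normalizeAgentName`;
//    the exact casing and separators depend on `titleToName`

// Normalization is idempotent [šŸ”‚], so re-normalizing a default name is a no-op
normalizeAgentName(defaultName) === defaultName; // -> true
```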
10181
+ /**
10182
+ * Parses basic information from agent source
10183
+ *
10184
+ * There are 2 similar functions:
10185
+ * - `parseAgentSource` which is a lightweight parser for agent source; it parses basic information and its purpose is to be quick and synchronous. The commitments there are hardcoded.
10185
+ * - `createAgentModelRequirements` which is an asynchronous function that creates model requirements; it applies each commitment one by one and works asynchronously.
10187
+ *
10188
+ * @public exported from `@promptbook/core`
10189
+ */
10190
+ function parseAgentSource(agentSource) {
10191
+ const parseResult = parseAgentSourceWithCommitments(agentSource);
10192
+ // Find PERSONA and META commitments
10193
+ let personaDescription = null;
10194
+ for (const commitment of parseResult.commitments) {
10195
+ if (commitment.type !== 'PERSONA') {
10196
+ continue;
10197
+ }
10198
+ if (personaDescription === null) {
10199
+ personaDescription = '';
10200
+ }
10201
+ else {
10202
+ personaDescription += `\n\n${personaDescription}`;
10203
+ }
10204
+ personaDescription += commitment.content;
10205
+ }
10206
+ let initialMessage = null;
10207
+ for (const commitment of parseResult.commitments) {
10208
+ if (commitment.type !== 'INITIAL MESSAGE') {
10209
+ continue;
10210
+ }
10211
+ // Note: When multiple INITIAL MESSAGE commitments are present, the later one overrides the earlier one.
10212
+ // Appending could be considered instead, but an initial message is usually a single block,
10213
+ // so "later overrides earlier" (i.e. the last one wins) is kept for simplicity.
10214
+ initialMessage = commitment.content;
10215
+ }
10216
+ const meta = {};
10217
+ const links = [];
10218
+ for (const commitment of parseResult.commitments) {
10219
+ if (commitment.type === 'META LINK') {
10220
+ links.push(spaceTrim$1(commitment.content));
10221
+ continue;
10222
+ }
10223
+ if (commitment.type !== 'META') {
10224
+ continue;
10225
+ }
10226
+ // Parse META commitments - format is "META TYPE content"
10227
+ const metaTypeRaw = commitment.content.split(' ')[0] || 'NONE';
10228
+ if (metaTypeRaw === 'LINK') {
10229
+ links.push(spaceTrim$1(commitment.content.substring(metaTypeRaw.length)));
10230
+ }
10231
+ const metaType = normalizeTo_camelCase(metaTypeRaw);
10232
+ meta[metaType] = spaceTrim$1(commitment.content.substring(metaTypeRaw.length));
10233
+ }
10234
+ // Generate gravatar fallback if no meta image specified
10235
+ if (!meta.image) {
10236
+ meta.image = generatePlaceholderAgentProfileImageUrl(parseResult.agentName || '!!');
10237
+ }
10238
+ // Parse parameters using unified approach - both @Parameter and {parameter} notations
10239
+ // are treated as the same syntax feature with unified representation
10240
+ const parameters = parseParameters(agentSource);
10241
+ const agentHash = computeAgentHash(agentSource);
10242
+ return {
10243
+ agentName: normalizeAgentName(parseResult.agentName || createDefaultAgentName(agentSource)),
10244
+ agentHash,
10245
+ personaDescription,
10246
+ initialMessage,
10247
+ meta,
10248
+ links,
10249
+ parameters,
10250
+ };
10251
+ }
10252
+ /**
10253
+ * TODO: [šŸ•›] Unite `AgentBasicInformation`, `ChatParticipant`, `LlmExecutionTools` + `LlmToolsMetadata`
10254
+ */
10255
+
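To illustrate the fields returned above, a hypothetical parse (field values are approximate; exact whitespace handling depends on the commitment parser):

```js
import { parseAgentSource } from '@promptbook/core';

// Hypothetical agent source, used only to illustrate the returned fields
const agentSource = `
Joe the Translator

PERSONA You are a precise English-to-Czech translator.
INITIAL MESSAGE Hello! Paste the text you want translated.
META LINK https://example.com/joe
`;

const profile = parseAgentSource(agentSource);

profile.agentName;          // -> normalized 'Joe the Translator' (see `normalizeAgentName`)
profile.agentHash;          // -> hash of the whole source (see `computeAgentHash`)
profile.personaDescription; // -> the PERSONA text
profile.initialMessage;     // -> the INITIAL MESSAGE text
profile.links;              // -> ['https://example.com/joe']
profile.meta.image;         // -> placeholder profile image URL unless a META IMAGE is given
profile.parameters;         // -> parameters found via `@Parameter` / `{parameter}` notation
```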
10256
+ /**
10257
+ * Creates model requirements for an agent based on its source
10258
+ *
10259
+ * There are 2 similar functions:
10260
+ * - `parseAgentSource` which is a lightweight parser for agent source, it parses basic information and its purpose is to be quick and synchronous. The commitments there are hardcoded.
10261
+ * - `createAgentModelRequirements` which is an asynchronous function that creates model requirements it applies each commitment one by one and works asynchronous.
10262
+ *
10263
+ * @public exported from `@promptbook/core`
10264
+ */
10265
+ async function createAgentModelRequirements(agentSource, modelName, availableModels, llmTools) {
10266
+ // If availableModels are provided and no specific modelName is given,
10267
+ // use preparePersona to select the best model
10268
+ if (availableModels && !modelName && llmTools) {
10269
+ const selectedModelName = await selectBestModelUsingPersona(agentSource, llmTools);
10270
+ return createAgentModelRequirementsWithCommitments(agentSource, selectedModelName);
10271
+ }
10272
+ // Use the new commitment-based system with provided or default model
10273
+ return createAgentModelRequirementsWithCommitments(agentSource, modelName);
10274
+ }
10275
+ /**
10276
+ * Selects the best model using the preparePersona function
10277
+ * This directly uses preparePersona to ensure DRY principle
10278
+ *
10279
+ * @param agentSource The agent source to derive persona description from
10280
+ * @param llmTools LLM tools for preparing persona
10281
+ * @returns The name of the best selected model
10282
+ * @private function of `createAgentModelRequirements`
10283
+ */
10284
+ async function selectBestModelUsingPersona(agentSource, llmTools) {
10285
+ var _a;
10286
+ // Parse agent source to get persona description
10287
+ const { agentName, personaDescription } = parseAgentSource(agentSource);
10288
+ // Use agent name as fallback if no persona description is available
9756
10289
  const description = personaDescription || agentName || 'AI Agent';
9757
10290
  try {
9758
10291
  // Use preparePersona directly
@@ -9883,37 +10416,28 @@ const DEFAULT_BOOK = padBook(validateBook(spaceTrim$1(`
9883
10416
  PERSONA A friendly AI assistant that helps you with your tasks
9884
10417
  `)));
9885
10418
  // <- Note: Not using book`...` notation to avoid strange error in jest unit tests `TypeError: (0 , book_notation_1.book) is not a function`
9886
- // <- TODO: !!! `GENESIS_BOOK` / `ADAM_BOOK` in `/agents/adam.book`
9887
- // <- !!! Buttons into genesis book
9888
- // <- TODO: !!! generateBookBoilerplate and deprecate `DEFAULT_BOOK`
9889
-
9890
- /**
9891
- * Trims string from all 4 sides
9892
- *
9893
- * Note: This is a re-exported function from the `spacetrim` package which is
9894
- * Developed by same author @hejny as this package
9895
- *
9896
- * @public exported from `@promptbook/utils`
9897
- * @see https://github.com/hejny/spacetrim#usage
9898
- */
9899
- const spaceTrim = spaceTrim$2;
10419
+ // <- TODO: [šŸ±ā€šŸš€] `GENESIS_BOOK` / `ADAM_BOOK` in `/agents/adam.book`
10420
+ // <- [šŸ±ā€šŸš€] Buttons into genesis book
10421
+ // <- TODO: [šŸ±ā€šŸš€] generateBookBoilerplate and deprecate `DEFAULT_BOOK`
9900
10422
 
10423
+ // import { getTableName } from '../../../../../apps/agents-server/src/database/getTableName';
10424
+ // <- TODO: [šŸ±ā€šŸš€] Prevent imports from `/apps` -> `/src`
9901
10425
  /**
9902
10426
  * Agent collection stored in Supabase table
9903
10427
  *
9904
10428
 * Note: This object can work both in Node.js and in a browser environment, depending on the Supabase client provided
9905
10429
  *
9906
10430
  * @public exported from `@promptbook/core`
9907
- * <- TODO: !!! Move to `@promptbook/supabase` package
10431
+ * <- TODO: [šŸ±ā€šŸš€] Move to `@promptbook/supabase` package
9908
10432
  */
9909
- class AgentCollectionInSupabase /* TODO: !!!! implements AgentCollection */ {
10433
+ class AgentCollectionInSupabase /* TODO: [šŸ±ā€šŸš€] implements Agent */ {
9910
10434
  /**
9911
10435
  * @param rootPath - path to the directory with agents
9912
- * @param tools - Execution tools to be used in !!! `Agent` itself and listing the agents
10436
+ * @param tools - Execution tools to be used in [šŸ±ā€šŸš€] `Agent` itself and listing the agents
9913
10437
  * @param options - Options for the collection creation
9914
10438
  */
9915
10439
  constructor(supabaseClient,
9916
- /// TODO: !!! Remove> private readonly tools?: Pick<ExecutionTools, 'llm' | 'fs' | 'scrapers'>,
10440
+ /// TODO: [šŸ±ā€šŸš€] Remove> private readonly tools?: Pick<ExecutionTools, 'llm' | 'fs' | 'scrapers'>,
9917
10441
  options) {
9918
10442
  this.supabaseClient = supabaseClient;
9919
10443
  this.options = options;
@@ -9928,8 +10452,8 @@ class AgentCollectionInSupabase /* TODO: !!!! implements AgentCollection */ {
9928
10452
  async listAgents( /* TODO: [🧠] Allow to pass some condition here */) {
9929
10453
  const { isVerbose = DEFAULT_IS_VERBOSE } = this.options || {};
9930
10454
  const selectResult = await this.supabaseClient
9931
- .from('AgentCollection' /* <- TODO: !!!! Change to `Agent` */)
9932
- .select('agentProfile');
10455
+ .from(this.getTableName('Agent'))
10456
+ .select('agentName,agentProfile');
9933
10457
  if (selectResult.error) {
9934
10458
  throw new DatabaseError(spaceTrim((block) => `
9935
10459
 
@@ -9941,14 +10465,27 @@ class AgentCollectionInSupabase /* TODO: !!!! implements AgentCollection */ {
9941
10465
  if (isVerbose) {
9942
10466
  console.info(`Found ${selectResult.data.length} agents in directory`);
9943
10467
  }
9944
- return selectResult.data.map((row) => row.agentProfile);
10468
+ return selectResult.data.map(({ agentName, agentProfile }) => {
10469
+ if (isVerbose && agentProfile.agentName !== agentName) {
10470
+ console.warn(spaceTrim(`
10471
+ Agent name mismatch for agent "${agentName}". Using name from database.
10472
+
10473
+ agentName: "${agentName}"
10474
+ agentProfile.agentName: "${agentProfile.agentName}"
10475
+ `));
10476
+ }
10477
+ return {
10478
+ ...agentProfile,
10479
+ agentName,
10480
+ };
10481
+ });
9945
10482
  }
9946
10483
  /**
9947
- * !!!@@@
10484
+ * [šŸ±ā€šŸš€]@@@
9948
10485
  */
9949
10486
  async getAgentSource(agentName) {
9950
10487
  const selectResult = await this.supabaseClient
9951
- .from('AgentCollection' /* <- TODO: !!!! Change to `Agent` */)
10488
+ .from(this.getTableName('Agent'))
9952
10489
  .select('agentSource')
9953
10490
  .eq('agentName', agentName)
9954
10491
  .single();
@@ -9964,7 +10501,7 @@ class AgentCollectionInSupabase /* TODO: !!!! implements AgentCollection */ {
9964
10501
 
9965
10502
  ${block(selectResult.error.message)}
9966
10503
  `));
9967
- // <- TODO: !!! First check if the error is "not found" and throw `NotFoundError` instead then throw `DatabaseError`
10504
+ // <- TODO: [šŸ±ā€šŸš€] First check if the error is "not found" and throw `NotFoundError` instead then throw `DatabaseError`
9968
10505
  }
9969
10506
  return selectResult.data.agentSource;
9970
10507
  }
@@ -9976,67 +10513,90 @@ class AgentCollectionInSupabase /* TODO: !!!! implements AgentCollection */ {
9976
10513
  async createAgent(agentSource) {
9977
10514
  const agentProfile = parseAgentSource(agentSource);
9978
10515
  // <- TODO: [šŸ•›]
9979
- const selectResult = await this.supabaseClient
9980
- .from('AgentCollection' /* <- TODO: !!!! Change to `Agent` */)
9981
- .insert({
9982
- agentName: agentProfile.agentName || '!!!!!' /* <- TODO: !!!! Remove */,
10516
+ const { agentName, agentHash } = agentProfile;
10517
+ const insertAgentResult = await this.supabaseClient.from(this.getTableName('Agent')).insert({
10518
+ agentName,
10519
+ agentHash,
9983
10520
  agentProfile,
9984
10521
  createdAt: new Date().toISOString(),
9985
10522
  updatedAt: null,
9986
- agentVersion: 0,
9987
10523
  promptbookEngineVersion: PROMPTBOOK_ENGINE_VERSION,
9988
10524
  usage: ZERO_USAGE,
9989
10525
  agentSource: agentSource,
9990
10526
  });
9991
- if (selectResult.error) {
10527
+ if (insertAgentResult.error) {
9992
10528
  throw new DatabaseError(spaceTrim((block) => `
9993
10529
  Error creating agent "${agentProfile.agentName}" in Supabase:
9994
10530
 
9995
- ${block(selectResult.error.message)}
10531
+ ${block(insertAgentResult.error.message)}
9996
10532
  `));
9997
10533
  }
10534
+ await this.supabaseClient.from(this.getTableName('AgentHistory')).insert({
10535
+ createdAt: new Date().toISOString(),
10536
+ agentName,
10537
+ agentHash,
10538
+ previousAgentHash: null,
10539
+ agentSource,
10540
+ promptbookEngineVersion: PROMPTBOOK_ENGINE_VERSION,
10541
+ });
10542
+ // <- TODO: [🧠] What to do with `insertAgentHistoryResult.error`, ignore? wait?
9998
10543
  return agentProfile;
9999
10544
  }
10000
10545
  /**
10001
10546
  * Updates an existing agent in the collection
10002
10547
  */
10003
10548
  async updateAgentSource(agentName, agentSource) {
10004
- const selectResult = await this.supabaseClient
10005
- .from('AgentCollection' /* <- TODO: !!!! Change to `Agent` */)
10006
- .select('agentVersion')
10549
+ const selectPreviousAgentResult = await this.supabaseClient
10550
+ .from(this.getTableName('Agent'))
10551
+ .select('agentHash,agentName')
10007
10552
  .eq('agentName', agentName)
10008
10553
  .single();
10009
- if (!selectResult.data) {
10010
- throw new NotFoundError(`Agent "${agentName}" not found`);
10554
+ if (selectPreviousAgentResult.error) {
10555
+ throw new DatabaseError(spaceTrim((block) => `
10556
+
10557
+ Error fetching agent "${agentName}" from Supabase:
10558
+
10559
+ ${block(selectPreviousAgentResult.error.message)}
10560
+ `));
10561
+ // <- TODO: [šŸ±ā€šŸš€] First check if the error is "not found" and throw `NotFoundError` instead then throw `DatabaseError`
10011
10562
  }
10563
+ selectPreviousAgentResult.data.agentName;
10564
+ const previousAgentHash = selectPreviousAgentResult.data.agentHash;
10012
10565
  const agentProfile = parseAgentSource(agentSource);
10013
- // TODO: !!!!!! What about agentName change
10014
- console.log('!!! agentName', agentName);
10015
- const oldAgentSource = await this.getAgentSource(agentName);
10016
- const updateResult = await this.supabaseClient
10017
- .from('AgentCollection' /* <- TODO: !!!! Change to `Agent` */)
10566
+ // <- TODO: [šŸ•›]
10567
+ const { agentHash } = agentProfile;
10568
+ const updateAgentResult = await this.supabaseClient
10569
+ .from(this.getTableName('Agent'))
10018
10570
  .update({
10019
- // TODO: !!!! Compare not update> agentName: agentProfile.agentName || '!!!!!' /* <- TODO: !!!! Remove */,
10571
+ // TODO: [šŸ±ā€šŸš€] Compare not update> agentName: agentProfile.agentName || '[šŸ±ā€šŸš€]' /* <- TODO: [šŸ±ā€šŸš€] Remove */,
10020
10572
  agentProfile,
10021
10573
  updatedAt: new Date().toISOString(),
10022
- agentVersion: selectResult.data.agentVersion + 1,
10574
+ agentHash: agentProfile.agentHash,
10023
10575
  agentSource,
10024
10576
  promptbookEngineVersion: PROMPTBOOK_ENGINE_VERSION,
10025
10577
  })
10026
10578
  .eq('agentName', agentName);
10027
- const newAgentSource = await this.getAgentSource(agentName);
10028
- console.log('!!! updateAgent', updateResult);
10029
- console.log('!!! old', oldAgentSource);
10030
- console.log('!!! new', newAgentSource);
10031
- if (updateResult.error) {
10579
+ // console.log('[šŸ±ā€šŸš€] updateAgent', updateResult);
10580
+ // console.log('[šŸ±ā€šŸš€] old', oldAgentSource);
10581
+ // console.log('[šŸ±ā€šŸš€] new', newAgentSource);
10582
+ if (updateAgentResult.error) {
10032
10583
  throw new DatabaseError(spaceTrim((block) => `
10033
10584
  Error updating agent "${agentName}" in Supabase:
10034
10585
 
10035
- ${block(updateResult.error.message)}
10586
+ ${block(updateAgentResult.error.message)}
10036
10587
  `));
10037
10588
  }
10589
+ await this.supabaseClient.from(this.getTableName('AgentHistory')).insert({
10590
+ createdAt: new Date().toISOString(),
10591
+ agentName,
10592
+ agentHash,
10593
+ previousAgentHash,
10594
+ agentSource,
10595
+ promptbookEngineVersion: PROMPTBOOK_ENGINE_VERSION,
10596
+ });
10597
+ // <- TODO: [🧠] What to do with `insertAgentHistoryResult.error`, ignore? wait?
10038
10598
  }
10039
- // TODO: !!!! public async getAgentSourceSubject(agentName: string_agent_name): Promise<BehaviorSubject<string_book>>
10599
+ // TODO: [šŸ±ā€šŸš€] public async getAgentSourceSubject(agentName: string_agent_name): Promise<BehaviorSubject<string_book>>
10040
10600
  // Use Supabase realtime logic
10041
10601
  /**
10042
10602
  * Deletes an agent from the collection
@@ -10044,9 +10604,19 @@ class AgentCollectionInSupabase /* TODO: !!!! implements AgentCollection */ {
10044
10604
  async deleteAgent(agentName) {
10045
10605
  throw new NotYetImplementedError('Method not implemented.');
10046
10606
  }
10607
+ /**
10608
+ * Get the Supabase table name with prefix
10609
+ *
10610
+ * @param tableName - The original table name
10611
+ * @returns The prefixed table name
10612
+ */
10613
+ getTableName(tableName) {
10614
+ const { tablePrefix = '' } = this.options || {};
10615
+ return `${tablePrefix}${tableName}`;
10616
+ }
10047
10617
  }
10048
10618
  /**
10049
- * TODO: !!!! Implement it here correctly and update JSDoc comments here, and on interface + other implementations
10619
+ * TODO: [šŸ±ā€šŸš€] Implement it here correctly and update JSDoc comments here, and on interface + other implementations
10050
10620
  * TODO: Write unit test
10051
10621
  * TODO: [🧠][šŸš™] `AgentXxx` vs `AgentsXxx` naming convention
10052
10622
  */
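A rough sketch of constructing the collection (assuming `@supabase/supabase-js` as the client; URL, key, and prefix are placeholders, and `tablePrefix`/`isVerbose` are the options the code above reads):

```js
import { createClient } from '@supabase/supabase-js';
import { AgentCollectionInSupabase } from '@promptbook/core';

const supabaseClient = createClient('https://YOUR-PROJECT.supabase.co', 'YOUR-ANON-KEY');

const collection = new AgentCollectionInSupabase(supabaseClient, {
    tablePrefix: 'promptbook_', // <- Queries then target `promptbook_Agent` / `promptbook_AgentHistory`
    isVerbose: true,
});

const agents = await collection.listAgents();
// Each item is the stored `agentProfile`, with `agentName` taken from the database column
```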
@@ -10654,109 +11224,40 @@ const bookVersionCommandParser = {
10654
11224
  },
10655
11225
  /**
10656
11226
  * Apply the BOOK_VERSION command to the `pipelineJson`
10657
- *
10658
- * Note: `$` is used to indicate that this function mutates given `pipelineJson`
10659
- */
10660
- $applyToPipelineJson(command, $pipelineJson) {
10661
- // TODO: Warn if the version is overridden
10662
- $pipelineJson.bookVersion = command.bookVersion;
10663
- },
10664
- /**
10665
- * Converts the BOOK_VERSION command back to string
10666
- *
10667
- * Note: This is used in `pipelineJsonToString` utility
10668
- */
10669
- stringify(command) {
10670
- return `---`; // <- TODO: [šŸ›‹] Implement
10671
- },
10672
- /**
10673
- * Reads the BOOK_VERSION command from the `PipelineJson`
10674
- *
10675
- * Note: This is used in `pipelineJsonToString` utility
10676
- */
10677
- takeFromPipelineJson(pipelineJson) {
10678
- throw new NotYetImplementedError(`[šŸ›‹] Not implemented yet`); // <- TODO: [šŸ›‹] Implement
10679
- },
10680
- };
10681
-
10682
- /**
10683
- * Units of text measurement
10684
- *
10685
- * @see https://github.com/webgptorg/promptbook/discussions/30
10686
- * @public exported from `@promptbook/core`
10687
- */
10688
- const EXPECTATION_UNITS = ['CHARACTERS', 'WORDS', 'SENTENCES', 'LINES', 'PARAGRAPHS', 'PAGES'];
10689
- /**
10690
- * TODO: [šŸ’] Unite object for expecting amount and format - remove format
10691
- */
10692
-
10693
- /**
10694
- * Function parseNumber will parse number from string
10695
- *
10696
- * Note: [šŸ”‚] This function is idempotent.
10697
- * Unlike Number.parseInt, Number.parseFloat it will never ever result in NaN
10698
- * Note: it also works only with decimal numbers
10699
- *
10700
- * @returns parsed number
10701
- * @throws {ParseError} if the value is not a number
10702
- *
10703
- * @public exported from `@promptbook/utils`
10704
- */
10705
- function parseNumber(value) {
10706
- const originalValue = value;
10707
- if (typeof value === 'number') {
10708
- value = value.toString(); // <- TODO: Maybe more efficient way to do this
10709
- }
10710
- if (typeof value !== 'string') {
10711
- return 0;
10712
- }
10713
- value = value.trim();
10714
- if (value.startsWith('+')) {
10715
- return parseNumber(value.substring(1));
10716
- }
10717
- if (value.startsWith('-')) {
10718
- const number = parseNumber(value.substring(1));
10719
- if (number === 0) {
10720
- return 0; // <- Note: To prevent -0
10721
- }
10722
- return -number;
10723
- }
10724
- value = value.replace(/,/g, '.');
10725
- value = value.toUpperCase();
10726
- if (value === '') {
10727
- return 0;
10728
- }
10729
- if (value === '♾' || value.startsWith('INF')) {
10730
- return Infinity;
10731
- }
10732
- if (value.includes('/')) {
10733
- const [numerator_, denominator_] = value.split('/');
10734
- const numerator = parseNumber(numerator_);
10735
- const denominator = parseNumber(denominator_);
10736
- if (denominator === 0) {
10737
- throw new ParseError(`Unable to parse number from "${originalValue}" because denominator is zero`);
10738
- }
10739
- return numerator / denominator;
10740
- }
10741
- if (/^(NAN|NULL|NONE|UNDEFINED|ZERO|NO.*)$/.test(value)) {
10742
- return 0;
10743
- }
10744
- if (value.includes('E')) {
10745
- const [significand, exponent] = value.split('E');
10746
- return parseNumber(significand) * 10 ** parseNumber(exponent);
10747
- }
10748
- if (!/^[0-9.]+$/.test(value) || value.split('.').length > 2) {
10749
- throw new ParseError(`Unable to parse number from "${originalValue}"`);
10750
- }
10751
- const num = parseFloat(value);
10752
- if (isNaN(num)) {
10753
- throw new ParseError(`Unexpected NaN when parsing number from "${originalValue}"`);
10754
- }
10755
- return num;
10756
- }
11227
+ *
11228
+ * Note: `$` is used to indicate that this function mutates given `pipelineJson`
11229
+ */
11230
+ $applyToPipelineJson(command, $pipelineJson) {
11231
+ // TODO: Warn if the version is overridden
11232
+ $pipelineJson.bookVersion = command.bookVersion;
11233
+ },
11234
+ /**
11235
+ * Converts the BOOK_VERSION command back to string
11236
+ *
11237
+ * Note: This is used in `pipelineJsonToString` utility
11238
+ */
11239
+ stringify(command) {
11240
+ return `---`; // <- TODO: [šŸ›‹] Implement
11241
+ },
11242
+ /**
11243
+ * Reads the BOOK_VERSION command from the `PipelineJson`
11244
+ *
11245
+ * Note: This is used in `pipelineJsonToString` utility
11246
+ */
11247
+ takeFromPipelineJson(pipelineJson) {
11248
+ throw new NotYetImplementedError(`[šŸ›‹] Not implemented yet`); // <- TODO: [šŸ›‹] Implement
11249
+ },
11250
+ };
11251
+
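To make the mutation contract concrete, a minimal sketch (the command object shape beyond the `bookVersion` field is an assumption, and `bookVersionCommandParser` is internal to this bundle rather than a public export):

```js
const $pipelineJson = { /* ...rest of the parsed pipeline... */ };

bookVersionCommandParser.$applyToPipelineJson(
    { type: 'BOOK_VERSION', bookVersion: '1.0.0' }, // <- Hypothetical parsed command
    $pipelineJson,                                  // <- Mutated in place, hence the `$` prefix
);

$pipelineJson.bookVersion; // -> '1.0.0'
```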
10757
11252
  /**
10758
- * TODO: Maybe use sth. like safe-eval in fraction/calculation case @see https://www.npmjs.com/package/safe-eval
10759
- * TODO: [🧠][🌻] Maybe export through `@promptbook/markdown-utils` not `@promptbook/utils`
11253
+ * Units of text measurement
11254
+ *
11255
+ * @see https://github.com/webgptorg/promptbook/discussions/30
11256
+ * @public exported from `@promptbook/core`
11257
+ */
11258
+ const EXPECTATION_UNITS = ['CHARACTERS', 'WORDS', 'SENTENCES', 'LINES', 'PARAGRAPHS', 'PAGES'];
11259
+ /**
11260
+ * TODO: [šŸ’] Unite object for expecting amount and format - remove format
10760
11261
  */
10761
11262
 
10762
11263
  /**
@@ -10901,30 +11402,6 @@ const expectCommandParser = {
10901
11402
  },
10902
11403
  };
10903
11404
 
10904
- /**
10905
- * Removes quotes from a string
10906
- *
10907
- * Note: [šŸ”‚] This function is idempotent.
10908
- * Tip: This is very useful for post-processing of the result of the LLM model
10909
- * Note: This function removes only the same quotes from the beginning and the end of the string
10910
- * Note: There are two similar functions:
10911
- * - `removeQuotes` which removes only bounding quotes
10912
- * - `unwrapResult` which removes whole introduce sentence
10913
- *
10914
- * @param text optionally quoted text
10915
- * @returns text without quotes
10916
- * @public exported from `@promptbook/utils`
10917
- */
10918
- function removeQuotes(text) {
10919
- if (text.startsWith('"') && text.endsWith('"')) {
10920
- return text.slice(1, -1);
10921
- }
10922
- if (text.startsWith("'") && text.endsWith("'")) {
10923
- return text.slice(1, -1);
10924
- }
10925
- return text;
10926
- }
10927
-
10928
11405
  /**
10929
11406
  * Function `validateParameterName` will normalize and validate a parameter name for use in pipelines.
10930
11407
  * It removes diacritics, emojis, and quotes, normalizes to camelCase, and checks for reserved names and invalid characters.
@@ -12111,20 +12588,6 @@ function $applyToTaskJson(command, $taskJson, $pipelineJson) {
12111
12588
  persona.description += spaceTrim$1('\n\n' + personaDescription);
12112
12589
  }
12113
12590
 
12114
- /**
12115
- * Checks if the given value is a valid JavaScript identifier name.
12116
- *
12117
- * @param javascriptName The value to check for JavaScript identifier validity.
12118
- * @returns `true` if the value is a valid JavaScript name, false otherwise.
12119
- * @public exported from `@promptbook/utils`
12120
- */
12121
- function isValidJavascriptName(javascriptName) {
12122
- if (typeof javascriptName !== 'string') {
12123
- return false;
12124
- }
12125
- return /^[a-zA-Z_$][0-9a-zA-Z_$]*$/i.test(javascriptName);
12126
- }
12127
-
12128
12591
  /**
12129
12592
  * Parses the postprocess command
12130
12593
  *
@@ -13693,114 +14156,6 @@ function addAutoGeneratedSection(content, options) {
13693
14156
  * TODO: [šŸ›] This can be part of markdown builder
13694
14157
  */
13695
14158
 
13696
- /**
13697
- * Creates a Mermaid graph based on the promptbook
13698
- *
13699
- * Note: The result is not wrapped in a Markdown code block
13700
- *
13701
- * @public exported from `@promptbook/utils`
13702
- */
13703
- function renderPromptbookMermaid(pipelineJson, options) {
13704
- const { linkTask = () => null } = options || {};
13705
- const MERMAID_PREFIX = 'pipeline_';
13706
- const MERMAID_KNOWLEDGE_NAME = MERMAID_PREFIX + 'knowledge';
13707
- const MERMAID_RESERVED_NAME = MERMAID_PREFIX + 'reserved';
13708
- const MERMAID_INPUT_NAME = MERMAID_PREFIX + 'input';
13709
- const MERMAID_OUTPUT_NAME = MERMAID_PREFIX + 'output';
13710
- const parameterNameToTaskName = (parameterName) => {
13711
- if (parameterName === 'knowledge') {
13712
- return MERMAID_KNOWLEDGE_NAME;
13713
- }
13714
- else if (RESERVED_PARAMETER_NAMES.includes(parameterName)) {
13715
- return MERMAID_RESERVED_NAME;
13716
- }
13717
- const parameter = pipelineJson.parameters.find((parameter) => parameter.name === parameterName);
13718
- if (!parameter) {
13719
- throw new UnexpectedError(`Could not find {${parameterName}}`);
13720
- // <- TODO: This causes problems when {knowledge} and other reserved parameters are used
13721
- }
13722
- if (parameter.isInput) {
13723
- return MERMAID_INPUT_NAME;
13724
- }
13725
- const task = pipelineJson.tasks.find((task) => task.resultingParameterName === parameterName);
13726
- if (!task) {
13727
- throw new Error(`Could not find task for {${parameterName}}`);
13728
- }
13729
- return MERMAID_PREFIX + (task.name || normalizeTo_camelCase('task-' + titleToName(task.title)));
13730
- };
13731
- const inputAndIntermediateParametersMermaid = pipelineJson.tasks
13732
- .flatMap(({ title, dependentParameterNames, resultingParameterName }) => [
13733
- `${parameterNameToTaskName(resultingParameterName)}("${title}")`,
13734
- ...dependentParameterNames.map((dependentParameterName) => `${parameterNameToTaskName(dependentParameterName)}--"{${dependentParameterName}}"-->${parameterNameToTaskName(resultingParameterName)}`),
13735
- ])
13736
- .join('\n');
13737
- const outputParametersMermaid = pipelineJson.parameters
13738
- .filter(({ isOutput }) => isOutput)
13739
- .map(({ name }) => `${parameterNameToTaskName(name)}--"{${name}}"-->${MERMAID_OUTPUT_NAME}`)
13740
- .join('\n');
13741
- const linksMermaid = pipelineJson.tasks
13742
- .map((task) => {
13743
- const link = linkTask(task);
13744
- if (link === null) {
13745
- return '';
13746
- }
13747
- const { href, title } = link;
13748
- const taskName = parameterNameToTaskName(task.resultingParameterName);
13749
- return `click ${taskName} href "${href}" "${title}";`;
13750
- })
13751
- .filter((line) => line !== '')
13752
- .join('\n');
13753
- const interactionPointsMermaid = Object.entries({
13754
- [MERMAID_INPUT_NAME]: 'Input',
13755
- [MERMAID_OUTPUT_NAME]: 'Output',
13756
- [MERMAID_RESERVED_NAME]: 'Other',
13757
- [MERMAID_KNOWLEDGE_NAME]: 'Knowledge',
13758
- })
13759
- .filter(([MERMAID_NAME]) => (inputAndIntermediateParametersMermaid + outputParametersMermaid).includes(MERMAID_NAME))
13760
- .map(([MERMAID_NAME, title]) => `${MERMAID_NAME}((${title})):::${MERMAID_NAME}`)
13761
- .join('\n');
13762
- const promptbookMermaid = spaceTrim$2((block) => `
13763
-
13764
- %% šŸ”® Tip: Open this on GitHub or in the VSCode website to see the Mermaid graph visually
13765
-
13766
- flowchart LR
13767
- subgraph "${pipelineJson.title}"
13768
-
13769
- %% Basic configuration
13770
- direction TB
13771
-
13772
- %% Interaction points from pipeline to outside
13773
- ${block(interactionPointsMermaid)}
13774
-
13775
- %% Input and intermediate parameters
13776
- ${block(inputAndIntermediateParametersMermaid)}
13777
-
13778
-
13779
- %% Output parameters
13780
- ${block(outputParametersMermaid)}
13781
-
13782
- %% Links
13783
- ${block(linksMermaid)}
13784
-
13785
- %% Styles
13786
- classDef ${MERMAID_INPUT_NAME} color: grey;
13787
- classDef ${MERMAID_OUTPUT_NAME} color: grey;
13788
- classDef ${MERMAID_RESERVED_NAME} color: grey;
13789
- classDef ${MERMAID_KNOWLEDGE_NAME} color: grey;
13790
-
13791
- end;
13792
-
13793
- `);
13794
- return promptbookMermaid;
13795
- }
13796
- /**
13797
- * TODO: [🧠] FOREACH in mermaid graph
13798
- * TODO: [🧠] Knowledge in mermaid graph
13799
- * TODO: [🧠] Personas in mermaid graph
13800
- * TODO: Maybe use some Mermaid package instead of string templating
13801
- * TODO: [šŸ•Œ] When more than 2 functionalities, split into separate functions
13802
- */
13803
-
13804
14159
  /**
13805
14160
  * Prettyfies Promptbook string and adds Mermaid graph
13806
14161
  *
@@ -14361,64 +14716,6 @@ const $llmToolsRegister = new $Register('llm_execution_tools_constructors');
14361
14716
  * TODO: [Ā®] DRY Register logic
14362
14717
  */
14363
14718
 
14364
- /**
14365
- * Detects if the code is running in a browser environment in main thread (Not in a web worker)
14366
- *
14367
- * Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
14368
- *
14369
- * @public exported from `@promptbook/utils`
14370
- */
14371
- const $isRunningInBrowser = new Function(`
14372
- try {
14373
- return this === window;
14374
- } catch (e) {
14375
- return false;
14376
- }
14377
- `);
14378
- /**
14379
- * TODO: [šŸŽŗ]
14380
- */
14381
-
14382
- /**
14383
- * Detects if the code is running in a Node.js environment
14384
- *
14385
- * Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
14386
- *
14387
- * @public exported from `@promptbook/utils`
14388
- */
14389
- const $isRunningInNode = new Function(`
14390
- try {
14391
- return this === global;
14392
- } catch (e) {
14393
- return false;
14394
- }
14395
- `);
14396
- /**
14397
- * TODO: [šŸŽŗ]
14398
- */
14399
-
14400
- /**
14401
- * Detects if the code is running in a web worker
14402
- *
14403
- * Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
14404
- *
14405
- * @public exported from `@promptbook/utils`
14406
- */
14407
- const $isRunningInWebWorker = new Function(`
14408
- try {
14409
- if (typeof WorkerGlobalScope !== 'undefined' && self instanceof WorkerGlobalScope) {
14410
- return true;
14411
- } else {
14412
- return false;
14413
- }
14414
- } catch (e) {
14415
- return false;
14416
- }
14417
- `);
14418
- /**
14419
- * TODO: [šŸŽŗ]
14420
- */
14421
-
14422
14719
  /**
14423
14720
  * Creates a message with all registered LLM tools
14424
14721
  *
@@ -14652,18 +14949,6 @@ class MemoryStorage {
14652
14949
  }
14653
14950
  }
14654
14951
 
14655
- /**
14656
- * Simple wrapper `new Date().toISOString()`
14657
- *
14658
- * Note: `$` is used to indicate that this function is not a pure function - it is not deterministic because it depends on the current time
14659
- *
14660
- * @returns string_date branded type
14661
- * @public exported from `@promptbook/utils`
14662
- */
14663
- function $getCurrentDate() {
14664
- return new Date().toISOString();
14665
- }
14666
-
14667
14952
  /**
14668
14953
  * Intercepts LLM tools and counts total usage of the tools
14669
14954
  *
@@ -15290,17 +15575,17 @@ const OPENAI_MODELS = exportJson({
15290
15575
  },
15291
15576
  /**/
15292
15577
  /*/
15293
- {
15294
- modelTitle: 'tts-1-hd-1106',
15295
- modelName: 'tts-1-hd-1106',
15296
- },
15297
- /**/
15578
+ {
15579
+ modelTitle: 'tts-1-hd-1106',
15580
+ modelName: 'tts-1-hd-1106',
15581
+ },
15582
+ /**/
15298
15583
  /*/
15299
- {
15300
- modelTitle: 'tts-1-hd',
15301
- modelName: 'tts-1-hd',
15302
- },
15303
- /**/
15584
+ {
15585
+ modelTitle: 'tts-1-hd',
15586
+ modelName: 'tts-1-hd',
15587
+ },
15588
+ /**/
15304
15589
  /**/
15305
15590
  {
15306
15591
  modelVariant: 'CHAT',
@@ -16486,11 +16771,12 @@ class OpenAiExecutionTools extends OpenAiCompatibleExecutionTools {
16486
16771
  *
16487
16772
  * This is useful for calling OpenAI API with a single assistant, for more wide usage use `OpenAiExecutionTools`.
16488
16773
  *
16489
- * !!! Note: [šŸ¦–] There are several different things in Promptbook:
16774
+ * Note: [šŸ¦–] There are several different things in Promptbook:
16490
16775
  * - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
16491
16776
  * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
16492
16777
  * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
16493
16778
  * - `OpenAiAssistantExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities, recommended for usage in `Agent` or `AgentLlmExecutionTools`
16779
+ * - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
16494
16780
  *
16495
16781
  * @public exported from `@promptbook/openai`
16496
16782
  */
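A hedged sketch of the layering described above, using the streaming entry point added in this version (option names other than `assistantId` and `isVerbose` are assumptions, as is the prompt object shape):

```js
import { OpenAiAssistantExecutionTools } from '@promptbook/openai';

const assistantTools = new OpenAiAssistantExecutionTools({
    apiKey: process.env.OPENAI_API_KEY, // <- Assumed option name, inherited from the OpenAI tools
    assistantId: 'asst_…',              // <- Existing assistant to talk to
    isVerbose: true,
});

const result = await assistantTools.callChatModelStream(
    {
        content: 'Summarize the meeting notes.',     // <- Minimal, hypothetical prompt object
        modelRequirements: { modelVariant: 'CHAT' }, // <- Assumed field names
    },
    (chunk) => {
        process.stdout.write(chunk.content); // <- `textDelta` chunks as they stream in
    },
);

console.log(result); // <- Full result (content, usage, timing) once the stream completes
```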
@@ -16525,6 +16811,12 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
16525
16811
  * Calls OpenAI API to use a chat model.
16526
16812
  */
16527
16813
  async callChatModel(prompt) {
16814
+ return this.callChatModelStream(prompt, () => { });
16815
+ }
16816
+ /**
16817
+ * Calls OpenAI API to use a chat model with streaming.
16818
+ */
16819
+ async callChatModelStream(prompt, onProgress) {
16528
16820
  var _a, _b, _c;
16529
16821
  if (this.options.isVerbose) {
16530
16822
  console.info('šŸ’¬ OpenAI callChatModel call', { prompt });
@@ -16592,21 +16884,24 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
16592
16884
  console.info('connect', stream.currentEvent);
16593
16885
  }
16594
16886
  });
16595
- /*
16596
- stream.on('messageDelta', (messageDelta) => {
16597
- if (
16598
- this.options.isVerbose &&
16599
- messageDelta &&
16600
- messageDelta.content &&
16601
- messageDelta.content[0] &&
16602
- messageDelta.content[0].type === 'text'
16603
- ) {
16604
- console.info('messageDelta', messageDelta.content[0].text?.value);
16887
+ stream.on('textDelta', (textDelta, snapshot) => {
16888
+ if (this.options.isVerbose && textDelta.value) {
16889
+ console.info('textDelta', textDelta.value);
16605
16890
  }
16606
-
16607
- // <- TODO: [🐚] Make streaming and running tasks working
16891
+ const chunk = {
16892
+ content: textDelta.value || '',
16893
+ modelName: 'assistant',
16894
+ timing: {
16895
+ start,
16896
+ complete: $getCurrentDate(),
16897
+ },
16898
+ usage: UNCERTAIN_USAGE,
16899
+ rawPromptContent,
16900
+ rawRequest,
16901
+ rawResponse: snapshot,
16902
+ };
16903
+ onProgress(chunk);
16608
16904
  });
16609
- */
16610
16905
  stream.on('messageCreated', (message) => {
16611
16906
  if (this.options.isVerbose) {
16612
16907
  console.info('messageCreated', message);
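A sketch of how a caller might consume the streaming variant added above; the structural type below is a deliberately minimal stand-in for the real prompt and result types:

// Minimal structural stand-in for OpenAiAssistantExecutionTools.callChatModelStream
type ProgressChunk = { content: string; modelName: string };
declare const assistantTools: {
    callChatModelStream(
        prompt: unknown,
        onProgress: (chunk: ProgressChunk) => void,
    ): Promise<{ content: string }>;
};

const result = await assistantTools.callChatModelStream(
    { content: 'Hello!', modelRequirements: { modelVariant: 'CHAT' } }, // <- assumed minimal chat prompt
    (chunk) => {
        // Each textDelta of the assistant run arrives as one progress chunk
        process.stdout.write(chunk.content);
    },
);
console.log('\nFinal answer:', result.content);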
@@ -16642,7 +16937,7 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
16642
16937
  }
16643
16938
  return exportJson({
16644
16939
  name: 'promptResult',
16645
- message: `Result of \`OpenAiAssistantExecutionTools.callChatModel\``,
16940
+ message: `Result of \`OpenAiAssistantExecutionTools.callChatModelStream\``,
16646
16941
  order: [],
16647
16942
  value: {
16648
16943
  content: resultContent,
@@ -16661,15 +16956,19 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
16661
16956
  },
16662
16957
  });
16663
16958
  }
16664
- async playground() {
16959
+ /*
16960
+ public async playground() {
16665
16961
  const client = await this.getClient();
16962
+
16666
16963
  // List all assistants
16667
16964
  const assistants = await client.beta.assistants.list();
16668
16965
  console.log('!!! Assistants:', assistants);
16966
+
16669
16967
  // Get details of a specific assistant
16670
16968
  const assistantId = 'asst_MO8fhZf4dGloCfXSHeLcIik0';
16671
16969
  const assistant = await client.beta.assistants.retrieve(assistantId);
16672
16970
  console.log('!!! Assistant Details:', assistant);
16971
+
16673
16972
  // Update an assistant
16674
16973
  const updatedAssistant = await client.beta.assistants.update(assistantId, {
16675
16974
  name: assistant.name + '(M)',
@@ -16679,8 +16978,19 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
16679
16978
  },
16680
16979
  });
16681
16980
  console.log('!!! Updated Assistant:', updatedAssistant);
16981
+
16682
16982
  await forEver();
16683
16983
  }
16984
+ */
16985
+ /**
16986
+ * Gets an execution tools wrapper for an existing assistant
16987
+ */
16988
+ getAssistant(assistantId) {
16989
+ return new OpenAiAssistantExecutionTools({
16990
+ ...this.options,
16991
+ assistantId,
16992
+ });
16993
+ }
16684
16994
  async createNewAssistant(options) {
16685
16995
  if (!this.isCreatingNewAssistantsAllowed) {
16686
16996
  throw new NotAllowed(`Creating new assistants is not allowed. Set \`isCreatingNewAssistantsAllowed: true\` in options to enable this feature.`);
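The new `getAssistant` helper above makes it possible to reconnect to an assistant created in an earlier run instead of creating a fresh one each time; a short sketch (the assistant id is a placeholder):

import { OpenAiAssistantExecutionTools } from '@promptbook/openai';

declare const tools: OpenAiAssistantExecutionTools;

// Returns a new OpenAiAssistantExecutionTools bound to the existing assistant;
// nothing is created or changed on the OpenAI side
const existing = tools.getAssistant('asst_XXXXXXXXXXXXXXXX'); // <- placeholder id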
@@ -16766,9 +17076,98 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
16766
17076
  }
16767
17077
  const assistant = await client.beta.assistants.create(assistantConfig);
16768
17078
  console.log(`āœ… Assistant created: ${assistant.id}`);
16769
- // TODO: !!!! Try listing existing assistants
16770
- // TODO: !!!! Try marking existing assistants by DISCRIMINANT
16771
- // TODO: !!!! Allow to update and reconnect to existing assistants
17079
+ // TODO: [šŸ±ā€šŸš€] Try listing existing assistants
17080
+ // TODO: [šŸ±ā€šŸš€] Try marking existing assistants by DISCRIMINANT
17081
+ // TODO: [šŸ±ā€šŸš€] Allow to update and reconnect to existing assistants
17082
+ return new OpenAiAssistantExecutionTools({
17083
+ ...this.options,
17084
+ isCreatingNewAssistantsAllowed: false,
17085
+ assistantId: assistant.id,
17086
+ });
17087
+ }
17088
+ async updateAssistant(options) {
17089
+ if (!this.isCreatingNewAssistantsAllowed) {
17090
+ throw new NotAllowed(`Updating assistants is not allowed. Set \`isCreatingNewAssistantsAllowed: true\` in options to enable this feature.`);
17091
+ }
17092
+ const { assistantId, name, instructions, knowledgeSources } = options;
17093
+ const client = await this.getClient();
17094
+ let vectorStoreId;
17095
+ // If knowledge sources are provided, create a vector store with them
17096
+ // TODO: [🧠] Reuse vector store creation logic from createNewAssistant
17097
+ if (knowledgeSources && knowledgeSources.length > 0) {
17098
+ if (this.options.isVerbose) {
17099
+ console.info(`šŸ“š Creating vector store for update with ${knowledgeSources.length} knowledge sources...`);
17100
+ }
17101
+ // Create a vector store
17102
+ const vectorStore = await client.beta.vectorStores.create({
17103
+ name: `${name} Knowledge Base`,
17104
+ });
17105
+ vectorStoreId = vectorStore.id;
17106
+ if (this.options.isVerbose) {
17107
+ console.info(`āœ… Vector store created: ${vectorStoreId}`);
17108
+ }
17109
+ // Upload files from knowledge sources to the vector store
17110
+ const fileStreams = [];
17111
+ for (const source of knowledgeSources) {
17112
+ try {
17113
+ // Check if it's a URL
17114
+ if (source.startsWith('http://') || source.startsWith('https://')) {
17115
+ // Download the file
17116
+ const response = await fetch(source);
17117
+ if (!response.ok) {
17118
+ console.error(`Failed to download ${source}: ${response.statusText}`);
17119
+ continue;
17120
+ }
17121
+ const buffer = await response.arrayBuffer();
17122
+ const filename = source.split('/').pop() || 'downloaded-file';
17123
+ const blob = new Blob([buffer]);
17124
+ const file = new File([blob], filename);
17125
+ fileStreams.push(file);
17126
+ }
17127
+ else {
17128
+ // Assume it's a local file path
17129
+ // Note: This works in a Node.js environment
17130
+ // For browser environments, this would need different handling
17131
+ const fs = await import('fs');
17132
+ const fileStream = fs.createReadStream(source);
17133
+ fileStreams.push(fileStream);
17134
+ }
17135
+ }
17136
+ catch (error) {
17137
+ console.error(`Error processing knowledge source ${source}:`, error);
17138
+ }
17139
+ }
17140
+ // Batch upload files to the vector store
17141
+ if (fileStreams.length > 0) {
17142
+ try {
17143
+ await client.beta.vectorStores.fileBatches.uploadAndPoll(vectorStoreId, {
17144
+ files: fileStreams,
17145
+ });
17146
+ if (this.options.isVerbose) {
17147
+ console.info(`āœ… Uploaded ${fileStreams.length} files to vector store`);
17148
+ }
17149
+ }
17150
+ catch (error) {
17151
+ console.error('Error uploading files to vector store:', error);
17152
+ }
17153
+ }
17154
+ }
17155
+ const assistantUpdate = {
17156
+ name,
17157
+ instructions,
17158
+ tools: [/* TODO: [🧠] Maybe add { type: 'code_interpreter' }, */ { type: 'file_search' }],
17159
+ };
17160
+ if (vectorStoreId) {
17161
+ assistantUpdate.tool_resources = {
17162
+ file_search: {
17163
+ vector_store_ids: [vectorStoreId],
17164
+ },
17165
+ };
17166
+ }
17167
+ const assistant = await client.beta.assistants.update(assistantId, assistantUpdate);
17168
+ if (this.options.isVerbose) {
17169
+ console.log(`āœ… Assistant updated: ${assistant.id}`);
17170
+ }
16772
17171
  return new OpenAiAssistantExecutionTools({
16773
17172
  ...this.options,
16774
17173
  isCreatingNewAssistantsAllowed: false,
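A sketch of how the new `updateAssistant` might be called; the id, name, and knowledge source paths are placeholders, and the call is only permitted when `isCreatingNewAssistantsAllowed` is enabled:

import { OpenAiAssistantExecutionTools } from '@promptbook/openai';

declare const tools: OpenAiAssistantExecutionTools;

// Rewrites the assistant's name, instructions and knowledge base;
// https sources are downloaded, other entries are read as local files
const updated = await tools.updateAssistant({
    assistantId: 'asst_XXXXXXXXXXXXXXXX', // <- placeholder id
    name: 'Jane',
    instructions: 'You are Jane, a friendly support agent.',
    knowledgeSources: ['https://example.com/handbook.pdf', './knowledge/faq.md'],
});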
@@ -16807,11 +17206,12 @@ const DISCRIMINANT = 'OPEN_AI_ASSISTANT_V1';
16807
17206
  * Execution Tools for calling LLM models with a predefined agent "soul"
16808
17207
  * This wraps underlying LLM execution tools and applies agent-specific system prompts and requirements
16809
17208
  *
16810
- * !!! Note: [šŸ¦–] There are several different things in Promptbook:
17209
+ * Note: [šŸ¦–] There are several different things in Promptbook:
16811
17210
  * - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
16812
17211
  * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
16813
17212
  * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
16814
17213
  * - `OpenAiAssistantExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities, recommended for usage in `Agent` or `AgentLlmExecutionTools`
17214
+ * - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
16815
17215
  *
16816
17216
  * @public exported from `@promptbook/core`
16817
17217
  */
@@ -16905,9 +17305,12 @@ class AgentLlmExecutionTools {
16905
17305
  * Calls the chat model with agent-specific system prompt and requirements
16906
17306
  */
16907
17307
  async callChatModel(prompt) {
16908
- if (!this.options.llmTools.callChatModel) {
16909
- throw new Error('Underlying LLM execution tools do not support chat model calls');
16910
- }
17308
+ return this.callChatModelStream(prompt, () => { });
17309
+ }
17310
+ /**
17311
+ * Calls the chat model with agent-specific system prompt and requirements, with streaming
17312
+ */
17313
+ async callChatModelStream(prompt, onProgress) {
16911
17314
  // Ensure we're working with a chat prompt
16912
17315
  if (prompt.modelRequirements.modelVariant !== 'CHAT') {
16913
17316
  throw new Error('AgentLlmExecutionTools only supports chat prompts');
@@ -16916,27 +17319,58 @@ class AgentLlmExecutionTools {
16916
17319
  const chatPrompt = prompt;
16917
17320
  let underlyingLlmResult;
16918
17321
  if (OpenAiAssistantExecutionTools.isOpenAiAssistantExecutionTools(this.options.llmTools)) {
16919
- if (this.options.isVerbose) {
16920
- console.log(`Creating new OpenAI Assistant for agent ${this.title}...`);
17322
+ const requirementsHash = SHA256(JSON.stringify(modelRequirements)).toString();
17323
+ const cached = AgentLlmExecutionTools.assistantCache.get(this.title);
17324
+ let assistant;
17325
+ if (cached) {
17326
+ if (cached.requirementsHash === requirementsHash) {
17327
+ if (this.options.isVerbose) {
17328
+ console.log(`1ļøāƒ£ Using cached OpenAI Assistant for agent ${this.title}...`);
17329
+ }
17330
+ assistant = this.options.llmTools.getAssistant(cached.assistantId);
17331
+ }
17332
+ else {
17333
+ if (this.options.isVerbose) {
17334
+ console.log(`1ļøāƒ£ Updating OpenAI Assistant for agent ${this.title}...`);
17335
+ }
17336
+ assistant = await this.options.llmTools.updateAssistant({
17337
+ assistantId: cached.assistantId,
17338
+ name: this.title,
17339
+ instructions: modelRequirements.systemMessage,
17340
+ knowledgeSources: modelRequirements.knowledgeSources,
17341
+ });
17342
+ AgentLlmExecutionTools.assistantCache.set(this.title, {
17343
+ assistantId: assistant.assistantId,
17344
+ requirementsHash,
17345
+ });
17346
+ }
16921
17347
  }
16922
- // <- TODO: !!! Check also `isCreatingNewAssistantsAllowed` and warn about it
16923
- const assistant = await this.options.llmTools.createNewAssistant({
16924
- name: this.title,
16925
- instructions: modelRequirements.systemMessage,
16926
- knowledgeSources: modelRequirements.knowledgeSources,
16927
- /*
16928
- !!!
16929
- metadata: {
16930
- agentModelName: this.modelName,
17348
+ else {
17349
+ if (this.options.isVerbose) {
17350
+ console.log(`1ļøāƒ£ Creating new OpenAI Assistant for agent ${this.title}...`);
16931
17351
  }
16932
- */
16933
- });
16934
- // <- TODO: !!! Cache the assistant in prepareCache
16935
- underlyingLlmResult = await assistant.callChatModel(chatPrompt);
17352
+ // <- TODO: [šŸ±ā€šŸš€] Check also `isCreatingNewAssistantsAllowed` and warn about it
17353
+ assistant = await this.options.llmTools.createNewAssistant({
17354
+ name: this.title,
17355
+ instructions: modelRequirements.systemMessage,
17356
+ knowledgeSources: modelRequirements.knowledgeSources,
17357
+ /*
17358
+ !!!
17359
+ metadata: {
17360
+ agentModelName: this.modelName,
17361
+ }
17362
+ */
17363
+ });
17364
+ AgentLlmExecutionTools.assistantCache.set(this.title, {
17365
+ assistantId: assistant.assistantId,
17366
+ requirementsHash,
17367
+ });
17368
+ }
17369
+ underlyingLlmResult = await assistant.callChatModelStream(chatPrompt, onProgress);
16936
17370
  }
16937
17371
  else {
16938
17372
  if (this.options.isVerbose) {
16939
- console.log(`Creating Assistant ${this.title} on generic LLM execution tools...`);
17373
+ console.log(`2ļøāƒ£ Creating Assistant ${this.title} on generic LLM execution tools...`);
16940
17374
  }
16941
17375
  // Create modified chat prompt with agent system message
16942
17376
  const modifiedChatPrompt = {
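The assistant cache introduced above is keyed by the agent title and stores the assistant id together with a hash of the model requirements, so an assistant is reused while the requirements are unchanged and updated when they drift. A condensed sketch of that decision, with node:crypto standing in for the bundled SHA256 helper and hypothetical create/update helpers:

import { createHash } from 'node:crypto';

type CacheEntry = { assistantId: string; requirementsHash: string };
const assistantCache = new Map<string, CacheEntry>();

// Stand-in for SHA256(JSON.stringify(modelRequirements)).toString()
const hashRequirements = (modelRequirements: object) =>
    createHash('sha256').update(JSON.stringify(modelRequirements)).digest('hex');

async function resolveAssistant(title: string, modelRequirements: object): Promise<string> {
    const requirementsHash = hashRequirements(modelRequirements);
    const cached = assistantCache.get(title);

    if (cached && cached.requirementsHash === requirementsHash) {
        return cached.assistantId; // requirements unchanged -> reuse the cached assistant
    }

    // Stale cache entry -> update the assistant in place; no entry -> create a new one
    const assistantId = cached ? await updateAssistant(cached.assistantId) : await createAssistant();
    assistantCache.set(title, { assistantId, requirementsHash });
    return assistantId;
}

// Hypothetical helpers standing in for the updateAssistant / createNewAssistant calls above
declare function updateAssistant(assistantId: string): Promise<string>;
declare function createAssistant(): Promise<string>;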
@@ -16951,7 +17385,16 @@ class AgentLlmExecutionTools {
16951
17385
  : ''),
16952
17386
  },
16953
17387
  };
16954
- underlyingLlmResult = await this.options.llmTools.callChatModel(modifiedChatPrompt);
17388
+ if (this.options.llmTools.callChatModelStream) {
17389
+ underlyingLlmResult = await this.options.llmTools.callChatModelStream(modifiedChatPrompt, onProgress);
17390
+ }
17391
+ else if (this.options.llmTools.callChatModel) {
17392
+ underlyingLlmResult = await this.options.llmTools.callChatModel(modifiedChatPrompt);
17393
+ onProgress(underlyingLlmResult);
17394
+ }
17395
+ else {
17396
+ throw new Error('Underlying LLM execution tools do not support chat model calls');
17397
+ }
16955
17398
  }
16956
17399
  let content = underlyingLlmResult.content;
16957
17400
  // Note: Cleanup the AI artifacts from the content
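A condensed sketch of the streaming fallback added above: prefer the streaming call when the underlying tools provide it, otherwise fall back to the plain call and emit the whole result as a single progress event. The types are deliberately minimal stand-ins for the real prompt and result types:

type Chunk = { content: string };
type LlmTools = {
    callChatModelStream?(prompt: unknown, onProgress: (chunk: Chunk) => void): Promise<Chunk>;
    callChatModel?(prompt: unknown): Promise<Chunk>;
};

async function callWithOptionalStreaming(
    llmTools: LlmTools,
    prompt: unknown,
    onProgress: (chunk: Chunk) => void,
): Promise<Chunk> {
    if (llmTools.callChatModelStream) {
        // Preferred path: the provider streams progress chunks itself
        return llmTools.callChatModelStream(prompt, onProgress);
    } else if (llmTools.callChatModel) {
        // Fallback: run non-streaming and report the full result as one progress event
        const result = await llmTools.callChatModel(prompt);
        onProgress(result);
        return result;
    }
    throw new Error('Underlying LLM execution tools do not support chat model calls');
}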
@@ -16966,6 +17409,10 @@ class AgentLlmExecutionTools {
16966
17409
  return agentResult;
16967
17410
  }
16968
17411
  }
17412
+ /**
17413
+ * Cache of OpenAI Assistants (keyed by agent title) to avoid creating duplicates
17414
+ */
17415
+ AgentLlmExecutionTools.assistantCache = new Map();
16969
17416
  /**
16970
17417
  * TODO: [šŸš] Implement Destroyable pattern to free resources
16971
17418
  * TODO: [🧠] Adding parameter substitution support (here or should be responsibility of the underlying LLM Tools)
@@ -16974,15 +17421,28 @@ class AgentLlmExecutionTools {
16974
17421
  /**
16975
17422
  * Represents one AI Agent
16976
17423
  *
16977
- * !!! Note: [šŸ¦–] There are several different things in Promptbook:
17424
+ * Note: [šŸ¦–] There are several different things in Promptbook:
16978
17425
  * - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
16979
17426
  * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
16980
17427
  * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
16981
17428
  * - `OpenAiAssistantExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities, recommended for usage in `Agent` or `AgentLlmExecutionTools`
17429
+ * - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
16982
17430
  *
16983
17431
  * @public exported from `@promptbook/core`
16984
17432
  */
16985
17433
  class Agent extends AgentLlmExecutionTools {
17434
+ /**
17435
+ * Name of the agent
17436
+ */
17437
+ get agentName() {
17438
+ return this._agentName || createDefaultAgentName(this.agentSource.value);
17439
+ }
17440
+ /**
17441
+ * Computed hash of the agent source for integrity verification
17442
+ */
17443
+ get agentHash() {
17444
+ return computeAgentHash(this.agentSource.value);
17445
+ }
16986
17446
  /**
16987
17447
  * Not used in Agent, always returns empty array
16988
17448
  */
@@ -16996,34 +17456,40 @@ class Agent extends AgentLlmExecutionTools {
16996
17456
  super({
16997
17457
  isVerbose: options.isVerbose,
16998
17458
  llmTools: getSingleLlmExecutionTools(options.executionTools.llm),
16999
- agentSource: agentSource.value, // <- TODO: !!!! Allow to pass BehaviorSubject<string_book> OR refresh llmExecutionTools.callChat on agentSource change
17459
+ agentSource: agentSource.value, // <- TODO: [šŸ±ā€šŸš€] Allow to pass BehaviorSubject<string_book> OR refresh llmExecutionTools.callChat on agentSource change
17000
17460
  });
17001
- /**
17002
- * Name of the agent
17003
- */
17004
- this.agentName = null;
17461
+ this._agentName = undefined;
17005
17462
  /**
17006
17463
  * Description of the agent
17007
17464
  */
17008
17465
  this.personaDescription = null;
17466
+ /**
17467
+ * The initial message shown to the user when the chat starts
17468
+ */
17469
+ this.initialMessage = null;
17470
+ /**
17471
+ * Links found in the agent source
17472
+ */
17473
+ this.links = [];
17009
17474
  /**
17010
17475
  * Metadata like image or color
17011
17476
  */
17012
17477
  this.meta = {};
17013
- // TODO: !!!! Add `Agent` simple "mocked" learning by appending to agent source
17014
- // TODO: !!!! Add `Agent` learning by promptbookAgent
17478
+ // TODO: [šŸ±ā€šŸš€] Add `Agent` simple "mocked" learning by appending to agent source
17479
+ // TODO: [šŸ±ā€šŸš€] Add `Agent` learning by promptbookAgent
17015
17480
  this.agentSource = agentSource;
17016
17481
  this.agentSource.subscribe((source) => {
17017
- const { agentName, personaDescription, meta } = parseAgentSource(source);
17018
- this.agentName = agentName;
17482
+ const { agentName, personaDescription, initialMessage, links, meta } = parseAgentSource(source);
17483
+ this._agentName = agentName;
17019
17484
  this.personaDescription = personaDescription;
17485
+ this.initialMessage = initialMessage;
17486
+ this.links = links;
17020
17487
  this.meta = { ...this.meta, ...meta };
17021
17488
  });
17022
17489
  }
17023
17490
  }
17024
17491
  /**
17025
17492
  * TODO: [🧠][😰]Agent is not working with the parameters, should it be?
17026
- * TODO: !!! Agent on remote server
17027
17493
  */
17028
17494
 
17029
17495
  /**
@@ -17089,24 +17555,24 @@ const _AgentRegistration = $llmToolsRegister.register(createAgentLlmExecutionToo
17089
17555
  /**
17090
17556
  * Represents one AI Agent
17091
17557
  *
17092
- * !!! Note: [šŸ¦–] There are several different things in Promptbook:
17558
+ * Note: [šŸ¦–] There are several different things in Promptbook:
17093
17559
  * - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
17094
- * !!!! `RemoteAgent`
17095
17560
  * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
17096
17561
  * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
17097
17562
  * - `OpenAiAssistantExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities, recommended for usage in `Agent` or `AgentLlmExecutionTools`
17563
+ * - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
17098
17564
  *
17099
17565
  * @public exported from `@promptbook/core`
17100
17566
  */
17101
17567
  class RemoteAgent extends Agent {
17102
17568
  static async connect(options) {
17103
- console.log('!!!!!', `${options.agentUrl}/api/book`);
17569
+ console.log('[šŸ±ā€šŸš€]', `${options.agentUrl}/api/book`);
17104
17570
  const bookResponse = await fetch(`${options.agentUrl}/api/book`);
17105
- // <- TODO: !!!! What about closed-source agents?
17106
- // <- TODO: !!!! Maybe use promptbookFetch
17571
+ // <- TODO: [šŸ±ā€šŸš€] What about closed-source agents?
17572
+ // <- TODO: [šŸ±ā€šŸš€] Maybe use promptbookFetch
17107
17573
  const agentSourceValue = (await bookResponse.text());
17108
17574
  const agentSource = new BehaviorSubject(agentSourceValue);
17109
- // <- TODO: !!!!!! Support updating
17575
+ // <- TODO: [šŸ±ā€šŸš€] Support updating and self-updating
17110
17576
  return new RemoteAgent({
17111
17577
  ...options,
17112
17578
  executionTools: {
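A sketch of connecting to a remote agent with the static `connect` above; the URL is a placeholder and the `isVerbose` flag is assumed to be accepted the same way as by the other tools:

import { RemoteAgent } from '@promptbook/core';

// `connect` downloads the agent source from `${agentUrl}/api/book`
// and returns a RemoteAgent that chats against that server
const agent = await RemoteAgent.connect({
    agentUrl: 'https://agents.example.com/my-agent', // <- placeholder URL
    isVerbose: true, // <- assumption
});
console.log(agent.agentName, agent.personaDescription);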
@@ -17133,13 +17599,29 @@ class RemoteAgent extends Agent {
17133
17599
  * Calls the agent on the remote agents server
17134
17600
  */
17135
17601
  async callChatModel(prompt) {
17602
+ return this.callChatModelStream(prompt, () => { });
17603
+ }
17604
+ /**
17605
+ * Calls the agent on the remote agents server, with streaming
17606
+ */
17607
+ async callChatModelStream(prompt, onProgress) {
17136
17608
  // Ensure we're working with a chat prompt
17137
17609
  if (prompt.modelRequirements.modelVariant !== 'CHAT') {
17138
17610
  throw new Error('Agents only support chat prompts');
17139
17611
  }
17140
- const bookResponse = await fetch(`${this.agentUrl}/api/chat?message=${encodeURIComponent(prompt.content)}`);
17141
- // <- TODO: !!!! What about closed-source agents?
17142
- // <- TODO: !!!! Maybe use promptbookFetch
17612
+ const chatPrompt = prompt;
17613
+ const bookResponse = await fetch(`${this.agentUrl}/api/chat`, {
17614
+ method: 'POST',
17615
+ headers: {
17616
+ 'Content-Type': 'application/json',
17617
+ },
17618
+ body: JSON.stringify({
17619
+ message: prompt.content,
17620
+ thread: chatPrompt.thread,
17621
+ }),
17622
+ });
17623
+ // <- TODO: [šŸ±ā€šŸš€] What about closed-source agents?
17624
+ // <- TODO: [šŸ±ā€šŸš€] Maybe use promptbookFetch
17143
17625
  let content = '';
17144
17626
  if (!bookResponse.body) {
17145
17627
  content = await bookResponse.text();
@@ -17158,16 +17640,37 @@ class RemoteAgent extends Agent {
17158
17640
  const textChunk = decoder.decode(value, { stream: true });
17159
17641
  // console.debug('RemoteAgent chunk:', textChunk);
17160
17642
  content += textChunk;
17643
+ onProgress({
17644
+ content,
17645
+ modelName: this.modelName,
17646
+ timing: {},
17647
+ usage: {},
17648
+ rawPromptContent: {},
17649
+ rawRequest: {},
17650
+ rawResponse: {},
17651
+ });
17161
17652
  }
17162
17653
  }
17163
17654
  // Flush any remaining decoder internal state
17164
- content += decoder.decode();
17655
+ const lastChunk = decoder.decode();
17656
+ if (lastChunk) {
17657
+ content += lastChunk;
17658
+ onProgress({
17659
+ content: lastChunk,
17660
+ modelName: this.modelName,
17661
+ timing: {},
17662
+ usage: {},
17663
+ rawPromptContent: {},
17664
+ rawRequest: {},
17665
+ rawResponse: {},
17666
+ });
17667
+ }
17165
17668
  }
17166
17669
  finally {
17167
17670
  reader.releaseLock();
17168
17671
  }
17169
17672
  }
17170
- // <- TODO: !!!!!!!! Transfer metadata
17673
+ // <- TODO: [šŸ±ā€šŸš€] Transfer metadata
17171
17674
  const agentResult = {
17172
17675
  content,
17173
17676
  modelName: this.modelName,
@@ -17176,7 +17679,7 @@ class RemoteAgent extends Agent {
17176
17679
  rawPromptContent: {},
17177
17680
  rawRequest: {},
17178
17681
  rawResponse: {},
17179
- // <- TODO: !!!!!!!! Transfer and proxy the metadata
17682
+ // <- TODO: [šŸ±ā€šŸš€] Transfer and proxy the metadata
17180
17683
  };
17181
17684
  return agentResult;
17182
17685
  }
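A sketch of chatting through the streaming call above: the prompt is POSTed to `${agentUrl}/api/chat` as `{ message, thread }` and the plain-text response body is streamed back. The structural type is a minimal stand-in, and the handler only reports sizes because the progress chunks here are intentionally loose:

// Minimal structural stand-in for RemoteAgent.callChatModelStream
declare const agent: {
    callChatModelStream(
        prompt: unknown,
        onProgress: (chunk: { content: string }) => void,
    ): Promise<{ content: string }>;
};

const result = await agent.callChatModelStream(
    { content: 'What can you help me with?', modelRequirements: { modelVariant: 'CHAT' } }, // <- assumed minimal prompt
    (chunk) => {
        console.log(`progress: ${chunk.content.length} characters received`);
    },
);
console.log(result.content);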
@@ -17307,24 +17810,6 @@ const _AzureOpenAiMetadataRegistration = $llmToolsMetadataRegister.register({
17307
17810
  * Note: [šŸ’ž] Ignore a discrepancy between file name and entity name
17308
17811
  */
17309
17812
 
17310
- /**
17311
- * Detects if the code is running in jest environment
17312
- *
17313
- * Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
17314
- *
17315
- * @public exported from `@promptbook/utils`
17316
- */
17317
- const $isRunningInJest = new Function(`
17318
- try {
17319
- return process.env.JEST_WORKER_ID !== undefined;
17320
- } catch (e) {
17321
- return false;
17322
- }
17323
- `);
17324
- /**
17325
- * TODO: [šŸŽŗ]
17326
- */
17327
-
17328
17813
  /**
17329
17814
  * Registration of LLM provider metadata
17330
17815
  *
@@ -17677,61 +18162,6 @@ function isValidPipelineString(pipelineString) {
17677
18162
  * TODO: [🧠][🈓] Where is the best location for this file
17678
18163
  */
17679
18164
 
17680
- /**
17681
- * Tag function for notating a prompt as template literal
17682
- *
17683
- * Note: There are 3 similar functions:
17684
- * 1) `prompt` for notating single prompt exported from `@promptbook/utils`
17685
- * 2) `promptTemplate` alias for `prompt`
17686
- * 3) `book` for notating and validating entire books exported from `@promptbook/utils`
17687
- *
17688
- * @param strings
17689
- * @param values
17690
- * @returns the prompt string
17691
- * @public exported from `@promptbook/utils`
17692
- */
17693
- function prompt(strings, ...values) {
17694
- if (values.length === 0) {
17695
- return spaceTrim$1(strings.join(''));
17696
- }
17697
- const stringsWithHiddenParameters = strings.map((stringsItem) =>
17698
- // TODO: [0] DRY
17699
- stringsItem.split('{').join(`${REPLACING_NONCE}beginbracket`).split('}').join(`${REPLACING_NONCE}endbracket`));
17700
- const placeholderParameterNames = values.map((value, i) => `${REPLACING_NONCE}${i}`);
17701
- const parameters = Object.fromEntries(values.map((value, i) => [placeholderParameterNames[i], value]));
17702
- // Combine strings and values
17703
- let pipelineString = stringsWithHiddenParameters.reduce((result, stringsItem, i) => placeholderParameterNames[i] === undefined
17704
- ? `${result}${stringsItem}`
17705
- : `${result}${stringsItem}{${placeholderParameterNames[i]}}`, '');
17706
- pipelineString = spaceTrim$1(pipelineString);
17707
- try {
17708
- pipelineString = templateParameters(pipelineString, parameters);
17709
- }
17710
- catch (error) {
17711
- if (!(error instanceof PipelineExecutionError)) {
17712
- throw error;
17713
- }
17714
- console.error({ pipelineString, parameters, placeholderParameterNames, error });
17715
- throw new UnexpectedError(spaceTrim$1((block) => `
17716
- Internal error in prompt template literal
17717
-
17718
- ${block(JSON.stringify({ strings, values }, null, 4))}}
17719
-
17720
- `));
17721
- }
17722
- // TODO: [0] DRY
17723
- pipelineString = pipelineString
17724
- .split(`${REPLACING_NONCE}beginbracket`)
17725
- .join('{')
17726
- .split(`${REPLACING_NONCE}endbracket`)
17727
- .join('}');
17728
- return pipelineString;
17729
- }
17730
- /**
17731
- * TODO: [🧠][🈓] Where is the best location for this file
17732
- * Note: [šŸ’ž] Ignore a discrepancy between file name and entity name
17733
- */
17734
-
17735
18165
  /**
17736
18166
  * Tag function for notating a pipeline with a book\`...\ notation as template literal
17737
18167
  *
@@ -18267,7 +18697,7 @@ const OpenAiSdkTranspiler = {
18267
18697
  });
18268
18698
 
18269
18699
  const answer = response.choices[0].message.content;
18270
- console.log('\\n🧠 ${agentName}:', answer, '\\n');
18700
+ console.log('\\n🧠 ${agentName /* <- TODO: [šŸ•›] There should be `agentFullname` not `agentName` */}:', answer, '\\n');
18271
18701
 
18272
18702
  chatHistory.push({ role: 'assistant', content: answer });
18273
18703
  promptUser();
@@ -18286,7 +18716,7 @@ const OpenAiSdkTranspiler = {
18286
18716
 
18287
18717
  (async () => {
18288
18718
  await setupKnowledge();
18289
- console.log("šŸ¤– Chat with ${agentName} (type 'exit' to quit)\\n");
18719
+ console.log("šŸ¤– Chat with ${agentName /* <- TODO: [šŸ•›] There should be `agentFullname` not `agentName` */} (type 'exit' to quit)\\n");
18290
18720
  promptUser();
18291
18721
  })();
18292
18722
  `);
@@ -18333,7 +18763,7 @@ const OpenAiSdkTranspiler = {
18333
18763
  });
18334
18764
 
18335
18765
  const answer = response.choices[0].message.content;
18336
- console.log('\\n🧠 ${agentName}:', answer, '\\n');
18766
+ console.log('\\n🧠 ${agentName /* <- TODO: [šŸ•›] There should be `agentFullname` not `agentName` */}:', answer, '\\n');
18337
18767
 
18338
18768
  chatHistory.push({ role: 'assistant', content: answer });
18339
18769
  promptUser();
@@ -18350,7 +18780,7 @@ const OpenAiSdkTranspiler = {
18350
18780
  });
18351
18781
  }
18352
18782
 
18353
- console.log("šŸ¤– Chat with ${agentName} (type 'exit' to quit)\\n");
18783
+ console.log("šŸ¤– Chat with ${agentName /* <- TODO: [šŸ•›] There should be `agentFullname` not `agentName` */} (type 'exit' to quit)\\n");
18354
18784
  promptUser();
18355
18785
 
18356
18786
  `);
@@ -18358,25 +18788,6 @@ const OpenAiSdkTranspiler = {
18358
18788
  },
18359
18789
  };
18360
18790
 
18361
- /**
18362
- * Returns information about the current runtime environment
18363
- *
18364
- * Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environments
18365
- *
18366
- * @public exported from `@promptbook/utils`
18367
- */
18368
- function $detectRuntimeEnvironment() {
18369
- return {
18370
- isRunningInBrowser: $isRunningInBrowser(),
18371
- isRunningInJest: $isRunningInJest(),
18372
- isRunningInNode: $isRunningInNode(),
18373
- isRunningInWebWorker: $isRunningInWebWorker(),
18374
- };
18375
- }
18376
- /**
18377
- * TODO: [šŸŽŗ] Also detect and report node version here
18378
- */
18379
-
18380
18791
  /**
18381
18792
  * Provide information about Promptbook, engine version, book language version, servers, ...
18382
18793
  *
@@ -18555,7 +18966,7 @@ function $generateBookBoilerplate(options) {
18555
18966
  const agentSource = validateBook(spaceTrim$1((block) => `
18556
18967
  ${agentName}
18557
18968
 
18558
- META COLOR ${color || '#3498db' /* <- TODO: !!!! Best default color */}
18969
+ META COLOR ${color || '#3498db' /* <- TODO: [🧠] [šŸ±ā€šŸš€] Best default color */}
18559
18970
  PERSONA ${block(personaDescription)}
18560
18971
  `));
18561
18972
  return agentSource;
@@ -18564,5 +18975,5 @@ function $generateBookBoilerplate(options) {
18564
18975
  * TODO: [🤶] Maybe export through `@promptbook/utils` or `@promptbook/random` package
18565
18976
  */
18566
18977
 
18567
- export { $bookTranspilersRegister, $generateBookBoilerplate, $llmToolsMetadataRegister, $llmToolsRegister, $scrapersMetadataRegister, $scrapersRegister, ADMIN_EMAIL, ADMIN_GITHUB_NAME, API_REQUEST_TIMEOUT, AbstractFormatError, Agent, AgentCollectionInSupabase, AgentLlmExecutionTools, AuthenticationError, BIG_DATASET_TRESHOLD, BOOK_LANGUAGE_VERSION, BlackholeStorage, BoilerplateError, BoilerplateFormfactorDefinition, CLAIM, CLI_APP_ID, CallbackInterfaceTools, ChatbotFormfactorDefinition, CollectionError, CompletionFormfactorDefinition, CsvFormatError, CsvFormatParser, DEFAULT_AGENTS_DIRNAME, DEFAULT_BOOK, DEFAULT_BOOKS_DIRNAME, DEFAULT_BOOK_OUTPUT_PARAMETER_NAME, DEFAULT_BOOK_TITLE, DEFAULT_CSV_SETTINGS, DEFAULT_DOWNLOAD_CACHE_DIRNAME, DEFAULT_EXECUTION_CACHE_DIRNAME, DEFAULT_GET_PIPELINE_COLLECTION_FUNCTION_NAME, DEFAULT_INTERMEDIATE_FILES_STRATEGY, DEFAULT_IS_AUTO_INSTALLED, DEFAULT_IS_VERBOSE, DEFAULT_MAX_EXECUTION_ATTEMPTS, DEFAULT_MAX_FILE_SIZE, DEFAULT_MAX_KNOWLEDGE_SOURCES_SCRAPING_DEPTH, DEFAULT_MAX_KNOWLEDGE_SOURCES_SCRAPING_TOTAL, DEFAULT_MAX_PARALLEL_COUNT, DEFAULT_MAX_REQUESTS_PER_MINUTE, DEFAULT_PIPELINE_COLLECTION_BASE_FILENAME, DEFAULT_PROMPT_TASK_TITLE, DEFAULT_REMOTE_SERVER_URL, DEFAULT_SCRAPE_CACHE_DIRNAME, DEFAULT_TASK_SIMULATED_DURATION_MS, DEFAULT_TASK_TITLE, DatabaseError, EXPECTATION_UNITS, EnvironmentMismatchError, ExecutionReportStringOptionsDefaults, ExpectError, FAILED_VALUE_PLACEHOLDER, FORMFACTOR_DEFINITIONS, FormattedBookInMarkdownTranspiler, GENERIC_PIPELINE_INTERFACE, GeneratorFormfactorDefinition, GenericFormfactorDefinition, ImageGeneratorFormfactorDefinition, KnowledgeScrapeError, LimitReachedError, MANDATORY_CSV_SETTINGS, MAX_FILENAME_LENGTH, MODEL_ORDERS, MODEL_TRUST_LEVELS, MODEL_VARIANTS, MatcherFormfactorDefinition, MemoryStorage, MissingToolsError, MultipleLlmExecutionTools, NAME, NonTaskSectionTypes, NotAllowed, NotFoundError, NotYetImplementedCommitmentDefinition, NotYetImplementedError, ORDER_OF_PIPELINE_JSON, OpenAiSdkTranspiler, PADDING_LINES, PENDING_VALUE_PLACEHOLDER, PLAYGROUND_APP_ID, PROMPTBOOK_CHAT_COLOR, PROMPTBOOK_COLOR, PROMPTBOOK_ENGINE_VERSION, PROMPTBOOK_ERRORS, PROMPTBOOK_LOGO_URL, PROMPTBOOK_SYNTAX_COLORS, ParseError, PipelineExecutionError, PipelineLogicError, PipelineUrlError, PrefixStorage, PromptbookFetchError, REMOTE_SERVER_URLS, RESERVED_PARAMETER_NAMES, RemoteAgent, SET_IS_VERBOSE, SectionTypes, SheetsFormfactorDefinition, TaskTypes, TextFormatParser, TranslatorFormfactorDefinition, UNCERTAIN_USAGE, UNCERTAIN_ZERO_VALUE, USER_CHAT_COLOR, UnexpectedError, WrappedError, ZERO_USAGE, ZERO_VALUE, _AgentMetadata, _AgentRegistration, _AnthropicClaudeMetadataRegistration, _AzureOpenAiMetadataRegistration, _BoilerplateScraperMetadataRegistration, _DeepseekMetadataRegistration, _DocumentScraperMetadataRegistration, _GoogleMetadataRegistration, _LegacyDocumentScraperMetadataRegistration, _MarkdownScraperMetadataRegistration, _MarkitdownScraperMetadataRegistration, _OllamaMetadataRegistration, _OpenAiAssistantMetadataRegistration, _OpenAiCompatibleMetadataRegistration, _OpenAiMetadataRegistration, _PdfScraperMetadataRegistration, _WebsiteScraperMetadataRegistration, aboutPromptbookInformation, addUsage, book, cacheLlmTools, compilePipeline, computeCosineSimilarity, countUsage, createAgentLlmExecutionTools, createAgentModelRequirements, createAgentModelRequirementsWithCommitments, createBasicAgentModelRequirements, createEmptyAgentModelRequirements, createLlmToolsFromConfiguration, createPipelineCollectionFromJson, 
createPipelineCollectionFromPromise, createPipelineCollectionFromUrl, createPipelineExecutor, createPipelineSubcollection, embeddingVectorToString, executionReportJsonToString, extractParameterNamesFromTask, filterModels, generatePlaceholderAgentProfileImageUrl, getAllCommitmentDefinitions, getAllCommitmentTypes, getCommitmentDefinition, getPipelineInterface, getSingleLlmExecutionTools, identificationToPromptbookToken, isCommitmentSupported, isPassingExpectations, isPipelineImplementingInterface, isPipelineInterfacesEqual, isPipelinePrepared, isValidBook, isValidPipelineString, joinLlmExecutionTools, limitTotalUsage, makeKnowledgeSourceHandler, migratePipeline, padBook, parseAgentSource, parseParameters, parsePipeline, pipelineCollectionToJson, pipelineJsonToString, prepareKnowledgePieces, preparePersona, preparePipeline, prettifyPipelineString, promptbookFetch, promptbookTokenToIdentification, unpreparePipeline, usageToHuman, usageToWorktime, validateBook, validatePipeline, validatePipelineString };
18978
+ export { $bookTranspilersRegister, $generateBookBoilerplate, $llmToolsMetadataRegister, $llmToolsRegister, $scrapersMetadataRegister, $scrapersRegister, ADMIN_EMAIL, ADMIN_GITHUB_NAME, API_REQUEST_TIMEOUT, AbstractFormatError, Agent, AgentCollectionInSupabase, AgentLlmExecutionTools, AuthenticationError, BIG_DATASET_TRESHOLD, BOOK_LANGUAGE_VERSION, BlackholeStorage, BoilerplateError, BoilerplateFormfactorDefinition, CLAIM, CLI_APP_ID, CallbackInterfaceTools, ChatbotFormfactorDefinition, CollectionError, CompletionFormfactorDefinition, CsvFormatError, CsvFormatParser, DEFAULT_AGENTS_DIRNAME, DEFAULT_BOOK, DEFAULT_BOOKS_DIRNAME, DEFAULT_BOOK_OUTPUT_PARAMETER_NAME, DEFAULT_BOOK_TITLE, DEFAULT_CSV_SETTINGS, DEFAULT_DOWNLOAD_CACHE_DIRNAME, DEFAULT_EXECUTION_CACHE_DIRNAME, DEFAULT_GET_PIPELINE_COLLECTION_FUNCTION_NAME, DEFAULT_INTERMEDIATE_FILES_STRATEGY, DEFAULT_IS_AUTO_INSTALLED, DEFAULT_IS_VERBOSE, DEFAULT_MAX_EXECUTION_ATTEMPTS, DEFAULT_MAX_FILE_SIZE, DEFAULT_MAX_KNOWLEDGE_SOURCES_SCRAPING_DEPTH, DEFAULT_MAX_KNOWLEDGE_SOURCES_SCRAPING_TOTAL, DEFAULT_MAX_PARALLEL_COUNT, DEFAULT_MAX_REQUESTS_PER_MINUTE, DEFAULT_PIPELINE_COLLECTION_BASE_FILENAME, DEFAULT_PROMPT_TASK_TITLE, DEFAULT_REMOTE_SERVER_URL, DEFAULT_SCRAPE_CACHE_DIRNAME, DEFAULT_TASK_SIMULATED_DURATION_MS, DEFAULT_TASK_TITLE, DatabaseError, EXPECTATION_UNITS, EnvironmentMismatchError, ExecutionReportStringOptionsDefaults, ExpectError, FAILED_VALUE_PLACEHOLDER, FORMFACTOR_DEFINITIONS, FormattedBookInMarkdownTranspiler, GENERIC_PIPELINE_INTERFACE, GeneratorFormfactorDefinition, GenericFormfactorDefinition, ImageGeneratorFormfactorDefinition, KnowledgeScrapeError, LimitReachedError, MANDATORY_CSV_SETTINGS, MAX_FILENAME_LENGTH, MODEL_ORDERS, MODEL_TRUST_LEVELS, MODEL_VARIANTS, MatcherFormfactorDefinition, MemoryStorage, MissingToolsError, MultipleLlmExecutionTools, NAME, NonTaskSectionTypes, NotAllowed, NotFoundError, NotYetImplementedCommitmentDefinition, NotYetImplementedError, ORDER_OF_PIPELINE_JSON, OpenAiSdkTranspiler, PADDING_LINES, PENDING_VALUE_PLACEHOLDER, PLAYGROUND_APP_ID, PROMPTBOOK_CHAT_COLOR, PROMPTBOOK_COLOR, PROMPTBOOK_ENGINE_VERSION, PROMPTBOOK_ERRORS, PROMPTBOOK_LOGO_URL, PROMPTBOOK_SYNTAX_COLORS, ParseError, PipelineExecutionError, PipelineLogicError, PipelineUrlError, PrefixStorage, PromptbookFetchError, REMOTE_SERVER_URLS, RESERVED_PARAMETER_NAMES, RemoteAgent, SET_IS_VERBOSE, SectionTypes, SheetsFormfactorDefinition, TaskTypes, TextFormatParser, TranslatorFormfactorDefinition, UNCERTAIN_USAGE, UNCERTAIN_ZERO_VALUE, USER_CHAT_COLOR, UnexpectedError, WrappedError, ZERO_USAGE, ZERO_VALUE, _AgentMetadata, _AgentRegistration, _AnthropicClaudeMetadataRegistration, _AzureOpenAiMetadataRegistration, _BoilerplateScraperMetadataRegistration, _DeepseekMetadataRegistration, _DocumentScraperMetadataRegistration, _GoogleMetadataRegistration, _LegacyDocumentScraperMetadataRegistration, _MarkdownScraperMetadataRegistration, _MarkitdownScraperMetadataRegistration, _OllamaMetadataRegistration, _OpenAiAssistantMetadataRegistration, _OpenAiCompatibleMetadataRegistration, _OpenAiMetadataRegistration, _PdfScraperMetadataRegistration, _WebsiteScraperMetadataRegistration, aboutPromptbookInformation, addUsage, book, cacheLlmTools, compilePipeline, computeAgentHash, computeCosineSimilarity, countUsage, createAgentLlmExecutionTools, createAgentModelRequirements, createAgentModelRequirementsWithCommitments, createBasicAgentModelRequirements, createDefaultAgentName, createEmptyAgentModelRequirements, createLlmToolsFromConfiguration, 
createPipelineCollectionFromJson, createPipelineCollectionFromPromise, createPipelineCollectionFromUrl, createPipelineExecutor, createPipelineSubcollection, embeddingVectorToString, executionReportJsonToString, extractParameterNamesFromTask, filterModels, generatePlaceholderAgentProfileImageUrl, getAllCommitmentDefinitions, getAllCommitmentTypes, getCommitmentDefinition, getPipelineInterface, getSingleLlmExecutionTools, identificationToPromptbookToken, isCommitmentSupported, isPassingExpectations, isPipelineImplementingInterface, isPipelineInterfacesEqual, isPipelinePrepared, isValidBook, isValidPipelineString, joinLlmExecutionTools, limitTotalUsage, makeKnowledgeSourceHandler, migratePipeline, normalizeAgentName, padBook, parseAgentSource, parseParameters, parsePipeline, pipelineCollectionToJson, pipelineJsonToString, prepareKnowledgePieces, preparePersona, preparePipeline, prettifyPipelineString, promptbookFetch, promptbookTokenToIdentification, unpreparePipeline, usageToHuman, usageToWorktime, validateBook, validatePipeline, validatePipelineString };
18568
18979
  //# sourceMappingURL=index.es.js.map