@promptbook/core 0.103.0-48 → 0.103.0-49

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37) hide show
  1. package/esm/index.es.js +527 -337
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/servers.d.ts +1 -0
  4. package/esm/typings/src/_packages/types.index.d.ts +2 -0
  5. package/esm/typings/src/_packages/utils.index.d.ts +2 -0
  6. package/esm/typings/src/book-2.0/agent-source/AgentBasicInformation.d.ts +12 -2
  7. package/esm/typings/src/collection/agent-collection/constructors/agent-collection-in-supabase/AgentCollectionInSupabase.d.ts +14 -8
  8. package/esm/typings/src/collection/agent-collection/constructors/agent-collection-in-supabase/AgentCollectionInSupabaseOptions.d.ts +10 -0
  9. package/esm/typings/src/commitments/MESSAGE/InitialMessageCommitmentDefinition.d.ts +28 -0
  10. package/esm/typings/src/commitments/index.d.ts +2 -1
  11. package/esm/typings/src/config.d.ts +1 -0
  12. package/esm/typings/src/errors/DatabaseError.d.ts +2 -2
  13. package/esm/typings/src/errors/WrappedError.d.ts +2 -2
  14. package/esm/typings/src/execution/ExecutionTask.d.ts +2 -2
  15. package/esm/typings/src/execution/LlmExecutionTools.d.ts +6 -1
  16. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForWizardOrCli.d.ts +2 -2
  17. package/esm/typings/src/llm-providers/agent/Agent.d.ts +11 -3
  18. package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.d.ts +6 -1
  19. package/esm/typings/src/llm-providers/agent/RemoteAgent.d.ts +6 -2
  20. package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionTools.d.ts +6 -1
  21. package/esm/typings/src/remote-server/startAgentServer.d.ts +2 -2
  22. package/esm/typings/src/utils/color/Color.d.ts +7 -0
  23. package/esm/typings/src/utils/color/Color.test.d.ts +1 -0
  24. package/esm/typings/src/utils/environment/$getGlobalScope.d.ts +2 -2
  25. package/esm/typings/src/utils/misc/computeHash.d.ts +11 -0
  26. package/esm/typings/src/utils/misc/computeHash.test.d.ts +1 -0
  27. package/esm/typings/src/utils/organization/$sideEffect.d.ts +2 -2
  28. package/esm/typings/src/utils/organization/$side_effect.d.ts +2 -2
  29. package/esm/typings/src/utils/organization/TODO_USE.d.ts +2 -2
  30. package/esm/typings/src/utils/organization/keepUnused.d.ts +2 -2
  31. package/esm/typings/src/utils/organization/preserve.d.ts +3 -3
  32. package/esm/typings/src/utils/organization/really_any.d.ts +7 -0
  33. package/esm/typings/src/utils/serialization/asSerializable.d.ts +2 -2
  34. package/esm/typings/src/version.d.ts +1 -1
  35. package/package.json +1 -1
  36. package/umd/index.umd.js +527 -337
  37. package/umd/index.umd.js.map +1 -1
package/umd/index.umd.js CHANGED
@@ -28,140 +28,12 @@
28
28
  * @generated
29
29
  * @see https://github.com/webgptorg/promptbook
30
30
  */
31
- const PROMPTBOOK_ENGINE_VERSION = '0.103.0-48';
31
+ const PROMPTBOOK_ENGINE_VERSION = '0.103.0-49';
32
32
  /**
33
33
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
34
34
  * Note: [💞] Ignore a discrepancy between file name and entity name
35
35
  */
36
36
 
37
- /**
38
- * Computes SHA-256 hash of the agent source
39
- *
40
- * @public exported from `@promptbook/core`
41
- */
42
- function computeAgentHash(agentSource) {
43
- return cryptoJs.SHA256(hexEncoder__default["default"].parse(agentSource /* <- TODO: !!!!! spaceTrim */)).toString( /* hex */);
44
- }
45
-
46
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> 
{knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just 
title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModels",description:"List of available model names together with their descriptions as JSON",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelsRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n```json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpful assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n```\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models 
first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n```json\n{availableModels}\n```\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelsRequirements",format:"JSON",dependentParameterNames:["availableModels","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModels}` List of available model names together with their descriptions as JSON\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelsRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n\\`\\`\\`json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpful assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are 
the available models:\n\n\\`\\`\\`json\n{availableModels}\n\\`\\`\\`\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelsRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- 
Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
47
-
48
- /**
49
- * Checks if value is valid email
50
- *
51
- * @public exported from `@promptbook/utils`
52
- */
53
- function isValidEmail(email) {
54
- if (typeof email !== 'string') {
55
- return false;
56
- }
57
- if (email.split('\n').length > 1) {
58
- return false;
59
- }
60
- return /^.+@.+\..+$/.test(email);
61
- }
62
-
63
- /**
64
- * Tests if given string is valid file path.
65
- *
66
- * Note: This does not check if the file exists only if the path is valid
67
- * @public exported from `@promptbook/utils`
68
- */
69
- function isValidFilePath(filename) {
70
- if (typeof filename !== 'string') {
71
- return false;
72
- }
73
- if (filename.split('\n').length > 1) {
74
- return false;
75
- }
76
- // Normalize slashes early so heuristics can detect path-like inputs
77
- const filenameSlashes = filename.replace(/\\/g, '/');
78
- // Reject strings that look like sentences (informational text)
79
- // Heuristic: contains multiple spaces and ends with a period, or contains typical sentence punctuation
80
- // But skip this heuristic if the string looks like a path (contains '/' or starts with a drive letter)
81
- if (filename.trim().length > 60 && // long enough to be a sentence
82
- /[.!?]/.test(filename) && // contains sentence punctuation
83
- filename.split(' ').length > 8 && // has many words
84
- !/\/|^[A-Z]:/i.test(filenameSlashes) // do NOT treat as sentence if looks like a path
85
- ) {
86
- return false;
87
- }
88
- // Absolute Unix path: /hello.txt
89
- if (/^(\/)/i.test(filenameSlashes)) {
90
- // console.log(filename, 'Absolute Unix path: /hello.txt');
91
- return true;
92
- }
93
- // Absolute Windows path: C:/ or C:\ (allow spaces and multiple dots in filename)
94
- if (/^[A-Z]:\/.+$/i.test(filenameSlashes)) {
95
- // console.log(filename, 'Absolute Windows path: /hello.txt');
96
- return true;
97
- }
98
- // Relative path: ./hello.txt
99
- if (/^(\.\.?\/)+/i.test(filenameSlashes)) {
100
- // console.log(filename, 'Relative path: ./hello.txt');
101
- return true;
102
- }
103
- // Allow paths like foo/hello
104
- if (/^[^/]+\/[^/]+/i.test(filenameSlashes)) {
105
- // console.log(filename, 'Allow paths like foo/hello');
106
- return true;
107
- }
108
- // Allow paths like hello.book
109
- if (/^[^/]+\.[^/]+$/i.test(filenameSlashes)) {
110
- // console.log(filename, 'Allow paths like hello.book');
111
- return true;
112
- }
113
- return false;
114
- }
115
- /**
116
- * TODO: [🍏] Implement for MacOs
117
- */
118
-
119
- /**
120
- * Tests if given string is valid URL.
121
- *
122
- * Note: [🔂] This function is idempotent.
123
- * Note: Dataurl are considered perfectly valid.
124
- * Note: There are two similar functions:
125
- * - `isValidUrl` which tests any URL
126
- * - `isValidPipelineUrl` *(this one)* which tests just promptbook URL
127
- *
128
- * @public exported from `@promptbook/utils`
129
- */
130
- function isValidUrl(url) {
131
- if (typeof url !== 'string') {
132
- return false;
133
- }
134
- try {
135
- if (url.startsWith('blob:')) {
136
- url = url.replace(/^blob:/, '');
137
- }
138
- const urlObject = new URL(url /* because fail is handled */);
139
- if (!['http:', 'https:', 'data:'].includes(urlObject.protocol)) {
140
- return false;
141
- }
142
- return true;
143
- }
144
- catch (error) {
145
- return false;
146
- }
147
- }
148
-
149
- /**
150
- * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
151
- *
152
- * @public exported from `@promptbook/core`
153
- */
154
- class ParseError extends Error {
155
- constructor(message) {
156
- super(message);
157
- this.name = 'ParseError';
158
- Object.setPrototypeOf(this, ParseError.prototype);
159
- }
160
- }
161
- /**
162
- * TODO: Maybe split `ParseError` and `ApplyError`
163
- */
164
-
165
37
  /**
166
38
  * Available remote servers for the Promptbook
167
39
  *
@@ -195,6 +67,7 @@
195
67
  */
196
68
  ];
197
69
  /**
70
+ * TODO: [🐱‍🚀] Auto-federated server from url in here
198
71
  * Note: [💞] Ignore a discrepancy between file name and entity name
199
72
  */
200
73
 
@@ -528,6 +401,9 @@
528
401
  if (hex.length === 3) {
529
402
  return Color.fromHex3(hex);
530
403
  }
404
+ if (hex.length === 4) {
405
+ return Color.fromHex4(hex);
406
+ }
531
407
  if (hex.length === 6) {
532
408
  return Color.fromHex6(hex);
533
409
  }
@@ -548,6 +424,19 @@
548
424
  const b = parseInt(hex.substr(2, 1), 16) * 16;
549
425
  return take(new Color(r, g, b));
550
426
  }
427
+ /**
428
+ * Creates a new Color instance from color in hex format with 4 digits (with alpha channel)
429
+ *
430
+ * @param color in hex for example `09df`
431
+ * @returns Color object
432
+ */
433
+ static fromHex4(hex) {
434
+ const r = parseInt(hex.substr(0, 1), 16) * 16;
435
+ const g = parseInt(hex.substr(1, 1), 16) * 16;
436
+ const b = parseInt(hex.substr(2, 1), 16) * 16;
437
+ const a = parseInt(hex.substr(3, 1), 16) * 16;
438
+ return take(new Color(r, g, b, a));
439
+ }
551
440
  /**
552
441
  * Creates a new Color instance from color in hex format with 6 color digits (without alpha channel)
553
442
  *
@@ -738,7 +627,8 @@
738
627
  * @returns true if the value is a valid hex color string (e.g., `#009edd`, `#fff`, etc.)
739
628
  */
740
629
  static isHexColorString(value) {
741
- return typeof value === 'string' && /^#(?:[0-9a-fA-F]{3}){1,2}$/.test(value);
630
+ return (typeof value === 'string' &&
631
+ /^#(?:[0-9a-fA-F]{3}|[0-9a-fA-F]{4}|[0-9a-fA-F]{6}|[0-9a-fA-F]{8})$/.test(value));
742
632
  }
743
633
  /**
744
634
  * Creates new Color object
@@ -1079,6 +969,7 @@
1079
969
  const PROMPTBOOK_SYNTAX_COLORS = {
1080
970
  TITLE: Color.fromHex('#244EA8'),
1081
971
  LINE: Color.fromHex('#eeeeee'),
972
+ SEPARATOR: Color.fromHex('#cccccc'),
1082
973
  COMMITMENT: Color.fromHex('#DA0F78'),
1083
974
  PARAMETER: Color.fromHex('#8e44ad'),
1084
975
  };
@@ -1457,76 +1348,297 @@
1457
1348
  }
1458
1349
 
1459
1350
  /**
1460
- * This error type indicates that the error should not happen and its last check before crashing with some other error
1351
+ * This error type indicates that the error should not happen and its last check before crashing with some other error
1352
+ *
1353
+ * @public exported from `@promptbook/core`
1354
+ */
1355
+ class UnexpectedError extends Error {
1356
+ constructor(message) {
1357
+ super(spaceTrim$1.spaceTrim((block) => `
1358
+ ${block(message)}
1359
+
1360
+ Note: This error should not happen.
1361
+ It's probably a bug in the pipeline collection
1362
+
1363
+ Please report issue:
1364
+ ${block(getErrorReportUrl(new Error(message)).href)}
1365
+
1366
+ Or contact us on ${ADMIN_EMAIL}
1367
+
1368
+ `));
1369
+ this.name = 'UnexpectedError';
1370
+ Object.setPrototypeOf(this, UnexpectedError.prototype);
1371
+ }
1372
+ }
1373
+
1374
+ /**
1375
+ * This error type indicates that somewhere in the code non-Error object was thrown and it was wrapped into the `WrappedError`
1376
+ *
1377
+ * @public exported from `@promptbook/core`
1378
+ */
1379
+ class WrappedError extends Error {
1380
+ constructor(whatWasThrown) {
1381
+ const tag = `[🤮]`;
1382
+ console.error(tag, whatWasThrown);
1383
+ super(spaceTrim$1.spaceTrim(`
1384
+ Non-Error object was thrown
1385
+
1386
+ Note: Look for ${tag} in the console for more details
1387
+ Please report issue on ${ADMIN_EMAIL}
1388
+ `));
1389
+ this.name = 'WrappedError';
1390
+ Object.setPrototypeOf(this, WrappedError.prototype);
1391
+ }
1392
+ }
1393
+
1394
+ /**
1395
+ * Helper used in catch blocks to assert that the error is an instance of `Error`
1396
+ *
1397
+ * @param whatWasThrown Any object that was thrown
1398
+ * @returns Nothing if the error is an instance of `Error`
1399
+ * @throws `WrappedError` or `UnexpectedError` if the error is not standard
1400
+ *
1401
+ * @private within the repository
1402
+ */
1403
+ function assertsError(whatWasThrown) {
1404
+ // Case 1: Handle error which was rethrown as `WrappedError`
1405
+ if (whatWasThrown instanceof WrappedError) {
1406
+ const wrappedError = whatWasThrown;
1407
+ throw wrappedError;
1408
+ }
1409
+ // Case 2: Handle unexpected errors
1410
+ if (whatWasThrown instanceof UnexpectedError) {
1411
+ const unexpectedError = whatWasThrown;
1412
+ throw unexpectedError;
1413
+ }
1414
+ // Case 3: Handle standard errors - keep them up to consumer
1415
+ if (whatWasThrown instanceof Error) {
1416
+ return;
1417
+ }
1418
+ // Case 4: Handle non-standard errors - wrap them into `WrappedError` and throw
1419
+ throw new WrappedError(whatWasThrown);
1420
+ }
1421
+
1422
+ /**
1423
+ * Format either small or big number
1424
+ *
1425
+ * @public exported from `@promptbook/utils`
1426
+ */
1427
+ function numberToString(value) {
1428
+ if (value === 0) {
1429
+ return '0';
1430
+ }
1431
+ else if (Number.isNaN(value)) {
1432
+ return VALUE_STRINGS.nan;
1433
+ }
1434
+ else if (value === Infinity) {
1435
+ return VALUE_STRINGS.infinity;
1436
+ }
1437
+ else if (value === -Infinity) {
1438
+ return VALUE_STRINGS.negativeInfinity;
1439
+ }
1440
+ for (let exponent = 0; exponent < 15; exponent++) {
1441
+ const factor = 10 ** exponent;
1442
+ const valueRounded = Math.round(value * factor) / factor;
1443
+ if (Math.abs(value - valueRounded) / value < SMALL_NUMBER) {
1444
+ return valueRounded.toFixed(exponent);
1445
+ }
1446
+ }
1447
+ return value.toString();
1448
+ }
1449
+
1450
+ /**
1451
+ * Function `valueToString` will convert the given value to string
1452
+ * This is useful and used in the `templateParameters` function
1453
+ *
1454
+ * Note: This function is not just calling `toString` method
1455
+ * It's more complex and can handle this conversion specifically for LLM models
1456
+ * See `VALUE_STRINGS`
1457
+ *
1458
+ * Note: There are 2 similar functions
1459
+ * - `valueToString` converts value to string for LLM models as human-readable string
1460
+ * - `asSerializable` converts value to string to preserve full information to be able to convert it back
1461
+ *
1462
+ * @public exported from `@promptbook/utils`
1463
+ */
1464
+ function valueToString(value) {
1465
+ try {
1466
+ if (value === '') {
1467
+ return VALUE_STRINGS.empty;
1468
+ }
1469
+ else if (value === null) {
1470
+ return VALUE_STRINGS.null;
1471
+ }
1472
+ else if (value === undefined) {
1473
+ return VALUE_STRINGS.undefined;
1474
+ }
1475
+ else if (typeof value === 'string') {
1476
+ return value;
1477
+ }
1478
+ else if (typeof value === 'number') {
1479
+ return numberToString(value);
1480
+ }
1481
+ else if (value instanceof Date) {
1482
+ return value.toISOString();
1483
+ }
1484
+ else {
1485
+ try {
1486
+ return JSON.stringify(value);
1487
+ }
1488
+ catch (error) {
1489
+ if (error instanceof TypeError && error.message.includes('circular structure')) {
1490
+ return VALUE_STRINGS.circular;
1491
+ }
1492
+ throw error;
1493
+ }
1494
+ }
1495
+ }
1496
+ catch (error) {
1497
+ assertsError(error);
1498
+ console.error(error);
1499
+ return VALUE_STRINGS.unserializable;
1500
+ }
1501
+ }
1502
+
1503
+ /**
1504
+ * Computes SHA-256 hash of the given object
1505
+ *
1506
+ * @public exported from `@promptbook/utils`
1507
+ */
1508
+ function computeHash(value) {
1509
+ return cryptoJs.SHA256(hexEncoder__default["default"].parse(spaceTrim__default["default"](valueToString(value)))).toString( /* hex */);
1510
+ }
1511
+ /**
1512
+ * TODO: [🥬][🥬] Use this ACRY
1513
+ */
1514
+
1515
+ /**
1516
+ * Computes SHA-256 hash of the agent source
1517
+ *
1518
+ * @public exported from `@promptbook/core`
1519
+ */
1520
+ function computeAgentHash(agentSource) {
1521
+ return computeHash(agentSource);
1522
+ }
1523
+
1524
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> 
{knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just 
title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModels",description:"List of available model names together with their descriptions as JSON",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelsRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n```json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpful assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n```\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models 
first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n```json\n{availableModels}\n```\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelsRequirements",format:"JSON",dependentParameterNames:["availableModels","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModels}` List of available model names together with their descriptions as JSON\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelsRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n\\`\\`\\`json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpful assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are 
the available models:\n\n\\`\\`\\`json\n{availableModels}\n\\`\\`\\`\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelsRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- 
Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
1525
+
1526
/**
 * Checks if value is valid email
 *
 * An email is accepted when it is a single-line string matching the loose
 * shape `something@something.something` (no strict RFC 5322 validation).
 *
 * @public exported from `@promptbook/utils`
 */
function isValidEmail(email) {
    // Non-string values can never be emails
    if (typeof email !== 'string') {
        return false;
    }
    // Emails are single-line; any embedded newline disqualifies the value
    if (email.includes('\n')) {
        return false;
    }
    // Loose shape check: local part, `@`, domain with at least one dot
    return /^.+@.+\..+$/.test(email);
}
1540
+
1541
/**
 * Tests if given string is valid file path.
 *
 * Note: This does not check if the file exists only if the path is valid
 *
 * Accepted shapes: absolute Unix (`/hello.txt`), absolute Windows (`C:/…` or
 * `C:\…`), relative (`./…`, `../…`), nested (`foo/hello`) and bare filenames
 * with an extension (`hello.book`). Long sentence-like strings are rejected.
 *
 * @public exported from `@promptbook/utils`
 */
function isValidFilePath(filename) {
    // Paths are single-line strings
    if (typeof filename !== 'string' || filename.includes('\n')) {
        return false;
    }
    // Normalize backslashes to forward slashes so the shape checks below
    // work for both Unix and Windows separators
    const normalized = filename.replace(/\\/g, '/');
    // Heuristic sentence rejection: long, punctuated, many-word strings are
    // treated as informational text — unless they look like a path
    // (contain '/' or start with a drive letter)
    const looksLikePath = /\/|^[A-Z]:/i.test(normalized);
    const looksLikeSentence =
        filename.trim().length > 60 && // long enough to be a sentence
        /[.!?]/.test(filename) && // contains sentence punctuation
        filename.split(' ').length > 8; // has many words
    if (looksLikeSentence && !looksLikePath) {
        return false;
    }
    // Accept any of the recognized path shapes:
    const pathPatterns = [
        /^(\/)/i, // <- Absolute Unix path: /hello.txt
        /^[A-Z]:\/.+$/i, // <- Absolute Windows path (spaces and dots allowed): C:/hello.txt
        /^(\.\.?\/)+/i, // <- Relative path: ./hello.txt or ../hello.txt
        /^[^/]+\/[^/]+/i, // <- Nested path without prefix: foo/hello
        /^[^/]+\.[^/]+$/i, // <- Bare filename with extension: hello.book
    ];
    return pathPatterns.some((pattern) => pattern.test(normalized));
}
/**
 * TODO: [🍏] Implement for MacOs
 */
1593
+ /**
1594
+ * TODO: [🍏] Implement for MacOs
1595
+ */
1596
+
1597
+ /**
1598
+ * Tests if given string is valid URL.
1461
1599
  *
1462
- * @public exported from `@promptbook/core`
1600
+ * Note: [🔂] This function is idempotent.
1601
+ * Note: Dataurl are considered perfectly valid.
1602
+ * Note: There are two similar functions:
1603
+ * - `isValidUrl` which tests any URL
1604
+ * - `isValidPipelineUrl` *(this one)* which tests just promptbook URL
1605
+ *
1606
+ * @public exported from `@promptbook/utils`
1463
1607
  */
1464
- class UnexpectedError extends Error {
1465
- constructor(message) {
1466
- super(spaceTrim$1.spaceTrim((block) => `
1467
- ${block(message)}
1468
-
1469
- Note: This error should not happen.
1470
- It's probably a bug in the pipeline collection
1471
-
1472
- Please report issue:
1473
- ${block(getErrorReportUrl(new Error(message)).href)}
1474
-
1475
- Or contact us on ${ADMIN_EMAIL}
1476
-
1477
- `));
1478
- this.name = 'UnexpectedError';
1479
- Object.setPrototypeOf(this, UnexpectedError.prototype);
1608
function isValidUrl(url) {
    // Only strings can be URLs
    if (typeof url !== 'string') {
        return false;
    }
    try {
        // Blob URLs (`blob:https://…`) wrap an inner URL — validate the wrapped part
        const candidate = url.startsWith('blob:') ? url.slice('blob:'.length) : url;
        const { protocol } = new URL(candidate); // <- Note: Throws on malformed input, handled in catch below
        // Only web and data URLs are considered valid
        return protocol === 'http:' || protocol === 'https:' || protocol === 'data:';
    }
    catch (error) {
        // Note: `new URL` threw, so the string is not a parseable URL
        return false;
    }
}
1482
1626
 
1483
1627
  /**
1484
- * This error type indicates that somewhere in the code non-Error object was thrown and it was wrapped into the `WrappedError`
1628
+ * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
1485
1629
  *
1486
1630
  * @public exported from `@promptbook/core`
1487
1631
  */
1488
- class WrappedError extends Error {
1489
- constructor(whatWasThrown) {
1490
- const tag = `[🤮]`;
1491
- console.error(tag, whatWasThrown);
1492
- super(spaceTrim$1.spaceTrim(`
1493
- Non-Error object was thrown
1494
-
1495
- Note: Look for ${tag} in the console for more details
1496
- Please report issue on ${ADMIN_EMAIL}
1497
- `));
1498
- this.name = 'WrappedError';
1499
- Object.setPrototypeOf(this, WrappedError.prototype);
1632
class ParseError extends Error {
    /**
     * @param message - Human-readable description of why the promptbook could not be parsed
     */
    constructor(message) {
        super(message);
        this.name = 'ParseError';
        // Note: Repair the prototype chain so `instanceof ParseError` keeps
        //       working even when class syntax is downleveled by the build
        Object.setPrototypeOf(this, ParseError.prototype);
    }
}
1502
-
1503
1639
  /**
1504
- * Helper used in catch blocks to assert that the error is an instance of `Error`
1505
- *
1506
- * @param whatWasThrown Any object that was thrown
1507
- * @returns Nothing if the error is an instance of `Error`
1508
- * @throws `WrappedError` or `UnexpectedError` if the error is not standard
1509
- *
1510
- * @private within the repository
1640
+ * TODO: Maybe split `ParseError` and `ApplyError`
1511
1641
  */
1512
- function assertsError(whatWasThrown) {
1513
- // Case 1: Handle error which was rethrown as `WrappedError`
1514
- if (whatWasThrown instanceof WrappedError) {
1515
- const wrappedError = whatWasThrown;
1516
- throw wrappedError;
1517
- }
1518
- // Case 2: Handle unexpected errors
1519
- if (whatWasThrown instanceof UnexpectedError) {
1520
- const unexpectedError = whatWasThrown;
1521
- throw unexpectedError;
1522
- }
1523
- // Case 3: Handle standard errors - keep them up to consumer
1524
- if (whatWasThrown instanceof Error) {
1525
- return;
1526
- }
1527
- // Case 4: Handle non-standard errors - wrap them into `WrappedError` and throw
1528
- throw new WrappedError(whatWasThrown);
1529
- }
1530
1642
 
1531
1643
  /**
1532
1644
  * Function isValidJsonString will tell you if the string is valid JSON or not
@@ -1940,7 +2052,7 @@
1940
2052
  TODO: [🧠] Is there a better implementation?
1941
2053
  > const propertyNames = Object.getOwnPropertyNames(objectValue);
1942
2054
  > for (const propertyName of propertyNames) {
1943
- > const value = (objectValue as really_any)[propertyName];
2055
+ > const value = (objectValue as chococake)[propertyName];
1944
2056
  > if (value && typeof value === 'object') {
1945
2057
  > deepClone(value);
1946
2058
  > }
@@ -2793,7 +2905,7 @@
2793
2905
  }
2794
2906
  }
2795
2907
  /**
2796
- * TODO: !!!! Explain that NotFoundError (!!! and other specific errors) has priority over DatabaseError in some contexts
2908
+ * TODO: [🐱‍🚀] Explain that NotFoundError ([🐱‍🚀] and other specific errors) has priority over DatabaseError in some contexts
2797
2909
  */
2798
2910
 
2799
2911
  /**
@@ -5062,87 +5174,6 @@
5062
5174
  * @see https://docs.anthropic.com/en/docs/test-and-evaluate/strengthen-guardrails/increase-consistency#specify-the-desired-output-format
5063
5175
  */
5064
5176
 
5065
- /**
5066
- * Format either small or big number
5067
- *
5068
- * @public exported from `@promptbook/utils`
5069
- */
5070
- function numberToString(value) {
5071
- if (value === 0) {
5072
- return '0';
5073
- }
5074
- else if (Number.isNaN(value)) {
5075
- return VALUE_STRINGS.nan;
5076
- }
5077
- else if (value === Infinity) {
5078
- return VALUE_STRINGS.infinity;
5079
- }
5080
- else if (value === -Infinity) {
5081
- return VALUE_STRINGS.negativeInfinity;
5082
- }
5083
- for (let exponent = 0; exponent < 15; exponent++) {
5084
- const factor = 10 ** exponent;
5085
- const valueRounded = Math.round(value * factor) / factor;
5086
- if (Math.abs(value - valueRounded) / value < SMALL_NUMBER) {
5087
- return valueRounded.toFixed(exponent);
5088
- }
5089
- }
5090
- return value.toString();
5091
- }
5092
-
5093
- /**
5094
- * Function `valueToString` will convert the given value to string
5095
- * This is useful and used in the `templateParameters` function
5096
- *
5097
- * Note: This function is not just calling `toString` method
5098
- * It's more complex and can handle this conversion specifically for LLM models
5099
- * See `VALUE_STRINGS`
5100
- *
5101
- * Note: There are 2 similar functions
5102
- * - `valueToString` converts value to string for LLM models as human-readable string
5103
- * - `asSerializable` converts value to string to preserve full information to be able to convert it back
5104
- *
5105
- * @public exported from `@promptbook/utils`
5106
- */
5107
- function valueToString(value) {
5108
- try {
5109
- if (value === '') {
5110
- return VALUE_STRINGS.empty;
5111
- }
5112
- else if (value === null) {
5113
- return VALUE_STRINGS.null;
5114
- }
5115
- else if (value === undefined) {
5116
- return VALUE_STRINGS.undefined;
5117
- }
5118
- else if (typeof value === 'string') {
5119
- return value;
5120
- }
5121
- else if (typeof value === 'number') {
5122
- return numberToString(value);
5123
- }
5124
- else if (value instanceof Date) {
5125
- return value.toISOString();
5126
- }
5127
- else {
5128
- try {
5129
- return JSON.stringify(value);
5130
- }
5131
- catch (error) {
5132
- if (error instanceof TypeError && error.message.includes('circular structure')) {
5133
- return VALUE_STRINGS.circular;
5134
- }
5135
- throw error;
5136
- }
5137
- }
5138
- }
5139
- catch (error) {
5140
- assertsError(error);
5141
- console.error(error);
5142
- return VALUE_STRINGS.unserializable;
5143
- }
5144
- }
5145
-
5146
5177
  /**
5147
5178
  * Parses the given script and returns the list of all used variables that are not defined in the script
5148
5179
  *
@@ -8069,6 +8100,60 @@
8069
8100
  * Note: [💞] Ignore a discrepancy between file name and entity name
8070
8101
  */
8071
8102
 
8103
/**
 * INITIAL MESSAGE commitment definition
 *
 * The INITIAL MESSAGE commitment defines the first message that the user sees when opening the chat.
 * It is used to greet the user and set the tone of the conversation.
 *
 * Example usage in agent source:
 *
 * ```book
 * INITIAL MESSAGE Hello! I am ready to help you with your tasks.
 * ```
 *
 * @private [🪔] Maybe export the commitments through some package
 */
class InitialMessageCommitmentDefinition extends BaseCommitmentDefinition {
    constructor() {
        // 'INITIAL MESSAGE' is the commitment keyword matched in the agent source
        super('INITIAL MESSAGE');
    }
    /**
     * Short one-line description of INITIAL MESSAGE.
     */
    get description() {
        return 'Defines the **initial message** shown to the user when the chat starts.';
    }
    /**
     * Markdown documentation for INITIAL MESSAGE commitment.
     */
    get documentation() {
        // Note: spaceTrim strips the common indentation, so the markdown below renders flush-left
        return spaceTrim$1.spaceTrim(`
            # ${this.type}

            Defines the first message that the user sees when opening the chat. This message is purely for display purposes in the UI and does not inherently become part of the LLM's system prompt context (unless also included via other means).

            ## Key aspects

            - Used to greet the user.
            - Sets the tone of the conversation.
            - Displayed immediately when the chat interface loads.

            ## Examples

            \`\`\`book
            Support Agent

            PERSONA You are a helpful support agent.
            INITIAL MESSAGE Hi there! How can I assist you today?
            \`\`\`
        `);
    }
    // Intentionally a no-op: INITIAL MESSAGE is display-only (see docs above),
    // so it leaves the agent model requirements unchanged; `content` is unused here.
    applyToAgentModelRequirements(requirements, content) {
        return requirements;
    }
}
8156
+
8072
8157
  /**
8073
8158
  * MESSAGE commitment definition
8074
8159
  *
@@ -9230,6 +9315,7 @@
9230
9315
  new NoteCommitmentDefinition('NONCE'),
9231
9316
  new GoalCommitmentDefinition('GOAL'),
9232
9317
  new GoalCommitmentDefinition('GOALS'),
9318
+ new InitialMessageCommitmentDefinition(),
9233
9319
  new MessageCommitmentDefinition('MESSAGE'),
9234
9320
  new MessageCommitmentDefinition('MESSAGES'),
9235
9321
  new ScenarioCommitmentDefinition('SCENARIO'),
@@ -10118,13 +10204,31 @@
10118
10204
  }
10119
10205
  personaDescription += commitment.content;
10120
10206
  }
10207
+ let initialMessage = null;
10208
+ for (const commitment of parseResult.commitments) {
10209
+ if (commitment.type !== 'INITIAL MESSAGE') {
10210
+ continue;
10211
+ }
10212
+ // Note: Initial message override logic - later overrides earlier
10213
+ // Or should it append? Usually initial message is just one block.
10214
+ // Let's stick to "later overrides earlier" for simplicity, or just take the last one.
10215
+ initialMessage = commitment.content;
10216
+ }
10121
10217
  const meta = {};
10218
+ const links = [];
10122
10219
  for (const commitment of parseResult.commitments) {
10220
+ if (commitment.type === 'META LINK') {
10221
+ links.push(spaceTrim__default["default"](commitment.content));
10222
+ continue;
10223
+ }
10123
10224
  if (commitment.type !== 'META') {
10124
10225
  continue;
10125
10226
  }
10126
10227
  // Parse META commitments - format is "META TYPE content"
10127
10228
  const metaTypeRaw = commitment.content.split(' ')[0] || 'NONE';
10229
+ if (metaTypeRaw === 'LINK') {
10230
+ links.push(spaceTrim__default["default"](commitment.content.substring(metaTypeRaw.length)));
10231
+ }
10128
10232
  const metaType = normalizeTo_camelCase(metaTypeRaw);
10129
10233
  meta[metaType] = spaceTrim__default["default"](commitment.content.substring(metaTypeRaw.length));
10130
10234
  }
@@ -10140,7 +10244,9 @@
10140
10244
  agentName: normalizeAgentName(parseResult.agentName || createDefaultAgentName(agentSource)),
10141
10245
  agentHash,
10142
10246
  personaDescription,
10247
+ initialMessage,
10143
10248
  meta,
10249
+ links,
10144
10250
  parameters,
10145
10251
  };
10146
10252
  }
@@ -10311,26 +10417,28 @@
10311
10417
  PERSONA A friendly AI assistant that helps you with your tasks
10312
10418
  `)));
10313
10419
  // <- Note: Not using book`...` notation to avoid strange error in jest unit tests `TypeError: (0 , book_notation_1.book) is not a function`
10314
- // <- TODO: !!! `GENESIS_BOOK` / `ADAM_BOOK` in `/agents/adam.book`
10315
- // <- !!! Buttons into genesis book
10316
- // <- TODO: !!! generateBookBoilerplate and deprecate `DEFAULT_BOOK`
10420
+ // <- TODO: [🐱‍🚀] `GENESIS_BOOK` / `ADAM_BOOK` in `/agents/adam.book`
10421
+ // <- [🐱‍🚀] Buttons into genesis book
10422
+ // <- TODO: [🐱‍🚀] generateBookBoilerplate and deprecate `DEFAULT_BOOK`
10317
10423
 
10424
+ // import { getTableName } from '../../../../../apps/agents-server/src/database/getTableName';
10425
+ // <- TODO: [🐱‍🚀] Prevent imports from `/apps` -> `/src`
10318
10426
  /**
10319
10427
  * Agent collection stored in Supabase table
10320
10428
  *
10321
10429
  * Note: This object can work both from Node.js and browser environment depending on the Supabase client provided
10322
10430
  *
10323
10431
  * @public exported from `@promptbook/core`
10324
- * <- TODO: !!! Move to `@promptbook/supabase` package
10432
+ * <- TODO: [🐱‍🚀] Move to `@promptbook/supabase` package
10325
10433
  */
10326
- class AgentCollectionInSupabase /* TODO: !!!!!! implements Agent */ {
10434
+ class AgentCollectionInSupabase /* TODO: [🐱‍🚀] implements Agent */ {
10327
10435
  /**
10328
10436
  * @param rootPath - path to the directory with agents
10329
- * @param tools - Execution tools to be used in !!! `Agent` itself and listing the agents
10437
+ * @param tools - Execution tools to be used in [🐱‍🚀] `Agent` itself and listing the agents
10330
10438
  * @param options - Options for the collection creation
10331
10439
  */
10332
10440
  constructor(supabaseClient,
10333
- /// TODO: !!! Remove> private readonly tools?: Pick<ExecutionTools, 'llm' | 'fs' | 'scrapers'>,
10441
+ /// TODO: [🐱‍🚀] Remove> private readonly tools?: Pick<ExecutionTools, 'llm' | 'fs' | 'scrapers'>,
10334
10442
  options) {
10335
10443
  this.supabaseClient = supabaseClient;
10336
10444
  this.options = options;
@@ -10344,7 +10452,9 @@
10344
10452
  */
10345
10453
  async listAgents( /* TODO: [🧠] Allow to pass some condition here */) {
10346
10454
  const { isVerbose = exports.DEFAULT_IS_VERBOSE } = this.options || {};
10347
- const selectResult = await this.supabaseClient.from('Agent').select('agentName,agentProfile');
10455
+ const selectResult = await this.supabaseClient
10456
+ .from(this.getTableName('Agent'))
10457
+ .select('agentName,agentProfile');
10348
10458
  if (selectResult.error) {
10349
10459
  throw new DatabaseError(spaceTrim((block) => `
10350
10460
 
@@ -10372,11 +10482,11 @@
10372
10482
  });
10373
10483
  }
10374
10484
  /**
10375
- * !!!@@@
10485
+ * [🐱‍🚀]@@@
10376
10486
  */
10377
10487
  async getAgentSource(agentName) {
10378
10488
  const selectResult = await this.supabaseClient
10379
- .from('Agent')
10489
+ .from(this.getTableName('Agent'))
10380
10490
  .select('agentSource')
10381
10491
  .eq('agentName', agentName)
10382
10492
  .single();
@@ -10392,7 +10502,7 @@
10392
10502
 
10393
10503
  ${block(selectResult.error.message)}
10394
10504
  `));
10395
- // <- TODO: !!! First check if the error is "not found" and throw `NotFoundError` instead then throw `DatabaseError`
10505
+ // <- TODO: [🐱‍🚀] First check if the error is "not found" and throw `NotFoundError` instead then throw `DatabaseError`
10396
10506
  }
10397
10507
  return selectResult.data.agentSource;
10398
10508
  }
@@ -10405,7 +10515,7 @@
10405
10515
  const agentProfile = parseAgentSource(agentSource);
10406
10516
  // <- TODO: [🕛]
10407
10517
  const { agentName, agentHash } = agentProfile;
10408
- const insertAgentResult = await this.supabaseClient.from('Agent').insert({
10518
+ const insertAgentResult = await this.supabaseClient.from(this.getTableName('Agent')).insert({
10409
10519
  agentName,
10410
10520
  agentHash,
10411
10521
  agentProfile,
@@ -10422,7 +10532,7 @@
10422
10532
  ${block(insertAgentResult.error.message)}
10423
10533
  `));
10424
10534
  }
10425
- await this.supabaseClient.from('AgentHistory').insert({
10535
+ await this.supabaseClient.from(this.getTableName('AgentHistory')).insert({
10426
10536
  createdAt: new Date().toISOString(),
10427
10537
  agentName,
10428
10538
  agentHash,
@@ -10438,7 +10548,7 @@
10438
10548
  */
10439
10549
  async updateAgentSource(agentName, agentSource) {
10440
10550
  const selectPreviousAgentResult = await this.supabaseClient
10441
- .from('Agent')
10551
+ .from(this.getTableName('Agent'))
10442
10552
  .select('agentHash,agentName')
10443
10553
  .eq('agentName', agentName)
10444
10554
  .single();
@@ -10449,7 +10559,7 @@
10449
10559
 
10450
10560
  ${block(selectPreviousAgentResult.error.message)}
10451
10561
  `));
10452
- // <- TODO: !!! First check if the error is "not found" and throw `NotFoundError` instead then throw `DatabaseError`
10562
+ // <- TODO: [🐱‍🚀] First check if the error is "not found" and throw `NotFoundError` instead then throw `DatabaseError`
10453
10563
  }
10454
10564
  selectPreviousAgentResult.data.agentName;
10455
10565
  const previousAgentHash = selectPreviousAgentResult.data.agentHash;
@@ -10457,9 +10567,9 @@
10457
10567
  // <- TODO: [🕛]
10458
10568
  const { agentHash } = agentProfile;
10459
10569
  const updateAgentResult = await this.supabaseClient
10460
- .from('Agent')
10570
+ .from(this.getTableName('Agent'))
10461
10571
  .update({
10462
- // TODO: !!!! Compare not update> agentName: agentProfile.agentName || '!!!!!' /* <- TODO: !!!! Remove */,
10572
+ // TODO: [🐱‍🚀] Compare not update> agentName: agentProfile.agentName || '[🐱‍🚀]' /* <- TODO: [🐱‍🚀] Remove */,
10463
10573
  agentProfile,
10464
10574
  updatedAt: new Date().toISOString(),
10465
10575
  agentHash: agentProfile.agentHash,
@@ -10467,9 +10577,9 @@
10467
10577
  promptbookEngineVersion: PROMPTBOOK_ENGINE_VERSION,
10468
10578
  })
10469
10579
  .eq('agentName', agentName);
10470
- // console.log('!!! updateAgent', updateResult);
10471
- // console.log('!!! old', oldAgentSource);
10472
- // console.log('!!! new', newAgentSource);
10580
+ // console.log('[🐱‍🚀] updateAgent', updateResult);
10581
+ // console.log('[🐱‍🚀] old', oldAgentSource);
10582
+ // console.log('[🐱‍🚀] new', newAgentSource);
10473
10583
  if (updateAgentResult.error) {
10474
10584
  throw new DatabaseError(spaceTrim((block) => `
10475
10585
  Error updating agent "${agentName}" in Supabase:
@@ -10477,7 +10587,7 @@
10477
10587
  ${block(updateAgentResult.error.message)}
10478
10588
  `));
10479
10589
  }
10480
- await this.supabaseClient.from('AgentHistory').insert({
10590
+ await this.supabaseClient.from(this.getTableName('AgentHistory')).insert({
10481
10591
  createdAt: new Date().toISOString(),
10482
10592
  agentName,
10483
10593
  agentHash,
@@ -10487,7 +10597,7 @@
10487
10597
  });
10488
10598
  // <- TODO: [🧠] What to do with `insertAgentHistoryResult.error`, ignore? wait?
10489
10599
  }
10490
- // TODO: !!!! public async getAgentSourceSubject(agentName: string_agent_name): Promise<BehaviorSubject<string_book>>
10600
+ // TODO: [🐱‍🚀] public async getAgentSourceSubject(agentName: string_agent_name): Promise<BehaviorSubject<string_book>>
10491
10601
  // Use Supabase realtime logic
10492
10602
  /**
10493
10603
  * Deletes an agent from the collection
@@ -10495,9 +10605,19 @@
10495
10605
  async deleteAgent(agentName) {
10496
10606
  throw new NotYetImplementedError('Method not implemented.');
10497
10607
  }
10608
+ /**
10609
+ * Get the Supabase table name with prefix
10610
+ *
10611
+ * @param tableName - The original table name
10612
+ * @returns The prefixed table name
10613
+ */
10614
+ getTableName(tableName) {
10615
+ const { tablePrefix = '' } = this.options || {};
10616
+ return `${tablePrefix}${tableName}`;
10617
+ }
10498
10618
  }
10499
10619
  /**
10500
- * TODO: !!!! Implement it here correctly and update JSDoc comments here, and on interface + other implementations
10620
+ * TODO: [🐱‍🚀] Implement it here correctly and update JSDoc comments here, and on interface + other implementations
10501
10621
  * TODO: Write unit test
10502
10622
  * TODO: [🧠][🚙] `AgentXxx` vs `AgentsXxx` naming convention
10503
10623
  */
@@ -16652,11 +16772,12 @@
16652
16772
  *
16653
16773
  * This is useful for calling OpenAI API with a single assistant, for more wide usage use `OpenAiExecutionTools`.
16654
16774
  *
16655
- * !!! Note: [🦖] There are several different things in Promptbook:
16775
+ * Note: [🦖] There are several different things in Promptbook:
16656
16776
  * - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
16657
16777
  * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
16658
16778
  * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
16659
16779
  * - `OpenAiAssistantExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities, recommended for usage in `Agent` or `AgentLlmExecutionTools`
16780
+ * - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
16660
16781
  *
16661
16782
  * @public exported from `@promptbook/openai`
16662
16783
  */
@@ -16691,6 +16812,12 @@
16691
16812
  * Calls OpenAI API to use a chat model.
16692
16813
  */
16693
16814
  async callChatModel(prompt) {
16815
+ return this.callChatModelStream(prompt, () => { });
16816
+ }
16817
+ /**
16818
+ * Calls OpenAI API to use a chat model with streaming.
16819
+ */
16820
+ async callChatModelStream(prompt, onProgress) {
16694
16821
  var _a, _b, _c;
16695
16822
  if (this.options.isVerbose) {
16696
16823
  console.info('💬 OpenAI callChatModel call', { prompt });
@@ -16758,21 +16885,24 @@
16758
16885
  console.info('connect', stream.currentEvent);
16759
16886
  }
16760
16887
  });
16761
- /*
16762
- stream.on('messageDelta', (messageDelta) => {
16763
- if (
16764
- this.options.isVerbose &&
16765
- messageDelta &&
16766
- messageDelta.content &&
16767
- messageDelta.content[0] &&
16768
- messageDelta.content[0].type === 'text'
16769
- ) {
16770
- console.info('messageDelta', messageDelta.content[0].text?.value);
16888
+ stream.on('textDelta', (textDelta, snapshot) => {
16889
+ if (this.options.isVerbose && textDelta.value) {
16890
+ console.info('textDelta', textDelta.value);
16771
16891
  }
16772
-
16773
- // <- TODO: [🐚] Make streaming and running tasks working
16892
+ const chunk = {
16893
+ content: textDelta.value || '',
16894
+ modelName: 'assistant',
16895
+ timing: {
16896
+ start,
16897
+ complete: $getCurrentDate(),
16898
+ },
16899
+ usage: UNCERTAIN_USAGE,
16900
+ rawPromptContent,
16901
+ rawRequest,
16902
+ rawResponse: snapshot,
16903
+ };
16904
+ onProgress(chunk);
16774
16905
  });
16775
- */
16776
16906
  stream.on('messageCreated', (message) => {
16777
16907
  if (this.options.isVerbose) {
16778
16908
  console.info('messageCreated', message);
@@ -16808,7 +16938,7 @@
16808
16938
  }
16809
16939
  return exportJson({
16810
16940
  name: 'promptResult',
16811
- message: `Result of \`OpenAiAssistantExecutionTools.callChatModel\``,
16941
+ message: `Result of \`OpenAiAssistantExecutionTools.callChatModelStream\``,
16812
16942
  order: [],
16813
16943
  value: {
16814
16944
  content: resultContent,
@@ -16947,9 +17077,9 @@
16947
17077
  }
16948
17078
  const assistant = await client.beta.assistants.create(assistantConfig);
16949
17079
  console.log(`✅ Assistant created: ${assistant.id}`);
16950
- // TODO: !!!! Try listing existing assistants
16951
- // TODO: !!!! Try marking existing assistants by DISCRIMINANT
16952
- // TODO: !!!! Allow to update and reconnect to existing assistants
17080
+ // TODO: [🐱‍🚀] Try listing existing assistants
17081
+ // TODO: [🐱‍🚀] Try marking existing assistants by DISCRIMINANT
17082
+ // TODO: [🐱‍🚀] Allow to update and reconnect to existing assistants
16953
17083
  return new OpenAiAssistantExecutionTools({
16954
17084
  ...this.options,
16955
17085
  isCreatingNewAssistantsAllowed: false,
@@ -17077,11 +17207,12 @@
17077
17207
  * Execution Tools for calling LLM models with a predefined agent "soul"
17078
17208
  * This wraps underlying LLM execution tools and applies agent-specific system prompts and requirements
17079
17209
  *
17080
- * !!! Note: [🦖] There are several different things in Promptbook:
17210
+ * Note: [🦖] There are several different things in Promptbook:
17081
17211
  * - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
17082
17212
  * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
17083
17213
  * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
17084
17214
  * - `OpenAiAssistantExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities, recommended for usage in `Agent` or `AgentLlmExecutionTools`
17215
+ * - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
17085
17216
  *
17086
17217
  * @public exported from `@promptbook/core`
17087
17218
  */
@@ -17175,9 +17306,12 @@
17175
17306
  * Calls the chat model with agent-specific system prompt and requirements
17176
17307
  */
17177
17308
  async callChatModel(prompt) {
17178
- if (!this.options.llmTools.callChatModel) {
17179
- throw new Error('Underlying LLM execution tools do not support chat model calls');
17180
- }
17309
+ return this.callChatModelStream(prompt, () => { });
17310
+ }
17311
+ /**
17312
+ * Calls the chat model with agent-specific system prompt and requirements with streaming
17313
+ */
17314
+ async callChatModelStream(prompt, onProgress) {
17181
17315
  // Ensure we're working with a chat prompt
17182
17316
  if (prompt.modelRequirements.modelVariant !== 'CHAT') {
17183
17317
  throw new Error('AgentLlmExecutionTools only supports chat prompts');
@@ -17216,7 +17350,7 @@
17216
17350
  if (this.options.isVerbose) {
17217
17351
  console.log(`1️⃣ Creating new OpenAI Assistant for agent ${this.title}...`);
17218
17352
  }
17219
- // <- TODO: !!! Check also `isCreatingNewAssistantsAllowed` and warn about it
17353
+ // <- TODO: [🐱‍🚀] Check also `isCreatingNewAssistantsAllowed` and warn about it
17220
17354
  assistant = await this.options.llmTools.createNewAssistant({
17221
17355
  name: this.title,
17222
17356
  instructions: modelRequirements.systemMessage,
@@ -17233,7 +17367,7 @@
17233
17367
  requirementsHash,
17234
17368
  });
17235
17369
  }
17236
- underlyingLlmResult = await assistant.callChatModel(chatPrompt);
17370
+ underlyingLlmResult = await assistant.callChatModelStream(chatPrompt, onProgress);
17237
17371
  }
17238
17372
  else {
17239
17373
  if (this.options.isVerbose) {
@@ -17252,7 +17386,16 @@
17252
17386
  : ''),
17253
17387
  },
17254
17388
  };
17255
- underlyingLlmResult = await this.options.llmTools.callChatModel(modifiedChatPrompt);
17389
+ if (this.options.llmTools.callChatModelStream) {
17390
+ underlyingLlmResult = await this.options.llmTools.callChatModelStream(modifiedChatPrompt, onProgress);
17391
+ }
17392
+ else if (this.options.llmTools.callChatModel) {
17393
+ underlyingLlmResult = await this.options.llmTools.callChatModel(modifiedChatPrompt);
17394
+ onProgress(underlyingLlmResult);
17395
+ }
17396
+ else {
17397
+ throw new Error('Underlying LLM execution tools do not support chat model calls');
17398
+ }
17256
17399
  }
17257
17400
  let content = underlyingLlmResult.content;
17258
17401
  // Note: Cleanup the AI artifacts from the content
@@ -17279,11 +17422,12 @@
17279
17422
  /**
17280
17423
  * Represents one AI Agent
17281
17424
  *
17282
- * !!! Note: [🦖] There are several different things in Promptbook:
17425
+ * Note: [🦖] There are several different things in Promptbook:
17283
17426
  * - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
17284
17427
  * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
17285
17428
  * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
17286
17429
  * - `OpenAiAssistantExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities, recommended for usage in `Agent` or `AgentLlmExecutionTools`
17430
+ * - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
17287
17431
  *
17288
17432
  * @public exported from `@promptbook/core`
17289
17433
  */
@@ -17313,31 +17457,40 @@
17313
17457
  super({
17314
17458
  isVerbose: options.isVerbose,
17315
17459
  llmTools: getSingleLlmExecutionTools(options.executionTools.llm),
17316
- agentSource: agentSource.value, // <- TODO: !!!! Allow to pass BehaviorSubject<string_book> OR refresh llmExecutionTools.callChat on agentSource change
17460
+ agentSource: agentSource.value, // <- TODO: [🐱‍🚀] Allow to pass BehaviorSubject<string_book> OR refresh llmExecutionTools.callChat on agentSource change
17317
17461
  });
17318
17462
  this._agentName = undefined;
17319
17463
  /**
17320
17464
  * Description of the agent
17321
17465
  */
17322
17466
  this.personaDescription = null;
17467
+ /**
17468
+ * The initial message shown to the user when the chat starts
17469
+ */
17470
+ this.initialMessage = null;
17471
+ /**
17472
+ * Links found in the agent source
17473
+ */
17474
+ this.links = [];
17323
17475
  /**
17324
17476
  * Metadata like image or color
17325
17477
  */
17326
17478
  this.meta = {};
17327
- // TODO: !!!!! Add `Agent` simple "mocked" learning by appending to agent source
17328
- // TODO: !!!!! Add `Agent` learning by promptbookAgent
17479
+ // TODO: [🐱‍🚀] Add `Agent` simple "mocked" learning by appending to agent source
17480
+ // TODO: [🐱‍🚀] Add `Agent` learning by promptbookAgent
17329
17481
  this.agentSource = agentSource;
17330
17482
  this.agentSource.subscribe((source) => {
17331
- const { agentName, personaDescription, meta } = parseAgentSource(source);
17483
+ const { agentName, personaDescription, initialMessage, links, meta } = parseAgentSource(source);
17332
17484
  this._agentName = agentName;
17333
17485
  this.personaDescription = personaDescription;
17486
+ this.initialMessage = initialMessage;
17487
+ this.links = links;
17334
17488
  this.meta = { ...this.meta, ...meta };
17335
17489
  });
17336
17490
  }
17337
17491
  }
17338
17492
  /**
17339
17493
  * TODO: [🧠][😰]Agent is not working with the parameters, should it be?
17340
- * TODO: !!! Agent on remote server
17341
17494
  */
17342
17495
 
17343
17496
  /**
@@ -17403,24 +17556,24 @@
17403
17556
  /**
17404
17557
  * Represents one AI Agent
17405
17558
  *
17406
- * !!!!!! Note: [🦖] There are several different things in Promptbook:
17559
+ * Note: [🦖] There are several different things in Promptbook:
17407
17560
  * - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
17408
- * !!!!!! `RemoteAgent`
17409
17561
  * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
17410
17562
  * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
17411
17563
  * - `OpenAiAssistantExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities, recommended for usage in `Agent` or `AgentLlmExecutionTools`
17564
+ * - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
17412
17565
  *
17413
17566
  * @public exported from `@promptbook/core`
17414
17567
  */
17415
17568
  class RemoteAgent extends Agent {
17416
17569
  static async connect(options) {
17417
- console.log('!!!!!', `${options.agentUrl}/api/book`);
17570
+ console.log('[🐱‍🚀]', `${options.agentUrl}/api/book`);
17418
17571
  const bookResponse = await fetch(`${options.agentUrl}/api/book`);
17419
- // <- TODO: !!!! What about closed-source agents?
17420
- // <- TODO: !!!! Maybe use promptbookFetch
17572
+ // <- TODO: [🐱‍🚀] What about closed-source agents?
17573
+ // <- TODO: [🐱‍🚀] Maybe use promptbookFetch
17421
17574
  const agentSourceValue = (await bookResponse.text());
17422
17575
  const agentSource = new rxjs.BehaviorSubject(agentSourceValue);
17423
- // <- TODO: !!!! Support updating and self-updating
17576
+ // <- TODO: [🐱‍🚀] Support updating and self-updating
17424
17577
  return new RemoteAgent({
17425
17578
  ...options,
17426
17579
  executionTools: {
@@ -17447,13 +17600,29 @@
17447
17600
  * Calls the agent on agents remote server
17448
17601
  */
17449
17602
  async callChatModel(prompt) {
17603
+ return this.callChatModelStream(prompt, () => { });
17604
+ }
17605
+ /**
17606
+ * Calls the agent on agents remote server with streaming
17607
+ */
17608
+ async callChatModelStream(prompt, onProgress) {
17450
17609
  // Ensure we're working with a chat prompt
17451
17610
  if (prompt.modelRequirements.modelVariant !== 'CHAT') {
17452
17611
  throw new Error('Agents only supports chat prompts');
17453
17612
  }
17454
- const bookResponse = await fetch(`${this.agentUrl}/api/chat?message=${encodeURIComponent(prompt.content)}`);
17455
- // <- TODO: !!!! What about closed-source agents?
17456
- // <- TODO: !!!! Maybe use promptbookFetch
17613
+ const chatPrompt = prompt;
17614
+ const bookResponse = await fetch(`${this.agentUrl}/api/chat`, {
17615
+ method: 'POST',
17616
+ headers: {
17617
+ 'Content-Type': 'application/json',
17618
+ },
17619
+ body: JSON.stringify({
17620
+ message: prompt.content,
17621
+ thread: chatPrompt.thread,
17622
+ }),
17623
+ });
17624
+ // <- TODO: [🐱‍🚀] What about closed-source agents?
17625
+ // <- TODO: [🐱‍🚀] Maybe use promptbookFetch
17457
17626
  let content = '';
17458
17627
  if (!bookResponse.body) {
17459
17628
  content = await bookResponse.text();
@@ -17472,16 +17641,37 @@
17472
17641
  const textChunk = decoder.decode(value, { stream: true });
17473
17642
  // console.debug('RemoteAgent chunk:', textChunk);
17474
17643
  content += textChunk;
17644
+ onProgress({
17645
+ content,
17646
+ modelName: this.modelName,
17647
+ timing: {},
17648
+ usage: {},
17649
+ rawPromptContent: {},
17650
+ rawRequest: {},
17651
+ rawResponse: {},
17652
+ });
17475
17653
  }
17476
17654
  }
17477
17655
  // Flush any remaining decoder internal state
17478
- content += decoder.decode();
17656
+ const lastChunk = decoder.decode();
17657
+ if (lastChunk) {
17658
+ content += lastChunk;
17659
+ onProgress({
17660
+ content: lastChunk,
17661
+ modelName: this.modelName,
17662
+ timing: {},
17663
+ usage: {},
17664
+ rawPromptContent: {},
17665
+ rawRequest: {},
17666
+ rawResponse: {},
17667
+ });
17668
+ }
17479
17669
  }
17480
17670
  finally {
17481
17671
  reader.releaseLock();
17482
17672
  }
17483
17673
  }
17484
- // <- TODO: !!!! Transfer metadata
17674
+ // <- TODO: [🐱‍🚀] Transfer metadata
17485
17675
  const agentResult = {
17486
17676
  content,
17487
17677
  modelName: this.modelName,
@@ -17490,7 +17680,7 @@
17490
17680
  rawPromptContent: {},
17491
17681
  rawRequest: {},
17492
17682
  rawResponse: {},
17493
- // <- TODO: !!!! Transfer and proxy the metadata
17683
+ // <- TODO: [🐱‍🚀] Transfer and proxy the metadata
17494
17684
  };
17495
17685
  return agentResult;
17496
17686
  }
@@ -18777,7 +18967,7 @@
18777
18967
  const agentSource = validateBook(spaceTrim__default["default"]((block) => `
18778
18968
  ${agentName}
18779
18969
 
18780
- META COLOR ${color || '#3498db' /* <- TODO: [🧠] !!!! Best default color */}
18970
+ META COLOR ${color || '#3498db' /* <- TODO: [🧠] [🐱‍🚀] Best default color */}
18781
18971
  PERSONA ${block(personaDescription)}
18782
18972
  `));
18783
18973
  return agentSource;