@promptbook/node 0.89.0-9 → 0.92.0-10

This diff shows the content of publicly released package versions as published to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
Files changed (56)
  1. package/README.md +9 -7
  2. package/esm/index.es.js +320 -97
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/servers.d.ts +40 -0
  5. package/esm/typings/src/_packages/core.index.d.ts +14 -4
  6. package/esm/typings/src/_packages/deepseek.index.d.ts +2 -0
  7. package/esm/typings/src/_packages/google.index.d.ts +2 -0
  8. package/esm/typings/src/_packages/types.index.d.ts +18 -0
  9. package/esm/typings/src/_packages/utils.index.d.ts +6 -0
  10. package/esm/typings/src/cli/cli-commands/login.d.ts +0 -1
  11. package/esm/typings/src/cli/common/$provideLlmToolsForCli.d.ts +16 -3
  12. package/esm/typings/src/cli/test/ptbk.d.ts +1 -1
  13. package/esm/typings/src/commands/EXPECT/expectCommandParser.d.ts +2 -0
  14. package/esm/typings/src/config.d.ts +10 -19
  15. package/esm/typings/src/conversion/archive/loadArchive.d.ts +2 -2
  16. package/esm/typings/src/errors/0-index.d.ts +7 -4
  17. package/esm/typings/src/errors/PipelineExecutionError.d.ts +1 -1
  18. package/esm/typings/src/errors/WrappedError.d.ts +10 -0
  19. package/esm/typings/src/errors/assertsError.d.ts +11 -0
  20. package/esm/typings/src/execution/CommonToolsOptions.d.ts +4 -0
  21. package/esm/typings/src/execution/PromptbookFetch.d.ts +1 -1
  22. package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +12 -0
  23. package/esm/typings/src/execution/createPipelineExecutor/getReservedParametersForTask.d.ts +5 -0
  24. package/esm/typings/src/formats/csv/utils/csvParse.d.ts +12 -0
  25. package/esm/typings/src/formats/csv/utils/isValidCsvString.d.ts +9 -0
  26. package/esm/typings/src/formats/csv/utils/isValidCsvString.test.d.ts +1 -0
  27. package/esm/typings/src/formats/json/utils/isValidJsonString.d.ts +3 -0
  28. package/esm/typings/src/formats/json/utils/jsonParse.d.ts +11 -0
  29. package/esm/typings/src/formats/xml/utils/isValidXmlString.d.ts +9 -0
  30. package/esm/typings/src/formats/xml/utils/isValidXmlString.test.d.ts +1 -0
  31. package/esm/typings/src/llm-providers/_common/filterModels.d.ts +15 -0
  32. package/esm/typings/src/llm-providers/_common/register/{$provideEnvFilepath.d.ts → $provideEnvFilename.d.ts} +2 -2
  33. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsConfigurationFromEnv.d.ts +1 -1
  34. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForTestingAndScriptsAndPlayground.d.ts +1 -1
  35. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForWizzardOrCli.d.ts +11 -2
  36. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsFromEnv.d.ts +1 -1
  37. package/esm/typings/src/llm-providers/_common/register/LlmToolsMetadata.d.ts +43 -0
  38. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +4 -0
  39. package/esm/typings/src/llm-providers/deepseek/deepseek-models.d.ts +23 -0
  40. package/esm/typings/src/llm-providers/google/google-models.d.ts +23 -0
  41. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +4 -0
  42. package/esm/typings/src/personas/preparePersona.d.ts +1 -1
  43. package/esm/typings/src/pipeline/PipelineJson/PersonaJson.d.ts +4 -2
  44. package/esm/typings/src/remote-server/openapi-types.d.ts +626 -0
  45. package/esm/typings/src/remote-server/openapi.d.ts +581 -0
  46. package/esm/typings/src/remote-server/socket-types/_subtypes/Identification.d.ts +7 -1
  47. package/esm/typings/src/remote-server/socket-types/_subtypes/identificationToPromptbookToken.d.ts +11 -0
  48. package/esm/typings/src/remote-server/socket-types/_subtypes/promptbookTokenToIdentification.d.ts +10 -0
  49. package/esm/typings/src/remote-server/startRemoteServer.d.ts +1 -2
  50. package/esm/typings/src/remote-server/types/RemoteServerOptions.d.ts +15 -9
  51. package/esm/typings/src/storage/env-storage/$EnvStorage.d.ts +40 -0
  52. package/esm/typings/src/types/typeAliases.d.ts +26 -0
  53. package/package.json +9 -5
  54. package/umd/index.umd.js +320 -97
  55. package/umd/index.umd.js.map +1 -1
  56. package/esm/typings/src/cli/test/ptbk2.d.ts +0 -5
package/umd/index.umd.js CHANGED
@@ -46,7 +46,7 @@
46
46
  * @generated
47
47
  * @see https://github.com/webgptorg/promptbook
48
48
  */
49
- const PROMPTBOOK_ENGINE_VERSION = '0.89.0-9';
49
+ const PROMPTBOOK_ENGINE_VERSION = '0.92.0-10';
50
50
  /**
51
51
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
52
52
  * Note: [๐Ÿ’ž] Ignore a discrepancy between file name and entity name
@@ -92,6 +92,7 @@
92
92
  * @public exported from `@promptbook/core`
93
93
  */
94
94
  const ADMIN_GITHUB_NAME = 'hejny';
95
+ // <- TODO: [๐ŸŠ] Pick the best claim
95
96
  /**
96
97
  * When the title is not provided, the default title is used
97
98
  *
@@ -136,6 +137,7 @@
136
137
  infinity: '(infinity; ∞)',
137
138
  negativeInfinity: '(negative infinity; -∞)',
138
139
  unserializable: '(unserializable value)',
140
+ circular: '(circular JSON)',
139
141
  };
140
142
  /**
141
143
  * Small number limit
@@ -175,7 +177,7 @@
175
177
  */
176
178
  const DEFAULT_MAX_EXECUTION_ATTEMPTS = 10; // <- TODO: [๐Ÿคนโ€โ™‚๏ธ]
177
179
  // <- TODO: [๐Ÿ•] Make also `BOOKS_DIRNAME_ALTERNATIVES`
178
- // TODO: !!!!!! Just .promptbook dir, hardocode others
180
+ // TODO: Just `.promptbook` in config, hardcode subfolders like `download-cache` or `execution-cache`
179
181
  /**
180
182
  * Where to store the temporary downloads
181
183
  *
@@ -313,6 +315,45 @@
313
315
  }
314
316
  }
315
317
 
318
+ /**
319
+ * Converts a JavaScript Object Notation (JSON) string into an object.
320
+ *
321
+ * Note: This is wrapper around `JSON.parse()` with better error and type handling
322
+ *
323
+ * @public exported from `@promptbook/utils`
324
+ */
325
+ function jsonParse(value) {
326
+ if (value === undefined) {
327
+ throw new Error(`Can not parse JSON from undefined value.`);
328
+ }
329
+ else if (typeof value !== 'string') {
330
+ console.error('Can not parse JSON from non-string value.', { text: value });
331
+ throw new Error(spaceTrim__default["default"](`
332
+ Can not parse JSON from non-string value.
333
+
334
+ The value type: ${typeof value}
335
+ See more in console.
336
+ `));
337
+ }
338
+ try {
339
+ return JSON.parse(value);
340
+ }
341
+ catch (error) {
342
+ if (!(error instanceof Error)) {
343
+ throw error;
344
+ }
345
+ throw new Error(spaceTrim__default["default"]((block) => `
346
+ ${block(error.message)}
347
+
348
+ The JSON text:
349
+ ${block(value)}
350
+ `));
351
+ }
352
+ }
353
+ /**
354
+ * TODO: !!!! Use in Promptbook.studio
355
+ */
356
+
316
357
  /**
317
358
  * Orders JSON object by keys
318
359
  *
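The new `jsonParse()` helper is a thin wrapper around `JSON.parse()` that rejects `undefined` and non-string inputs up front and, on a parse failure, rethrows with the offending JSON text quoted in the message. A minimal usage sketch (the inputs are illustrative):

```js
import { jsonParse } from '@promptbook/utils';

const requirements = jsonParse('{"modelName":"gpt-4o","temperature":0.7}');
console.log(requirements.modelName); // 'gpt-4o'

try {
    jsonParse('{"modelName": '); // <- hypothetical truncated JSON
} catch (error) {
    // Unlike bare JSON.parse, the error message also includes the JSON text that failed to parse
    console.error(error.message);
}
```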
@@ -355,6 +396,54 @@
355
396
  * TODO: [๐Ÿง ] Is there a way how to meaningfully test this utility
356
397
  */
357
398
 
399
+ /**
400
+ * This error type indicates that somewhere in the code non-Error object was thrown and it was wrapped into the `WrappedError`
401
+ *
402
+ * @public exported from `@promptbook/core`
403
+ */
404
+ class WrappedError extends Error {
405
+ constructor(whatWasThrown) {
406
+ const tag = `[๐Ÿคฎ]`;
407
+ console.error(tag, whatWasThrown);
408
+ super(spaceTrim.spaceTrim(`
409
+ Non-Error object was thrown
410
+
411
+ Note: Look for ${tag} in the console for more details
412
+ Please report issue on ${ADMIN_EMAIL}
413
+ `));
414
+ this.name = 'WrappedError';
415
+ Object.setPrototypeOf(this, WrappedError.prototype);
416
+ }
417
+ }
418
+
419
+ /**
420
+ * Helper used in catch blocks to assert that the error is an instance of `Error`
421
+ *
422
+ * @param whatWasThrown Any object that was thrown
423
+ * @returns Nothing if the error is an instance of `Error`
424
+ * @throws `WrappedError` or `UnexpectedError` if the error is not standard
425
+ *
426
+ * @private within the repository
427
+ */
428
+ function assertsError(whatWasThrown) {
429
+ // Case 1: Handle error which was rethrown as `WrappedError`
430
+ if (whatWasThrown instanceof WrappedError) {
431
+ const wrappedError = whatWasThrown;
432
+ throw wrappedError;
433
+ }
434
+ // Case 2: Handle unexpected errors
435
+ if (whatWasThrown instanceof UnexpectedError) {
436
+ const unexpectedError = whatWasThrown;
437
+ throw unexpectedError;
438
+ }
439
+ // Case 3: Handle standard errors - keep them up to consumer
440
+ if (whatWasThrown instanceof Error) {
441
+ return;
442
+ }
443
+ // Case 4: Handle non-standard errors - wrap them into `WrappedError` and throw
444
+ throw new WrappedError(whatWasThrown);
445
+ }
446
+
358
447
  /**
359
448
  * Checks if the value is [๐Ÿš‰] serializable as JSON
360
449
  * If not, throws an UnexpectedError with a rich error message and tracking
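`assertsError()` narrows a caught value to `Error`: it rethrows `WrappedError` and `UnexpectedError` unchanged, returns for any other `Error` instance, and wraps non-Error throws (strings, plain objects) into a new `WrappedError`. A sketch of the catch-block pattern it replaces throughout the bundle:

```js
try {
    JSON.parse('{oops'); // throws a SyntaxError, i.e. an ordinary Error
} catch (error) {
    assertsError(error);
    // Past this point `error` is known to be an Error, so .message and .stack are safe to use
    console.error(error.message);
}

// A non-Error throw, e.g. `throw { reason: 'failed' }`, would instead be
// logged to the console and rethrown by assertsError as a WrappedError.
```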
@@ -446,9 +535,7 @@
446
535
  JSON.stringify(value); // <- TODO: [0]
447
536
  }
448
537
  catch (error) {
449
- if (!(error instanceof Error)) {
450
- throw error;
451
- }
538
+ assertsError(error);
452
539
  throw new UnexpectedError(spaceTrim__default["default"]((block) => `
453
540
  \`${name}\` is not serializable
454
541
 
@@ -1056,7 +1143,7 @@
1056
1143
  if (!indexFile) {
1057
1144
  throw new UnexpectedError(`Archive does not contain 'index.book.json' file`);
1058
1145
  }
1059
- const collectionJson = JSON.parse(await indexFile.async('text'));
1146
+ const collectionJson = jsonParse(await indexFile.async('text'));
1060
1147
  for (const pipeline of collectionJson) {
1061
1148
  validatePipeline(pipeline);
1062
1149
  }
@@ -1066,7 +1153,7 @@
1066
1153
  * Note: [๐ŸŸข] Code in this file should never be never released in packages that could be imported into browser environment
1067
1154
  */
1068
1155
 
1069
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [๐Ÿ†] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [๐Ÿ†] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModelNames}` List of available model names separated by comma (,)\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n\\`\\`\\`json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"โœ Convert Knowledge-piece to title\" but \"โœ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"โœ Convert Knowledge-piece to title\" but \"โœ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
1156
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [๐Ÿ†] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [๐Ÿ†] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModels",description:"List of available model names together with their descriptions as JSON",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelsRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n```json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n```\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n```json\n{availableModels}\n```\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. 
By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelsRequirements",format:"JSON",dependentParameterNames:["availableModels","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModels}` List of available model names together with their descriptions as JSON\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelsRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n\\`\\`\\`json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n\\`\\`\\`json\n{availableModels}\n\\`\\`\\`\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelsRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"โœ Convert Knowledge-piece to title\" but \"โœ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"โœ Convert Knowledge-piece to title\" but \"โœ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
1070
1157
 
1071
1158
  /**
1072
1159
  * Checks if value is valid email
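In the bundled `Prepare Persona` pipeline, the comma-separated `{availableModelNames}` input is replaced by `{availableModels}`, which carries the model list as JSON (name plus description), and the output parameter is renamed from `{modelRequirements}` to `{modelsRequirements}` and becomes an array sorted best-fitting first. Roughly, the new input looks like this (the descriptions are hypothetical):

```js
// Hypothetical value passed as the {availableModels} parameter (serialized as JSON)
const availableModels = [
    { modelName: 'gpt-4o', modelDescription: 'Flagship multimodal chat model' },
    { modelName: 'claude-3-5-sonnet', modelDescription: 'General-purpose chat model with long context' },
];
```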
@@ -1135,6 +1222,9 @@
1135
1222
  /**
1136
1223
  * Function isValidJsonString will tell you if the string is valid JSON or not
1137
1224
  *
1225
+ * @param value The string to check
1226
+ * @returns True if the string is a valid JSON string, false otherwise
1227
+ *
1138
1228
  * @public exported from `@promptbook/utils`
1139
1229
  */
1140
1230
  function isValidJsonString(value /* <- [๐Ÿ‘จโ€โš–๏ธ] */) {
@@ -1143,9 +1233,7 @@
1143
1233
  return true;
1144
1234
  }
1145
1235
  catch (error) {
1146
- if (!(error instanceof Error)) {
1147
- throw error;
1148
- }
1236
+ assertsError(error);
1149
1237
  if (error.message.includes('Unexpected token')) {
1150
1238
  return false;
1151
1239
  }
@@ -1416,7 +1504,7 @@
1416
1504
  */
1417
1505
  function unpreparePipeline(pipeline) {
1418
1506
  let { personas, knowledgeSources, tasks } = pipeline;
1419
- personas = personas.map((persona) => ({ ...persona, modelRequirements: undefined, preparationIds: undefined }));
1507
+ personas = personas.map((persona) => ({ ...persona, modelsRequirements: undefined, preparationIds: undefined }));
1420
1508
  knowledgeSources = knowledgeSources.map((knowledgeSource) => ({ ...knowledgeSource, preparationIds: undefined }));
1421
1509
  tasks = tasks.map((task) => {
1422
1510
  let { dependentParameterNames } = task;
@@ -1592,7 +1680,7 @@
1592
1680
  if (pipeline.title === undefined || pipeline.title === '' || pipeline.title === DEFAULT_BOOK_TITLE) {
1593
1681
  return false;
1594
1682
  }
1595
- if (!pipeline.personas.every((persona) => persona.modelRequirements !== undefined)) {
1683
+ if (!pipeline.personas.every((persona) => persona.modelsRequirements !== undefined)) {
1596
1684
  return false;
1597
1685
  }
1598
1686
  if (!pipeline.knowledgeSources.every((knowledgeSource) => knowledgeSource.preparationIds !== undefined)) {
@@ -1649,7 +1737,7 @@
1649
1737
  const newObject = { ...object };
1650
1738
  for (const [key, value] of Object.entries(object)) {
1651
1739
  if (typeof value === 'string' && isValidJsonString(value)) {
1652
- newObject[key] = JSON.parse(value);
1740
+ newObject[key] = jsonParse(value);
1653
1741
  }
1654
1742
  else {
1655
1743
  newObject[key] = jsonStringsToJsons(value);
@@ -1677,7 +1765,7 @@
1677
1765
  }
1678
1766
  }
1679
1767
  /**
1680
- * TODO: !!!!!! Add id to all errors
1768
+ * TODO: [๐Ÿง ][๐ŸŒ‚] Add id to all errors
1681
1769
  */
1682
1770
 
1683
1771
  /**
@@ -1847,7 +1935,10 @@
1847
1935
  PipelineExecutionError,
1848
1936
  PipelineLogicError,
1849
1937
  PipelineUrlError,
1938
+ AuthenticationError,
1939
+ PromptbookFetchError,
1850
1940
  UnexpectedError,
1941
+ WrappedError,
1851
1942
  // TODO: [๐Ÿช‘]> VersionMismatchError,
1852
1943
  };
1853
1944
  /**
@@ -1864,8 +1955,6 @@
1864
1955
  TypeError,
1865
1956
  URIError,
1866
1957
  AggregateError,
1867
- AuthenticationError,
1868
- PromptbookFetchError,
1869
1958
  /*
1870
1959
  Note: Not widely supported
1871
1960
  > InternalError,
@@ -1988,8 +2077,8 @@
1988
2077
  updatedAt = new Date();
1989
2078
  errors.push(...executionResult.errors);
1990
2079
  warnings.push(...executionResult.warnings);
1991
- // <- TODO: !!! Only unique errors and warnings should be added (or filtered)
1992
- // TODO: [๐Ÿง ] !!! errors, warning, isSuccessful are redundant both in `ExecutionTask` and `ExecutionTask.currentValue`
2080
+ // <- TODO: [๐ŸŒ‚] Only unique errors and warnings should be added (or filtered)
2081
+ // TODO: [๐Ÿง ] !! errors, warning, isSuccessful are redundant both in `ExecutionTask` and `ExecutionTask.currentValue`
1993
2082
  // Also maybe move `ExecutionTask.currentValue.usage` -> `ExecutionTask.usage`
1994
2083
  // And delete `ExecutionTask.currentValue.preparedPipeline`
1995
2084
  assertsTaskSuccessful(executionResult);
@@ -1999,6 +2088,7 @@
1999
2088
  partialResultSubject.next(executionResult);
2000
2089
  }
2001
2090
  catch (error) {
2091
+ assertsError(error);
2002
2092
  status = 'ERROR';
2003
2093
  errors.push(error);
2004
2094
  partialResultSubject.error(error);
@@ -2144,13 +2234,19 @@
2144
2234
  return value.toISOString();
2145
2235
  }
2146
2236
  else {
2147
- return JSON.stringify(value);
2237
+ try {
2238
+ return JSON.stringify(value);
2239
+ }
2240
+ catch (error) {
2241
+ if (error instanceof TypeError && error.message.includes('circular structure')) {
2242
+ return VALUE_STRINGS.circular;
2243
+ }
2244
+ throw error;
2245
+ }
2148
2246
  }
2149
2247
  }
2150
2248
  catch (error) {
2151
- if (!(error instanceof Error)) {
2152
- throw error;
2153
- }
2249
+ assertsError(error);
2154
2250
  console.error(error);
2155
2251
  return VALUE_STRINGS.unserializable;
2156
2252
  }
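Together with the new `VALUE_STRINGS.circular` entry added above, this lets the value-stringifying helper fall back to `'(circular JSON)'` when `JSON.stringify()` throws on a circular reference, instead of reporting the generic `'(unserializable value)'`. A small sketch of the condition being detected (the object is illustrative):

```js
const node = { name: 'root' };
node.self = node; // <- circular reference

try {
    JSON.stringify(node);
} catch (error) {
    // In V8 the message reads "Converting circular structure to JSON ...",
    // which is what the `error.message.includes('circular structure')` check matches.
    console.log(error instanceof TypeError); // true
}
```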
@@ -2322,9 +2418,7 @@
2322
2418
  }
2323
2419
  }
2324
2420
  catch (error) {
2325
- if (!(error instanceof Error)) {
2326
- throw error;
2327
- }
2421
+ assertsError(error);
2328
2422
  throw new ParseError(spaceTrim.spaceTrim((block) => `
2329
2423
  Can not extract variables from the script
2330
2424
  ${block(error.stack || error.message)}
@@ -2443,6 +2537,46 @@
2443
2537
  // encoding: 'utf-8',
2444
2538
  });
2445
2539
 
2540
+ /**
2541
+ * Function to check if a string is valid CSV
2542
+ *
2543
+ * @param value The string to check
2544
+ * @returns True if the string is a valid CSV string, false otherwise
2545
+ *
2546
+ * @public exported from `@promptbook/utils`
2547
+ */
2548
+ function isValidCsvString(value) {
2549
+ try {
2550
+ // A simple check for CSV format: at least one comma and no invalid characters
2551
+ if (value.includes(',') && /^[\w\s,"']+$/.test(value)) {
2552
+ return true;
2553
+ }
2554
+ return false;
2555
+ }
2556
+ catch (error) {
2557
+ assertsError(error);
2558
+ return false;
2559
+ }
2560
+ }
2561
+
2562
+ /**
2563
+ * Converts a CSV string into an object
2564
+ *
2565
+ * Note: This is wrapper around `papaparse.parse()` with better autohealing
2566
+ *
2567
+ * @private - for now until `@promptbook/csv` is released
2568
+ */
2569
+ function csvParse(value /* <- TODO: string_csv */, settings, schema /* <- TODO: Make CSV Schemas */) {
2570
+ settings = { ...settings, ...MANDATORY_CSV_SETTINGS };
2571
+ // Note: Autoheal invalid '\n' characters
2572
+ if (settings.newline && !settings.newline.includes('\r') && value.includes('\r')) {
2573
+ console.warn('CSV string contains carriage return characters, but in the CSV settings the `newline` setting does not include them. Autohealing the CSV string.');
2574
+ value = value.replace(/\r\n/g, '\n').replace(/\r/g, '\n');
2575
+ }
2576
+ const csv = papaparse.parse(value, settings);
2577
+ return csv;
2578
+ }
2579
+
2446
2580
  /**
2447
2581
  * Definition for CSV spreadsheet
2448
2582
  *
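`isValidCsvString()` is a deliberately simple heuristic (at least one comma and a restricted character set), while the private `csvParse()` wraps `papaparse.parse()` and normalizes `\r\n` / `\r` line endings when the configured `newline` does not account for them. A quick sketch of the public helper (inputs are illustrative):

```js
import { isValidCsvString } from '@promptbook/utils';

isValidCsvString('name,role\nAlice,admin'); // true
isValidCsvString('a sentence without any commas'); // false - the heuristic requires a comma

// csvParse (still private to the package) "autoheals" CRLF input
// before handing it to papaparse, roughly:
const healed = 'name,role\r\nAlice,admin\r\n'.replace(/\r\n/g, '\n').replace(/\r/g, '\n');
// -> 'name,role\nAlice,admin\n'
```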
@@ -2453,7 +2587,7 @@
2453
2587
  formatName: 'CSV',
2454
2588
  aliases: ['SPREADSHEET', 'TABLE'],
2455
2589
  isValid(value, settings, schema) {
2456
- return true;
2590
+ return isValidCsvString(value);
2457
2591
  },
2458
2592
  canBeValid(partialValue, settings, schema) {
2459
2593
  return true;
@@ -2465,8 +2599,7 @@
2465
2599
  {
2466
2600
  subvalueName: 'ROW',
2467
2601
  async mapValues(value, outputParameterName, settings, mapCallback) {
2468
- // TODO: [๐Ÿ‘จ๐Ÿพโ€๐Ÿคโ€๐Ÿ‘จ๐Ÿผ] DRY csv parsing
2469
- const csv = papaparse.parse(value, { ...settings, ...MANDATORY_CSV_SETTINGS });
2602
+ const csv = csvParse(value, settings);
2470
2603
  if (csv.errors.length !== 0) {
2471
2604
  throw new CsvFormatError(spaceTrim__default["default"]((block) => `
2472
2605
  CSV parsing error
@@ -2496,8 +2629,7 @@
2496
2629
  {
2497
2630
  subvalueName: 'CELL',
2498
2631
  async mapValues(value, outputParameterName, settings, mapCallback) {
2499
- // TODO: [๐Ÿ‘จ๐Ÿพโ€๐Ÿคโ€๐Ÿ‘จ๐Ÿผ] DRY csv parsing
2500
- const csv = papaparse.parse(value, { ...settings, ...MANDATORY_CSV_SETTINGS });
2632
+ const csv = csvParse(value, settings);
2501
2633
  if (csv.errors.length !== 0) {
2502
2634
  throw new CsvFormatError(spaceTrim__default["default"]((block) => `
2503
2635
  CSV parsing error
@@ -2607,6 +2739,30 @@
2607
2739
  * TODO: [๐Ÿข] Allow to expect something inside each item of list and other formats
2608
2740
  */
2609
2741
 
2742
+ /**
2743
+ * Function to check if a string is valid XML
2744
+ *
2745
+ * @param value
2746
+ * @returns True if the string is a valid XML string, false otherwise
2747
+ *
2748
+ * @public exported from `@promptbook/utils`
2749
+ */
2750
+ function isValidXmlString(value) {
2751
+ try {
2752
+ const parser = new DOMParser();
2753
+ const parsedDocument = parser.parseFromString(value, 'application/xml');
2754
+ const parserError = parsedDocument.getElementsByTagName('parsererror');
2755
+ if (parserError.length > 0) {
2756
+ return false;
2757
+ }
2758
+ return true;
2759
+ }
2760
+ catch (error) {
2761
+ assertsError(error);
2762
+ return false;
2763
+ }
2764
+ }
2765
+
2610
2766
  /**
2611
2767
  * Definition for XML format
2612
2768
  *
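`isValidXmlString()` leans on the `DOMParser` Web API: parsing with the `'application/xml'` MIME type does not throw on malformed input but inserts a `<parsererror>` element, which is what the helper checks for. A short sketch; note that `DOMParser` is a browser global, so in Node this would presumably need a DOM implementation such as `jsdom` or `@xmldom/xmldom`:

```js
import { isValidXmlString } from '@promptbook/utils';

isValidXmlString('<knowledge><piece>Hello</piece></knowledge>'); // true
isValidXmlString('<knowledge><piece>Unclosed'); // false - the parsed document contains <parsererror>
```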
@@ -2616,7 +2772,7 @@
2616
2772
  formatName: 'XML',
2617
2773
  mimeType: 'application/xml',
2618
2774
  isValid(value, settings, schema) {
2619
- return true;
2775
+ return isValidXmlString(value);
2620
2776
  },
2621
2777
  canBeValid(partialValue, settings, schema) {
2622
2778
  return true;
@@ -2813,14 +2969,15 @@
2813
2969
  }
2814
2970
  }
2815
2971
  catch (error) {
2816
- if (!(error instanceof Error) || error instanceof UnexpectedError) {
2972
+ assertsError(error);
2973
+ if (error instanceof UnexpectedError) {
2817
2974
  throw error;
2818
2975
  }
2819
2976
  errors.push({ llmExecutionTools, error });
2820
2977
  }
2821
2978
  }
2822
2979
  if (errors.length === 1) {
2823
- throw errors[0];
2980
+ throw errors[0].error;
2824
2981
  }
2825
2982
  else if (errors.length > 1) {
2826
2983
  throw new PipelineExecutionError(
@@ -3677,9 +3834,7 @@
3677
3834
  break scripts;
3678
3835
  }
3679
3836
  catch (error) {
3680
- if (!(error instanceof Error)) {
3681
- throw error;
3682
- }
3837
+ assertsError(error);
3683
3838
  if (error instanceof UnexpectedError) {
3684
3839
  throw error;
3685
3840
  }
@@ -3749,9 +3904,7 @@
3749
3904
  break scripts;
3750
3905
  }
3751
3906
  catch (error) {
3752
- if (!(error instanceof Error)) {
3753
- throw error;
3754
- }
3907
+ assertsError(error);
3755
3908
  if (error instanceof UnexpectedError) {
3756
3909
  throw error;
3757
3910
  }
@@ -3994,13 +4147,79 @@
3994
4147
  /**
3995
4148
  * @@@
3996
4149
  *
4150
+ * Here is the place where RAG (retrieval-augmented generation) happens
4151
+ *
3997
4152
  * @private internal utility of `createPipelineExecutor`
3998
4153
  */
3999
4154
  async function getKnowledgeForTask(options) {
4000
- const { preparedPipeline, task } = options;
4001
- return preparedPipeline.knowledgePieces.map(({ content }) => `- ${content}`).join('\n');
4155
+ const { tools, preparedPipeline, task } = options;
4156
+ const firstKnowlegePiece = preparedPipeline.knowledgePieces[0];
4157
+ const firstKnowlegeIndex = firstKnowlegePiece === null || firstKnowlegePiece === void 0 ? void 0 : firstKnowlegePiece.index[0];
4158
+ // <- TODO: Do not use just first knowledge piece and first index to determine embedding model, use also keyword search
4159
+ if (firstKnowlegePiece === undefined || firstKnowlegeIndex === undefined) {
4160
+ return 'No knowledge pieces found';
4161
+ }
4162
+ // TODO: [๐Ÿš] Make arrayable LLMs -> single LLM DRY
4163
+ const _llms = arrayableToArray(tools.llm);
4164
+ const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
4165
+ const taskEmbeddingPrompt = {
4166
+ title: 'Knowledge Search',
4167
+ modelRequirements: {
4168
+ modelVariant: 'EMBEDDING',
4169
+ modelName: firstKnowlegeIndex.modelName,
4170
+ },
4171
+ content: task.content,
4172
+ parameters: {
4173
+ /* !!!!!!!! */
4174
+ },
4175
+ };
4176
+ const taskEmbeddingResult = await llmTools.callEmbeddingModel(taskEmbeddingPrompt);
4177
+ const knowledgePiecesWithRelevance = preparedPipeline.knowledgePieces.map((knowledgePiece) => {
4178
+ const { index } = knowledgePiece;
4179
+ const knowledgePieceIndex = index.find((i) => i.modelName === firstKnowlegeIndex.modelName);
4180
+ // <- TODO: Do not use just first knowledge piece and first index to determine embedding model
4181
+ if (knowledgePieceIndex === undefined) {
4182
+ return {
4183
+ content: knowledgePiece.content,
4184
+ relevance: 0,
4185
+ };
4186
+ }
4187
+ const relevance = computeCosineSimilarity(knowledgePieceIndex.position, taskEmbeddingResult.content);
4188
+ return {
4189
+ content: knowledgePiece.content,
4190
+ relevance,
4191
+ };
4192
+ });
4193
+ const knowledgePiecesSorted = knowledgePiecesWithRelevance.sort((a, b) => a.relevance - b.relevance);
4194
+ const knowledgePiecesLimited = knowledgePiecesSorted.slice(0, 5);
4195
+ console.log('!!! Embedding', {
4196
+ task,
4197
+ taskEmbeddingPrompt,
4198
+ taskEmbeddingResult,
4199
+ firstKnowlegePiece,
4200
+ firstKnowlegeIndex,
4201
+ knowledgePiecesWithRelevance,
4202
+ knowledgePiecesSorted,
4203
+ knowledgePiecesLimited,
4204
+ });
4205
+ return knowledgePiecesLimited.map(({ content }) => `- ${content}`).join('\n');
4002
4206
  // <- TODO: [๐Ÿง ] Some smart aggregation of knowledge pieces, single-line vs multi-line vs mixed
4003
4207
  }
4208
+ // TODO: !!!!!! Annotate + to new file
4209
+ function computeCosineSimilarity(embeddingVector1, embeddingVector2) {
4210
+ if (embeddingVector1.length !== embeddingVector2.length) {
4211
+ throw new TypeError('Embedding vectors must have the same length');
4212
+ }
4213
+ const dotProduct = embeddingVector1.reduce((sum, value, index) => sum + value * embeddingVector2[index], 0);
4214
+ const magnitude1 = Math.sqrt(embeddingVector1.reduce((sum, value) => sum + value * value, 0));
4215
+ const magnitude2 = Math.sqrt(embeddingVector2.reduce((sum, value) => sum + value * value, 0));
4216
+ return 1 - dotProduct / (magnitude1 * magnitude2);
4217
+ }
4218
+ /**
4219
+ * TODO: !!!! Verify if this is working
4220
+ * TODO: [โ™จ] Implement Better - use keyword search
4221
+ * TODO: [โ™จ] Examples of values
4222
+ */
4004
4223
 
4005
4224
  /**
4006
4225
  * @@@
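`getKnowledgeForTask()` now performs the retrieval step of RAG: it embeds the task content with the same embedding model that indexed the first knowledge piece, scores every piece with `computeCosineSimilarity()` (which, as written, returns `1 - cos`, i.e. a cosine distance, so the ascending sort ranks the closest pieces first), and keeps the top 5. The scoring, on toy vectors:

```js
// Purely illustrative 3-dimensional embeddings
const taskEmbedding = [0.1, 0.9, 0.0];
const pieceEmbedding = [0.2, 0.8, 0.1];

const dot = taskEmbedding.reduce((sum, value, i) => sum + value * pieceEmbedding[i], 0);
const magnitude = (vector) => Math.sqrt(vector.reduce((sum, value) => sum + value * value, 0));

const cosine = dot / (magnitude(taskEmbedding) * magnitude(pieceEmbedding)); // ≈ 0.98; closer to 1 = more similar
const relevance = 1 - cosine; // what computeCosineSimilarity returns; lower = more relevant

// Sorting ascending by this value and slicing the first 5 keeps the most similar knowledge pieces.
```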
@@ -4008,9 +4227,9 @@
4008
4227
  * @private internal utility of `createPipelineExecutor`
4009
4228
  */
4010
4229
  async function getReservedParametersForTask(options) {
4011
- const { preparedPipeline, task, pipelineIdentification } = options;
4230
+ const { tools, preparedPipeline, task, pipelineIdentification } = options;
4012
4231
  const context = await getContextForTask(); // <- [๐Ÿ]
4013
- const knowledge = await getKnowledgeForTask({ preparedPipeline, task });
4232
+ const knowledge = await getKnowledgeForTask({ tools, preparedPipeline, task });
4014
4233
  const examples = await getExamplesForTask();
4015
4234
  const currentDate = new Date().toISOString(); // <- TODO: [๐Ÿง ][๐Ÿ’ฉ] Better
4016
4235
  const modelName = RESERVED_PARAMETER_MISSING_VALUE;
@@ -4072,6 +4291,7 @@
4072
4291
  }
4073
4292
  const definedParameters = Object.freeze({
4074
4293
  ...(await getReservedParametersForTask({
4294
+ tools,
4075
4295
  preparedPipeline,
4076
4296
  task: currentTask,
4077
4297
  pipelineIdentification,
@@ -4372,9 +4592,7 @@
4372
4592
  await Promise.all(resolving);
4373
4593
  }
4374
4594
  catch (error /* <- Note: [3] */) {
4375
- if (!(error instanceof Error)) {
4376
- throw error;
4377
- }
4595
+ assertsError(error);
4378
4596
  // Note: No need to rethrow UnexpectedError
4379
4597
  // if (error instanceof UnexpectedError) {
4380
4598
  // Note: Count usage, [๐Ÿง ] Maybe put to separate function executionReportJsonToUsage + DRY [๐Ÿคนโ€โ™‚๏ธ]
@@ -4630,27 +4848,48 @@
4630
4848
  pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book'),
4631
4849
  tools,
4632
4850
  });
4633
- // TODO: [๐Ÿš] Make arrayable LLMs -> single LLM DRY
4634
4851
  const _llms = arrayableToArray(tools.llm);
4635
4852
  const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
4636
- const availableModels = await llmTools.listModels();
4637
- const availableModelNames = availableModels
4853
+ const availableModels = (await llmTools.listModels())
4638
4854
  .filter(({ modelVariant }) => modelVariant === 'CHAT')
4639
- .map(({ modelName }) => modelName)
4640
- .join(',');
4641
- const result = await preparePersonaExecutor({ availableModelNames, personaDescription }).asPromise();
4855
+ .map(({ modelName, modelDescription }) => ({
4856
+ modelName,
4857
+ modelDescription,
4858
+ // <- Note: `modelTitle` and `modelVariant` is not relevant for this task
4859
+ }));
4860
+ const result = await preparePersonaExecutor({
4861
+ availableModels /* <- Note: Passing as JSON */,
4862
+ personaDescription,
4863
+ }).asPromise();
4642
4864
  const { outputParameters } = result;
4643
- const { modelRequirements: modelRequirementsRaw } = outputParameters;
4644
- const modelRequirements = JSON.parse(modelRequirementsRaw);
4865
+ const { modelsRequirements: modelsRequirementsJson } = outputParameters;
4866
+ let modelsRequirementsUnchecked = jsonParse(modelsRequirementsJson);
4645
4867
  if (isVerbose) {
4646
- console.info(`PERSONA ${personaDescription}`, modelRequirements);
4868
+ console.info(`PERSONA ${personaDescription}`, modelsRequirementsUnchecked);
4647
4869
  }
4648
- const { modelName, systemMessage, temperature } = modelRequirements;
4649
- return {
4870
+ if (!Array.isArray(modelsRequirementsUnchecked)) {
4871
+ // <- TODO: Book should have syntax and system to enforce shape of JSON
4872
+ modelsRequirementsUnchecked = [modelsRequirementsUnchecked];
4873
+ /*
4874
+ throw new UnexpectedError(
4875
+ spaceTrim(
4876
+ (block) => `
4877
+ Invalid \`modelsRequirements\`:
4878
+
4879
+ \`\`\`json
4880
+ ${block(JSON.stringify(modelsRequirementsUnchecked, null, 4))}
4881
+ \`\`\`
4882
+ `,
4883
+ ),
4884
+ );
4885
+ */
4886
+ }
4887
+ const modelsRequirements = modelsRequirementsUnchecked.map((modelRequirements) => ({
4650
4888
  modelVariant: 'CHAT',
4651
- modelName,
4652
- systemMessage,
4653
- temperature,
4889
+ ...modelRequirements,
4890
+ }));
4891
+ return {
4892
+ modelsRequirements,
4654
4893
  };
4655
4894
  }
4656
4895
  /**
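`preparePersona()` now passes the available chat models to the pipeline as JSON (`modelName` plus `modelDescription`), parses the returned `modelsRequirements` with `jsonParse()`, wraps a lone object into an array if needed, and merges `modelVariant: 'CHAT'` into every entry. The resulting shape is roughly (model names and messages are illustrative):

```js
// Hypothetical return value of preparePersona('A friendly customer-support assistant', tools, options)
const result = {
    modelsRequirements: [
        {
            modelVariant: 'CHAT',
            modelName: 'gpt-4o',
            systemMessage: 'You are a friendly and knowledgeable support assistant.',
            temperature: 0.5,
        },
        // ...further entries, sorted best-fitting first by the LLM
    ],
};
```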
@@ -5084,9 +5323,7 @@
5084
5323
  return await fetch(urlOrRequest, init);
5085
5324
  }
5086
5325
  catch (error) {
5087
- if (!(error instanceof Error)) {
5088
- throw error;
5089
- }
5326
+ assertsError(error);
5090
5327
  let url;
5091
5328
  if (typeof urlOrRequest === 'string') {
5092
5329
  url = urlOrRequest;
@@ -5215,7 +5452,7 @@
5215
5452
  > },
5216
5453
  */
5217
5454
  async asJson() {
5218
- return JSON.parse(await tools.fs.readFile(filename, 'utf-8'));
5455
+ return jsonParse(await tools.fs.readFile(filename, 'utf-8'));
5219
5456
  },
5220
5457
  async asText() {
5221
5458
  return await tools.fs.readFile(filename, 'utf-8');
@@ -5317,9 +5554,7 @@
5317
5554
  knowledgePreparedUnflatten[index] = pieces;
5318
5555
  }
5319
5556
  catch (error) {
5320
- if (!(error instanceof Error)) {
5321
- throw error;
5322
- }
5557
+ assertsError(error);
5323
5558
  console.warn(error);
5324
5559
  // <- TODO: [๐Ÿฎ] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
5325
5560
  }
@@ -5475,14 +5710,14 @@
5475
5710
  // TODO: [๐Ÿ–Œ][๐Ÿง ] Implement some `mapAsync` function
5476
5711
  const preparedPersonas = new Array(personas.length);
5477
5712
  await forEachAsync(personas, { maxParallelCount /* <- TODO: [๐Ÿช‚] When there are subtasks, this maximul limit can be broken */ }, async (persona, index) => {
5478
- const modelRequirements = await preparePersona(persona.description, { ...tools, llm: llmToolsWithUsage }, {
5713
+ const { modelsRequirements } = await preparePersona(persona.description, { ...tools, llm: llmToolsWithUsage }, {
5479
5714
  rootDirname,
5480
5715
  maxParallelCount /* <- TODO: [๐Ÿช‚] */,
5481
5716
  isVerbose,
5482
5717
  });
5483
5718
  const preparedPersona = {
5484
5719
  ...persona,
5485
- modelRequirements,
5720
+ modelsRequirements,
5486
5721
  preparationIds: [/* TODO: [๐ŸงŠ] -> */ currentPreparation.id],
5487
5722
  // <- TODO: [๐Ÿ™] Make some standard order of json properties
5488
5723
  };
@@ -6117,6 +6352,8 @@
6117
6352
  */
6118
6353
 
6119
6354
  /**
6355
+ import { WrappedError } from '../../errors/WrappedError';
6356
+ import { assertsError } from '../../errors/assertsError';
6120
6357
  * Parses the expect command
6121
6358
  *
6122
6359
  * @see `documentationUrl` for more details
@@ -6208,9 +6445,7 @@
6208
6445
  };
6209
6446
  }
6210
6447
  catch (error) {
6211
- if (!(error instanceof Error)) {
6212
- throw error;
6213
- }
6448
+ assertsError(error);
6214
6449
  throw new ParseError(spaceTrim__default["default"]((block) => `
6215
6450
  Invalid FORMAT command
6216
6451
  ${block(error.message)}:
@@ -9191,9 +9426,7 @@
9191
9426
  return result.trim();
9192
9427
  }
9193
9428
  catch (error) {
9194
- if (!(error instanceof Error)) {
9195
- throw error;
9196
- }
9429
+ assertsError(error);
9197
9430
  return null;
9198
9431
  }
9199
9432
  }
@@ -9271,9 +9504,7 @@
9271
9504
  return result.trim() + toExec;
9272
9505
  }
9273
9506
  catch (error) {
9274
- if (!(error instanceof Error)) {
9275
- throw error;
9276
- }
9507
+ assertsError(error);
9277
9508
  return null;
9278
9509
  }
9279
9510
  }
@@ -9304,9 +9535,7 @@
9304
9535
  throw new Error(`Can not locate app ${appName} on Windows.`);
9305
9536
  }
9306
9537
  catch (error) {
9307
- if (!(error instanceof Error)) {
9308
- throw error;
9309
- }
9538
+ assertsError(error);
9310
9539
  return null;
9311
9540
  }
9312
9541
  }
@@ -9597,13 +9826,13 @@
9597
9826
  /**
9598
9827
  * Provides the path to the `.env` file
9599
9828
  *
9600
- * Note: `$` is used to indicate that this function is not a pure function - it uses filesystem to access .env file
9829
+ * Note: `$` is used to indicate that this function is not a pure function - it uses filesystem to access `.env` file
9601
9830
  *
9602
9831
  * @private within the repository - for CLI utils
9603
9832
  */
9604
- async function $provideEnvFilepath() {
9833
+ async function $provideEnvFilename() {
9605
9834
  if (!$isRunningInNode()) {
9606
- throw new EnvironmentMismatchError('Function `$provideEnvFilepath` works only in Node.js environment');
9835
+ throw new EnvironmentMismatchError('Function `$provideEnvFilename` works only in Node.js environment');
9607
9836
  }
9608
9837
  const envFilePatterns = [
9609
9838
  '.env',
@@ -9642,7 +9871,7 @@
9642
9871
  * @@@
9643
9872
  *
9644
9873
  * @@@ .env
9645
- * Note: `$` is used to indicate that this function is not a pure function - it uses filesystem to access .env file
9874
+ * Note: `$` is used to indicate that this function is not a pure function - it uses filesystem to access `.env` file
9646
9875
  *
9647
9876
  * It looks for environment variables:
9648
9877
  * - `process.env.OPENAI_API_KEY`
@@ -9656,7 +9885,7 @@
9656
9885
  if (!$isRunningInNode()) {
9657
9886
  throw new EnvironmentMismatchError('Function `$provideLlmToolsFromEnv` works only in Node.js environment');
9658
9887
  }
9659
- const envFilepath = await $provideEnvFilepath();
9888
+ const envFilepath = await $provideEnvFilename();
9660
9889
  if (envFilepath !== null) {
9661
9890
  dotenv__namespace.config({ path: envFilepath });
9662
9891
  }
@@ -9727,7 +9956,7 @@
9727
9956
  * Note: This function is not cached, every call creates new instance of `MultipleLlmExecutionTools`
9728
9957
  *
9729
9958
  * @@@ .env
9730
- * Note: `$` is used to indicate that this function is not a pure function - it uses filesystem to access .env file
9959
+ * Note: `$` is used to indicate that this function is not a pure function - it uses filesystem to access `.env` file
9731
9960
  *
9732
9961
  * It looks for environment variables:
9733
9962
  * - `process.env.OPENAI_API_KEY`
@@ -10261,9 +10490,7 @@
10261
10490
  }
10262
10491
  }
10263
10492
  catch (error) {
10264
- if (!(error instanceof Error)) {
10265
- throw error;
10266
- }
10493
+ assertsError(error);
10267
10494
  if (error instanceof ReferenceError) {
10268
10495
  const undefinedName = error.message.split(' ')[0];
10269
10496
  /*
@@ -10538,9 +10765,7 @@
10538
10765
  // ---
10539
10766
  }
10540
10767
  catch (error) {
10541
- if (!(error instanceof Error)) {
10542
- throw error;
10543
- }
10768
+ assertsError(error);
10544
10769
  // TODO: [7] DRY
10545
10770
  const wrappedErrorMessage = spaceTrim__default["default"]((block) => `
10546
10771
  ${error.name} in pipeline ${fileName.split('\\').join('/')}โ :
@@ -10631,9 +10856,7 @@
10631
10856
  }
10632
10857
  }
10633
10858
  catch (error) {
10634
- if (!(error instanceof Error)) {
10635
- throw error;
10636
- }
10859
+ assertsError(error);
10637
10860
  // TODO: [7] DRY
10638
10861
  const wrappedErrorMessage = spaceTrim__default["default"]((block) => `
10639
10862
  ${error.name} in pipeline ${fileName.split('\\').join('/')}โ :
@@ -10749,7 +10972,7 @@
10749
10972
  return null;
10750
10973
  }
10751
10974
  const fileContent = await promises.readFile(filename, 'utf-8');
10752
- const value = JSON.parse(fileContent);
10975
+ const value = jsonParse(fileContent);
10753
10976
  // TODO: [๐ŸŒ—]
10754
10977
  return value;
10755
10978
  }