@promptbook/remote-server 0.94.0 → 0.98.0-10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (75)
  1. package/README.md +6 -2
  2. package/esm/index.es.js +338 -216
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/src/_packages/anthropic-claude.index.d.ts +2 -2
  5. package/esm/typings/src/_packages/cli.index.d.ts +4 -0
  6. package/esm/typings/src/_packages/core.index.d.ts +2 -0
  7. package/esm/typings/src/_packages/openai.index.d.ts +10 -0
  8. package/esm/typings/src/_packages/types.index.d.ts +14 -4
  9. package/esm/typings/src/_packages/{wizzard.index.d.ts → wizard.index.d.ts} +6 -2
  10. package/esm/typings/src/cli/cli-commands/prettify.d.ts +1 -1
  11. package/esm/typings/src/cli/cli-commands/test-command.d.ts +1 -1
  12. package/esm/typings/src/config.d.ts +1 -1
  13. package/esm/typings/src/conversion/archive/loadArchive.d.ts +1 -1
  14. package/esm/typings/src/conversion/archive/saveArchive.d.ts +2 -2
  15. package/esm/typings/src/conversion/prettify/renderPipelineMermaidOptions.d.ts +1 -1
  16. package/esm/typings/src/dialogs/callback/CallbackInterfaceTools.d.ts +1 -1
  17. package/esm/typings/src/execution/AbstractTaskResult.d.ts +2 -2
  18. package/esm/typings/src/execution/createPipelineExecutor/$OngoingTaskResult.d.ts +8 -0
  19. package/esm/typings/src/execution/createPipelineExecutor/00-CreatePipelineExecutorOptions.d.ts +1 -1
  20. package/esm/typings/src/execution/execution-report/ExecutionPromptReportJson.d.ts +2 -2
  21. package/esm/typings/src/execution/translation/automatic-translate/translateMessages.d.ts +1 -1
  22. package/esm/typings/src/execution/utils/validatePromptResult.d.ts +53 -0
  23. package/esm/typings/src/llm-providers/_common/register/{$provideLlmToolsForWizzardOrCli.d.ts → $provideLlmToolsForWizardOrCli.d.ts} +2 -2
  24. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +3 -3
  25. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions.d.ts +2 -2
  26. package/esm/typings/src/llm-providers/anthropic-claude/register-configuration.d.ts +1 -1
  27. package/esm/typings/src/llm-providers/anthropic-claude/register-constructor.d.ts +1 -1
  28. package/esm/typings/src/llm-providers/azure-openai/register-configuration.d.ts +1 -1
  29. package/esm/typings/src/llm-providers/azure-openai/register-constructor.d.ts +1 -1
  30. package/esm/typings/src/llm-providers/deepseek/register-configuration.d.ts +1 -1
  31. package/esm/typings/src/llm-providers/deepseek/register-constructor.d.ts +1 -1
  32. package/esm/typings/src/llm-providers/google/register-configuration.d.ts +1 -1
  33. package/esm/typings/src/llm-providers/google/register-constructor.d.ts +1 -1
  34. package/esm/typings/src/llm-providers/ollama/register-configuration.d.ts +1 -1
  35. package/esm/typings/src/llm-providers/ollama/register-constructor.d.ts +1 -1
  36. package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionTools.d.ts +1 -1
  37. package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionToolsOptions.d.ts +2 -2
  38. package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionTools.d.ts +4 -4
  39. package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionToolsOptions.d.ts +52 -0
  40. package/esm/typings/src/llm-providers/openai/OpenAiExecutionToolsOptions.d.ts +3 -5
  41. package/esm/typings/src/llm-providers/openai/createOpenAiCompatibleExecutionTools.d.ts +74 -0
  42. package/esm/typings/src/llm-providers/openai/register-configuration.d.ts +13 -2
  43. package/esm/typings/src/llm-providers/openai/register-constructor.d.ts +16 -2
  44. package/esm/typings/src/remote-server/socket-types/listModels/PromptbookServer_ListModels_Request.d.ts +1 -1
  45. package/esm/typings/src/scrapers/_boilerplate/createBoilerplateScraper.d.ts +1 -1
  46. package/esm/typings/src/scrapers/_boilerplate/register-constructor.d.ts +1 -1
  47. package/esm/typings/src/scrapers/_boilerplate/register-metadata.d.ts +2 -2
  48. package/esm/typings/src/scrapers/_common/prepareKnowledgePieces.d.ts +1 -1
  49. package/esm/typings/src/scrapers/_common/register/ScraperAndConverterMetadata.d.ts +1 -1
  50. package/esm/typings/src/scrapers/document/createDocumentScraper.d.ts +1 -1
  51. package/esm/typings/src/scrapers/document/register-constructor.d.ts +1 -1
  52. package/esm/typings/src/scrapers/document/register-metadata.d.ts +2 -2
  53. package/esm/typings/src/scrapers/document-legacy/createLegacyDocumentScraper.d.ts +1 -1
  54. package/esm/typings/src/scrapers/document-legacy/register-constructor.d.ts +1 -1
  55. package/esm/typings/src/scrapers/document-legacy/register-metadata.d.ts +2 -2
  56. package/esm/typings/src/scrapers/markdown/createMarkdownScraper.d.ts +1 -4
  57. package/esm/typings/src/scrapers/markdown/register-constructor.d.ts +1 -1
  58. package/esm/typings/src/scrapers/markdown/register-metadata.d.ts +2 -2
  59. package/esm/typings/src/scrapers/markitdown/createMarkitdownScraper.d.ts +1 -1
  60. package/esm/typings/src/scrapers/markitdown/register-constructor.d.ts +1 -1
  61. package/esm/typings/src/scrapers/markitdown/register-metadata.d.ts +2 -2
  62. package/esm/typings/src/scrapers/pdf/createPdfScraper.d.ts +1 -1
  63. package/esm/typings/src/scrapers/pdf/register-constructor.d.ts +1 -1
  64. package/esm/typings/src/scrapers/pdf/register-metadata.d.ts +2 -2
  65. package/esm/typings/src/scrapers/website/createWebsiteScraper.d.ts +1 -1
  66. package/esm/typings/src/scrapers/website/register-constructor.d.ts +1 -1
  67. package/esm/typings/src/scrapers/website/register-metadata.d.ts +2 -2
  68. package/esm/typings/src/types/typeAliases.d.ts +1 -1
  69. package/esm/typings/src/utils/files/listAllFiles.d.ts +1 -1
  70. package/esm/typings/src/version.d.ts +1 -1
  71. package/esm/typings/src/{wizzard → wizard}/$getCompiledBook.d.ts +2 -2
  72. package/esm/typings/src/{wizzard/wizzard.d.ts → wizard/wizard.d.ts} +6 -6
  73. package/package.json +2 -14
  74. package/umd/index.umd.js +338 -216
  75. package/umd/index.umd.js.map +1 -1
package/esm/index.es.js CHANGED
@@ -33,7 +33,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
33
33
  * @generated
34
34
  * @see https://github.com/webgptorg/promptbook
35
35
  */
36
- const PROMPTBOOK_ENGINE_VERSION = '0.94.0';
36
+ const PROMPTBOOK_ENGINE_VERSION = '0.98.0-10';
37
37
  /**
38
38
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
39
39
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -173,7 +173,7 @@ const DEFAULT_MAX_PARALLEL_COUNT = 5; // <- TODO: [🤹‍♂️]
173
173
  *
174
174
  * @public exported from `@promptbook/core`
175
175
  */
176
- const DEFAULT_MAX_EXECUTION_ATTEMPTS = 10; // <- TODO: [🤹‍♂️]
176
+ const DEFAULT_MAX_EXECUTION_ATTEMPTS = 7; // <- TODO: [🤹‍♂️]
177
177
  // <- TODO: [🕝] Make also `BOOKS_DIRNAME_ALTERNATIVES`
178
178
  // TODO: Just `.promptbook` in config, hardcode subfolders like `download-cache` or `execution-cache`
179
179
  /**
@@ -1226,7 +1226,7 @@ function checkSerializableAsJson(options) {
1226
1226
  else {
1227
1227
  for (const [subName, subValue] of Object.entries(value)) {
1228
1228
  if (subValue === undefined) {
1229
- // Note: undefined in object is serializable - it is just omited
1229
+ // Note: undefined in object is serializable - it is just omitted
1230
1230
  continue;
1231
1231
  }
1232
1232
  checkSerializableAsJson({ name: `${name}.${subName}`, value: subValue, message });
@@ -1891,7 +1891,7 @@ function jsonParse(value) {
1891
1891
  throw new Error(spaceTrim((block) => `
1892
1892
  ${block(error.message)}
1893
1893
 
1894
- The JSON text:
1894
+ The expected JSON text:
1895
1895
  ${block(value)}
1896
1896
  `));
1897
1897
  }
@@ -2172,7 +2172,7 @@ const UNCERTAIN_USAGE = $deepFreeze({
2172
2172
  * Note: [💞] Ignore a discrepancy between file name and entity name
2173
2173
  */
2174
2174
 
2175
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> 
{knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just 
title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModels",description:"List of available model names together with their descriptions as JSON",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelsRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n```json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n```\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting 
models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n```json\n{availableModels}\n```\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelsRequirements",format:"JSON",dependentParameterNames:["availableModels","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModels}` List of available model names together with their descriptions as JSON\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelsRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n\\`\\`\\`json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are 
the available models:\n\n\\`\\`\\`json\n{availableModels}\n\\`\\`\\`\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelsRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- 
Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
2175
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> 
{knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just 
title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModels",description:"List of available model names together with their descriptions as JSON",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelsRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n```json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpful assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n```\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models 
first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n```json\n{availableModels}\n```\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelsRequirements",format:"JSON",dependentParameterNames:["availableModels","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModels}` List of available model names together with their descriptions as JSON\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelsRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n\\`\\`\\`json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpful assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are 
the available models:\n\n\\`\\`\\`json\n{availableModels}\n\\`\\`\\`\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelsRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- 
Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
2176
2176
 
2177
2177
  /**
2178
2178
  * Checks if value is valid email
@@ -2292,7 +2292,7 @@ function prettifyMarkdown(content) {
2292
2292
  });
2293
2293
  }
2294
2294
  catch (error) {
2295
- // TODO: [🟥] Detect browser / node and make it colorfull
2295
+ // TODO: [🟥] Detect browser / node and make it colorful
2296
2296
  console.error('There was an error with prettifying the markdown, using the original as the fallback', {
2297
2297
  error,
2298
2298
  html: content,
@@ -2557,7 +2557,7 @@ class SimplePipelineCollection {
2557
2557
 
2558
2558
  Note: You have probably forgotten to run "ptbk make" to update the collection
2559
2559
  Note: Pipelines with the same URL are not allowed
2560
- Only exepction is when the pipelines are identical
2560
+ Only exception is when the pipelines are identical
2561
2561
 
2562
2562
  `));
2563
2563
  }
@@ -2710,12 +2710,12 @@ function countUsage(llmTools) {
2710
2710
  get title() {
2711
2711
  return `${llmTools.title} (+usage)`;
2712
2712
  // <- TODO: [🧈] Maybe standartize the suffix when wrapping `LlmExecutionTools` up
2713
- // <- TODO: [🧈][🧠] Does it make sence to suffix "(+usage)"?
2713
+ // <- TODO: [🧈][🧠] Does it make sense to suffix "(+usage)"?
2714
2714
  },
2715
2715
  get description() {
2716
2716
  return `${llmTools.description} (+usage)`;
2717
2717
  // <- TODO: [🧈] Maybe standartize the suffix when wrapping `LlmExecutionTools` up
2718
- // <- TODO: [🧈][🧠] Does it make sence to suffix "(+usage)"?
2718
+ // <- TODO: [🧈][🧠] Does it make sense to suffix "(+usage)"?
2719
2719
  },
2720
2720
  checkConfiguration() {
2721
2721
  return /* not await */ llmTools.checkConfiguration();
@@ -2942,13 +2942,13 @@ function joinLlmExecutionTools(...llmExecutionTools) {
2942
2942
 
2943
2943
  Technically, it's not an error, but it's probably not what you want because it does not make sense to use Promptbook without language models.
2944
2944
  `);
2945
- // TODO: [🟥] Detect browser / node and make it colorfull
2945
+ // TODO: [🟥] Detect browser / node and make it colorful
2946
2946
  console.warn(warningMessage);
2947
2947
  // <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
2948
2948
  /*
2949
2949
  return {
2950
2950
  async listModels() {
2951
- // TODO: [🟥] Detect browser / node and make it colorfull
2951
+ // TODO: [🟥] Detect browser / node and make it colorful
2952
2952
  console.warn(
2953
2953
  spaceTrim(
2954
2954
  (block) => `
@@ -3224,17 +3224,17 @@ function $registeredScrapersMessage(availableScrapers) {
3224
3224
  * Mixes registered scrapers from $scrapersMetadataRegister and $scrapersRegister
3225
3225
  */
3226
3226
  const all = [];
3227
- for (const { packageName, className, mimeTypes, documentationUrl, isAvilableInBrowser, } of $scrapersMetadataRegister.list()) {
3227
+ for (const { packageName, className, mimeTypes, documentationUrl, isAvailableInBrowser, } of $scrapersMetadataRegister.list()) {
3228
3228
  if (all.some((item) => item.packageName === packageName && item.className === className)) {
3229
3229
  continue;
3230
3230
  }
3231
- all.push({ packageName, className, mimeTypes, documentationUrl, isAvilableInBrowser });
3231
+ all.push({ packageName, className, mimeTypes, documentationUrl, isAvailableInBrowser });
3232
3232
  }
3233
- for (const { packageName, className, mimeTypes, documentationUrl, isAvilableInBrowser, } of $scrapersRegister.list()) {
3233
+ for (const { packageName, className, mimeTypes, documentationUrl, isAvailableInBrowser, } of $scrapersRegister.list()) {
3234
3234
  if (all.some((item) => item.packageName === packageName && item.className === className)) {
3235
3235
  continue;
3236
3236
  }
3237
- all.push({ packageName, className, mimeTypes, documentationUrl, isAvilableInBrowser });
3237
+ all.push({ packageName, className, mimeTypes, documentationUrl, isAvailableInBrowser });
3238
3238
  }
3239
3239
  for (const { metadata } of availableScrapers) {
3240
3240
  all.push(metadata);
@@ -3246,8 +3246,8 @@ function $registeredScrapersMessage(availableScrapers) {
3246
3246
  const isInstalled = $scrapersRegister
3247
3247
  .list()
3248
3248
  .find(({ packageName, className }) => metadata.packageName === packageName && metadata.className === className);
3249
- const isAvilableInTools = availableScrapers.some(({ metadata: { packageName, className } }) => metadata.packageName === packageName && metadata.className === className);
3250
- return { ...metadata, isMetadataAviailable, isInstalled, isAvilableInTools };
3249
+ const isAvailableInTools = availableScrapers.some(({ metadata: { packageName, className } }) => metadata.packageName === packageName && metadata.className === className);
3250
+ return { ...metadata, isMetadataAviailable, isInstalled, isAvailableInTools };
3251
3251
  });
3252
3252
  if (metadata.length === 0) {
3253
3253
  return spaceTrim(`
@@ -3260,7 +3260,7 @@ function $registeredScrapersMessage(availableScrapers) {
3260
3260
  return spaceTrim((block) => `
3261
3261
  Available scrapers are:
3262
3262
  ${block(metadata
3263
- .map(({ packageName, className, isMetadataAviailable, isInstalled, mimeTypes, isAvilableInBrowser, isAvilableInTools, }, i) => {
3263
+ .map(({ packageName, className, isMetadataAviailable, isInstalled, mimeTypes, isAvailableInBrowser, isAvailableInTools, }, i) => {
3264
3264
  const more = [];
3265
3265
  // TODO: [🧠] Maybe use `documentationUrl`
3266
3266
  if (isMetadataAviailable) {
@@ -3269,16 +3269,16 @@ function $registeredScrapersMessage(availableScrapers) {
3269
3269
  if (isInstalled) {
3270
3270
  more.push(`🟩 Installed`);
3271
3271
  } // not else
3272
- if (isAvilableInTools) {
3272
+ if (isAvailableInTools) {
3273
3273
  more.push(`🟦 Available in tools`);
3274
3274
  } // not else
3275
3275
  if (!isMetadataAviailable && isInstalled) {
3276
3276
  more.push(`When no metadata registered but scraper is installed, it is an unexpected behavior`);
3277
3277
  } // not else
3278
- if (!isInstalled && isAvilableInTools) {
3278
+ if (!isInstalled && isAvailableInTools) {
3279
3279
  more.push(`When the scraper is not installed but available in tools, it is an unexpected compatibility behavior`);
3280
3280
  } // not else
3281
- if (!isAvilableInBrowser) {
3281
+ if (!isAvailableInBrowser) {
3282
3282
  more.push(`Not usable in browser`);
3283
3283
  }
3284
3284
  const moreText = more.length === 0 ? '' : ` *(${more.join('; ')})*`;
@@ -4005,7 +4005,7 @@ TODO: [🧊] This is how it can look in future
4005
4005
  /**
4006
4006
  * TODO: [🧊] In future one preparation can take data from previous preparation and save tokens and time
4007
4007
  * Put `knowledgePieces` into `PrepareKnowledgeOptions`
4008
- * TODO: [🪂] More than max things can run in parallel by acident [1,[2a,2b,_],[3a,3b,_]]
4008
+ * TODO: [🪂] More than max things can run in parallel by accident [1,[2a,2b,_],[3a,3b,_]]
4009
4009
  * TODO: [🧠][❎] Do here proper M:N mapping
4010
4010
  * [x] One source can make multiple pieces
4011
4011
  * [ ] One piece can have multiple sources
@@ -4813,6 +4813,94 @@ function mapAvailableToExpectedParameters(options) {
4813
4813
  return mappedParameters;
4814
4814
  }
4815
4815
 
4816
+ /**
4817
+ * Just says that the variable is not used but should be kept
4818
+ * No side effects.
4819
+ *
4820
+ * Note: It can be useful for:
4821
+ *
4822
+ * 1) Suppressing eager optimization of unused imports
4823
+ * 2) Suppressing eslint errors of unused variables in the tests
4824
+ * 3) Keeping the type of the variable for type testing
4825
+ *
4826
+ * @param value any values
4827
+ * @returns void
4828
+ * @private within the repository
4829
+ */
4830
+ function keepUnused(...valuesToKeep) {
4831
+ }
4832
+
4833
+ /**
4834
+ * Replaces parameters in template with values from parameters object
4835
+ *
4836
+ * Note: This function is not places strings into string,
4837
+ * It's more complex and can handle this operation specifically for LLM models
4838
+ *
4839
+ * @param template the template with parameters in {curly} braces
4840
+ * @param parameters the object with parameters
4841
+ * @returns the template with replaced parameters
4842
+ * @throws {PipelineExecutionError} if parameter is not defined, not closed, or not opened
4843
+ * @public exported from `@promptbook/utils`
4844
+ */
4845
+ function templateParameters(template, parameters) {
4846
+ for (const [parameterName, parameterValue] of Object.entries(parameters)) {
4847
+ if (parameterValue === RESERVED_PARAMETER_MISSING_VALUE) {
4848
+ throw new UnexpectedError(`Parameter \`{${parameterName}}\` has missing value`);
4849
+ }
4850
+ else if (parameterValue === RESERVED_PARAMETER_RESTRICTED) {
4851
+ // TODO: [🍵]
4852
+ throw new UnexpectedError(`Parameter \`{${parameterName}}\` is restricted to use`);
4853
+ }
4854
+ }
4855
+ let replacedTemplates = template;
4856
+ let match;
4857
+ let loopLimit = LOOP_LIMIT;
4858
+ while ((match = /^(?<precol>.*){(?<parameterName>\w+)}(.*)/m /* <- Not global */
4859
+ .exec(replacedTemplates))) {
4860
+ if (loopLimit-- < 0) {
4861
+ throw new LimitReachedError('Loop limit reached during parameters replacement in `templateParameters`');
4862
+ }
4863
+ const precol = match.groups.precol;
4864
+ const parameterName = match.groups.parameterName;
4865
+ if (parameterName === '') {
4866
+ // Note: Skip empty placeholders. It's used to avoid confusion with JSON-like strings
4867
+ continue;
4868
+ }
4869
+ if (parameterName.indexOf('{') !== -1 || parameterName.indexOf('}') !== -1) {
4870
+ throw new PipelineExecutionError('Parameter is already opened or not closed');
4871
+ }
4872
+ if (parameters[parameterName] === undefined) {
4873
+ throw new PipelineExecutionError(`Parameter \`{${parameterName}}\` is not defined`);
4874
+ }
4875
+ let parameterValue = parameters[parameterName];
4876
+ if (parameterValue === undefined) {
4877
+ throw new PipelineExecutionError(`Parameter \`{${parameterName}}\` is not defined`);
4878
+ }
4879
+ parameterValue = valueToString(parameterValue);
4880
+ // Escape curly braces in parameter values to prevent prompt-injection
4881
+ parameterValue = parameterValue.replace(/[{}]/g, '\\$&');
4882
+ if (parameterValue.includes('\n') && /^\s*\W{0,3}\s*$/.test(precol)) {
4883
+ parameterValue = parameterValue
4884
+ .split('\n')
4885
+ .map((line, index) => (index === 0 ? line : `${precol}${line}`))
4886
+ .join('\n');
4887
+ }
4888
+ replacedTemplates =
4889
+ replacedTemplates.substring(0, match.index + precol.length) +
4890
+ parameterValue +
4891
+ replacedTemplates.substring(match.index + precol.length + parameterName.length + 2);
4892
+ }
4893
+ // [💫] Check if there are parameters that are not closed properly
4894
+ if (/{\w+$/.test(replacedTemplates)) {
4895
+ throw new PipelineExecutionError('Parameter is not closed');
4896
+ }
4897
+ // [💫] Check if there are parameters that are not opened properly
4898
+ if (/^\w+}/.test(replacedTemplates)) {
4899
+ throw new PipelineExecutionError('Parameter is not opened');
4900
+ }
4901
+ return replacedTemplates;
4902
+ }
4903
+
4816
4904
  /**
4817
4905
  * Extracts all code blocks from markdown.
4818
4906
  *
@@ -4915,94 +5003,6 @@ function extractJsonBlock(markdown) {
4915
5003
  * TODO: [🏢] Make this logic part of `JsonFormatParser` or `isValidJsonString`
4916
5004
  */
4917
5005
 
4918
- /**
4919
- * Just says that the variable is not used but should be kept
4920
- * No side effects.
4921
- *
4922
- * Note: It can be useful for:
4923
- *
4924
- * 1) Suppressing eager optimization of unused imports
4925
- * 2) Suppressing eslint errors of unused variables in the tests
4926
- * 3) Keeping the type of the variable for type testing
4927
- *
4928
- * @param value any values
4929
- * @returns void
4930
- * @private within the repository
4931
- */
4932
- function keepUnused(...valuesToKeep) {
4933
- }
4934
-
4935
- /**
4936
- * Replaces parameters in template with values from parameters object
4937
- *
4938
- * Note: This function is not places strings into string,
4939
- * It's more complex and can handle this operation specifically for LLM models
4940
- *
4941
- * @param template the template with parameters in {curly} braces
4942
- * @param parameters the object with parameters
4943
- * @returns the template with replaced parameters
4944
- * @throws {PipelineExecutionError} if parameter is not defined, not closed, or not opened
4945
- * @public exported from `@promptbook/utils`
4946
- */
4947
- function templateParameters(template, parameters) {
4948
- for (const [parameterName, parameterValue] of Object.entries(parameters)) {
4949
- if (parameterValue === RESERVED_PARAMETER_MISSING_VALUE) {
4950
- throw new UnexpectedError(`Parameter \`{${parameterName}}\` has missing value`);
4951
- }
4952
- else if (parameterValue === RESERVED_PARAMETER_RESTRICTED) {
4953
- // TODO: [🍵]
4954
- throw new UnexpectedError(`Parameter \`{${parameterName}}\` is restricted to use`);
4955
- }
4956
- }
4957
- let replacedTemplates = template;
4958
- let match;
4959
- let loopLimit = LOOP_LIMIT;
4960
- while ((match = /^(?<precol>.*){(?<parameterName>\w+)}(.*)/m /* <- Not global */
4961
- .exec(replacedTemplates))) {
4962
- if (loopLimit-- < 0) {
4963
- throw new LimitReachedError('Loop limit reached during parameters replacement in `templateParameters`');
4964
- }
4965
- const precol = match.groups.precol;
4966
- const parameterName = match.groups.parameterName;
4967
- if (parameterName === '') {
4968
- // Note: Skip empty placeholders. It's used to avoid confusion with JSON-like strings
4969
- continue;
4970
- }
4971
- if (parameterName.indexOf('{') !== -1 || parameterName.indexOf('}') !== -1) {
4972
- throw new PipelineExecutionError('Parameter is already opened or not closed');
4973
- }
4974
- if (parameters[parameterName] === undefined) {
4975
- throw new PipelineExecutionError(`Parameter \`{${parameterName}}\` is not defined`);
4976
- }
4977
- let parameterValue = parameters[parameterName];
4978
- if (parameterValue === undefined) {
4979
- throw new PipelineExecutionError(`Parameter \`{${parameterName}}\` is not defined`);
4980
- }
4981
- parameterValue = valueToString(parameterValue);
4982
- // Escape curly braces in parameter values to prevent prompt-injection
4983
- parameterValue = parameterValue.replace(/[{}]/g, '\\$&');
4984
- if (parameterValue.includes('\n') && /^\s*\W{0,3}\s*$/.test(precol)) {
4985
- parameterValue = parameterValue
4986
- .split('\n')
4987
- .map((line, index) => (index === 0 ? line : `${precol}${line}`))
4988
- .join('\n');
4989
- }
4990
- replacedTemplates =
4991
- replacedTemplates.substring(0, match.index + precol.length) +
4992
- parameterValue +
4993
- replacedTemplates.substring(match.index + precol.length + parameterName.length + 2);
4994
- }
4995
- // [💫] Check if there are parameters that are not closed properly
4996
- if (/{\w+$/.test(replacedTemplates)) {
4997
- throw new PipelineExecutionError('Parameter is not closed');
4998
- }
4999
- // [💫] Check if there are parameters that are not opened properly
5000
- if (/^\w+}/.test(replacedTemplates)) {
5001
- throw new PipelineExecutionError('Parameter is not opened');
5002
- }
5003
- return replacedTemplates;
5004
- }
5005
-
5006
5006
  /**
5007
5007
  * Counts number of characters in the text
5008
5008
  *
@@ -5163,6 +5163,68 @@ function checkExpectations(expectations, value) {
5163
5163
  * Note: [💝] and [🤠] are interconnected together
5164
5164
  */
5165
5165
 
5166
+ /**
5167
+ * Validates a prompt result against expectations and format requirements.
5168
+ * This function provides a common abstraction for result validation that can be used
5169
+ * by both execution logic and caching logic to ensure consistency.
5170
+ *
5171
+ * @param options - The validation options including result string, expectations, and format
5172
+ * @returns Validation result with processed string and validity status
5173
+ * @private internal function of `createPipelineExecutor` and `cacheLlmTools`
5174
+ */
5175
+ function validatePromptResult(options) {
5176
+ const { resultString, expectations, format } = options;
5177
+ let processedResultString = resultString;
5178
+ let validationError;
5179
+ try {
5180
+ // TODO: [💝] Unite object for expecting amount and format
5181
+ if (format) {
5182
+ if (format === 'JSON') {
5183
+ if (!isValidJsonString(processedResultString)) {
5184
+ // TODO: [🏢] Do more universally via `FormatParser`
5185
+ try {
5186
+ processedResultString = extractJsonBlock(processedResultString);
5187
+ }
5188
+ catch (error) {
5189
+ keepUnused(error);
5190
+ throw new ExpectError(spaceTrim$1((block) => `
5191
+ Expected valid JSON string
5192
+
5193
+ The expected JSON text:
5194
+ ${block(processedResultString)}
5195
+ `));
5196
+ }
5197
+ }
5198
+ }
5199
+ else {
5200
+ throw new UnexpectedError(`Unknown format "${format}"`);
5201
+ }
5202
+ }
5203
+ // TODO: [💝] Unite object for expecting amount and format
5204
+ if (expectations) {
5205
+ checkExpectations(expectations, processedResultString);
5206
+ }
5207
+ return {
5208
+ isValid: true,
5209
+ processedResultString,
5210
+ };
5211
+ }
5212
+ catch (error) {
5213
+ if (error instanceof ExpectError) {
5214
+ validationError = error;
5215
+ }
5216
+ else {
5217
+ // Re-throw non-ExpectError errors (like UnexpectedError)
5218
+ throw error;
5219
+ }
5220
+ return {
5221
+ isValid: false,
5222
+ processedResultString,
5223
+ error: validationError,
5224
+ };
5225
+ }
5226
+ }
5227
+
5166
5228
  /**
5167
5229
  * Executes a pipeline task with multiple attempts, including joker and retry logic. Handles different task types
5168
5230
  * (prompt, script, dialog, etc.), applies postprocessing, checks expectations, and updates the execution report.
@@ -5180,17 +5242,18 @@ async function executeAttempts(options) {
5180
5242
  $resultString: null,
5181
5243
  $expectError: null,
5182
5244
  $scriptPipelineExecutionErrors: [],
5245
+ $failedResults: [], // Track all failed attempts
5183
5246
  };
5184
5247
  // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
5185
5248
  const _llms = arrayableToArray(tools.llm);
5186
5249
  const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
5187
- attempts: for (let attempt = -jokerParameterNames.length; attempt < maxAttempts; attempt++) {
5188
- const isJokerAttempt = attempt < 0;
5189
- const jokerParameterName = jokerParameterNames[jokerParameterNames.length + attempt];
5250
+ attempts: for (let attemptIndex = -jokerParameterNames.length; attemptIndex < maxAttempts; attemptIndex++) {
5251
+ const isJokerAttempt = attemptIndex < 0;
5252
+ const jokerParameterName = jokerParameterNames[jokerParameterNames.length + attemptIndex];
5190
5253
  // TODO: [🧠][🍭] JOKERS, EXPECTATIONS, POSTPROCESSING and FOREACH
5191
5254
  if (isJokerAttempt && !jokerParameterName) {
5192
5255
  throw new UnexpectedError(spaceTrim$1((block) => `
5193
- Joker not found in attempt ${attempt}
5256
+ Joker not found in attempt ${attemptIndex}
5194
5257
 
5195
5258
  ${block(pipelineIdentification)}
5196
5259
  `));
@@ -5388,35 +5451,18 @@ async function executeAttempts(options) {
5388
5451
  }
5389
5452
  }
5390
5453
  // TODO: [💝] Unite object for expecting amount and format
5391
- if (task.format) {
5392
- if (task.format === 'JSON') {
5393
- if (!isValidJsonString($ongoingTaskResult.$resultString || '')) {
5394
- // TODO: [🏢] Do more universally via `FormatParser`
5395
- try {
5396
- $ongoingTaskResult.$resultString = extractJsonBlock($ongoingTaskResult.$resultString || '');
5397
- }
5398
- catch (error) {
5399
- keepUnused(error);
5400
- throw new ExpectError(spaceTrim$1((block) => `
5401
- Expected valid JSON string
5402
-
5403
- ${block(
5404
- /*<- Note: No need for `pipelineIdentification`, it will be catched and added later */ '')}
5405
- `));
5406
- }
5407
- }
5408
- }
5409
- else {
5410
- throw new UnexpectedError(spaceTrim$1((block) => `
5411
- Unknown format "${task.format}"
5412
-
5413
- ${block(pipelineIdentification)}
5414
- `));
5454
+ // Use the common validation function for both format and expectations
5455
+ if (task.format || task.expectations) {
5456
+ const validationResult = validatePromptResult({
5457
+ resultString: $ongoingTaskResult.$resultString || '',
5458
+ expectations: task.expectations,
5459
+ format: task.format,
5460
+ });
5461
+ if (!validationResult.isValid) {
5462
+ throw validationResult.error;
5415
5463
  }
5416
- }
5417
- // TODO: [💝] Unite object for expecting amount and format
5418
- if (task.expectations) {
5419
- checkExpectations(task.expectations, $ongoingTaskResult.$resultString || '');
5464
+ // Update the result string in case format processing modified it (e.g., JSON extraction)
5465
+ $ongoingTaskResult.$resultString = validationResult.processedResultString;
5420
5466
  }
5421
5467
  break attempts;
5422
5468
  }
@@ -5425,6 +5471,15 @@ async function executeAttempts(options) {
5425
5471
  throw error;
5426
5472
  }
5427
5473
  $ongoingTaskResult.$expectError = error;
5474
+ // Store each failed attempt
5475
+ if (!Array.isArray($ongoingTaskResult.$failedResults)) {
5476
+ $ongoingTaskResult.$failedResults = [];
5477
+ }
5478
+ $ongoingTaskResult.$failedResults.push({
5479
+ attemptIndex,
5480
+ result: $ongoingTaskResult.$resultString,
5481
+ error: error,
5482
+ });
5428
5483
  }
5429
5484
  finally {
5430
5485
  if (!isJokerAttempt &&
@@ -5446,35 +5501,41 @@ async function executeAttempts(options) {
5446
5501
  });
5447
5502
  }
5448
5503
  }
5449
- if ($ongoingTaskResult.$expectError !== null && attempt === maxAttempts - 1) {
5504
+ if ($ongoingTaskResult.$expectError !== null && attemptIndex === maxAttempts - 1) {
5505
+ // Note: Create a summary of all failures
5506
+ const failuresSummary = $ongoingTaskResult.$failedResults
5507
+ .map((failure) => spaceTrim$1((block) => {
5508
+ var _a, _b;
5509
+ return `
5510
+ Attempt ${failure.attemptIndex + 1}:
5511
+ Error ${((_a = failure.error) === null || _a === void 0 ? void 0 : _a.name) || ''}:
5512
+ ${block((_b = failure.error) === null || _b === void 0 ? void 0 : _b.message.split('\n').map((line) => `> ${line}`).join('\n'))}
5513
+
5514
+ Result:
5515
+ ${block(failure.result === null
5516
+ ? 'null'
5517
+ : spaceTrim$1(failure.result)
5518
+ .split('\n')
5519
+ .map((line) => `> ${line}`)
5520
+ .join('\n'))}
5521
+ `;
5522
+ }))
5523
+ .join('\n\n---\n\n');
5450
5524
  throw new PipelineExecutionError(spaceTrim$1((block) => {
5451
- var _a, _b, _c;
5525
+ var _a;
5452
5526
  return `
5453
5527
  LLM execution failed ${maxExecutionAttempts}x
5454
5528
 
5455
5529
  ${block(pipelineIdentification)}
5456
5530
 
5457
- ---
5458
5531
  The Prompt:
5459
5532
  ${block((((_a = $ongoingTaskResult.$prompt) === null || _a === void 0 ? void 0 : _a.content) || '')
5460
5533
  .split('\n')
5461
5534
  .map((line) => `> ${line}`)
5462
5535
  .join('\n'))}
5463
5536
 
5464
- Last error ${((_b = $ongoingTaskResult.$expectError) === null || _b === void 0 ? void 0 : _b.name) || ''}:
5465
- ${block((((_c = $ongoingTaskResult.$expectError) === null || _c === void 0 ? void 0 : _c.message) || '')
5466
- .split('\n')
5467
- .map((line) => `> ${line}`)
5468
- .join('\n'))}
5469
-
5470
- Last result:
5471
- ${block($ongoingTaskResult.$resultString === null
5472
- ? 'null'
5473
- : spaceTrim$1($ongoingTaskResult.$resultString)
5474
- .split('\n')
5475
- .map((line) => `> ${line}`)
5476
- .join('\n'))}
5477
- ---
5537
+ All Failed Attempts:
5538
+ ${block(failuresSummary)}
5478
5539
  `;
5479
5540
  }));
5480
5541
  }
@@ -5694,10 +5755,10 @@ function knowledgePiecesToString(knowledgePieces) {
5694
5755
  */
5695
5756
  async function getKnowledgeForTask(options) {
5696
5757
  const { tools, preparedPipeline, task, parameters } = options;
5697
- const firstKnowlegePiece = preparedPipeline.knowledgePieces[0];
5698
- const firstKnowlegeIndex = firstKnowlegePiece === null || firstKnowlegePiece === void 0 ? void 0 : firstKnowlegePiece.index[0];
5758
+ const firstKnowledgePiece = preparedPipeline.knowledgePieces[0];
5759
+ const firstKnowledgeIndex = firstKnowledgePiece === null || firstKnowledgePiece === void 0 ? void 0 : firstKnowledgePiece.index[0];
5699
5760
  // <- TODO: Do not use just first knowledge piece and first index to determine embedding model, use also keyword search
5700
- if (firstKnowlegePiece === undefined || firstKnowlegeIndex === undefined) {
5761
+ if (firstKnowledgePiece === undefined || firstKnowledgeIndex === undefined) {
5701
5762
  return ''; // <- Note: Np knowledge present, return empty string
5702
5763
  }
5703
5764
  try {
@@ -5708,7 +5769,7 @@ async function getKnowledgeForTask(options) {
5708
5769
  title: 'Knowledge Search',
5709
5770
  modelRequirements: {
5710
5771
  modelVariant: 'EMBEDDING',
5711
- modelName: firstKnowlegeIndex.modelName,
5772
+ modelName: firstKnowledgeIndex.modelName,
5712
5773
  },
5713
5774
  content: task.content,
5714
5775
  parameters,
@@ -5716,7 +5777,7 @@ async function getKnowledgeForTask(options) {
5716
5777
  const taskEmbeddingResult = await llmTools.callEmbeddingModel(taskEmbeddingPrompt);
5717
5778
  const knowledgePiecesWithRelevance = preparedPipeline.knowledgePieces.map((knowledgePiece) => {
5718
5779
  const { index } = knowledgePiece;
5719
- const knowledgePieceIndex = index.find((i) => i.modelName === firstKnowlegeIndex.modelName);
5780
+ const knowledgePieceIndex = index.find((i) => i.modelName === firstKnowledgeIndex.modelName);
5720
5781
  // <- TODO: Do not use just first knowledge piece and first index to determine embedding model
5721
5782
  if (knowledgePieceIndex === undefined) {
5722
5783
  return {
@@ -5737,8 +5798,8 @@ async function getKnowledgeForTask(options) {
5737
5798
  task,
5738
5799
  taskEmbeddingPrompt,
5739
5800
  taskEmbeddingResult,
5740
- firstKnowlegePiece,
5741
- firstKnowlegeIndex,
5801
+ firstKnowledgePiece,
5802
+ firstKnowledgeIndex,
5742
5803
  knowledgePiecesWithRelevance,
5743
5804
  knowledgePiecesSorted,
5744
5805
  knowledgePiecesLimited,
@@ -5807,7 +5868,7 @@ async function getReservedParametersForTask(options) {
5807
5868
  * @private internal utility of `createPipelineExecutor`
5808
5869
  */
5809
5870
  async function executeTask(options) {
5810
- const { currentTask, preparedPipeline, parametersToPass, tools, onProgress, $executionReport, pipelineIdentification, maxExecutionAttempts, maxParallelCount, csvSettings, isVerbose, rootDirname, cacheDirname, intermediateFilesStrategy, isAutoInstalled, isNotPreparedWarningSupressed, } = options;
5871
+ const { currentTask, preparedPipeline, parametersToPass, tools, onProgress, $executionReport, pipelineIdentification, maxExecutionAttempts, maxParallelCount, csvSettings, isVerbose, rootDirname, cacheDirname, intermediateFilesStrategy, isAutoInstalled, isNotPreparedWarningSuppressed, } = options;
5811
5872
  const priority = preparedPipeline.tasks.length - preparedPipeline.tasks.indexOf(currentTask);
5812
5873
  // Note: Check consistency of used and dependent parameters which was also done in `validatePipeline`, but it’s good to doublecheck
5813
5874
  const usedParameterNames = extractParameterNamesFromTask(currentTask);
@@ -5895,7 +5956,7 @@ async function executeTask(options) {
5895
5956
  cacheDirname,
5896
5957
  intermediateFilesStrategy,
5897
5958
  isAutoInstalled,
5898
- isNotPreparedWarningSupressed,
5959
+ isNotPreparedWarningSuppressed,
5899
5960
  });
5900
5961
  await onProgress({
5901
5962
  outputParameters: {
@@ -5990,7 +6051,7 @@ async function executePipeline(options) {
5990
6051
  }
5991
6052
  return exportJson({
5992
6053
  name: `executionReport`,
5993
- message: `Unuccessful PipelineExecutorResult (with missing parameter {${parameter.name}}) PipelineExecutorResult`,
6054
+ message: `Unsuccessful PipelineExecutorResult (with missing parameter {${parameter.name}}) PipelineExecutorResult`,
5994
6055
  order: [],
5995
6056
  value: {
5996
6057
  isSuccessful: false,
@@ -6027,7 +6088,7 @@ async function executePipeline(options) {
6027
6088
  return exportJson({
6028
6089
  name: 'pipelineExecutorResult',
6029
6090
  message: spaceTrim$1((block) => `
6030
- Unuccessful PipelineExecutorResult (with extra parameter {${parameter.name}}) PipelineExecutorResult
6091
+ Unsuccessful PipelineExecutorResult (with extra parameter {${parameter.name}}) PipelineExecutorResult
6031
6092
 
6032
6093
  ${block(pipelineIdentification)}
6033
6094
  `),
@@ -6168,7 +6229,7 @@ async function executePipeline(options) {
6168
6229
  }
6169
6230
  return exportJson({
6170
6231
  name: 'pipelineExecutorResult',
6171
- message: `Unuccessful PipelineExecutorResult (with misc errors) PipelineExecutorResult`,
6232
+ message: `Unsuccessful PipelineExecutorResult (with misc errors) PipelineExecutorResult`,
6172
6233
  order: [],
6173
6234
  value: {
6174
6235
  isSuccessful: false,
@@ -6219,7 +6280,7 @@ async function executePipeline(options) {
6219
6280
  * @public exported from `@promptbook/core`
6220
6281
  */
6221
6282
  function createPipelineExecutor(options) {
6222
- const { pipeline, tools, maxExecutionAttempts = DEFAULT_MAX_EXECUTION_ATTEMPTS, maxParallelCount = DEFAULT_MAX_PARALLEL_COUNT, csvSettings = DEFAULT_CSV_SETTINGS, isVerbose = DEFAULT_IS_VERBOSE, isNotPreparedWarningSupressed = false, cacheDirname = DEFAULT_SCRAPE_CACHE_DIRNAME, intermediateFilesStrategy = DEFAULT_INTERMEDIATE_FILES_STRATEGY, isAutoInstalled = DEFAULT_IS_AUTO_INSTALLED, rootDirname = null, } = options;
6283
+ const { pipeline, tools, maxExecutionAttempts = DEFAULT_MAX_EXECUTION_ATTEMPTS, maxParallelCount = DEFAULT_MAX_PARALLEL_COUNT, csvSettings = DEFAULT_CSV_SETTINGS, isVerbose = DEFAULT_IS_VERBOSE, isNotPreparedWarningSuppressed = false, cacheDirname = DEFAULT_SCRAPE_CACHE_DIRNAME, intermediateFilesStrategy = DEFAULT_INTERMEDIATE_FILES_STRATEGY, isAutoInstalled = DEFAULT_IS_AUTO_INSTALLED, rootDirname = null, } = options;
6223
6284
  validatePipeline(pipeline);
6224
6285
  const pipelineIdentification = (() => {
6225
6286
  // Note: This is a 😐 implementation of [🚞]
@@ -6236,7 +6297,7 @@ function createPipelineExecutor(options) {
6236
6297
  if (isPipelinePrepared(pipeline)) {
6237
6298
  preparedPipeline = pipeline;
6238
6299
  }
6239
- else if (isNotPreparedWarningSupressed !== true) {
6300
+ else if (isNotPreparedWarningSuppressed !== true) {
6240
6301
  console.warn(spaceTrim$1((block) => `
6241
6302
  Pipeline is not prepared
6242
6303
 
@@ -6269,7 +6330,7 @@ function createPipelineExecutor(options) {
6269
6330
  maxParallelCount,
6270
6331
  csvSettings,
6271
6332
  isVerbose,
6272
- isNotPreparedWarningSupressed,
6333
+ isNotPreparedWarningSuppressed,
6273
6334
  rootDirname,
6274
6335
  cacheDirname,
6275
6336
  intermediateFilesStrategy,
@@ -6278,7 +6339,7 @@ function createPipelineExecutor(options) {
6278
6339
  assertsError(error);
6279
6340
  return exportJson({
6280
6341
  name: 'pipelineExecutorResult',
6281
- message: `Unuccessful PipelineExecutorResult, last catch`,
6342
+ message: `Unsuccessful PipelineExecutorResult, last catch`,
6282
6343
  order: [],
6283
6344
  value: {
6284
6345
  isSuccessful: false,
@@ -6305,6 +6366,46 @@ function createPipelineExecutor(options) {
6305
6366
  return pipelineExecutor;
6306
6367
  }
6307
6368
 
6369
+ /**
6370
+ * Detects if the code is running in a browser environment in main thread (Not in a web worker)
6371
+ *
6372
+ * Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
6373
+ *
6374
+ * @public exported from `@promptbook/utils`
6375
+ */
6376
+ const $isRunningInBrowser = new Function(`
6377
+ try {
6378
+ return this === window;
6379
+ } catch (e) {
6380
+ return false;
6381
+ }
6382
+ `);
6383
+ /**
6384
+ * TODO: [🎺]
6385
+ */
6386
+
6387
+ /**
6388
+ * Detects if the code is running in a web worker
6389
+ *
6390
+ * Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
6391
+ *
6392
+ * @public exported from `@promptbook/utils`
6393
+ */
6394
+ const $isRunningInWebWorker = new Function(`
6395
+ try {
6396
+ if (typeof WorkerGlobalScope !== 'undefined' && self instanceof WorkerGlobalScope) {
6397
+ return true;
6398
+ } else {
6399
+ return false;
6400
+ }
6401
+ } catch (e) {
6402
+ return false;
6403
+ }
6404
+ `);
6405
+ /**
6406
+ * TODO: [🎺]
6407
+ */
6408
+
6308
6409
  /**
6309
6410
  * Register for LLM tools.
6310
6411
  *
@@ -6473,8 +6574,10 @@ function createLlmToolsFromConfiguration(configuration, options = {}) {
6473
6574
  .list()
6474
6575
  .find(({ packageName, className }) => llmConfiguration.packageName === packageName && llmConfiguration.className === className);
6475
6576
  if (registeredItem === undefined) {
6577
+ console.log('!!! $llmToolsRegister.list()', $llmToolsRegister.list());
6476
6578
  throw new Error(spaceTrim((block) => `
6477
6579
  There is no constructor for LLM provider \`${llmConfiguration.className}\` from \`${llmConfiguration.packageName}\`
6580
+ Running in ${!$isRunningInBrowser() ? '' : 'browser environment'}${!$isRunningInNode() ? '' : 'node environment'}${!$isRunningInWebWorker() ? '' : 'worker environment'}
6478
6581
 
6479
6582
  You have probably forgotten install and import the provider package.
6480
6583
  To fix this issue, you can:
@@ -6592,24 +6695,6 @@ function normalizeTo_camelCase(text, _isFirstLetterCapital = false) {
6592
6695
  * TODO: [🌺] Use some intermediate util splitWords
6593
6696
  */
6594
6697
 
6595
- /**
6596
- * Detects if the code is running in a browser environment in main thread (Not in a web worker)
6597
- *
6598
- * Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
6599
- *
6600
- * @public exported from `@promptbook/utils`
6601
- */
6602
- new Function(`
6603
- try {
6604
- return this === window;
6605
- } catch (e) {
6606
- return false;
6607
- }
6608
- `);
6609
- /**
6610
- * TODO: [🎺]
6611
- */
6612
-
6613
6698
  /**
6614
6699
  * Detects if the code is running in jest environment
6615
6700
  *
@@ -6628,28 +6713,6 @@ new Function(`
6628
6713
  * TODO: [🎺]
6629
6714
  */
6630
6715
 
6631
- /**
6632
- * Detects if the code is running in a web worker
6633
- *
6634
- * Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
6635
- *
6636
- * @public exported from `@promptbook/utils`
6637
- */
6638
- new Function(`
6639
- try {
6640
- if (typeof WorkerGlobalScope !== 'undefined' && self instanceof WorkerGlobalScope) {
6641
- return true;
6642
- } else {
6643
- return false;
6644
- }
6645
- } catch (e) {
6646
- return false;
6647
- }
6648
- `);
6649
- /**
6650
- * TODO: [🎺]
6651
- */
6652
-
6653
6716
  /**
6654
6717
  * Makes first letter of a string uppercase
6655
6718
  *
@@ -7821,6 +7884,66 @@ function startRemoteServer(options) {
7821
7884
  response.setHeader('X-Powered-By', 'Promptbook engine');
7822
7885
  next();
7823
7886
  });
7887
// Note: OpenAI-compatible chat completions endpoint
// The `model` field carries the pipeline URL of the book to execute; the chat
// history is flattened into a single `prompt` input parameter for the pipeline.
app.post('/v1/chat/completions', async (request, response) => {
    // TODO: !!!! Make more promptbook-native:
    try {
        const params = request.body;
        const { model, messages } = params;
        // Validate the request shape up-front so malformed requests get an
        // OpenAI-style 400 `invalid_request_error` instead of a generic 500:
        if (typeof model !== 'string' || model === '') {
            response.status(400).json({
                error: {
                    message: '`model` must be a non-empty string (the pipeline URL of the book)',
                    type: 'invalid_request_error',
                    code: 'invalid_model',
                },
            });
            return;
        }
        if (!Array.isArray(messages) || messages.length === 0) {
            response.status(400).json({
                error: {
                    message: '`messages` must be a non-empty array of { role, content } objects',
                    type: 'invalid_request_error',
                    code: 'invalid_messages',
                },
            });
            return;
        }
        // Convert messages to a single prompt
        const prompt = messages
            .map((message) => `${message.role}: ${message.content}`)
            .join('\n');
        // Get pipeline for the book
        if (!collection) {
            throw new Error('No collection available');
        }
        const pipeline = await collection.getPipelineByUrl(model);
        const pipelineExecutor = createPipelineExecutor({
            pipeline,
            tools: await getExecutionToolsFromIdentification({
                isAnonymous: true,
                llmToolsConfiguration: [],
            }),
        });
        // Execute the pipeline with the prompt content as input
        const result = await pipelineExecutor({ prompt }).asPromise({ isCrashedOnError: true });
        if (!result.isSuccessful) {
            throw new Error(`Failed to execute book: ${result.errors.join(', ')}`);
        }
        // Return the result in OpenAI-compatible format
        response.json({
            id: 'chatcmpl-' + Math.random().toString(36).substring(2),
            object: 'chat.completion',
            created: Math.floor(Date.now() / 1000),
            model,
            choices: [
                {
                    index: 0,
                    message: {
                        role: 'assistant',
                        // Note: assumes the pipeline exposes a `response` output parameter - TODO confirm
                        content: result.outputParameters.response,
                    },
                    finish_reason: 'stop',
                },
            ],
            usage: {
                // TODO: Report real token usage instead of zeros
                prompt_tokens: 0,
                completion_tokens: 0,
                total_tokens: 0,
            },
        });
    }
    catch (error) {
        // Genuine execution failures (unknown pipeline, executor errors) stay a 500
        response.status(500).json({
            error: {
                message: error instanceof Error ? error.message : 'Unknown error',
                type: 'server_error',
                code: 'internal_error',
            },
        });
    }
});
7824
7947
  // TODO: [🥺] Expose openapiJson to consumer and also allow to add new routes
7825
7948
  app.use(OpenApiValidator.middleware({
7826
7949
  apiSpec: openapiJson,
@@ -8181,7 +8304,6 @@ function startRemoteServer(options) {
8181
8304
  catch (error) {
8182
8305
  assertsError(error);
8183
8306
  socket.emit('error', serializeError(error));
8184
- // <- TODO: [🚋] There is a problem with the remote server handling errors and sending them back to the client
8185
8307
  }
8186
8308
  finally {
8187
8309
  socket.disconnect();