@promptbook/remote-server 0.89.0 → 0.92.0-11

This diff compares publicly available package versions as published to their public registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the registry.
Files changed (27)
  1. package/README.md +4 -0
  2. package/esm/index.es.js +583 -34
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/src/_packages/core.index.d.ts +6 -0
  5. package/esm/typings/src/_packages/deepseek.index.d.ts +2 -0
  6. package/esm/typings/src/_packages/google.index.d.ts +2 -0
  7. package/esm/typings/src/_packages/utils.index.d.ts +2 -0
  8. package/esm/typings/src/cli/common/$provideLlmToolsForCli.d.ts +1 -1
  9. package/esm/typings/src/conversion/archive/loadArchive.d.ts +2 -2
  10. package/esm/typings/src/execution/CommonToolsOptions.d.ts +4 -0
  11. package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +12 -0
  12. package/esm/typings/src/execution/createPipelineExecutor/getReservedParametersForTask.d.ts +5 -0
  13. package/esm/typings/src/formats/csv/utils/csvParse.d.ts +12 -0
  14. package/esm/typings/src/formats/json/utils/jsonParse.d.ts +11 -0
  15. package/esm/typings/src/llm-providers/_common/filterModels.d.ts +15 -0
  16. package/esm/typings/src/llm-providers/_common/register/LlmToolsMetadata.d.ts +43 -0
  17. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +4 -0
  18. package/esm/typings/src/llm-providers/deepseek/deepseek-models.d.ts +23 -0
  19. package/esm/typings/src/llm-providers/google/google-models.d.ts +23 -0
  20. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +4 -0
  21. package/esm/typings/src/personas/preparePersona.d.ts +1 -1
  22. package/esm/typings/src/pipeline/PipelineJson/PersonaJson.d.ts +4 -2
  23. package/esm/typings/src/remote-server/openapi-types.d.ts +348 -6
  24. package/esm/typings/src/remote-server/openapi.d.ts +397 -3
  25. package/package.json +2 -2
  26. package/umd/index.umd.js +583 -34
  27. package/umd/index.umd.js.map +1 -1
package/esm/index.es.js CHANGED
@@ -33,7 +33,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
- const PROMPTBOOK_ENGINE_VERSION = '0.89.0';
+ const PROMPTBOOK_ENGINE_VERSION = '0.92.0-11';
  /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -1787,7 +1787,7 @@ function isPipelinePrepared(pipeline) {
  if (pipeline.title === undefined || pipeline.title === '' || pipeline.title === DEFAULT_BOOK_TITLE) {
  return false;
  }
- if (!pipeline.personas.every((persona) => persona.modelRequirements !== undefined)) {
+ if (!pipeline.personas.every((persona) => persona.modelsRequirements !== undefined)) {
  return false;
  }
  if (!pipeline.knowledgeSources.every((knowledgeSource) => knowledgeSource.preparationIds !== undefined)) {
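
Note: the `modelRequirements` → `modelsRequirements` rename runs through the whole bundle: a prepared persona now carries an array of candidate model requirements instead of a single object. A minimal TypeScript sketch of the shape implied by the hunks in this diff (the authoritative typings are in `esm/typings/src/pipeline/PipelineJson/PersonaJson.d.ts`, changed above with +4 -2):

```typescript
// Sketch only, inferred from this diff; see PersonaJson.d.ts for the published typings.
type ModelRequirementsSketch = {
    modelVariant: 'CHAT';
    modelName: string;
    systemMessage: string;
    temperature: number;
};

type PreparedPersonaSketch = {
    name: string;
    description: string;
    // 0.89.0: modelRequirements?: ModelRequirementsSketch;
    // 0.92.0: an ordered list of acceptable models, best fit first:
    modelsRequirements?: ReadonlyArray<ModelRequirementsSketch>;
};

// `isPipelinePrepared` now checks the plural field:
const isPersonaPrepared = (persona: PreparedPersonaSketch): boolean =>
    persona.modelsRequirements !== undefined;
```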
@@ -1833,6 +1833,45 @@ function isValidJsonString(value /* <- [👨‍⚖️] */) {
  }
  }
 
+ /**
+ * Converts a JavaScript Object Notation (JSON) string into an object.
+ *
+ * Note: This is wrapper around `JSON.parse()` with better error and type handling
+ *
+ * @public exported from `@promptbook/utils`
+ */
+ function jsonParse(value) {
+ if (value === undefined) {
+ throw new Error(`Can not parse JSON from undefined value.`);
+ }
+ else if (typeof value !== 'string') {
+ console.error('Can not parse JSON from non-string value.', { text: value });
+ throw new Error(spaceTrim(`
+ Can not parse JSON from non-string value.
+
+ The value type: ${typeof value}
+ See more in console.
+ `));
+ }
+ try {
+ return JSON.parse(value);
+ }
+ catch (error) {
+ if (!(error instanceof Error)) {
+ throw error;
+ }
+ throw new Error(spaceTrim((block) => `
+ ${block(error.message)}
+
+ The JSON text:
+ ${block(value)}
+ `));
+ }
+ }
+ /**
+ * TODO: !!!! Use in Promptbook.studio
+ */
+
  /**
  * Recursively converts JSON strings to JSON objects
 
@@ -1851,7 +1890,7 @@ function jsonStringsToJsons(object) {
  const newObject = { ...object };
  for (const [key, value] of Object.entries(object)) {
  if (typeof value === 'string' && isValidJsonString(value)) {
- newObject[key] = JSON.parse(value);
+ newObject[key] = jsonParse(value);
  }
  else {
  newObject[key] = jsonStringsToJsons(value);
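
The new `jsonParse` helper (exported from `@promptbook/utils`, see `utils.index.d.ts` above) is a drop-in replacement for `JSON.parse` that rejects `undefined` and non-string inputs explicitly and, on a parse failure, rethrows with the offending JSON text embedded in the message; `jsonStringsToJsons` now routes through it. A hedged usage sketch (the cast is for illustration only; the published typings may expose a generic return type):

```typescript
import { jsonParse } from '@promptbook/utils';

// Valid JSON parses exactly like JSON.parse:
const requirements = jsonParse('{"modelName":"gpt-4o","temperature":0.7}') as {
    modelName: string;
    temperature: number;
};
console.info(requirements.modelName); // 'gpt-4o'

// Invalid JSON fails with a message that includes the text that failed to parse,
// which is the practical difference from calling JSON.parse directly:
try {
    jsonParse('{"modelName": "gpt-4o",}'); // <- trailing comma is not valid JSON
} catch (error) {
    console.error((error as Error).message); // parser error + the offending JSON text
}
```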
@@ -2028,7 +2067,7 @@ function createTask(options) {
2028
2067
  * TODO: [🐚] Split into more files and make `PrepareTask` & `RemoteTask` + split the function
2029
2068
  */
2030
2069
 
2031
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModelNames}` List of available model names separated by comma (,)\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n\\`\\`\\`json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
2070
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModels",description:"List of available model names together with their descriptions as JSON",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelsRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n```json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n```\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n```json\n{availableModels}\n```\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. 
By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelsRequirements",format:"JSON",dependentParameterNames:["availableModels","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModels}` List of available model names together with their descriptions as JSON\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelsRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n\\`\\`\\`json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n\\`\\`\\`json\n{availableModels}\n\\`\\`\\`\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelsRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
2032
2071
 
2033
2072
  /**
2034
2073
  * Checks if value is valid email
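
The embedded `prepare-persona.book` pipeline changed shape: its input is now `{availableModels}`, a JSON list of model names with descriptions, instead of the comma-separated `{availableModelNames}` string, and its output is `{modelsRequirements}`, a JSON array sorted best-fitting model first. A hedged sketch of the exchanged values, with field names taken from this diff (the descriptions are hypothetical):

```typescript
// What the caller now feeds into {availableModels} (see preparePersona below):
const availableModels = [
    { modelName: 'gpt-4o', modelDescription: 'General-purpose chat model' },
    { modelName: 'claude-3-5-sonnet', modelDescription: 'Long-context chat model' },
];

// What the pipeline is asked to return in {modelsRequirements}:
type ModelsRequirementsOutput = Array<{
    modelName: string; // <- one of the available models, best fit first
    systemMessage: string;
    temperature: number; // <- between 0 and 2, per the instructions in the book
}>;
```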
@@ -2331,7 +2370,7 @@ function extractParameterNames(template) {
  */
  function unpreparePipeline(pipeline) {
  let { personas, knowledgeSources, tasks } = pipeline;
- personas = personas.map((persona) => ({ ...persona, modelRequirements: undefined, preparationIds: undefined }));
+ personas = personas.map((persona) => ({ ...persona, modelsRequirements: undefined, preparationIds: undefined }));
  knowledgeSources = knowledgeSources.map((knowledgeSource) => ({ ...knowledgeSource, preparationIds: undefined }));
  tasks = tasks.map((task) => {
  let { dependentParameterNames } = task;
@@ -2919,27 +2958,48 @@ async function preparePersona(personaDescription, tools, options) {
2919
2958
  pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book'),
2920
2959
  tools,
2921
2960
  });
2922
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
2923
2961
  const _llms = arrayableToArray(tools.llm);
2924
2962
  const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
2925
- const availableModels = await llmTools.listModels();
2926
- const availableModelNames = availableModels
2963
+ const availableModels = (await llmTools.listModels())
2927
2964
  .filter(({ modelVariant }) => modelVariant === 'CHAT')
2928
- .map(({ modelName }) => modelName)
2929
- .join(',');
2930
- const result = await preparePersonaExecutor({ availableModelNames, personaDescription }).asPromise();
2965
+ .map(({ modelName, modelDescription }) => ({
2966
+ modelName,
2967
+ modelDescription,
2968
+ // <- Note: `modelTitle` and `modelVariant` is not relevant for this task
2969
+ }));
2970
+ const result = await preparePersonaExecutor({
2971
+ availableModels /* <- Note: Passing as JSON */,
2972
+ personaDescription,
2973
+ }).asPromise();
2931
2974
  const { outputParameters } = result;
2932
- const { modelRequirements: modelRequirementsRaw } = outputParameters;
2933
- const modelRequirements = JSON.parse(modelRequirementsRaw);
2975
+ const { modelsRequirements: modelsRequirementsJson } = outputParameters;
2976
+ let modelsRequirementsUnchecked = jsonParse(modelsRequirementsJson);
2934
2977
  if (isVerbose) {
2935
- console.info(`PERSONA ${personaDescription}`, modelRequirements);
2978
+ console.info(`PERSONA ${personaDescription}`, modelsRequirementsUnchecked);
2936
2979
  }
2937
- const { modelName, systemMessage, temperature } = modelRequirements;
2938
- return {
2980
+ if (!Array.isArray(modelsRequirementsUnchecked)) {
2981
+ // <- TODO: Book should have syntax and system to enforce shape of JSON
2982
+ modelsRequirementsUnchecked = [modelsRequirementsUnchecked];
2983
+ /*
2984
+ throw new UnexpectedError(
2985
+ spaceTrim(
2986
+ (block) => `
2987
+ Invalid \`modelsRequirements\`:
2988
+
2989
+ \`\`\`json
2990
+ ${block(JSON.stringify(modelsRequirementsUnchecked, null, 4))}
2991
+ \`\`\`
2992
+ `,
2993
+ ),
2994
+ );
2995
+ */
2996
+ }
2997
+ const modelsRequirements = modelsRequirementsUnchecked.map((modelRequirements) => ({
2939
2998
  modelVariant: 'CHAT',
2940
- modelName,
2941
- systemMessage,
2942
- temperature,
2999
+ ...modelRequirements,
3000
+ }));
3001
+ return {
3002
+ modelsRequirements,
2943
3003
  };
2944
3004
  }
2945
3005
  /**
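
`preparePersona` therefore resolves to `{ modelsRequirements }`: the model's JSON answer is parsed with `jsonParse`, wrapped in an array if a bare object comes back, and every entry is spread over a `modelVariant: 'CHAT'` default. A hedged sketch of consuming it (option values mirror the `preparePipeline` call further below; `tools` is assumed to be a configured `ExecutionTools` with at least `llm` set):

```typescript
// Sketch only; see preparePersona.d.ts for the published signature.
const { modelsRequirements } = await preparePersona(
    'Friendly senior TypeScript mentor', // <- hypothetical persona description
    tools,
    { rootDirname: process.cwd(), maxParallelCount: 1, isVerbose: true },
);

for (const requirements of modelsRequirements) {
    // Each entry is { modelVariant: 'CHAT', modelName, systemMessage, temperature },
    // ordered with the best-fitting model first.
    console.info(requirements.modelName, requirements.temperature);
}
```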
@@ -3763,7 +3823,7 @@ async function makeKnowledgeSourceHandler(knowledgeSource, tools, options) {
  > },
  */
  async asJson() {
- return JSON.parse(await tools.fs.readFile(filename, 'utf-8'));
+ return jsonParse(await tools.fs.readFile(filename, 'utf-8'));
  },
  async asText() {
  return await tools.fs.readFile(filename, 'utf-8');
@@ -4021,14 +4081,14 @@ async function preparePipeline(pipeline, tools, options) {
  // TODO: [🖌][🧠] Implement some `mapAsync` function
  const preparedPersonas = new Array(personas.length);
  await forEachAsync(personas, { maxParallelCount /* <- TODO: [🪂] When there are subtasks, this maximul limit can be broken */ }, async (persona, index) => {
- const modelRequirements = await preparePersona(persona.description, { ...tools, llm: llmToolsWithUsage }, {
+ const { modelsRequirements } = await preparePersona(persona.description, { ...tools, llm: llmToolsWithUsage }, {
  rootDirname,
  maxParallelCount /* <- TODO: [🪂] */,
  isVerbose,
  });
  const preparedPersona = {
  ...persona,
- modelRequirements,
+ modelsRequirements,
  preparationIds: [/* TODO: [🧊] -> */ currentPreparation.id],
  // <- TODO: [🍙] Make some standard order of json properties
  };
@@ -4367,6 +4427,24 @@ function isValidCsvString(value) {
  }
  }
 
+ /**
+ * Converts a CSV string into an object
+ *
+ * Note: This is wrapper around `papaparse.parse()` with better autohealing
+ *
+ * @private - for now until `@promptbook/csv` is released
+ */
+ function csvParse(value /* <- TODO: string_csv */, settings, schema /* <- TODO: Make CSV Schemas */) {
+ settings = { ...settings, ...MANDATORY_CSV_SETTINGS };
+ // Note: Autoheal invalid '\n' characters
+ if (settings.newline && !settings.newline.includes('\r') && value.includes('\r')) {
+ console.warn('CSV string contains carriage return characters, but in the CSV settings the `newline` setting does not include them. Autohealing the CSV string.');
+ value = value.replace(/\r\n/g, '\n').replace(/\r/g, '\n');
+ }
+ const csv = parse(value, settings);
+ return csv;
+ }
+
  /**
  * Definition for CSV spreadsheet
  *
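
`csvParse` wraps `papaparse.parse()` and, before parsing, normalizes carriage returns whenever the configured `newline` does not expect them, so Windows-style `\r\n` input no longer trips the parser. A standalone sketch of that autohealing rule (using `papaparse` directly; the bundle's `MANDATORY_CSV_SETTINGS` is not repeated here):

```typescript
import { parse, type ParseConfig } from 'papaparse';

// Sketch of the normalization performed by csvParse before delegating to papaparse.
function parseWithAutoheal(value: string, settings: ParseConfig) {
    if (settings.newline && !settings.newline.includes('\r') && value.includes('\r')) {
        console.warn('CSV contains carriage returns but `newline` does not; normalizing to \\n.');
        value = value.replace(/\r\n/g, '\n').replace(/\r/g, '\n');
    }
    return parse(value, settings);
}

// A file saved with Windows line endings now parses the same as a Unix one:
const result = parseWithAutoheal('a,b\r\n1,2\r\n', { header: true, newline: '\n' });
console.info(result.data); // [{ a: '1', b: '2' }]
```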
@@ -4389,8 +4467,7 @@ const CsvFormatDefinition = {
  {
  subvalueName: 'ROW',
  async mapValues(value, outputParameterName, settings, mapCallback) {
- // TODO: [👨🏾‍🤝‍👨🏼] DRY csv parsing
- const csv = parse(value, { ...settings, ...MANDATORY_CSV_SETTINGS });
+ const csv = csvParse(value, settings);
  if (csv.errors.length !== 0) {
  throw new CsvFormatError(spaceTrim((block) => `
  CSV parsing error
@@ -4420,8 +4497,7 @@ const CsvFormatDefinition = {
  {
  subvalueName: 'CELL',
  async mapValues(value, outputParameterName, settings, mapCallback) {
- // TODO: [👨🏾‍🤝‍👨🏼] DRY csv parsing
- const csv = parse(value, { ...settings, ...MANDATORY_CSV_SETTINGS });
+ const csv = csvParse(value, settings);
  if (csv.errors.length !== 0) {
  throw new CsvFormatError(spaceTrim((block) => `
  CSV parsing error
@@ -5467,13 +5543,79 @@ async function getExamplesForTask(task) {
5467
5543
  /**
5468
5544
  * @@@
5469
5545
  *
5546
+ * Here is the place where RAG (retrieval-augmented generation) happens
5547
+ *
5470
5548
  * @private internal utility of `createPipelineExecutor`
5471
5549
  */
5472
5550
  async function getKnowledgeForTask(options) {
5473
- const { preparedPipeline, task } = options;
5474
- return preparedPipeline.knowledgePieces.map(({ content }) => `- ${content}`).join('\n');
5551
+ const { tools, preparedPipeline, task } = options;
5552
+ const firstKnowlegePiece = preparedPipeline.knowledgePieces[0];
5553
+ const firstKnowlegeIndex = firstKnowlegePiece === null || firstKnowlegePiece === void 0 ? void 0 : firstKnowlegePiece.index[0];
5554
+ // <- TODO: Do not use just first knowledge piece and first index to determine embedding model, use also keyword search
5555
+ if (firstKnowlegePiece === undefined || firstKnowlegeIndex === undefined) {
5556
+ return 'No knowledge pieces found';
5557
+ }
5558
+ // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
5559
+ const _llms = arrayableToArray(tools.llm);
5560
+ const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
5561
+ const taskEmbeddingPrompt = {
5562
+ title: 'Knowledge Search',
5563
+ modelRequirements: {
5564
+ modelVariant: 'EMBEDDING',
5565
+ modelName: firstKnowlegeIndex.modelName,
5566
+ },
5567
+ content: task.content,
5568
+ parameters: {
5569
+ /* !!!!!!!! */
5570
+ },
5571
+ };
5572
+ const taskEmbeddingResult = await llmTools.callEmbeddingModel(taskEmbeddingPrompt);
5573
+ const knowledgePiecesWithRelevance = preparedPipeline.knowledgePieces.map((knowledgePiece) => {
5574
+ const { index } = knowledgePiece;
5575
+ const knowledgePieceIndex = index.find((i) => i.modelName === firstKnowlegeIndex.modelName);
5576
+ // <- TODO: Do not use just first knowledge piece and first index to determine embedding model
5577
+ if (knowledgePieceIndex === undefined) {
5578
+ return {
5579
+ content: knowledgePiece.content,
5580
+ relevance: 0,
5581
+ };
5582
+ }
5583
+ const relevance = computeCosineSimilarity(knowledgePieceIndex.position, taskEmbeddingResult.content);
5584
+ return {
5585
+ content: knowledgePiece.content,
5586
+ relevance,
5587
+ };
5588
+ });
5589
+ const knowledgePiecesSorted = knowledgePiecesWithRelevance.sort((a, b) => a.relevance - b.relevance);
5590
+ const knowledgePiecesLimited = knowledgePiecesSorted.slice(0, 5);
5591
+ console.log('!!! Embedding', {
5592
+ task,
5593
+ taskEmbeddingPrompt,
5594
+ taskEmbeddingResult,
5595
+ firstKnowlegePiece,
5596
+ firstKnowlegeIndex,
5597
+ knowledgePiecesWithRelevance,
5598
+ knowledgePiecesSorted,
5599
+ knowledgePiecesLimited,
5600
+ });
5601
+ return knowledgePiecesLimited.map(({ content }) => `- ${content}`).join('\n');
5475
5602
  // <- TODO: [🧠] Some smart aggregation of knowledge pieces, single-line vs multi-line vs mixed
5476
5603
  }
5604
+ // TODO: !!!!!! Annotate + to new file
5605
+ function computeCosineSimilarity(embeddingVector1, embeddingVector2) {
5606
+ if (embeddingVector1.length !== embeddingVector2.length) {
5607
+ throw new TypeError('Embedding vectors must have the same length');
5608
+ }
5609
+ const dotProduct = embeddingVector1.reduce((sum, value, index) => sum + value * embeddingVector2[index], 0);
5610
+ const magnitude1 = Math.sqrt(embeddingVector1.reduce((sum, value) => sum + value * value, 0));
5611
+ const magnitude2 = Math.sqrt(embeddingVector2.reduce((sum, value) => sum + value * value, 0));
5612
+ return 1 - dotProduct / (magnitude1 * magnitude2);
5613
+ }
5614
+ /**
5615
+ * TODO: !!!! Verify if this is working
5616
+ * TODO: [♨] Implement Better - use keyword search
5617
+ * TODO: [♨] Examples of values
5618
+ */
5477
5619
 
5478
5620
  /**
5479
5621
  * @@@
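
Two observations on the retrieval code above. Despite its name, `computeCosineSimilarity` returns `1 − cos(θ)`, i.e. a cosine distance (0 = same direction, 2 = opposite), which is why the pieces are sorted ascending by `relevance` and the first five are kept; note also that pieces with no matching index get `relevance: 0`, which under that distance reading ranks them as most relevant, consistent with the `!!!!` "verify if this is working" TODO. A small worked check of the metric:

```typescript
// The same arithmetic as computeCosineSimilarity above, typed, to show the scale:
// smaller values mean "more relevant", matching the ascending sort + slice(0, 5).
function cosineDistance(a: ReadonlyArray<number>, b: ReadonlyArray<number>): number {
    if (a.length !== b.length) {
        throw new TypeError('Embedding vectors must have the same length');
    }
    const dot = a.reduce((sum, value, index) => sum + value * b[index]!, 0);
    const magnitudeA = Math.sqrt(a.reduce((sum, value) => sum + value * value, 0));
    const magnitudeB = Math.sqrt(b.reduce((sum, value) => sum + value * value, 0));
    return 1 - dot / (magnitudeA * magnitudeB);
}

console.info(cosineDistance([1, 0], [1, 0]));  // 0 <- identical direction, most relevant
console.info(cosineDistance([1, 0], [0, 1]));  // 1 <- orthogonal
console.info(cosineDistance([1, 0], [-1, 0])); // 2 <- opposite direction, least relevant
```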
@@ -5481,9 +5623,9 @@ async function getKnowledgeForTask(options) {
  * @private internal utility of `createPipelineExecutor`
  */
  async function getReservedParametersForTask(options) {
- const { preparedPipeline, task, pipelineIdentification } = options;
+ const { tools, preparedPipeline, task, pipelineIdentification } = options;
  const context = await getContextForTask(); // <- [🏍]
- const knowledge = await getKnowledgeForTask({ preparedPipeline, task });
+ const knowledge = await getKnowledgeForTask({ tools, preparedPipeline, task });
  const examples = await getExamplesForTask();
  const currentDate = new Date().toISOString(); // <- TODO: [🧠][💩] Better
  const modelName = RESERVED_PARAMETER_MISSING_VALUE;
@@ -5545,6 +5687,7 @@ async function executeTask(options) {
  }
  const definedParameters = Object.freeze({
  ...(await getReservedParametersForTask({
+ tools,
  preparedPipeline,
  task: currentTask,
  pipelineIdentification,
@@ -6835,7 +6978,7 @@ async function $provideScriptingForNode(options) {
  const openapiJson = {
  openapi: '3.0.0',
  info: {
- title: 'Promptbook Remote Server API (!!!! From TS)',
+ title: 'Promptbook Remote Server API (!!!! From YML)',
  version: '1.0.0',
  description: 'API documentation for the Promptbook Remote Server',
  },
@@ -6847,6 +6990,13 @@ const openapiJson = {
6847
6990
  responses: {
6848
6991
  '200': {
6849
6992
  description: 'Server details in markdown format.',
6993
+ content: {
6994
+ 'text/markdown': {
6995
+ schema: {
6996
+ type: 'string',
6997
+ },
6998
+ },
6999
+ },
6850
7000
  },
6851
7001
  },
6852
7002
  },
@@ -6877,13 +7027,22 @@ const openapiJson = {
6877
7027
  },
6878
7028
  },
6879
7029
  responses: {
6880
- '200': {
7030
+ '201': {
6881
7031
  description: 'Successful login',
6882
7032
  content: {
6883
7033
  'application/json': {
6884
7034
  schema: {
6885
7035
  type: 'object',
6886
7036
  properties: {
7037
+ isSuccess: {
7038
+ type: 'boolean',
7039
+ },
7040
+ message: {
7041
+ type: 'string',
7042
+ },
7043
+ error: {
7044
+ type: 'object',
7045
+ },
6887
7046
  identification: {
6888
7047
  type: 'object',
6889
7048
  },
@@ -6892,6 +7051,43 @@ const openapiJson = {
6892
7051
  },
6893
7052
  },
6894
7053
  },
7054
+ '400': {
7055
+ description: 'Bad request or login failed',
7056
+ content: {
7057
+ 'application/json': {
7058
+ schema: {
7059
+ type: 'object',
7060
+ properties: {
7061
+ error: {
7062
+ type: 'object',
7063
+ },
7064
+ },
7065
+ },
7066
+ },
7067
+ },
7068
+ },
7069
+ '401': {
7070
+ description: 'Authentication error',
7071
+ content: {
7072
+ 'application/json': {
7073
+ schema: {
7074
+ type: 'object',
7075
+ properties: {
7076
+ isSuccess: {
7077
+ type: 'boolean',
7078
+ enum: [false],
7079
+ },
7080
+ message: {
7081
+ type: 'string',
7082
+ },
7083
+ error: {
7084
+ type: 'object',
7085
+ },
7086
+ },
7087
+ },
7088
+ },
7089
+ },
7090
+ },
6895
7091
  },
6896
7092
  },
6897
7093
  },
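
The login path now documents `201` (success, returning `identification` plus `isSuccess`/`message`), `400` (bad request or failed login) and `401` (authentication error). A hedged client sketch against that contract; the server URL and the `/login` route below are placeholders, so consult the served `/openapi` document for the actual path:

```typescript
// Placeholder URL and route; the real path is defined by the running remote server.
async function login(serverUrl: string, credentials: Record<string, string>) {
    const response = await fetch(`${serverUrl}/login`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify(credentials),
    });

    const body = (await response.json()) as {
        isSuccess?: boolean;
        message?: string;
        error?: object;
        identification?: object;
    };

    if (response.status === 201 && body.isSuccess !== false) {
        return body.identification; // <- reuse for subsequent authenticated requests
    }

    // 400 = bad request or failed login, 401 = authentication error
    throw new Error(body.message ?? `Login failed with HTTP ${response.status}`);
}
```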
@@ -6913,6 +7109,16 @@ const openapiJson = {
6913
7109
  },
6914
7110
  },
6915
7111
  },
7112
+ '500': {
7113
+ description: 'No collection available',
7114
+ content: {
7115
+ 'text/plain': {
7116
+ schema: {
7117
+ type: 'string',
7118
+ },
7119
+ },
7120
+ },
7121
+ },
6916
7122
  },
6917
7123
  },
6918
7124
  },
@@ -6944,6 +7150,28 @@ const openapiJson = {
6944
7150
  },
6945
7151
  '404': {
6946
7152
  description: 'Book not found.',
7153
+ content: {
7154
+ 'application/json': {
7155
+ schema: {
7156
+ type: 'object',
7157
+ properties: {
7158
+ error: {
7159
+ type: 'object',
7160
+ },
7161
+ },
7162
+ },
7163
+ },
7164
+ },
7165
+ },
7166
+ '500': {
7167
+ description: 'No collection available',
7168
+ content: {
7169
+ 'text/plain': {
7170
+ schema: {
7171
+ type: 'string',
7172
+ },
7173
+ },
7174
+ },
6947
7175
  },
6948
7176
  },
6949
7177
  },
@@ -6961,11 +7189,174 @@ const openapiJson = {
6961
7189
  type: 'array',
6962
7190
  items: {
6963
7191
  type: 'object',
7192
+ properties: {
7193
+ nonce: {
7194
+ type: 'string',
7195
+ },
7196
+ taskId: {
7197
+ type: 'string',
7198
+ },
7199
+ taskType: {
7200
+ type: 'string',
7201
+ },
7202
+ status: {
7203
+ type: 'string',
7204
+ },
7205
+ createdAt: {
7206
+ type: 'string',
7207
+ format: 'date-time',
7208
+ },
7209
+ updatedAt: {
7210
+ type: 'string',
7211
+ format: 'date-time',
7212
+ },
7213
+ },
7214
+ },
7215
+ },
7216
+ },
7217
+ },
7218
+ },
7219
+ },
7220
+ },
7221
+ },
7222
+ '/executions/last': {
7223
+ get: {
7224
+ summary: 'Get the last execution',
7225
+ description: 'Returns details of the last execution task.',
7226
+ responses: {
7227
+ '200': {
7228
+ description: 'The last execution task with full details.',
7229
+ content: {
7230
+ 'application/json': {
7231
+ schema: {
7232
+ type: 'object',
7233
+ properties: {
7234
+ nonce: {
7235
+ type: 'string',
7236
+ },
7237
+ taskId: {
7238
+ type: 'string',
7239
+ },
7240
+ taskType: {
7241
+ type: 'string',
7242
+ },
7243
+ status: {
7244
+ type: 'string',
7245
+ },
7246
+ errors: {
7247
+ type: 'array',
7248
+ items: {
7249
+ type: 'object',
7250
+ },
7251
+ },
7252
+ warnings: {
7253
+ type: 'array',
7254
+ items: {
7255
+ type: 'object',
7256
+ },
7257
+ },
7258
+ createdAt: {
7259
+ type: 'string',
7260
+ format: 'date-time',
7261
+ },
7262
+ updatedAt: {
7263
+ type: 'string',
7264
+ format: 'date-time',
7265
+ },
7266
+ currentValue: {
7267
+ type: 'object',
7268
+ },
6964
7269
  },
6965
7270
  },
6966
7271
  },
6967
7272
  },
6968
7273
  },
7274
+ '404': {
7275
+ description: 'No execution tasks found.',
7276
+ content: {
7277
+ 'text/plain': {
7278
+ schema: {
7279
+ type: 'string',
7280
+ },
7281
+ },
7282
+ },
7283
+ },
7284
+ },
7285
+ },
7286
+ },
7287
+ '/executions/{taskId}': {
7288
+ get: {
7289
+ summary: 'Get specific execution',
7290
+ description: 'Returns details of a specific execution task.',
7291
+ parameters: [
7292
+ {
7293
+ in: 'path',
7294
+ name: 'taskId',
7295
+ required: true,
7296
+ schema: {
7297
+ type: 'string',
7298
+ },
7299
+ description: 'The ID of the execution task to retrieve.',
7300
+ },
7301
+ ],
7302
+ responses: {
7303
+ '200': {
7304
+ description: 'The execution task with full details.',
7305
+ content: {
7306
+ 'application/json': {
7307
+ schema: {
7308
+ type: 'object',
7309
+ properties: {
7310
+ nonce: {
7311
+ type: 'string',
7312
+ },
7313
+ taskId: {
7314
+ type: 'string',
7315
+ },
7316
+ taskType: {
7317
+ type: 'string',
7318
+ },
7319
+ status: {
7320
+ type: 'string',
7321
+ },
7322
+ errors: {
7323
+ type: 'array',
7324
+ items: {
7325
+ type: 'object',
7326
+ },
7327
+ },
7328
+ warnings: {
7329
+ type: 'array',
7330
+ items: {
7331
+ type: 'object',
7332
+ },
7333
+ },
7334
+ createdAt: {
7335
+ type: 'string',
7336
+ format: 'date-time',
7337
+ },
7338
+ updatedAt: {
7339
+ type: 'string',
7340
+ format: 'date-time',
7341
+ },
7342
+ currentValue: {
7343
+ type: 'object',
7344
+ },
7345
+ },
7346
+ },
7347
+ },
7348
+ },
7349
+ },
7350
+ '404': {
7351
+ description: 'Execution task not found.',
7352
+ content: {
7353
+ 'text/plain': {
7354
+ schema: {
7355
+ type: 'string',
7356
+ },
7357
+ },
7358
+ },
7359
+ },
6969
7360
  },
6970
7361
  },
6971
7362
  },
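
The spec now documents `GET /executions/last` and `GET /executions/{taskId}`, both returning the full task record (`nonce`, `taskId`, `taskType`, `status`, `errors`, `warnings`, `createdAt`, `updatedAt`, `currentValue`) and a plain-text `404` when nothing matches. A hedged polling sketch; the terminal `status` values are not enumerated in this hunk, so the set below is an assumption:

```typescript
// Sketch of polling GET /executions/{taskId} as documented above.
type ExecutionTaskSketch = {
    nonce: string;
    taskId: string;
    taskType: string;
    status: string;
    errors: Array<object>;
    warnings: Array<object>;
    createdAt: string;
    updatedAt: string;
    currentValue: object;
};

async function waitForExecution(serverUrl: string, taskId: string): Promise<ExecutionTaskSketch> {
    for (;;) {
        const response = await fetch(`${serverUrl}/executions/${encodeURIComponent(taskId)}`);
        if (response.status === 404) {
            throw new Error(`Execution task ${taskId} not found`);
        }
        const task = (await response.json()) as ExecutionTaskSketch;
        if (['SUCCESS', 'ERROR'].includes(task.status)) {
            // <- assumed terminal statuses; adjust to the real TaskStatus values
            return task;
        }
        await new Promise((resolve) => setTimeout(resolve, 1000)); // poll once per second
    }
}
```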
@@ -6982,12 +7373,19 @@ const openapiJson = {
6982
7373
  properties: {
6983
7374
  pipelineUrl: {
6984
7375
  type: 'string',
7376
+ description: 'URL of the pipeline to execute',
7377
+ },
7378
+ book: {
7379
+ type: 'string',
7380
+ description: 'Alternative field for pipelineUrl',
6985
7381
  },
6986
7382
  inputParameters: {
6987
7383
  type: 'object',
7384
+ description: 'Parameters for pipeline execution',
6988
7385
  },
6989
7386
  identification: {
6990
7387
  type: 'object',
7388
+ description: 'User identification data',
6991
7389
  },
6992
7390
  },
6993
7391
  },
@@ -7007,13 +7405,164 @@ const openapiJson = {
7007
7405
  },
7008
7406
  '400': {
7009
7407
  description: 'Invalid input.',
7408
+ content: {
7409
+ 'application/json': {
7410
+ schema: {
7411
+ type: 'object',
7412
+ properties: {
7413
+ error: {
7414
+ type: 'object',
7415
+ },
7416
+ },
7417
+ },
7418
+ },
7419
+ },
7420
+ },
7421
+ '404': {
7422
+ description: 'Pipeline not found.',
7423
+ content: {
7424
+ 'text/plain': {
7425
+ schema: {
7426
+ type: 'string',
7427
+ },
7428
+ },
7429
+ },
7430
+ },
7431
+ },
7432
+ },
7433
+ },
7434
+ '/api-docs': {
7435
+ get: {
7436
+ summary: 'API documentation UI',
7437
+ description: 'Swagger UI for API documentation',
7438
+ responses: {
7439
+ '200': {
7440
+ description: 'HTML Swagger UI',
7441
+ },
7442
+ },
7443
+ },
7444
+ },
7445
+ '/swagger': {
7446
+ get: {
7447
+ summary: 'API documentation UI (alternative path)',
7448
+ description: 'Swagger UI for API documentation',
7449
+ responses: {
7450
+ '200': {
7451
+ description: 'HTML Swagger UI',
7452
+ },
7453
+ },
7454
+ },
7455
+ },
7456
+ '/openapi': {
7457
+ get: {
7458
+ summary: 'OpenAPI specification',
7459
+ description: 'Returns the OpenAPI JSON specification',
7460
+ responses: {
7461
+ '200': {
7462
+ description: 'OpenAPI specification',
7463
+ content: {
7464
+ 'application/json': {
7465
+ schema: {
7466
+ type: 'object',
7467
+ },
7468
+ },
7469
+ },
7470
+ },
7471
+ },
7472
+ },
7473
+ },
7474
+ },
7475
+ components: {
7476
+ schemas: {
7477
+ Error: {
7478
+ type: 'object',
7479
+ properties: {
7480
+ error: {
7481
+ type: 'object',
7482
+ },
7483
+ },
7484
+ },
7485
+ ExecutionTaskSummary: {
7486
+ type: 'object',
7487
+ properties: {
7488
+ nonce: {
7489
+ type: 'string',
7490
+ },
7491
+ taskId: {
7492
+ type: 'string',
7493
+ },
7494
+ taskType: {
7495
+ type: 'string',
7496
+ },
7497
+ status: {
7498
+ type: 'string',
7499
+ },
7500
+ createdAt: {
7501
+ type: 'string',
7502
+ format: 'date-time',
7503
+ },
7504
+ updatedAt: {
7505
+ type: 'string',
7506
+ format: 'date-time',
7507
+ },
7508
+ },
7509
+ },
7510
+ ExecutionTaskFull: {
7511
+ type: 'object',
7512
+ properties: {
7513
+ nonce: {
7514
+ type: 'string',
7515
+ },
7516
+ taskId: {
7517
+ type: 'string',
7518
+ },
7519
+ taskType: {
7520
+ type: 'string',
7521
+ },
7522
+ status: {
7523
+ type: 'string',
7524
+ },
7525
+ errors: {
7526
+ type: 'array',
7527
+ items: {
7528
+ type: 'object',
7529
+ },
7530
+ },
7531
+ warnings: {
7532
+ type: 'array',
7533
+ items: {
7534
+ type: 'object',
7535
+ },
7536
+ },
7537
+ createdAt: {
7538
+ type: 'string',
7539
+ format: 'date-time',
7540
+ },
7541
+ updatedAt: {
7542
+ type: 'string',
7543
+ format: 'date-time',
7544
+ },
7545
+ currentValue: {
7546
+ type: 'object',
7010
7547
  },
7011
7548
  },
7012
7549
  },
7013
7550
  },
7014
7551
  },
7015
- components: {},
7016
- tags: [],
7552
+ tags: [
7553
+ {
7554
+ name: 'Books',
7555
+ description: 'Operations related to books and pipelines',
7556
+ },
7557
+ {
7558
+ name: 'Executions',
7559
+ description: 'Operations related to execution tasks',
7560
+ },
7561
+ {
7562
+ name: 'Authentication',
7563
+ description: 'Authentication operations',
7564
+ },
7565
+ ],
7017
7566
  };
7018
7567
  /**
7019
7568
  * Note: [💞] Ignore a discrepancy between file name and entity name
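
Besides the endpoint documentation, the spec gains `/api-docs` and `/swagger` (Swagger UI), `/openapi` (the JSON specification itself), reusable `Error`, `ExecutionTaskSummary` and `ExecutionTaskFull` component schemas, and `Books`/`Executions`/`Authentication` tags in place of the previously empty `components`/`tags`. A hedged smoke-test sketch that fetches the served spec and checks for those additions (`serverUrl` is a placeholder):

```typescript
// Fetch the served OpenAPI document and verify the schemas and tags added in this release.
async function checkOpenapi(serverUrl: string): Promise<void> {
    const response = await fetch(`${serverUrl}/openapi`);
    const spec = (await response.json()) as {
        components?: { schemas?: Record<string, unknown> };
        tags?: Array<{ name: string }>;
    };

    for (const schema of ['Error', 'ExecutionTaskSummary', 'ExecutionTaskFull']) {
        if (!spec.components?.schemas?.[schema]) {
            throw new Error(`Missing component schema: ${schema}`);
        }
    }

    const tagNames = (spec.tags ?? []).map(({ name }) => name);
    for (const tag of ['Books', 'Executions', 'Authentication']) {
        if (!tagNames.includes(tag)) {
            throw new Error(`Missing tag: ${tag}`);
        }
    }

    console.info('OpenAPI document exposes the schemas and tags added in 0.92.0-11');
}
```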