@promptbook/remote-server 0.89.0 → 0.92.0-10
This diff shows the content of publicly released package versions as published to the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- package/README.md +4 -0
- package/esm/index.es.js +583 -34
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/core.index.d.ts +6 -0
- package/esm/typings/src/_packages/deepseek.index.d.ts +2 -0
- package/esm/typings/src/_packages/google.index.d.ts +2 -0
- package/esm/typings/src/_packages/utils.index.d.ts +2 -0
- package/esm/typings/src/cli/common/$provideLlmToolsForCli.d.ts +1 -1
- package/esm/typings/src/conversion/archive/loadArchive.d.ts +2 -2
- package/esm/typings/src/execution/CommonToolsOptions.d.ts +4 -0
- package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +12 -0
- package/esm/typings/src/execution/createPipelineExecutor/getReservedParametersForTask.d.ts +5 -0
- package/esm/typings/src/formats/csv/utils/csvParse.d.ts +12 -0
- package/esm/typings/src/formats/json/utils/jsonParse.d.ts +11 -0
- package/esm/typings/src/llm-providers/_common/filterModels.d.ts +15 -0
- package/esm/typings/src/llm-providers/_common/register/LlmToolsMetadata.d.ts +43 -0
- package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +4 -0
- package/esm/typings/src/llm-providers/deepseek/deepseek-models.d.ts +23 -0
- package/esm/typings/src/llm-providers/google/google-models.d.ts +23 -0
- package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +4 -0
- package/esm/typings/src/personas/preparePersona.d.ts +1 -1
- package/esm/typings/src/pipeline/PipelineJson/PersonaJson.d.ts +4 -2
- package/esm/typings/src/remote-server/openapi-types.d.ts +348 -6
- package/esm/typings/src/remote-server/openapi.d.ts +397 -3
- package/package.json +2 -2
- package/umd/index.umd.js +583 -34
- package/umd/index.umd.js.map +1 -1
package/umd/index.umd.js
CHANGED
@@ -48,7 +48,7 @@
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.
+const PROMPTBOOK_ENGINE_VERSION = '0.92.0-10';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -1802,7 +1802,7 @@
     if (pipeline.title === undefined || pipeline.title === '' || pipeline.title === DEFAULT_BOOK_TITLE) {
         return false;
     }
-    if (!pipeline.personas.every((persona) => persona.
+    if (!pipeline.personas.every((persona) => persona.modelsRequirements !== undefined)) {
         return false;
     }
     if (!pipeline.knowledgeSources.every((knowledgeSource) => knowledgeSource.preparationIds !== undefined)) {
@@ -1848,6 +1848,45 @@
     }
 }

+/**
+ * Converts a JavaScript Object Notation (JSON) string into an object.
+ *
+ * Note: This is wrapper around `JSON.parse()` with better error and type handling
+ *
+ * @public exported from `@promptbook/utils`
+ */
+function jsonParse(value) {
+    if (value === undefined) {
+        throw new Error(`Can not parse JSON from undefined value.`);
+    }
+    else if (typeof value !== 'string') {
+        console.error('Can not parse JSON from non-string value.', { text: value });
+        throw new Error(spaceTrim__default["default"](`
+            Can not parse JSON from non-string value.
+
+            The value type: ${typeof value}
+            See more in console.
+        `));
+    }
+    try {
+        return JSON.parse(value);
+    }
+    catch (error) {
+        if (!(error instanceof Error)) {
+            throw error;
+        }
+        throw new Error(spaceTrim__default["default"]((block) => `
+            ${block(error.message)}
+
+            The JSON text:
+            ${block(value)}
+        `));
+    }
+}
+/**
+ * TODO: !!!! Use in Promptbook.studio
+ */
+
 /**
  * Recursively converts JSON strings to JSON objects

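Note: the new `jsonParse()` helper above is annotated as exported from `@promptbook/utils`. A minimal usage sketch (the exact exported typings are not shown in this diff, so the cast below is an assumption):

```ts
import { jsonParse } from '@promptbook/utils';

// On valid input it behaves like JSON.parse:
const requirements = jsonParse('{ "modelName": "gpt-4o", "temperature": 0.7 }') as {
    modelName: string;
    temperature: number;
};
console.log(requirements.modelName);

// On malformed input (for example a truncated LLM output) it throws an Error
// whose message also contains the offending JSON text, which eases debugging:
try {
    jsonParse('{ "modelName": ');
} catch (error) {
    console.error((error as Error).message);
}
```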
@@ -1866,7 +1905,7 @@
     const newObject = { ...object };
     for (const [key, value] of Object.entries(object)) {
         if (typeof value === 'string' && isValidJsonString(value)) {
-            newObject[key] =
+            newObject[key] = jsonParse(value);
         }
         else {
             newObject[key] = jsonStringsToJsons(value);
@@ -2043,7 +2082,7 @@
  * TODO: [🐚] Split into more files and make `PrepareTask` & `RemoteTask` + split the function
  */

-
var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModelNames}` List of available model names separated by comma (,)\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n\\`\\`\\`json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
+
var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModels",description:"List of available model names together with their descriptions as JSON",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelsRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n```json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n```\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n```json\n{availableModels}\n```\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. 
By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelsRequirements",format:"JSON",dependentParameterNames:["availableModels","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModels}` List of available model names together with their descriptions as JSON\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelsRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n\\`\\`\\`json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n\\`\\`\\`json\n{availableModels}\n\\`\\`\\`\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelsRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];

 /**
  * Checks if value is valid email
@@ -2346,7 +2385,7 @@
  */
 function unpreparePipeline(pipeline) {
     let { personas, knowledgeSources, tasks } = pipeline;
-    personas = personas.map((persona) => ({ ...persona,
+    personas = personas.map((persona) => ({ ...persona, modelsRequirements: undefined, preparationIds: undefined }));
     knowledgeSources = knowledgeSources.map((knowledgeSource) => ({ ...knowledgeSource, preparationIds: undefined }));
     tasks = tasks.map((task) => {
         let { dependentParameterNames } = task;
@@ -2934,27 +2973,48 @@
         pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book'),
         tools,
     });
-    // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
     const _llms = arrayableToArray(tools.llm);
     const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
-    const availableModels = await llmTools.listModels()
-    const availableModelNames = availableModels
+    const availableModels = (await llmTools.listModels())
         .filter(({ modelVariant }) => modelVariant === 'CHAT')
-        .map(({ modelName }) =>
-
-
+        .map(({ modelName, modelDescription }) => ({
+        modelName,
+        modelDescription,
+        // <- Note: `modelTitle` and `modelVariant` is not relevant for this task
+    }));
+    const result = await preparePersonaExecutor({
+        availableModels /* <- Note: Passing as JSON */,
+        personaDescription,
+    }).asPromise();
     const { outputParameters } = result;
-    const {
-
+    const { modelsRequirements: modelsRequirementsJson } = outputParameters;
+    let modelsRequirementsUnchecked = jsonParse(modelsRequirementsJson);
     if (isVerbose) {
-        console.info(`PERSONA ${personaDescription}`,
+        console.info(`PERSONA ${personaDescription}`, modelsRequirementsUnchecked);
     }
-
-
+    if (!Array.isArray(modelsRequirementsUnchecked)) {
+        // <- TODO: Book should have syntax and system to enforce shape of JSON
+        modelsRequirementsUnchecked = [modelsRequirementsUnchecked];
+        /*
+        throw new UnexpectedError(
+            spaceTrim(
+                (block) => `
+                    Invalid \`modelsRequirements\`:
+
+                    \`\`\`json
+                    ${block(JSON.stringify(modelsRequirementsUnchecked, null, 4))}
+                    \`\`\`
+                `,
+            ),
+        );
+        */
+    }
+    const modelsRequirements = modelsRequirementsUnchecked.map((modelRequirements) => ({
         modelVariant: 'CHAT',
-
-
-
+        ...modelRequirements,
+    }));
+    return {
+        modelsRequirements,
     };
 }
 /**
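Note: `preparePersona` now resolves to a list of model requirements rather than a single one. A rough TypeScript sketch of the resulting shape, inferred from the mapping code above and from the example JSON in the updated `prepare-persona.book` prompt (field names beyond those shown there are not guaranteed):

```ts
// Shape suggested by the bundled code above; illustrative only.
type ModelRequirements = {
    modelVariant: 'CHAT';
    modelName: string; // e.g. 'gpt-4o' or 'claude-3-5-sonnet'
    systemMessage: string;
    temperature: number; // between 0 and 2, lower = more deterministic
};

// `preparePersona(...)` now resolves to `{ modelsRequirements: ModelRequirements[] }`,
// with the best-fitting model sorted first by the LLM.
const example: { modelsRequirements: ModelRequirements[] } = {
    modelsRequirements: [
        {
            modelVariant: 'CHAT',
            modelName: 'gpt-4o',
            systemMessage: 'You are experienced AI engineer and helpfull assistant.',
            temperature: 0.7,
        },
    ],
};
```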
@@ -3778,7 +3838,7 @@
 > },
 */
         async asJson() {
-            return
+            return jsonParse(await tools.fs.readFile(filename, 'utf-8'));
         },
         async asText() {
             return await tools.fs.readFile(filename, 'utf-8');
@@ -4036,14 +4096,14 @@
     // TODO: [🖌][🧠] Implement some `mapAsync` function
     const preparedPersonas = new Array(personas.length);
     await forEachAsync(personas, { maxParallelCount /* <- TODO: [🪂] When there are subtasks, this maximul limit can be broken */ }, async (persona, index) => {
-        const
+        const { modelsRequirements } = await preparePersona(persona.description, { ...tools, llm: llmToolsWithUsage }, {
            rootDirname,
            maxParallelCount /* <- TODO: [🪂] */,
            isVerbose,
        });
        const preparedPersona = {
            ...persona,
-
+            modelsRequirements,
            preparationIds: [/* TODO: [🧊] -> */ currentPreparation.id],
            // <- TODO: [🍙] Make some standard order of json properties
        };
@@ -4382,6 +4442,24 @@
     }
 }

+/**
+ * Converts a CSV string into an object
+ *
+ * Note: This is wrapper around `papaparse.parse()` with better autohealing
+ *
+ * @private - for now until `@promptbook/csv` is released
+ */
+function csvParse(value /* <- TODO: string_csv */, settings, schema /* <- TODO: Make CSV Schemas */) {
+    settings = { ...settings, ...MANDATORY_CSV_SETTINGS };
+    // Note: Autoheal invalid '\n' characters
+    if (settings.newline && !settings.newline.includes('\r') && value.includes('\r')) {
+        console.warn('CSV string contains carriage return characters, but in the CSV settings the `newline` setting does not include them. Autohealing the CSV string.');
+        value = value.replace(/\r\n/g, '\n').replace(/\r/g, '\n');
+    }
+    const csv = papaparse.parse(value, settings);
+    return csv;
+}
+
 /**
  * Definition for CSV spreadsheet
  *
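Note: the `csvParse()` wrapper above normalizes CRLF line endings before delegating to `papaparse.parse()`. It is marked `@private`, so it is not importable yet; the sketch below just mirrors the autohealing step it adds (the sample CSV and settings are made up for illustration):

```ts
import * as papaparse from 'papaparse';

// A CSV produced on Windows uses '\r\n', but a caller may have configured newline: '\n'.
const csvText = 'name,role\r\nAlice,admin\r\nBob,user\r\n';

// The wrapper detects the mismatch, warns, and rewrites '\r\n' (and stray '\r') to '\n'
// before parsing, so rows do not end up with a trailing '\r' in their last field:
const healed = csvText.replace(/\r\n/g, '\n').replace(/\r/g, '\n');
const result = papaparse.parse(healed, { header: true, skipEmptyLines: true });
console.log(result.data); // [{ name: 'Alice', role: 'admin' }, { name: 'Bob', role: 'user' }]
```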
@@ -4404,8 +4482,7 @@
         {
             subvalueName: 'ROW',
             async mapValues(value, outputParameterName, settings, mapCallback) {
-
-                const csv = papaparse.parse(value, { ...settings, ...MANDATORY_CSV_SETTINGS });
+                const csv = csvParse(value, settings);
                 if (csv.errors.length !== 0) {
                     throw new CsvFormatError(spaceTrim__default["default"]((block) => `
                         CSV parsing error
@@ -4435,8 +4512,7 @@
         {
             subvalueName: 'CELL',
             async mapValues(value, outputParameterName, settings, mapCallback) {
-
-                const csv = papaparse.parse(value, { ...settings, ...MANDATORY_CSV_SETTINGS });
+                const csv = csvParse(value, settings);
                 if (csv.errors.length !== 0) {
                     throw new CsvFormatError(spaceTrim__default["default"]((block) => `
                         CSV parsing error
@@ -5482,13 +5558,79 @@
 /**
  * @@@
  *
+ * Here is the place where RAG (retrieval-augmented generation) happens
+ *
  * @private internal utility of `createPipelineExecutor`
  */
 async function getKnowledgeForTask(options) {
-    const { preparedPipeline, task } = options;
-
+    const { tools, preparedPipeline, task } = options;
+    const firstKnowlegePiece = preparedPipeline.knowledgePieces[0];
+    const firstKnowlegeIndex = firstKnowlegePiece === null || firstKnowlegePiece === void 0 ? void 0 : firstKnowlegePiece.index[0];
+    // <- TODO: Do not use just first knowledge piece and first index to determine embedding model, use also keyword search
+    if (firstKnowlegePiece === undefined || firstKnowlegeIndex === undefined) {
+        return 'No knowledge pieces found';
+    }
+    // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
+    const _llms = arrayableToArray(tools.llm);
+    const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+    const taskEmbeddingPrompt = {
+        title: 'Knowledge Search',
+        modelRequirements: {
+            modelVariant: 'EMBEDDING',
+            modelName: firstKnowlegeIndex.modelName,
+        },
+        content: task.content,
+        parameters: {
+            /* !!!!!!!! */
+        },
+    };
+    const taskEmbeddingResult = await llmTools.callEmbeddingModel(taskEmbeddingPrompt);
+    const knowledgePiecesWithRelevance = preparedPipeline.knowledgePieces.map((knowledgePiece) => {
+        const { index } = knowledgePiece;
+        const knowledgePieceIndex = index.find((i) => i.modelName === firstKnowlegeIndex.modelName);
+        // <- TODO: Do not use just first knowledge piece and first index to determine embedding model
+        if (knowledgePieceIndex === undefined) {
+            return {
+                content: knowledgePiece.content,
+                relevance: 0,
+            };
+        }
+        const relevance = computeCosineSimilarity(knowledgePieceIndex.position, taskEmbeddingResult.content);
+        return {
+            content: knowledgePiece.content,
+            relevance,
+        };
+    });
+    const knowledgePiecesSorted = knowledgePiecesWithRelevance.sort((a, b) => a.relevance - b.relevance);
+    const knowledgePiecesLimited = knowledgePiecesSorted.slice(0, 5);
+    console.log('!!! Embedding', {
+        task,
+        taskEmbeddingPrompt,
+        taskEmbeddingResult,
+        firstKnowlegePiece,
+        firstKnowlegeIndex,
+        knowledgePiecesWithRelevance,
+        knowledgePiecesSorted,
+        knowledgePiecesLimited,
+    });
+    return knowledgePiecesLimited.map(({ content }) => `- ${content}`).join('\n');
     // <- TODO: [🧠] Some smart aggregation of knowledge pieces, single-line vs multi-line vs mixed
 }
+// TODO: !!!!!! Annotate + to new file
+function computeCosineSimilarity(embeddingVector1, embeddingVector2) {
+    if (embeddingVector1.length !== embeddingVector2.length) {
+        throw new TypeError('Embedding vectors must have the same length');
+    }
+    const dotProduct = embeddingVector1.reduce((sum, value, index) => sum + value * embeddingVector2[index], 0);
+    const magnitude1 = Math.sqrt(embeddingVector1.reduce((sum, value) => sum + value * value, 0));
+    const magnitude2 = Math.sqrt(embeddingVector2.reduce((sum, value) => sum + value * value, 0));
+    return 1 - dotProduct / (magnitude1 * magnitude2);
+}
+/**
+ * TODO: !!!! Verify if this is working
+ * TODO: [♨] Implement Better - use keyword search
+ * TODO: [♨] Examples of values
+ */

 /**
  * @@@
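Note: the new `getKnowledgeForTask()` embeds the task content, scores every knowledge piece with `computeCosineSimilarity()` (which, as written, returns the cosine distance `1 - cos θ`), sorts ascending, and keeps the five nearest pieces. A standalone sketch of that ranking step over plain number arrays (the vectors and piece contents are invented for illustration):

```ts
// Cosine distance exactly as in the bundle above: 0 = same direction, 2 = opposite.
function cosineDistance(a: number[], b: number[]): number {
    if (a.length !== b.length) {
        throw new TypeError('Embedding vectors must have the same length');
    }
    const dot = a.reduce((sum, value, index) => sum + value * b[index]!, 0);
    const magnitudeA = Math.sqrt(a.reduce((sum, value) => sum + value * value, 0));
    const magnitudeB = Math.sqrt(b.reduce((sum, value) => sum + value * value, 0));
    return 1 - dot / (magnitudeA * magnitudeB);
}

// Hypothetical task embedding and knowledge-piece embeddings:
const taskEmbedding = [0.9, 0.1, 0.0];
const pieces = [
    { content: 'Piece about pricing', embedding: [0.8, 0.2, 0.1] },
    { content: 'Piece about logging', embedding: [0.0, 0.1, 0.9] },
];

// Lower distance = more relevant, so the ascending sort + slice keeps the nearest pieces:
const ranked = pieces
    .map((piece) => ({ ...piece, relevance: cosineDistance(taskEmbedding, piece.embedding) }))
    .sort((a, b) => a.relevance - b.relevance)
    .slice(0, 5);

console.log(ranked.map(({ content }) => `- ${content}`).join('\n'));
```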
@@ -5496,9 +5638,9 @@
  * @private internal utility of `createPipelineExecutor`
  */
 async function getReservedParametersForTask(options) {
-    const { preparedPipeline, task, pipelineIdentification } = options;
+    const { tools, preparedPipeline, task, pipelineIdentification } = options;
     const context = await getContextForTask(); // <- [🏍]
-    const knowledge = await getKnowledgeForTask({ preparedPipeline, task });
+    const knowledge = await getKnowledgeForTask({ tools, preparedPipeline, task });
     const examples = await getExamplesForTask();
     const currentDate = new Date().toISOString(); // <- TODO: [🧠][💩] Better
     const modelName = RESERVED_PARAMETER_MISSING_VALUE;
@@ -5560,6 +5702,7 @@
     }
     const definedParameters = Object.freeze({
         ...(await getReservedParametersForTask({
+            tools,
             preparedPipeline,
             task: currentTask,
             pipelineIdentification,
@@ -6850,7 +6993,7 @@
     const openapiJson = {
         openapi: '3.0.0',
         info: {
-            title: 'Promptbook Remote Server API (!!!! From
+            title: 'Promptbook Remote Server API (!!!! From YML)',
             version: '1.0.0',
             description: 'API documentation for the Promptbook Remote Server',
         },
@@ -6862,6 +7005,13 @@
                 responses: {
                     '200': {
                         description: 'Server details in markdown format.',
+                        content: {
+                            'text/markdown': {
+                                schema: {
+                                    type: 'string',
+                                },
+                            },
+                        },
                     },
                 },
             },
@@ -6892,13 +7042,22 @@
                     },
                 },
                 responses: {
-                    '
+                    '201': {
                         description: 'Successful login',
                         content: {
                             'application/json': {
                                 schema: {
                                     type: 'object',
                                     properties: {
+                                        isSuccess: {
+                                            type: 'boolean',
+                                        },
+                                        message: {
+                                            type: 'string',
+                                        },
+                                        error: {
+                                            type: 'object',
+                                        },
                                         identification: {
                                             type: 'object',
                                         },
@@ -6907,6 +7066,43 @@
                                     },
                                 },
                             },
+                    '400': {
+                        description: 'Bad request or login failed',
+                        content: {
+                            'application/json': {
+                                schema: {
+                                    type: 'object',
+                                    properties: {
+                                        error: {
+                                            type: 'object',
+                                        },
+                                    },
+                                },
+                            },
+                        },
+                    },
+                    '401': {
+                        description: 'Authentication error',
+                        content: {
+                            'application/json': {
+                                schema: {
+                                    type: 'object',
+                                    properties: {
+                                        isSuccess: {
+                                            type: 'boolean',
+                                            enum: [false],
+                                        },
+                                        message: {
+                                            type: 'string',
+                                        },
+                                        error: {
+                                            type: 'object',
+                                        },
+                                    },
+                                },
+                            },
+                        },
+                    },
                     },
                 },
             },
@@ -6928,6 +7124,16 @@
                     },
                 },
             },
+            '500': {
+                description: 'No collection available',
+                content: {
+                    'text/plain': {
+                        schema: {
+                            type: 'string',
+                        },
+                    },
+                },
+            },
         },
     },
 },
@@ -6959,6 +7165,28 @@
                     },
                     '404': {
                         description: 'Book not found.',
+                        content: {
+                            'application/json': {
+                                schema: {
+                                    type: 'object',
+                                    properties: {
+                                        error: {
+                                            type: 'object',
+                                        },
+                                    },
+                                },
+                            },
+                        },
+                    },
+                    '500': {
+                        description: 'No collection available',
+                        content: {
+                            'text/plain': {
+                                schema: {
+                                    type: 'string',
+                                },
+                            },
+                        },
                     },
                 },
             },
@@ -6976,6 +7204,28 @@
                                     type: 'array',
                                     items: {
                                         type: 'object',
+                                        properties: {
+                                            nonce: {
+                                                type: 'string',
+                                            },
+                                            taskId: {
+                                                type: 'string',
+                                            },
+                                            taskType: {
+                                                type: 'string',
+                                            },
+                                            status: {
+                                                type: 'string',
+                                            },
+                                            createdAt: {
+                                                type: 'string',
+                                                format: 'date-time',
+                                            },
+                                            updatedAt: {
+                                                type: 'string',
+                                                format: 'date-time',
+                                            },
+                                        },
                                     },
                                 },
                             },
@@ -6984,6 +7234,147 @@
             },
         },
     },
+    '/executions/last': {
+        get: {
+            summary: 'Get the last execution',
+            description: 'Returns details of the last execution task.',
+            responses: {
+                '200': {
+                    description: 'The last execution task with full details.',
+                    content: {
+                        'application/json': {
+                            schema: {
+                                type: 'object',
+                                properties: {
+                                    nonce: {
+                                        type: 'string',
+                                    },
+                                    taskId: {
+                                        type: 'string',
+                                    },
+                                    taskType: {
+                                        type: 'string',
+                                    },
+                                    status: {
+                                        type: 'string',
+                                    },
+                                    errors: {
+                                        type: 'array',
+                                        items: {
+                                            type: 'object',
+                                        },
+                                    },
+                                    warnings: {
+                                        type: 'array',
+                                        items: {
+                                            type: 'object',
+                                        },
+                                    },
+                                    createdAt: {
+                                        type: 'string',
+                                        format: 'date-time',
+                                    },
+                                    updatedAt: {
+                                        type: 'string',
+                                        format: 'date-time',
+                                    },
+                                    currentValue: {
+                                        type: 'object',
+                                    },
+                                },
+                            },
+                        },
+                    },
+                },
+                '404': {
+                    description: 'No execution tasks found.',
+                    content: {
+                        'text/plain': {
+                            schema: {
+                                type: 'string',
+                            },
+                        },
+                    },
+                },
+            },
+        },
+    },
+    '/executions/{taskId}': {
+        get: {
+            summary: 'Get specific execution',
+            description: 'Returns details of a specific execution task.',
+            parameters: [
+                {
+                    in: 'path',
+                    name: 'taskId',
+                    required: true,
+                    schema: {
+                        type: 'string',
+                    },
+                    description: 'The ID of the execution task to retrieve.',
+                },
+            ],
+            responses: {
+                '200': {
+                    description: 'The execution task with full details.',
+                    content: {
+                        'application/json': {
+                            schema: {
+                                type: 'object',
+                                properties: {
+                                    nonce: {
+                                        type: 'string',
+                                    },
+                                    taskId: {
+                                        type: 'string',
+                                    },
+                                    taskType: {
+                                        type: 'string',
+                                    },
+                                    status: {
+                                        type: 'string',
+                                    },
+                                    errors: {
+                                        type: 'array',
+                                        items: {
+                                            type: 'object',
+                                        },
+                                    },
+                                    warnings: {
+                                        type: 'array',
+                                        items: {
+                                            type: 'object',
+                                        },
+                                    },
+                                    createdAt: {
+                                        type: 'string',
+                                        format: 'date-time',
+                                    },
+                                    updatedAt: {
+                                        type: 'string',
+                                        format: 'date-time',
+                                    },
+                                    currentValue: {
+                                        type: 'object',
+                                    },
+                                },
+                            },
+                        },
+                    },
+                },
+                '404': {
+                    description: 'Execution task not found.',
+                    content: {
+                        'text/plain': {
+                            schema: {
+                                type: 'string',
+                            },
+                        },
+                    },
+                },
+            },
+        },
+    },
     '/executions/new': {
         post: {
             summary: 'Start a new execution',
@@ -6997,12 +7388,19 @@
                             properties: {
                                 pipelineUrl: {
                                     type: 'string',
+                                    description: 'URL of the pipeline to execute',
+                                },
+                                book: {
+                                    type: 'string',
+                                    description: 'Alternative field for pipelineUrl',
                                 },
                                 inputParameters: {
                                     type: 'object',
+                                    description: 'Parameters for pipeline execution',
                                 },
                                 identification: {
                                     type: 'object',
+                                    description: 'User identification data',
                                 },
                             },
                         },
@@ -7022,13 +7420,164 @@
                 },
                 '400': {
                     description: 'Invalid input.',
+                    content: {
+                        'application/json': {
+                            schema: {
+                                type: 'object',
+                                properties: {
+                                    error: {
+                                        type: 'object',
+                                    },
+                                },
+                            },
+                        },
+                    },
+                },
+                '404': {
+                    description: 'Pipeline not found.',
+                    content: {
+                        'text/plain': {
+                            schema: {
+                                type: 'string',
+                            },
+                        },
+                    },
+                },
+            },
+        },
+    },
+    '/api-docs': {
+        get: {
+            summary: 'API documentation UI',
+            description: 'Swagger UI for API documentation',
+            responses: {
+                '200': {
+                    description: 'HTML Swagger UI',
+                },
+            },
+        },
+    },
+    '/swagger': {
+        get: {
+            summary: 'API documentation UI (alternative path)',
+            description: 'Swagger UI for API documentation',
+            responses: {
+                '200': {
+                    description: 'HTML Swagger UI',
+                },
+            },
+        },
+    },
+    '/openapi': {
+        get: {
+            summary: 'OpenAPI specification',
+            description: 'Returns the OpenAPI JSON specification',
+            responses: {
+                '200': {
+                    description: 'OpenAPI specification',
+                    content: {
+                        'application/json': {
+                            schema: {
+                                type: 'object',
+                            },
+                        },
+                    },
+                },
+            },
+        },
+    },
+},
+components: {
+    schemas: {
+        Error: {
+            type: 'object',
+            properties: {
+                error: {
+                    type: 'object',
+                },
+            },
+        },
+        ExecutionTaskSummary: {
+            type: 'object',
+            properties: {
+                nonce: {
+                    type: 'string',
+                },
+                taskId: {
+                    type: 'string',
+                },
+                taskType: {
+                    type: 'string',
+                },
+                status: {
+                    type: 'string',
+                },
+                createdAt: {
+                    type: 'string',
+                    format: 'date-time',
+                },
+                updatedAt: {
+                    type: 'string',
+                    format: 'date-time',
+                },
+            },
+        },
+        ExecutionTaskFull: {
+            type: 'object',
+            properties: {
+                nonce: {
+                    type: 'string',
+                },
+                taskId: {
+                    type: 'string',
+                },
+                taskType: {
+                    type: 'string',
+                },
+                status: {
+                    type: 'string',
+                },
+                errors: {
+                    type: 'array',
+                    items: {
+                        type: 'object',
+                    },
+                },
+                warnings: {
+                    type: 'array',
+                    items: {
+                        type: 'object',
+                    },
+                },
+                createdAt: {
+                    type: 'string',
+                    format: 'date-time',
+                },
+                updatedAt: {
+                    type: 'string',
+                    format: 'date-time',
+                },
+                currentValue: {
+                    type: 'object',
                 },
             },
         },
     },
 },
-
-
+tags: [
+    {
+        name: 'Books',
+        description: 'Operations related to books and pipelines',
+    },
+    {
+        name: 'Executions',
+        description: 'Operations related to execution tasks',
+    },
+    {
+        name: 'Authentication',
+        description: 'Authentication operations',
+    },
+],
 };
 /**
  * Note: [💞] Ignore a discrepancy between file name and entity name