@promptbook/remote-server 0.92.0-3 → 0.92.0-5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/esm/index.es.js +33 -20
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/deepseek.index.d.ts +2 -0
- package/esm/typings/src/_packages/google.index.d.ts +2 -0
- package/esm/typings/src/llm-providers/deepseek/deepseek-models.d.ts +23 -0
- package/esm/typings/src/llm-providers/google/google-models.d.ts +23 -0
- package/esm/typings/src/personas/preparePersona.d.ts +1 -1
- package/esm/typings/src/pipeline/PipelineJson/PersonaJson.d.ts +4 -2
- package/package.json +2 -2
- package/umd/index.umd.js +33 -20
- package/umd/index.umd.js.map +1 -1
package/esm/typings/src/_packages/deepseek.index.d.ts
CHANGED

@@ -1,8 +1,10 @@
 import { BOOK_LANGUAGE_VERSION, PROMPTBOOK_ENGINE_VERSION } from '../version';
 import { createDeepseekExecutionTools } from '../llm-providers/deepseek/createDeepseekExecutionTools';
+import { DEEPSEEK_MODELS } from '../llm-providers/deepseek/deepseek-models';
 import type { DeepseekExecutionToolsOptions } from '../llm-providers/deepseek/DeepseekExecutionToolsOptions';
 import { _DeepseekRegistration } from '../llm-providers/deepseek/register-constructor';
 export { BOOK_LANGUAGE_VERSION, PROMPTBOOK_ENGINE_VERSION };
 export { createDeepseekExecutionTools };
+export { DEEPSEEK_MODELS };
 export type { DeepseekExecutionToolsOptions };
 export { _DeepseekRegistration };
package/esm/typings/src/_packages/google.index.d.ts
CHANGED

@@ -1,10 +1,12 @@
 import { BOOK_LANGUAGE_VERSION, PROMPTBOOK_ENGINE_VERSION } from '../version';
 import { createGoogleExecutionTools } from '../llm-providers/google/createGoogleExecutionTools';
+import { GOOGLE_MODELS } from '../llm-providers/google/google-models';
 import type { GoogleExecutionToolsOptions } from '../llm-providers/google/GoogleExecutionToolsOptions';
 import { _GoogleRegistration } from '../llm-providers/google/register-constructor';
 import type { VercelExecutionToolsOptions } from '../llm-providers/vercel/VercelExecutionToolsOptions';
 export { BOOK_LANGUAGE_VERSION, PROMPTBOOK_ENGINE_VERSION };
 export { createGoogleExecutionTools };
+export { GOOGLE_MODELS };
 export type { GoogleExecutionToolsOptions };
 export { _GoogleRegistration };
 export type { VercelExecutionToolsOptions };
package/esm/typings/src/llm-providers/deepseek/deepseek-models.d.ts
ADDED

@@ -0,0 +1,23 @@
+import type { AvailableModel } from '../../execution/AvailableModel';
+import type { number_usd } from '../../types/typeAliases';
+/**
+ * List of available Deepseek models with descriptions
+ *
+ * Note: Done at 2025-04-22
+ *
+ * @see https://www.deepseek.com/models
+ * @public exported from `@promptbook/deepseek`
+ */
+export declare const DEEPSEEK_MODELS: ReadonlyArray<AvailableModel & {
+    modelDescription?: string;
+    pricing?: {
+        readonly prompt: number_usd;
+        readonly output: number_usd;
+    };
+}>;
+/**
+ * TODO: [🧠] Add information about context window sizes, capabilities, and relative performance characteristics
+ * TODO: [🎰] Some mechanism to auto-update available models
+ * TODO: [🧠] Verify pricing information is current with Deepseek's official documentation
+ * Note: [💞] Ignore a discrepancy between file name and entity name
+ */
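The new `DEEPSEEK_MODELS` (and the matching `GOOGLE_MODELS` below) are plain read-only arrays, so they can be inspected without constructing any execution tools. A minimal consumption sketch, assuming `AvailableModel` carries `modelName` and `modelVariant` as the `listModels()` usage later in this diff suggests; the concrete entries shipped in 0.92.0-5 are not shown here:

```ts
import { DEEPSEEK_MODELS } from '@promptbook/deepseek';
import { GOOGLE_MODELS } from '@promptbook/google';

// List chat-capable models together with their optional pricing
const chatModels = [...DEEPSEEK_MODELS, ...GOOGLE_MODELS]
    .filter(({ modelVariant }) => modelVariant === 'CHAT')
    .map(({ modelName, modelDescription, pricing }) => ({
        modelName,
        modelDescription: modelDescription ?? '(no description)',
        promptPrice: pricing?.prompt, // `number_usd`; undefined when pricing is not listed, unit not specified in this diff
    }));

console.info(chatModels);
```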
package/esm/typings/src/llm-providers/google/google-models.d.ts
ADDED

@@ -0,0 +1,23 @@
+import type { AvailableModel } from '../../execution/AvailableModel';
+import type { number_usd } from '../../types/typeAliases';
+/**
+ * List of available Google models with descriptions
+ *
+ * Note: Done at 2025-04-22
+ *
+ * @see https://ai.google.dev/models/gemini
+ * @public exported from `@promptbook/google`
+ */
+export declare const GOOGLE_MODELS: ReadonlyArray<AvailableModel & {
+    modelDescription?: string;
+    pricing?: {
+        readonly prompt: number_usd;
+        readonly output: number_usd;
+    };
+}>;
+/**
+ * TODO: [🧠] Add information about context window sizes, capabilities, and relative performance characteristics
+ * TODO: [🎰] Some mechanism to auto-update available models
+ * TODO: [🧠] Verify pricing information is current with Google's official documentation
+ * Note: [💞] Ignore a discrepancy between file name and entity name
+ */
package/esm/typings/src/personas/preparePersona.d.ts
CHANGED

@@ -8,7 +8,7 @@ import type { string_persona_description } from '../types/typeAliases';
  * @see https://github.com/webgptorg/promptbook/discussions/22
  * @public exported from `@promptbook/core`
  */
-export declare function preparePersona(personaDescription: string_persona_description, tools: Pick<ExecutionTools, 'llm'>, options: PrepareAndScrapeOptions): Promise<PersonaPreparedJson
+export declare function preparePersona(personaDescription: string_persona_description, tools: Pick<ExecutionTools, 'llm'>, options: PrepareAndScrapeOptions): Promise<Pick<PersonaPreparedJson, 'modelsRequirements'>>;
 /**
  * TODO: [🔃][main] If the persona was prepared with different version or different set of models, prepare it once again
  * TODO: [🏢] Check validity of `modelName` in pipeline
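`preparePersona` now resolves to just the `modelsRequirements` slice of `PersonaPreparedJson` rather than a single requirements object. A hedged sketch of a call against the new signature; the persona description is made up, and the `@promptbook/types` import path for the option types is an assumption:

```ts
import { preparePersona } from '@promptbook/core';
import type { ExecutionTools, PrepareAndScrapeOptions } from '@promptbook/types';

async function pickModelFor(tools: Pick<ExecutionTools, 'llm'>, options: PrepareAndScrapeOptions) {
    // Resolves to `Pick<PersonaPreparedJson, 'modelsRequirements'>`
    const { modelsRequirements } = await preparePersona(
        'Friendly senior TypeScript mentor', // <- hypothetical persona description
        tools,
        options,
    );

    // The list is sorted by relevance, so the first entry is the preferred chat model
    return modelsRequirements[0];
}
```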
package/esm/typings/src/pipeline/PipelineJson/PersonaJson.d.ts
CHANGED

@@ -30,11 +30,13 @@ export type PersonaJson = {
  */
 export type PersonaPreparedJson = PersonaJson & {
     /**
-     *
+     * Models requirements for the persona
+     *
+     * Sorted by relevance, best-fitting models is first
      *
      * Note: The model must be CHAT variant to be usable through persona
      */
-    readonly
+    readonly modelsRequirements: Array<ChatModelRequirements>;
     /**
      * List of preparation ids that were used to prepare this persona
      */
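Because `PersonaPreparedJson` now carries an ordered `modelsRequirements` array instead of a single requirements object, a prepared persona effectively lists fallback models. Roughly, the new shape looks like the following; the field values and the members of `ChatModelRequirements` beyond `modelVariant: 'CHAT'` are illustrative assumptions based on the prepare-persona book shown later in this diff:

```ts
// Hypothetical prepared persona entry; best-fitting model first
const examplePreparedPersona = {
    name: 'mentor',
    description: 'Friendly senior TypeScript mentor',
    modelsRequirements: [
        { modelVariant: 'CHAT' as const, modelName: 'gpt-4o', systemMessage: 'You are a helpful mentor.', temperature: 0.7 },
        { modelVariant: 'CHAT' as const, modelName: 'claude-3-5-sonnet', systemMessage: 'You are a friendly chatbot.', temperature: 0.5 },
    ],
    preparationIds: [1], // <- placeholder; real ids come from the preparation run
};

// A runner can walk the list in order and fall back when a model is unavailable
console.info(examplePreparedPersona.modelsRequirements[0].modelName);
```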
package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
     "name": "@promptbook/remote-server",
-    "version": "0.92.0-3",
+    "version": "0.92.0-5",
     "description": "It's time for a paradigm shift. The future of software in plain English, French or Latin",
     "private": false,
     "sideEffects": false,
@@ -51,7 +51,7 @@
     "module": "./esm/index.es.js",
     "typings": "./esm/typings/src/_packages/remote-server.index.d.ts",
     "peerDependencies": {
-        "@promptbook/core": "0.92.0-3"
+        "@promptbook/core": "0.92.0-5"
     },
     "dependencies": {
         "colors": "1.4.0",
package/umd/index.umd.js
CHANGED

@@ -48,7 +48,7 @@
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.92.0-3';
+const PROMPTBOOK_ENGINE_VERSION = '0.92.0-5';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -1802,7 +1802,7 @@
     if (pipeline.title === undefined || pipeline.title === '' || pipeline.title === DEFAULT_BOOK_TITLE) {
         return false;
     }
-    if (!pipeline.personas.every((persona) => persona.
+    if (!pipeline.personas.every((persona) => persona.modelsRequirements !== undefined)) {
         return false;
     }
     if (!pipeline.knowledgeSources.every((knowledgeSource) => knowledgeSource.preparationIds !== undefined)) {
@@ -2043,7 +2043,7 @@
  * TODO: [🐚] Split into more files and make `PrepareTask` & `RemoteTask` + split the function
  */

-
var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModelNames}` List of available model names separated by comma (,)\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n\\`\\`\\`json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
+
var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModels",description:"List of available model names together with their descriptions as JSON",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelsRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n```json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n```\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n```json\n{availableModels}\n```\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. 
By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelsRequirements",format:"JSON",dependentParameterNames:["availableModels","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModels}` List of available model names together with their descriptions as JSON\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelsRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n\\`\\`\\`json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n\\`\\`\\`json\n{availableModels}\n\\`\\`\\`\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelsRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];

 /**
  * Checks if value is valid email
@@ -2346,7 +2346,7 @@
  */
 function unpreparePipeline(pipeline) {
     let { personas, knowledgeSources, tasks } = pipeline;
-    personas = personas.map((persona) => ({ ...persona,
+    personas = personas.map((persona) => ({ ...persona, modelsRequirements: undefined, preparationIds: undefined }));
     knowledgeSources = knowledgeSources.map((knowledgeSource) => ({ ...knowledgeSource, preparationIds: undefined }));
     tasks = tasks.map((task) => {
         let { dependentParameterNames } = task;
@@ -2934,27 +2934,40 @@
         pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book'),
         tools,
     });
-    // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
     const _llms = arrayableToArray(tools.llm);
     const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
-    const availableModels = await llmTools.listModels()
-    const availableModelNames = availableModels
+    const availableModels = (await llmTools.listModels())
         .filter(({ modelVariant }) => modelVariant === 'CHAT')
-        .map(({ modelName }) =>
-
-
+        .map(({ modelName, modelDescription }) => ({
+        modelName,
+        modelDescription,
+        // <- Note: `modelTitle` and `modelVariant` is not relevant for this task
+    }));
+    const result = await preparePersonaExecutor({
+        availableModels /* <- Note: Passing as JSON */,
+        personaDescription,
+    }).asPromise();
     const { outputParameters } = result;
-    const {
-    const
+    const { modelsRequirements: modelsRequirementsJson } = outputParameters;
+    const modelsRequirementsUnchecked = JSON.parse(modelsRequirementsJson);
     if (isVerbose) {
-        console.info(`PERSONA ${personaDescription}`,
+        console.info(`PERSONA ${personaDescription}`, modelsRequirementsUnchecked);
     }
-
-
+    if (!Array.isArray(modelsRequirementsUnchecked)) {
+        throw new UnexpectedError(spaceTrim__default["default"]((block) => `
+            Invalid \`modelsRequirements\`:
+
+            \`\`\`json
+            ${block(JSON.stringify(modelsRequirementsUnchecked, null, 4))}
+            \`\`\`
+        `));
+    }
+    const modelsRequirements = modelsRequirementsUnchecked.map((modelRequirements) => ({
         modelVariant: 'CHAT',
-
-
-
+        ...modelRequirements,
+    }));
+    return {
+        modelsRequirements,
     };
 }
 /**
@@ -4036,14 +4049,14 @@
     // TODO: [🖌][🧠] Implement some `mapAsync` function
     const preparedPersonas = new Array(personas.length);
     await forEachAsync(personas, { maxParallelCount /* <- TODO: [🪂] When there are subtasks, this maximul limit can be broken */ }, async (persona, index) => {
-        const
+        const { modelsRequirements } = await preparePersona(persona.description, { ...tools, llm: llmToolsWithUsage }, {
             rootDirname,
             maxParallelCount /* <- TODO: [🪂] */,
             isVerbose,
         });
         const preparedPersona = {
             ...persona,
-
+            modelsRequirements,
             preparationIds: [/* TODO: [🧊] -> */ currentPreparation.id],
             // <- TODO: [🍙] Make some standard order of json properties
         };
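Taken together, these bundle changes switch persona preparation from choosing a single model to ranking several: the prepare-persona executor now receives the available models as JSON (names plus descriptions), and its output is parsed, validated, and normalized into `modelsRequirements`. A simplified sketch of that post-processing step follows; `parseModelsRequirements` is a hypothetical helper name, and plain `Error` stands in for the bundle's `UnexpectedError`:

```ts
// Hedged sketch of the normalization applied to the executor output
type ChatModelRequirements = {
    modelVariant: 'CHAT';
    modelName?: string;
    systemMessage?: string;
    temperature?: number; // <- assumed members, mirroring the prepare-persona book
};

function parseModelsRequirements(modelsRequirementsJson: string): Array<ChatModelRequirements> {
    const unchecked = JSON.parse(modelsRequirementsJson);

    // The executor is instructed to return a JSON array sorted best-fitting first
    if (!Array.isArray(unchecked)) {
        throw new Error(`Invalid \`modelsRequirements\`: ${JSON.stringify(unchecked)}`);
    }

    // Force every entry to the CHAT variant, as the bundled code does
    return unchecked.map((modelRequirements) => ({
        modelVariant: 'CHAT',
        ...modelRequirements,
    }));
}

// Example: the output parameter arrives as a JSON string
const modelsRequirements = parseModelsRequirements(
    '[{"modelName":"gpt-4o","systemMessage":"You are a helpful assistant.","temperature":0.7}]',
);
console.info(modelsRequirements[0].modelName); // -> "gpt-4o"
```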