@promptbook/cli 0.61.0-22 → 0.61.0-23

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -4,7 +4,7 @@ import type { CommonExecutionToolsOptions } from '../execution/CommonExecutionTo
4
4
  import { EmbeddingVector } from '../execution/EmbeddingVector';
5
5
  import type { ExecutionTools } from '../execution/ExecutionTools';
6
6
  import type { AvailableModel, LlmExecutionTools } from '../execution/LlmExecutionTools';
7
- import type { PipelineExecutor } from '../execution/PipelineExecutor';
7
+ import type { PipelineExecutor, PipelineExecutorResult } from '../execution/PipelineExecutor';
8
8
  import type { ChatPromptResult, CommonPromptResult, CompletionPromptResult, EmbeddingPromptResult, PromptResult } from '../execution/PromptResult';
9
9
  import type { PromptResultUsage, PromptResultUsageCounts } from '../execution/PromptResultUsage';
10
10
  import type { ScriptExecutionTools, ScriptExecutionToolsExecuteOptions } from '../execution/ScriptExecutionTools';
@@ -36,7 +36,7 @@ import type { FromtoItems } from '../utils/FromtoItems';
36
36
  import { PROMPTBOOK_VERSION, string_promptbook_version } from '../version';
37
37
  export { PROMPTBOOK_VERSION };
38
38
  export { EXPECTATION_UNITS };
39
- export type { AvailableModel, BlockType, CommonExecutionToolsOptions, EmbeddingVector, ExecutionReportJson, ExecutionTools, ExpectationAmount, ExpectationUnit, Expectations, FromtoItems, KnowledgePiecePreparedJson, KnowledgeSourceJson, KnowledgeSourcePreparedJson, LlmExecutionTools, LlmTemplateJson, ModelRequirements, ModelVariant, Parameters, PersonaJson, PersonaPreparedJson, PipelineCollection, PipelineExecutor, PipelineJson, PipelineString, PreparationJson, Prompt, ChatPromptResult as PromptChatResult, CommonPromptResult as PromptCommonResult, CompletionPromptResult as PromptCompletionResult, PromptDialogJson, EmbeddingPromptResult as PromptEmbeddingResult, PromptResult, PromptResultUsage, PromptResultUsageCounts, PromptTemplateJson, PromptTemplateParameterJson, ReservedParameters, ScriptExecutionTools, ScriptExecutionToolsExecuteOptions, ScriptJson, ScriptLanguage, SimpleTemplateJson, TaskProgress, UncertainNumber, UserInterfaceTools, UserInterfaceToolsPromptDialogOptions, client_id, number_model_temperature, number_seed, string_char, string_char_emoji, string_chat_prompt, string_completion_prompt, string_data_url, string_domain, string_email, string_emails, string_file_absolute_path, string_file_extension, string_file_path, string_file_relative_path, string_filename, string_folder_absolute_path, string_folder_path, string_folder_relative_path, string_host, string_hostname, string_href, string_html, string_javascript, string_javascript_name, string_license, string_markdown, string_markdown_text, string_mime_type, string_mime_type_with_wildcard, string_model_name, string_name, string_parameter_name, string_parameter_value, string_person_fullname, string_pipeline_url, string_pipeline_url_with_hashtemplate, string_prompt, string_promptbook_documentation_url, string_promptbook_version, string_reserved_parameter_name, string_script, string_semantic_version, string_sha256, string_system_message, string_tdl, string_template, string_text_prompt, string_title, 
string_token, string_translate_language, string_uri, string_uri_part, string_url, string_url_image, string_uuid, };
39
+ export type { AvailableModel, BlockType, CommonExecutionToolsOptions, EmbeddingVector, ExecutionReportJson, ExecutionTools, ExpectationAmount, ExpectationUnit, Expectations, FromtoItems, KnowledgePiecePreparedJson, KnowledgeSourceJson, KnowledgeSourcePreparedJson, LlmExecutionTools, LlmTemplateJson, ModelRequirements, ModelVariant, Parameters, PersonaJson, PersonaPreparedJson, PipelineCollection, PipelineExecutor, PipelineExecutorResult, PipelineJson, PipelineString, PreparationJson, Prompt, ChatPromptResult as PromptChatResult, CommonPromptResult as PromptCommonResult, CompletionPromptResult as PromptCompletionResult, PromptDialogJson, EmbeddingPromptResult as PromptEmbeddingResult, PromptResult, PromptResultUsage, PromptResultUsageCounts, PromptTemplateJson, PromptTemplateParameterJson, ReservedParameters, ScriptExecutionTools, ScriptExecutionToolsExecuteOptions, ScriptJson, ScriptLanguage, SimpleTemplateJson, TaskProgress, UncertainNumber, UserInterfaceTools, UserInterfaceToolsPromptDialogOptions, client_id, number_model_temperature, number_seed, string_char, string_char_emoji, string_chat_prompt, string_completion_prompt, string_data_url, string_domain, string_email, string_emails, string_file_absolute_path, string_file_extension, string_file_path, string_file_relative_path, string_filename, string_folder_absolute_path, string_folder_path, string_folder_relative_path, string_host, string_hostname, string_href, string_html, string_javascript, string_javascript_name, string_license, string_markdown, string_markdown_text, string_mime_type, string_mime_type_with_wildcard, string_model_name, string_name, string_parameter_name, string_parameter_value, string_person_fullname, string_pipeline_url, string_pipeline_url_with_hashtemplate, string_prompt, string_promptbook_documentation_url, string_promptbook_version, string_reserved_parameter_name, string_script, string_semantic_version, string_sha256, string_system_message, string_tdl, string_template, 
string_text_prompt, string_title, string_token, string_translate_language, string_uri, string_uri_part, string_url, string_url_image, string_uuid, };
40
40
  /**
41
41
  * TODO: Delete type aliases (from ../types/typeAliases) that are not exported here
42
42
  */
@@ -36,18 +36,22 @@ export declare const EXECUTIONS_CACHE_DIRNAME = "/.promptbook/executions-cache";
36
36
  * The name of the built pipeline collection made by CLI `ptbk make` and for lookup in `createCollectionFromDirectory`
37
37
  */
38
38
  export declare const PIPELINE_COLLECTION_BASE_FILENAME = "index";
39
+ /**
40
+ * Nonce which is used for replacing things in strings
41
+ */
42
+ export declare const REPLACING_NONCE = "u$k42k%!V2zo34w7Fu#@QUHYPW";
39
43
  /**
40
44
  * The names of the parameters that are reserved for special purposes
41
45
  */
42
- export declare const RESERVED_PARAMETER_NAMES: readonly ["context", "currentDate"];
46
+ export declare const RESERVED_PARAMETER_NAMES: readonly ["context", "knowledge", "samples", "modelName", "currentDate"];
43
47
  /**
44
48
  * @@@
45
49
  */
46
- export declare const DEBUG_ALLOW_PAYED_TESTING: boolean;
50
+ export declare const RESERVED_PARAMETER_MISSING_VALUE: string;
47
51
  /**
48
- * Nonce which is used for replacing things in strings
52
+ * @@@
49
53
  */
50
- export declare const REPLACING_NONCE = "u$k42k%!V2zo34w7Fu#@QUHYPW";
54
+ export declare const DEBUG_ALLOW_PAYED_TESTING: boolean;
51
55
  /**
52
56
  * TODO: [🔼] Export all to core
53
57
  */
@@ -13,30 +13,38 @@ import type { PromptResultUsage } from './PromptResultUsage';
13
13
  * @see https://github.com/webgptorg/promptbook#executor
14
14
  */
15
15
  export type PipelineExecutor = {
16
- (inputParameters: Parameters, onProgress?: (taskProgress: TaskProgress) => Promisable<void>): Promise<{
17
- /**
18
- * Whether the execution was successful, details are aviable in `executionReport`
19
- */
20
- readonly isSuccessful: boolean;
21
- /**
22
- * Added usage of whole execution, detailed usage is aviable in `executionReport`
23
- */
24
- readonly usage: PromptResultUsage;
25
- /**
26
- * Errors that occured during the execution, details are aviable in `executionReport`
27
- */
28
- readonly errors: Array<PipelineExecutionError | Error>;
29
- /**
30
- * The report of the execution with all details
31
- */
32
- readonly executionReport: ExecutionReportJson;
33
- /**
34
- * Result parameters of the execution
35
- *
36
- * Note: If the execution was not successful, there are only some of the result parameters
37
- */
38
- readonly outputParameters: Parameters;
39
- }>;
16
+ (inputParameters: Parameters, onProgress?: (taskProgress: TaskProgress) => Promisable<void>): Promise<PipelineExecutorResult>;
17
+ };
18
+ /**
19
+ * @@@
20
+ */
21
+ export type PipelineExecutorResult = {
22
+ /**
23
+ * Whether the execution was successful, details are available in `executionReport`
24
+ */
25
+ readonly isSuccessful: boolean;
26
+ /**
27
+ * Added usage of whole execution, detailed usage is available in `executionReport`
28
+ */
29
+ readonly usage: PromptResultUsage;
30
+ /**
31
+ * Errors that occurred during the execution, details are available in `executionReport`
32
+ */
33
+ readonly errors: Array<PipelineExecutionError | Error>;
34
+ /**
35
+ * Warnings that occurred during the execution, details are available in `executionReport`
36
+ */
37
+ readonly warnings: Array<PipelineExecutionError | Error>;
38
+ /**
39
+ * The report of the execution with all details
40
+ */
41
+ readonly executionReport: ExecutionReportJson;
42
+ /**
43
+ * Result parameters of the execution
44
+ *
45
+ * Note: If the execution was not successful, there are only some of the result parameters
46
+ */
47
+ readonly outputParameters: Parameters;
40
48
  };
41
49
  /**
42
50
  * TODO: [🧠] Should this file be in /execution or /types folder?
@@ -7,4 +7,8 @@ export declare function isPipelinePrepared(pipeline: PipelineJson): boolean;
7
7
  * TODO: [🐠] Maybe base this on `makeValidator`
8
8
  * TODO: [🔼] Export via core or utils
9
9
  * TODO: [🧊] Pipeline can be partially prepared, this should return true ONLY if fully prepared
10
+ * TODO: [🧿] Maybe do the same process with the same granularity and subfunctions as `preparePipeline`
11
+ * - Is context in each template
12
+ * - Are samples prepared
13
+ * - Are templates prepared
10
14
  */
@@ -5,5 +5,6 @@ import type { PipelineJson } from '../types/PipelineJson/PipelineJson';
5
5
  export declare function unpreparePipeline(pipeline: PipelineJson): PipelineJson;
6
6
  /**
7
7
  * TODO: [🔼] !!! Export via `@promptbook/core`
8
+ * TODO: [🧿] Maybe do the same process with the same granularity and subfunctions as `preparePipeline`
8
9
  * TODO: Write tests for `preparePipeline`
9
10
  */
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@promptbook/cli",
3
- "version": "0.61.0-22",
3
+ "version": "0.61.0-23",
4
4
  "description": "Supercharge your use of large language models",
5
5
  "private": false,
6
6
  "sideEffects": false,
@@ -54,7 +54,7 @@
54
54
  }
55
55
  ],
56
56
  "peerDependencies": {
57
- "@promptbook/core": "0.61.0-22"
57
+ "@promptbook/core": "0.61.0-23"
58
58
  },
59
59
  "main": "./umd/index.umd.js",
60
60
  "module": "./esm/index.es.js",
package/umd/index.umd.js CHANGED
@@ -154,7 +154,7 @@
154
154
  /**
155
155
  * The version of the Promptbook library
156
156
  */
157
- var PROMPTBOOK_VERSION = '0.61.0-21';
157
+ var PROMPTBOOK_VERSION = '0.61.0-22';
158
158
  // TODO: !!!! List here all the versions and annotate + put into script
159
159
 
160
160
  /**
@@ -283,19 +283,26 @@
283
283
  * The name of the built pipeline collection made by CLI `ptbk make` and for lookup in `createCollectionFromDirectory`
284
284
  */
285
285
  var PIPELINE_COLLECTION_BASE_FILENAME = "index";
286
+ /**
287
+ * Nonce which is used for replacing things in strings
288
+ */
289
+ var REPLACING_NONCE = 'u$k42k%!V2zo34w7Fu#@QUHYPW';
286
290
  /**
287
291
  * The names of the parameters that are reserved for special purposes
288
292
  */
289
293
  var RESERVED_PARAMETER_NAMES = deepFreeze([
290
294
  'context',
295
+ 'knowledge',
296
+ 'samples',
297
+ 'modelName',
291
298
  'currentDate',
292
299
  // <- TODO: Add more like 'date', 'modelName',...
293
300
  // <- TODO: Add [emoji] + instructions ACRY when adding new reserved parameter
294
301
  ]);
295
302
  /**
296
- * Nonce which is used for replacing things in strings
303
+ * @@@
297
304
  */
298
- var REPLACING_NONCE = 'u$k42k%!V2zo34w7Fu#@QUHYPW';
305
+ var RESERVED_PARAMETER_MISSING_VALUE = 'MISSING-' + REPLACING_NONCE;
299
306
  /*
300
307
  TODO: !!! Just testing false-negative detection of [🟡][🟢][🔵][⚪] leak
301
308
  */
@@ -739,7 +746,7 @@
739
746
  });
740
747
  }
741
748
 
742
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-21",parameters:[{name:"content",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledge",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {content}",dependentParameterNames:["content"],resultingParameterName:"knowledge"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-21",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0-21",parameters:[{name:"content",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by 
comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {content}",dependentParameterNames:["content"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-21",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0-21",parameters:[{name:"content",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> 
{content}",expectations:{words:{min:1,max:8}},dependentParameterNames:["content"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-21",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0-21",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. 
The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-21",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
749
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-22",parameters:[{name:"content",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {content}",dependentParameterNames:["content"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-22",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0-22",parameters:[{name:"content",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by 
comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {content}",dependentParameterNames:["content"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-22",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0-22",parameters:[{name:"content",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> 
{content}",expectations:{words:{min:1,max:8}},dependentParameterNames:["content"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-22",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0-22",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. 
The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-22",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
743
750
 
744
751
  /**
745
752
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
@@ -1007,7 +1014,7 @@
1007
1014
  throw new PipelineLogicError("Parameter {".concat(template.resultingParameterName, "} is defined multiple times"));
1008
1015
  }
1009
1016
  if (RESERVED_PARAMETER_NAMES.includes(template.resultingParameterName)) {
1010
- throw new PipelineLogicError("Parameter name {".concat(template.resultingParameterName, "} is reserved, please use fifferent name"));
1017
+ throw new PipelineLogicError("Parameter name {".concat(template.resultingParameterName, "} is reserved, please use different name"));
1011
1018
  }
1012
1019
  definedParameters.add(template.resultingParameterName);
1013
1020
  if (template.blockType === 'PROMPT_TEMPLATE' && template.modelRequirements.modelVariant === undefined) {
@@ -1184,6 +1191,7 @@
1184
1191
  }
1185
1192
  /**
1186
1193
  * TODO: [🔼] !!! Export via `@promptbook/core`
1194
+ * TODO: [🧿] Maybe do the same process with the same granularity and subfunctions as `preparePipeline`
1187
1195
  * TODO: Write tests for `preparePipeline`
1188
1196
  */
1189
1197
 
@@ -2086,22 +2094,21 @@
2086
2094
  // Note: Ignoring `pipeline.preparations` @@@
2087
2095
  // Note: Ignoring `pipeline.knowledgePieces` @@@
2088
2096
  if (!pipeline.personas.every(function (persona) { return persona.modelRequirements !== undefined; })) {
2089
- console.log('!!!!', 'Not all personas have modelRequirements');
2090
2097
  return false;
2091
2098
  }
2092
2099
  if (!pipeline.knowledgeSources.every(function (knowledgeSource) { return knowledgeSource.preparationIds !== undefined; })) {
2093
- console.log('!!!!', 'Not all knowledgeSources have preparationIds');
2094
2100
  return false;
2095
2101
  }
2096
- // TODO: !!!!! Is context in each template
2097
- // TODO: !!!!! Are samples prepared
2098
- // TODO: !!!!! Are templates prepared
2099
2102
  return true;
2100
2103
  }
2101
2104
  /**
2102
2105
  * TODO: [🐠] Maybe base this on `makeValidator`
2103
2106
  * TODO: [🔼] Export via core or utils
2104
2107
  * TODO: [🧊] Pipeline can be partially prepared, this should return true ONLY if fully prepared
2108
+ * TODO: [🧿] Maybe do the same process with the same granularity and subfunctions as `preparePipeline`
2109
+ * - Is context in each template
2110
+ * - Are samples prepared
2111
+ * - Are templates prepared
2105
2112
  */
2106
2113
 
2107
2114
  /**
@@ -2163,6 +2170,22 @@
2163
2170
  * @throws {PipelineExecutionError} if parameter is not defined, not closed, or not opened
2164
2171
  */
2165
2172
  function replaceParameters(template, parameters) {
2173
+ var e_1, _a;
2174
+ try {
2175
+ for (var _b = __values(Object.entries(parameters)), _c = _b.next(); !_c.done; _c = _b.next()) {
2176
+ var _d = __read(_c.value, 2), parameterName = _d[0], parameterValue = _d[1];
2177
+ if (parameterValue === RESERVED_PARAMETER_MISSING_VALUE) {
2178
+ throw new UnexpectedError("Parameter {".concat(parameterName, "} has missing value"));
2179
+ }
2180
+ }
2181
+ }
2182
+ catch (e_1_1) { e_1 = { error: e_1_1 }; }
2183
+ finally {
2184
+ try {
2185
+ if (_c && !_c.done && (_a = _b.return)) _a.call(_b);
2186
+ }
2187
+ finally { if (e_1) throw e_1.error; }
2188
+ }
2166
2189
  var replacedTemplate = template;
2167
2190
  var match;
2168
2191
  var loopLimit = LOOP_LIMIT;
@@ -2415,11 +2438,21 @@
2415
2438
  console.warn(spaceTrim.spaceTrim("\n Pipeline ".concat(rawPipeline.pipelineUrl || rawPipeline.sourceFile || rawPipeline.title, " is not prepared\n\n It will be prepared ad-hoc before the first execution\n But it is recommended to prepare the pipeline during collection preparation\n\n @see more at https://ptbk.io/prepare-pipeline\n ")));
2416
2439
  }
2417
2440
  var pipelineExecutor = function (inputParameters, onProgress) { return __awaiter(_this, void 0, void 0, function () {
2441
+ // TODO: !!!!! Extract to separate functions and files - ALL FUNCTIONS BELOW
2418
2442
  function getContextForTemplate(// <- TODO: [🧠][🥜]
2419
2443
  template) {
2420
2444
  return __awaiter(this, void 0, void 0, function () {
2421
2445
  return __generator(this, function (_a) {
2422
- // TODO: !!!!!! Implement Better - use real index and keyword search
2446
+ TODO_USE(template);
2447
+ return [2 /*return*/, ''];
2448
+ });
2449
+ });
2450
+ }
2451
+ function getKnowledgeForTemplate(// <- TODO: [🧠][🥜]
2452
+ template) {
2453
+ return __awaiter(this, void 0, void 0, function () {
2454
+ return __generator(this, function (_a) {
2455
+ // TODO: !!!! Implement Better - use real index and keyword search
2423
2456
  TODO_USE(template);
2424
2457
  return [2 /*return*/, pipeline.knowledgePieces.map(function (_a) {
2425
2458
  var content = _a.content;
@@ -2428,19 +2461,39 @@
2428
2461
  });
2429
2462
  });
2430
2463
  }
2464
+ function getSamplesForTemplate(// <- TODO: [🧠][🥜]
2465
+ template) {
2466
+ return __awaiter(this, void 0, void 0, function () {
2467
+ return __generator(this, function (_a) {
2468
+ // TODO: !!!! Implement Better - use real index and keyword search
2469
+ TODO_USE(template);
2470
+ return [2 /*return*/, ''];
2471
+ });
2472
+ });
2473
+ }
2431
2474
  function getReservedParametersForTemplate(template) {
2432
2475
  return __awaiter(this, void 0, void 0, function () {
2433
- var context, currentDate, reservedParameters, RESERVED_PARAMETER_NAMES_1, RESERVED_PARAMETER_NAMES_1_1, parameterName;
2476
+ var context, knowledge, samples, currentDate, modelName, reservedParameters, RESERVED_PARAMETER_NAMES_1, RESERVED_PARAMETER_NAMES_1_1, parameterName;
2434
2477
  var e_3, _a;
2435
2478
  return __generator(this, function (_b) {
2436
2479
  switch (_b.label) {
2437
2480
  case 0: return [4 /*yield*/, getContextForTemplate(template)];
2438
2481
  case 1:
2439
2482
  context = _b.sent();
2483
+ return [4 /*yield*/, getKnowledgeForTemplate(template)];
2484
+ case 2:
2485
+ knowledge = _b.sent();
2486
+ return [4 /*yield*/, getSamplesForTemplate(template)];
2487
+ case 3:
2488
+ samples = _b.sent();
2440
2489
  currentDate = new Date().toISOString();
2490
+ modelName = RESERVED_PARAMETER_MISSING_VALUE;
2441
2491
  reservedParameters = {
2442
2492
  context: context,
2493
+ knowledge: knowledge,
2494
+ samples: samples,
2443
2495
  currentDate: currentDate,
2496
+ modelName: modelName,
2444
2497
  };
2445
2498
  try {
2446
2499
  // Note: Doublecheck that ALL reserved parameters are defined:
@@ -2927,7 +2980,7 @@
2927
2980
  var parameter = _c.value;
2928
2981
  if (parametersToPass[parameter.name] === undefined) {
2929
2982
  // [4]
2930
- errors.push(new PipelineExecutionError("Parameter {".concat(parameter.name, "} should be an output parameter, but it was not be resolved")));
2983
+ warnings.push(new PipelineExecutionError("Parameter {".concat(parameter.name, "} should be an output parameter, but it was not be resolved")));
2931
2984
  continue;
2932
2985
  }
2933
2986
  outputParameters[parameter.name] = parametersToPass[parameter.name] || '';
@@ -2942,7 +2995,7 @@
2942
2995
  }
2943
2996
  return outputParameters;
2944
2997
  }
2945
- var executionReport, _a, _b, parameter, errors, _loop_1, _c, _d, parameterName, state_1, parametersToPass, resovedParameterNames_1, unresovedTemplates_1, resolving_1, loopLimit, _loop_2, error_1, usage_1, outputParameters_1, usage, outputParameters;
2998
+ var executionReport, _a, _b, parameter, errors, warnings, _loop_1, _c, _d, parameterName, state_1, parametersToPass, resovedParameterNames_1, unresovedTemplates_1, resolving_1, loopLimit, _loop_2, error_1, usage_1, outputParameters_1, usage, outputParameters;
2946
2999
  var e_1, _e, e_2, _f;
2947
3000
  return __generator(this, function (_g) {
2948
3001
  switch (_g.label) {
@@ -2977,8 +3030,8 @@
2977
3030
  isSuccessful: false,
2978
3031
  errors: [
2979
3032
  new PipelineExecutionError("Parameter {".concat(parameter.name, "} is required as an input parameter")),
2980
- // <- TODO: !!!!! Test this error
2981
3033
  ],
3034
+ warnings: [],
2982
3035
  executionReport: executionReport,
2983
3036
  outputParameters: {},
2984
3037
  usage: ZERO_USAGE,
@@ -2994,21 +3047,22 @@
2994
3047
  finally { if (e_1) throw e_1.error; }
2995
3048
  }
2996
3049
  errors = [];
3050
+ warnings = [];
2997
3051
  _loop_1 = function (parameterName) {
2998
3052
  var parameter = pipeline.parameters.find(function (_a) {
2999
3053
  var name = _a.name;
3000
3054
  return name === parameterName;
3001
3055
  });
3002
3056
  if (parameter === undefined) {
3003
- errors.push(new PipelineExecutionError("Extra parameter {".concat(parameterName, "} is passed as input parameter")));
3057
+ warnings.push(new PipelineExecutionError("Extra parameter {".concat(parameterName, "} is being passed which is not part of the pipeline.")));
3004
3058
  }
3005
3059
  else if (parameter.isInput === false) {
3006
3060
  return { value: deepFreezeWithSameType({
3007
3061
  isSuccessful: false,
3008
3062
  errors: [
3009
- new PipelineExecutionError("Parameter {".concat(parameter.name, "} is passed as input parameter but is not input")),
3010
- // <- TODO: !!!!! Test this error
3063
+ new PipelineExecutionError("Parameter {".concat(parameter.name, "} is passed as input parameter but it is not input")),
3011
3064
  ],
3065
+ warnings: warnings,
3012
3066
  executionReport: executionReport,
3013
3067
  outputParameters: {},
3014
3068
  usage: ZERO_USAGE,
@@ -3117,6 +3171,7 @@
3117
3171
  return [2 /*return*/, deepFreezeWithSameType({
3118
3172
  isSuccessful: false,
3119
3173
  errors: __spreadArray([error_1], __read(errors), false),
3174
+ warnings: warnings,
3120
3175
  usage: usage_1,
3121
3176
  executionReport: executionReport,
3122
3177
  outputParameters: outputParameters_1,
@@ -3130,6 +3185,7 @@
3130
3185
  return [2 /*return*/, deepFreezeWithSameType({
3131
3186
  isSuccessful: true,
3132
3187
  errors: errors,
3188
+ warnings: warnings,
3133
3189
  usage: usage,
3134
3190
  executionReport: executionReport,
3135
3191
  outputParameters: outputParameters,
@@ -3156,7 +3212,7 @@
3156
3212
  */
3157
3213
  function prepareKnowledgeFromMarkdown(content /* <- TODO: [🖖] (?maybe not) Always the file */, options) {
3158
3214
  return __awaiter(this, void 0, void 0, function () {
3159
- var llmTools, _a, maxParallelCount, _b, isVerbose, collection, prepareKnowledgeFromMarkdownExecutor, _c, prepareTitleExecutor, _d, prepareKeywordsExecutor, _e, result, outputParameters, knowledgeRaw, knowledgeTextPieces, knowledge;
3215
+ var llmTools, _a, maxParallelCount, _b, isVerbose, collection, prepareKnowledgeFromMarkdownExecutor, _c, prepareTitleExecutor, _d, prepareKeywordsExecutor, _e, result, outputParameters, knowledgePiecesRaw, knowledgeTextPieces, knowledge;
3160
3216
  var _f, _g, _h;
3161
3217
  var _this = this;
3162
3218
  return __generator(this, function (_j) {
@@ -3197,8 +3253,8 @@
3197
3253
  result = _j.sent();
3198
3254
  assertsExecutionSuccessful(result);
3199
3255
  outputParameters = result.outputParameters;
3200
- knowledgeRaw = outputParameters.knowledge;
3201
- knowledgeTextPieces = (knowledgeRaw || '').split('\n---\n');
3256
+ knowledgePiecesRaw = outputParameters.knowledgePieces;
3257
+ knowledgeTextPieces = (knowledgePiecesRaw || '').split('\n---\n');
3202
3258
  if (isVerbose) {
3203
3259
  console.info('knowledgeTextPieces:', knowledgeTextPieces);
3204
3260
  }
@@ -3466,7 +3522,8 @@
3466
3522
  knowledgePiecesPrepared = partialknowledgePiecesPrepared.map(function (piece) { return (__assign(__assign({}, piece), { preparationIds: [/* TODO: [🧊] -> */ currentPreparation.id] })); });
3467
3523
  // ----- /Knowledge preparation -----
3468
3524
  // TODO: !!!!! Add context to each template (if missing)
3469
- // TODO: !!!!! Apply samples to each template (if missing)
3525
+ // TODO: !!!!! Add knowledge to each template (if missing and is in pipeline defined)
3526
+ // TODO: !!!!! Apply samples to each template (if missing and is for the template defined)
3470
3527
  return [2 /*return*/, __assign(__assign({}, pipeline), { knowledgeSources: knowledgeSourcesPrepared, knowledgePieces: knowledgePiecesPrepared, personas: preparedPersonas, preparations: preparations })];
3471
3528
  }
3472
3529
  });
@@ -7535,7 +7592,7 @@
7535
7592
  prettifyCommand.action(function (filesGlob, _a) {
7536
7593
  var ignore = _a.ignore;
7537
7594
  return __awaiter(_this, void 0, void 0, function () {
7538
- var filePaths, filePaths_1, filePaths_1_1, filePath, promptbookMarkdown, error_1, e_1_1;
7595
+ var filePaths, filePaths_1, filePaths_1_1, filePath, pipelineMarkdown, error_1, e_1_1;
7539
7596
  var e_1, _b;
7540
7597
  return __generator(this, function (_c) {
7541
7598
  switch (_c.label) {
@@ -7556,18 +7613,18 @@
7556
7613
  }
7557
7614
  return [4 /*yield*/, promises.readFile(filePath, 'utf-8')];
7558
7615
  case 4:
7559
- promptbookMarkdown = (_c.sent());
7616
+ pipelineMarkdown = (_c.sent());
7560
7617
  _c.label = 5;
7561
7618
  case 5:
7562
7619
  _c.trys.push([5, 8, , 9]);
7563
- return [4 /*yield*/, prettifyPipelineString(promptbookMarkdown, {
7620
+ return [4 /*yield*/, prettifyPipelineString(pipelineMarkdown, {
7564
7621
  isGraphAdded: true,
7565
7622
  isPrettifyed: true,
7566
7623
  // <- [🕌]
7567
7624
  })];
7568
7625
  case 6:
7569
- promptbookMarkdown = _c.sent();
7570
- return [4 /*yield*/, promises.writeFile(filePath, promptbookMarkdown)];
7626
+ pipelineMarkdown = _c.sent();
7627
+ return [4 /*yield*/, promises.writeFile(filePath, pipelineMarkdown)];
7571
7628
  case 7:
7572
7629
  _c.sent();
7573
7630
  console.info(colors__default["default"].green("Prettify ".concat(filePath)));