@promptbook/cli 0.61.0-26 → 0.61.0-27

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27)
  1. package/esm/index.es.js +26 -31
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/collection/constructors/createCollectionFromDirectory.d.ts +0 -1
  4. package/esm/typings/src/conversion/utils/stringifyPipelineJson.d.ts +1 -1
  5. package/esm/typings/src/conversion/validation/validatePipeline.d.ts +6 -6
  6. package/esm/typings/src/execution/PipelineExecutor.d.ts +1 -1
  7. package/esm/typings/src/execution/createPipelineExecutor.d.ts +3 -3
  8. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +1 -1
  9. package/esm/typings/src/prepare/isPipelinePrepared.d.ts +3 -3
  10. package/esm/typings/src/prepare/preparePipeline.d.ts +1 -1
  11. package/esm/typings/src/prepare/prepareTemplates.d.ts +1 -1
  12. package/esm/typings/src/types/ModelRequirements.d.ts +1 -1
  13. package/esm/typings/src/utils/extractParameterNames.d.ts +0 -3
  14. package/package.json +2 -2
  15. package/umd/index.umd.js +26 -31
  16. package/umd/index.umd.js.map +1 -1
  17. package/umd/typings/src/collection/constructors/createCollectionFromDirectory.d.ts +0 -1
  18. package/umd/typings/src/conversion/utils/stringifyPipelineJson.d.ts +1 -1
  19. package/umd/typings/src/conversion/validation/validatePipeline.d.ts +6 -6
  20. package/umd/typings/src/execution/PipelineExecutor.d.ts +1 -1
  21. package/umd/typings/src/execution/createPipelineExecutor.d.ts +3 -3
  22. package/umd/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +1 -1
  23. package/umd/typings/src/prepare/isPipelinePrepared.d.ts +3 -3
  24. package/umd/typings/src/prepare/preparePipeline.d.ts +1 -1
  25. package/umd/typings/src/prepare/prepareTemplates.d.ts +1 -1
  26. package/umd/typings/src/types/ModelRequirements.d.ts +1 -1
  27. package/umd/typings/src/utils/extractParameterNames.d.ts +0 -3
@@ -43,6 +43,5 @@ type CreatePipelineCollectionFromDirectoryOptions = PipelineStringToJsonOptions
43
43
  export declare function createCollectionFromDirectory(path: string_folder_path, options?: CreatePipelineCollectionFromDirectoryOptions): Promise<PipelineCollection>;
44
44
  export {};
45
45
  /**
46
- * TODO: !!!! [🧠] Library precompilation and do not mix markdown and json pipelines
47
46
  * Note: [🟢] This code should never be published outside of `@pipeline/node`
48
47
  */
@@ -7,7 +7,7 @@ import type { string_json } from '../../types/typeAliases';
7
7
  */
8
8
  export declare function stringifyPipelineJson<TType>(pipeline: TType): string_json<TType>;
9
9
  /**
10
- * TODO: !!!! Not Working propperly @see https://promptbook.studio/samples/mixed-knowledge.ptbk.md
10
+ * TODO: [🐝] Not Working propperly @see https://promptbook.studio/samples/mixed-knowledge.ptbk.md
11
11
  * TODO: [🧠][0] Maybe rename to `stringifyPipelineJson`, `stringifyIndexedJson`,...
12
12
  * TODO: [🧠] Maybe more elegant solution than replacing via regex
13
13
  * TODO: [🍙] Make some standart order of json properties
@@ -26,11 +26,11 @@ export declare function validatePipeline(pipeline: PipelineJson): PipelineJson;
26
26
  * > ex port function validatePipeline(promptbook: really_unknown): asserts promptbook is PipelineJson {
27
27
  */
28
28
  /**
29
- * TODO: [🧠][🐣] !!!! Validate that all samples match expectations
30
- * TODO: [🧠][🐣] !!!! Validate that knowledge is valid (non-void)
31
- * TODO: [🧠][🐣] !!!! Validate that persona can be used only with CHAT variant
32
- * TODO: !!!! Validate that parameter with reserved name not used RESERVED_PARAMETER_NAMES
33
- * TODO: !!!! Validate that reserved parameter is not used as joker
34
- * TODO: [🧠] !!! Validationg not only logic itself but imports around - files and websites and rerefenced pipelines exists
29
+ * TODO: [🐣] !!!! Validate that all samples match expectations
30
+ * TODO: [🐣][🐝] !!!! Validate that knowledge is valid (non-void)
31
+ * TODO: [🐣] !!!! Validate that persona can be used only with CHAT variant
32
+ * TODO: [🐣] !!!! Validate that parameter with reserved name not used RESERVED_PARAMETER_NAMES
33
+ * TODO: [🐣] !!!! Validate that reserved parameter is not used as joker
34
+ * TODO: [🧠] Validation not only logic itself but imports around - files and websites and rerefenced pipelines exists
35
35
  * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
36
36
  */
@@ -48,5 +48,5 @@ export type PipelineExecutorResult = {
48
48
  };
49
49
  /**
50
50
  * TODO: [🧠] Should this file be in /execution or /types folder?
51
- * TODO: [💷] !!!! `assertsExecutionSuccessful` should be the method of `PipelineExecutor` result
51
+ * TODO: [💷] `assertsExecutionSuccessful` should be the method of `PipelineExecutor` result
52
52
  */
@@ -47,8 +47,8 @@ interface CreatePipelineExecutorOptions {
47
47
  export declare function createPipelineExecutor(options: CreatePipelineExecutorOptions): PipelineExecutor;
48
48
  export {};
49
49
  /**
50
- * TODO: !!!! return `preparedPipeline` from execution
51
- * TODO: !!!! `isNotPreparedWarningSupressed`
50
+ * TODO: !!!!! return `preparedPipeline` from execution
51
+ * TODO: !!!!! `isNotPreparedWarningSupressed`
52
52
  * TODO: Use isVerbose here (not only pass to `preparePipeline`)
53
53
  * TODO: [🪂] Use maxParallelCount here (not only pass to `preparePipeline`)
54
54
  * TODO: [♈] Probbably move expectations from templates to parameters
@@ -57,5 +57,5 @@ export {};
57
57
  * Note: CreatePipelineExecutorOptions are just connected to PipelineExecutor so do not extract to types folder
58
58
  * TODO: [🧠][3] transparent = (report intermediate parameters) / opaque execution = (report only output parameters) progress reporting mode
59
59
  * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
60
- * TODO: [💷] !!!! `assertsExecutionSuccessful` should be the method of `PipelineExecutor` result
60
+ * TODO: [🧠][💷] `assertsExecutionSuccessful` should be the method of `PipelineExecutor` result BUT maybe NOT to preserve pure JSON object
61
61
  */
@@ -41,7 +41,7 @@ export declare class AnthropicClaudeExecutionTools implements LlmExecutionTools
41
41
  listModels(): Array<AvailableModel>;
42
42
  }
43
43
  /**
44
- * TODO: !!!! [🍆] JSON mode
44
+ * TODO: [🍆] JSON mode
45
45
  * TODO: [🧠] Maybe handle errors via transformAnthropicError (like transformAzureError)
46
46
  * TODO: Maybe Create some common util for callChatModel and callCompletionModel
47
47
  * TODO: Maybe make custom OpenaiError
@@ -8,7 +8,7 @@ export declare function isPipelinePrepared(pipeline: PipelineJson): boolean;
8
8
  * TODO: [🔼] Export via core or utils
9
9
  * TODO: [🧊] Pipeline can be partially prepared, this should return true ONLY if fully prepared
10
10
  * TODO: [🧿] Maybe do same process with same granularity and subfinctions as `preparePipeline`
11
- * - Is context in each template
12
- * - Are samples prepared
13
- * - Are templates prepared
11
+ * - [🏍] ? Is context in each template
12
+ * - [♨] Are samples prepared
13
+ * - [♨] Are templates prepared
14
14
  */
@@ -12,6 +12,6 @@ export declare function preparePipeline(pipeline: PipelineJson, options: Prepare
12
12
  * TODO: Write tests for `preparePipeline`
13
13
  * TODO: [🏏] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
14
14
  * TODO: [🧊] In future one preparation can take data from previous preparation and save tokens and time
15
- * TODO: [🎐] !!!! Use here countTotalUsage
15
+ * TODO: [🎐] !!!!!! Use here countTotalUsage
16
16
  * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
17
17
  */
@@ -21,7 +21,7 @@ export {};
21
21
  /**
22
22
  * TODO: [🧠] Add context to each template (if missing)
23
23
  * TODO: [🧠] What is better name `prepareTemplate` or `prepareTemplateAndParameters`
24
- * TODO: !!!!! Index the samples and maybe templates
24
+ * TODO: [♨] !!! Prepare index the samples and maybe templates
25
25
  * TODO: [🔼] !!! Export via `@promptbook/core`
26
26
  * TODO: Write tests for `preparePipeline`
27
27
  * TODO: [🏏] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
@@ -91,7 +91,7 @@ export type CommonModelRequirements = {
91
91
  readonly maxTokens?: number;
92
92
  };
93
93
  /**
94
- * TODO: [🔼] !!!! Export all from `@promptbook/types`
94
+ * TODO: [🔼] !!!!! (<- To all [🔼]) Export all from `@promptbook/types`
95
95
  * TODO: [🧠][🈁] `seed` should maybe be somewhere else (not in `ModelRequirements`) (simmilar that `user` identification is not here)
96
96
  * TODO: [🧠][💱] Add more model options: `stop_token`, `logit_bias`, `logprobs` (`top_logprobs`), `top_k`, `top_p`, `presence_penalty`, `frequency_penalty`, `bestOf`, `logitBias`, `logitBiasType`,...
97
97
  * [💱] Probbably keep using just `temperature` in Promptbook (not `top_k` and `top_p`)
@@ -7,6 +7,3 @@ import type { string_template } from '../types/typeAliases';
7
7
  * @returns the list of parameter names
8
8
  */
9
9
  export declare function extractParameterNames(template: string_template): Set<string_parameter_name>;
10
- /**
11
- * TODO: !!!!! Rename to extractParameterNames
12
- */
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@promptbook/cli",
3
- "version": "0.61.0-26",
3
+ "version": "0.61.0-27",
4
4
  "description": "Supercharge your use of large language models",
5
5
  "private": false,
6
6
  "sideEffects": false,
@@ -54,7 +54,7 @@
54
54
  }
55
55
  ],
56
56
  "peerDependencies": {
57
- "@promptbook/core": "0.61.0-26"
57
+ "@promptbook/core": "0.61.0-27"
58
58
  },
59
59
  "main": "./umd/index.umd.js",
60
60
  "module": "./esm/index.es.js",
package/umd/index.umd.js CHANGED
@@ -154,7 +154,7 @@
154
154
  /**
155
155
  * The version of the Promptbook library
156
156
  */
157
- var PROMPTBOOK_VERSION = '0.61.0-25';
157
+ var PROMPTBOOK_VERSION = '0.61.0-26';
158
158
  // TODO: !!!! List here all the versions and annotate + put into script
159
159
 
160
160
  /**
@@ -751,7 +751,7 @@
751
751
  });
752
752
  }
753
753
 
754
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-25",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-25",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0-25",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by 
comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-25",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0-25",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> 
{knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-25",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0-25",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. 
The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-25",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
754
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-26",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-26",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0-26",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by 
comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-26",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0-26",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> 
{knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-26",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0-26",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. 
The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-26",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
755
755
 
756
756
  /**
757
757
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
@@ -829,7 +829,7 @@
829
829
  if ( /* version === '1.0.0' || */version === '2.0.0' || version === '3.0.0') {
830
830
  return false;
831
831
  }
832
- // <- TODO: !!!! Check isValidPromptbookVersion against PROMPTBOOK_VERSIONS
832
+ // <- TODO: !!! Check isValidPromptbookVersion against PROMPTBOOK_VERSIONS
833
833
  return true;
834
834
  }
835
835
 
@@ -1169,12 +1169,12 @@
1169
1169
  * > ex port function validatePipeline(promptbook: really_unknown): asserts promptbook is PipelineJson {
1170
1170
  */
1171
1171
  /**
1172
- * TODO: [🧠][🐣] !!!! Validate that all samples match expectations
1173
- * TODO: [🧠][🐣] !!!! Validate that knowledge is valid (non-void)
1174
- * TODO: [🧠][🐣] !!!! Validate that persona can be used only with CHAT variant
1175
- * TODO: !!!! Validate that parameter with reserved name not used RESERVED_PARAMETER_NAMES
1176
- * TODO: !!!! Validate that reserved parameter is not used as joker
1177
- * TODO: [🧠] !!! Validationg not only logic itself but imports around - files and websites and rerefenced pipelines exists
1172
+ * TODO: [🐣] !!!! Validate that all samples match expectations
1173
+ * TODO: [🐣][🐝] !!!! Validate that knowledge is valid (non-void)
1174
+ * TODO: [🐣] !!!! Validate that persona can be used only with CHAT variant
1175
+ * TODO: [🐣] !!!! Validate that parameter with reserved name not used RESERVED_PARAMETER_NAMES
1176
+ * TODO: [🐣] !!!! Validate that reserved parameter is not used as joker
1177
+ * TODO: [🧠] Validation not only logic itself but imports around - files and websites and rerefenced pipelines exists
1178
1178
  * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
1179
1179
  */
1180
1180
 
@@ -1232,9 +1232,6 @@
1232
1232
  }
1233
1233
  return parameterNames;
1234
1234
  }
1235
- /**
1236
- * TODO: !!!!! Rename to extractParameterNames
1237
- */
1238
1235
 
1239
1236
  /**
1240
1237
  * Unprepare just strips the preparation data of the pipeline
@@ -1875,7 +1872,7 @@
1875
1872
  /**
1876
1873
  * Function isValidJsonString will tell you if the string is valid JSON or not
1877
1874
  */
1878
- function isValidJsonString(value /* <-[👨‍⚖️] */) {
1875
+ function isValidJsonString(value /* <- [👨‍⚖️] */) {
1879
1876
  try {
1880
1877
  JSON.parse(value);
1881
1878
  return true;
@@ -2159,9 +2156,9 @@
2159
2156
  * TODO: [🔼] Export via core or utils
2160
2157
  * TODO: [🧊] Pipeline can be partially prepared, this should return true ONLY if fully prepared
2161
2158
  * TODO: [🧿] Maybe do same process with same granularity and subfinctions as `preparePipeline`
2162
- * - Is context in each template
2163
- * - Are samples prepared
2164
- * - Are templates prepared
2159
+ * - [🏍] ? Is context in each template
2160
+ * - [♨] Are samples prepared
2161
+ * - [♨] Are templates prepared
2165
2162
  */
2166
2163
 
2167
2164
  /**
@@ -2492,7 +2489,7 @@
2492
2489
  pipeline = rawPipeline;
2493
2490
  }
2494
2491
  else {
2495
- // TODO: !!!! This should be maybe warning in report
2492
+ // TODO: !!!!! This should be maybe warning in report
2496
2493
  console.warn(spaceTrim.spaceTrim("\n Pipeline ".concat(rawPipeline.pipelineUrl || rawPipeline.sourceFile || rawPipeline.title, " is not prepared\n\n ").concat(rawPipeline.sourceFile, "\n\n It will be prepared ad-hoc before the first execution\n But it is recommended to prepare the pipeline during collection preparation\n\n @see more at https://ptbk.io/prepare-pipeline\n ")));
2497
2494
  }
2498
2495
  var pipelineExecutor = function (inputParameters, onProgress) { return __awaiter(_this, void 0, void 0, function () {
@@ -2502,7 +2499,7 @@
2502
2499
  return __awaiter(this, void 0, void 0, function () {
2503
2500
  return __generator(this, function (_a) {
2504
2501
  TODO_USE(template);
2505
- return [2 /*return*/, RESERVED_PARAMETER_MISSING_VALUE /* <- TODO: !!!! Implement */];
2502
+ return [2 /*return*/, RESERVED_PARAMETER_MISSING_VALUE /* <- TODO: [🏍] Implement */];
2506
2503
  });
2507
2504
  });
2508
2505
  }
@@ -2510,7 +2507,7 @@
2510
2507
  template) {
2511
2508
  return __awaiter(this, void 0, void 0, function () {
2512
2509
  return __generator(this, function (_a) {
2513
- // TODO: !!!! Implement Better - use real index and keyword search
2510
+ // TODO: [♨] Implement Better - use real index and keyword search
2514
2511
  TODO_USE(template);
2515
2512
  return [2 /*return*/, pipeline.knowledgePieces.map(function (_a) {
2516
2513
  var content = _a.content;
@@ -2523,9 +2520,9 @@
2523
2520
  template) {
2524
2521
  return __awaiter(this, void 0, void 0, function () {
2525
2522
  return __generator(this, function (_a) {
2526
- // TODO: !!!! Implement Better - use real index and keyword search
2523
+ // TODO: [♨] Implement Better - use real index and keyword search
2527
2524
  TODO_USE(template);
2528
- return [2 /*return*/, RESERVED_PARAMETER_MISSING_VALUE /* <- TODO: !!!! Implement */];
2525
+ return [2 /*return*/, RESERVED_PARAMETER_MISSING_VALUE /* <- TODO: [♨] Implement */];
2529
2526
  });
2530
2527
  });
2531
2528
  }
@@ -3251,8 +3248,8 @@
3251
3248
  return pipelineExecutor;
3252
3249
  }
3253
3250
  /**
3254
- * TODO: !!!! return `preparedPipeline` from execution
3255
- * TODO: !!!! `isNotPreparedWarningSupressed`
3251
+ * TODO: !!!!! return `preparedPipeline` from execution
3252
+ * TODO: !!!!! `isNotPreparedWarningSupressed`
3256
3253
  * TODO: Use isVerbose here (not only pass to `preparePipeline`)
3257
3254
  * TODO: [🪂] Use maxParallelCount here (not only pass to `preparePipeline`)
3258
3255
  * TODO: [♈] Probbably move expectations from templates to parameters
@@ -3261,7 +3258,7 @@
3261
3258
  * Note: CreatePipelineExecutorOptions are just connected to PipelineExecutor so do not extract to types folder
3262
3259
  * TODO: [🧠][3] transparent = (report intermediate parameters) / opaque execution = (report only output parameters) progress reporting mode
3263
3260
  * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
3264
- * TODO: [💷] !!!! `assertsExecutionSuccessful` should be the method of `PipelineExecutor` result
3261
+ * TODO: [🧠][💷] `assertsExecutionSuccessful` should be the method of `PipelineExecutor` result BUT maybe NOT to preserve pure JSON object
3265
3262
  */
3266
3263
 
3267
3264
  /**
@@ -3537,7 +3534,7 @@
3537
3534
  case 0:
3538
3535
  _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? MAX_PARALLEL_COUNT : _a;
3539
3536
  promptTemplates = pipeline.promptTemplates, parameters = pipeline.parameters, knowledgePiecesCount = pipeline.knowledgePiecesCount;
3540
- // TODO: !!!! Apply samples to each template (if missing and is for the template defined)
3537
+ // TODO: !!!!!! Apply samples to each template (if missing and is for the template defined)
3541
3538
  TODO_USE(parameters);
3542
3539
  promptTemplatesPrepared = new Array(promptTemplates.length);
3543
3540
  return [4 /*yield*/, forEachAsync(promptTemplates, { maxParallelCount: maxParallelCount /* <- TODO: [🪂] When there are subtasks, this maximul limit can be broken */ }, function (template, index) { return __awaiter(_this, void 0, void 0, function () {
@@ -3567,7 +3564,7 @@
3567
3564
  /**
3568
3565
  * TODO: [🧠] Add context to each template (if missing)
3569
3566
  * TODO: [🧠] What is better name `prepareTemplate` or `prepareTemplateAndParameters`
3570
- * TODO: !!!!! Index the samples and maybe templates
3567
+ * TODO: [♨] !!! Prepare index the samples and maybe templates
3571
3568
  * TODO: [🔼] !!! Export via `@promptbook/core`
3572
3569
  * TODO: Write tests for `preparePipeline`
3573
3570
  * TODO: [🏏] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
@@ -3646,7 +3643,7 @@
3646
3643
  * TODO: Write tests for `preparePipeline`
3647
3644
  * TODO: [🏏] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
3648
3645
  * TODO: [🧊] In future one preparation can take data from previous preparation and save tokens and time
3649
- * TODO: [🎐] !!!! Use here countTotalUsage
3646
+ * TODO: [🎐] !!!!!! Use here countTotalUsage
3650
3647
  * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
3651
3648
  */
3652
3649
 
@@ -5959,7 +5956,6 @@
5959
5956
  });
5960
5957
  }
5961
5958
  /**
5962
- * TODO: !!!! [🧠] Library precompilation and do not mix markdown and json pipelines
5963
5959
  * Note: [🟢] This code should never be published outside of `@pipeline/node`
5964
5960
  */
5965
5961
 
@@ -6415,7 +6411,7 @@
6415
6411
  return AnthropicClaudeExecutionTools;
6416
6412
  }());
6417
6413
  /**
6418
- * TODO: !!!! [🍆] JSON mode
6414
+ * TODO: [🍆] JSON mode
6419
6415
  * TODO: [🧠] Maybe handle errors via transformAnthropicError (like transformAzureError)
6420
6416
  * TODO: Maybe Create some common util for callChatModel and callCompletionModel
6421
6417
  * TODO: Maybe make custom OpenaiError
@@ -7006,7 +7002,6 @@
7006
7002
  rawRequest = {
7007
7003
  input: replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName })),
7008
7004
  model: modelName,
7009
- // TODO: !!!! Test model 3 and dimensions
7010
7005
  };
7011
7006
  start = getCurrentIsoDate();
7012
7007
  if (this.options.isVerbose) {
@@ -7347,7 +7342,7 @@
7347
7342
  return pipelineJsonStringified;
7348
7343
  }
7349
7344
  /**
7350
- * TODO: !!!! Not Working propperly @see https://promptbook.studio/samples/mixed-knowledge.ptbk.md
7345
+ * TODO: [🐝] Not Working propperly @see https://promptbook.studio/samples/mixed-knowledge.ptbk.md
7351
7346
  * TODO: [🧠][0] Maybe rename to `stringifyPipelineJson`, `stringifyIndexedJson`,...
7352
7347
  * TODO: [🧠] Maybe more elegant solution than replacing via regex
7353
7348
  * TODO: [🍙] Make some standart order of json properties