@promptbook/cli 0.61.0-26 → 0.61.0-28

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. package/esm/index.es.js +52 -45
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/collection/constructors/createCollectionFromDirectory.d.ts +0 -1
  4. package/esm/typings/src/conversion/utils/stringifyPipelineJson.d.ts +1 -1
  5. package/esm/typings/src/conversion/validation/validatePipeline.d.ts +6 -6
  6. package/esm/typings/src/execution/PipelineExecutor.d.ts +1 -1
  7. package/esm/typings/src/execution/PromptResult.d.ts +15 -0
  8. package/esm/typings/src/execution/createPipelineExecutor.d.ts +3 -3
  9. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +1 -1
  10. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +1 -1
  11. package/esm/typings/src/llm-providers/remote/startRemoteServer.d.ts +2 -2
  12. package/esm/typings/src/prepare/isPipelinePrepared.d.ts +3 -3
  13. package/esm/typings/src/prepare/preparePipeline.d.ts +1 -1
  14. package/esm/typings/src/prepare/prepareTemplates.d.ts +1 -1
  15. package/esm/typings/src/types/ModelRequirements.d.ts +1 -1
  16. package/esm/typings/src/utils/extractParameterNames.d.ts +0 -3
  17. package/package.json +2 -2
  18. package/umd/index.umd.js +52 -45
  19. package/umd/index.umd.js.map +1 -1
  20. package/umd/typings/src/collection/constructors/createCollectionFromDirectory.d.ts +0 -1
  21. package/umd/typings/src/conversion/utils/stringifyPipelineJson.d.ts +1 -1
  22. package/umd/typings/src/conversion/validation/validatePipeline.d.ts +6 -6
  23. package/umd/typings/src/execution/PipelineExecutor.d.ts +1 -1
  24. package/umd/typings/src/execution/PromptResult.d.ts +15 -0
  25. package/umd/typings/src/execution/createPipelineExecutor.d.ts +3 -3
  26. package/umd/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +1 -1
  27. package/umd/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +1 -1
  28. package/umd/typings/src/llm-providers/remote/startRemoteServer.d.ts +2 -2
  29. package/umd/typings/src/prepare/isPipelinePrepared.d.ts +3 -3
  30. package/umd/typings/src/prepare/preparePipeline.d.ts +1 -1
  31. package/umd/typings/src/prepare/prepareTemplates.d.ts +1 -1
  32. package/umd/typings/src/types/ModelRequirements.d.ts +1 -1
  33. package/umd/typings/src/utils/extractParameterNames.d.ts +0 -3
package/esm/index.es.js CHANGED
@@ -150,7 +150,7 @@ new Function("\n try {\n if (typeof WorkerGlobalScope !== 'undefined'
  /**
   * The version of the Promptbook library
   */
- var PROMPTBOOK_VERSION = '0.61.0-25';
+ var PROMPTBOOK_VERSION = '0.61.0-27';
  // TODO: !!!! List here all the versions and annotate + put into script

  /**
@@ -747,7 +747,7 @@ function forEachAsync(array, options, callbackfunction) {
  });
  }

- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-25",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-25",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0-25",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-25",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0-25",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, 
write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-25",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0-25",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-25",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-27",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-27",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0-27",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-27",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0-27",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, 
write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-27",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0-27",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-27",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];

  /**
   * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
@@ -825,7 +825,7 @@ function isValidPromptbookVersion(version) {
  if ( /* version === '1.0.0' || */version === '2.0.0' || version === '3.0.0') {
  return false;
  }
- // <- TODO: !!!! Check isValidPromptbookVersion against PROMPTBOOK_VERSIONS
+ // <- TODO: !!! Check isValidPromptbookVersion against PROMPTBOOK_VERSIONS
  return true;
  }

@@ -1165,12 +1165,12 @@ function validatePipeline(pipeline) {
  * > ex port function validatePipeline(promptbook: really_unknown): asserts promptbook is PipelineJson {
  */
  /**
- * TODO: [🧠][🐣] !!!! Validate that all samples match expectations
- * TODO: [🧠][🐣] !!!! Validate that knowledge is valid (non-void)
- * TODO: [🧠][🐣] !!!! Validate that persona can be used only with CHAT variant
- * TODO: !!!! Validate that parameter with reserved name not used RESERVED_PARAMETER_NAMES
- * TODO: !!!! Validate that reserved parameter is not used as joker
- * TODO: [🧠] !!! Validationg not only logic itself but imports around - files and websites and rerefenced pipelines exists
+ * TODO: [🐣] !!!! Validate that all samples match expectations
+ * TODO: [🐣][๐Ÿ] !!!! Validate that knowledge is valid (non-void)
+ * TODO: [🐣] !!!! Validate that persona can be used only with CHAT variant
+ * TODO: [🐣] !!!! Validate that parameter with reserved name not used RESERVED_PARAMETER_NAMES
+ * TODO: [🐣] !!!! Validate that reserved parameter is not used as joker
+ * TODO: [🧠] Validation not only logic itself but imports around - files and websites and rerefenced pipelines exists
  * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
  */

@@ -1228,9 +1228,6 @@ function extractParameterNames(template) {
  }
  return parameterNames;
  }
- /**
- * TODO: !!!!! Rename to extractParameterNames
- */

  /**
   * Unprepare just strips the preparation data of the pipeline
@@ -1871,7 +1868,7 @@ var ExpectError = /** @class */ (function (_super) {
  /**
   * Function isValidJsonString will tell you if the string is valid JSON or not
   */
- function isValidJsonString(value /* <-[👨‍⚖️] */) {
+ function isValidJsonString(value /* <- [👨‍⚖️] */) {
  try {
  JSON.parse(value);
  return true;
@@ -2155,9 +2152,9 @@ function isPipelinePrepared(pipeline) {
  * TODO: [🔼] Export via core or utils
  * TODO: [🧊] Pipeline can be partially prepared, this should return true ONLY if fully prepared
  * TODO: [🧿] Maybe do same process with same granularity and subfinctions as `preparePipeline`
- * - Is context in each template
- * - Are samples prepared
- * - Are templates prepared
+ * - [๐Ÿ] ? Is context in each template
+ * - [♨] Are samples prepared
+ * - [♨] Are templates prepared
  */

  /**
@@ -2488,7 +2485,7 @@ function createPipelineExecutor(options) {
  pipeline = rawPipeline;
  }
  else {
- // TODO: !!!! This should be maybe warning in report
+ // TODO: !!!!! This should be maybe warning in report
  console.warn(spaceTrim$1("\n Pipeline ".concat(rawPipeline.pipelineUrl || rawPipeline.sourceFile || rawPipeline.title, " is not prepared\n\n ").concat(rawPipeline.sourceFile, "\n\n It will be prepared ad-hoc before the first execution\n But it is recommended to prepare the pipeline during collection preparation\n\n @see more at https://ptbk.io/prepare-pipeline\n ")));
  }
  var pipelineExecutor = function (inputParameters, onProgress) { return __awaiter(_this, void 0, void 0, function () {
@@ -2498,7 +2495,7 @@ function createPipelineExecutor(options) {
  return __awaiter(this, void 0, void 0, function () {
  return __generator(this, function (_a) {
  TODO_USE(template);
- return [2 /*return*/, RESERVED_PARAMETER_MISSING_VALUE /* <- TODO: !!!! Implement */];
+ return [2 /*return*/, RESERVED_PARAMETER_MISSING_VALUE /* <- TODO: [๐Ÿ] Implement */];
  });
  });
  }
@@ -2506,7 +2503,7 @@ function createPipelineExecutor(options) {
  template) {
  return __awaiter(this, void 0, void 0, function () {
  return __generator(this, function (_a) {
- // TODO: !!!! Implement Better - use real index and keyword search
+ // TODO: [♨] Implement Better - use real index and keyword search
  TODO_USE(template);
  return [2 /*return*/, pipeline.knowledgePieces.map(function (_a) {
  var content = _a.content;
@@ -2519,9 +2516,9 @@ function createPipelineExecutor(options) {
  template) {
  return __awaiter(this, void 0, void 0, function () {
  return __generator(this, function (_a) {
- // TODO: !!!! Implement Better - use real index and keyword search
+ // TODO: [♨] Implement Better - use real index and keyword search
  TODO_USE(template);
- return [2 /*return*/, RESERVED_PARAMETER_MISSING_VALUE /* <- TODO: !!!! Implement */];
+ return [2 /*return*/, RESERVED_PARAMETER_MISSING_VALUE /* <- TODO: [♨] Implement */];
  });
  });
  }
@@ -3247,8 +3244,8 @@ function createPipelineExecutor(options) {
  return pipelineExecutor;
  }
  /**
- * TODO: !!!! return `preparedPipeline` from execution
- * TODO: !!!! `isNotPreparedWarningSupressed`
+ * TODO: !!!!! return `preparedPipeline` from execution
+ * TODO: !!!!! `isNotPreparedWarningSupressed`
  * TODO: Use isVerbose here (not only pass to `preparePipeline`)
  * TODO: [🪂] Use maxParallelCount here (not only pass to `preparePipeline`)
  * TODO: [♈] Probbably move expectations from templates to parameters
@@ -3257,7 +3254,7 @@ function createPipelineExecutor(options) {
  * Note: CreatePipelineExecutorOptions are just connected to PipelineExecutor so do not extract to types folder
  * TODO: [🧠][3] transparent = (report intermediate parameters) / opaque execution = (report only output parameters) progress reporting mode
  * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
- * TODO: [💷] !!!! `assertsExecutionSuccessful` should be the method of `PipelineExecutor` result
+ * TODO: [🧠][💷] `assertsExecutionSuccessful` should be the method of `PipelineExecutor` result BUT maybe NOT to preserve pure JSON object
  */

  /**
@@ -3533,7 +3530,7 @@ function prepareTemplates(pipeline, options) {
  case 0:
  _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? MAX_PARALLEL_COUNT : _a;
  promptTemplates = pipeline.promptTemplates, parameters = pipeline.parameters, knowledgePiecesCount = pipeline.knowledgePiecesCount;
- // TODO: !!!! Apply samples to each template (if missing and is for the template defined)
+ // TODO: !!!!! Apply samples to each template (if missing and is for the template defined)
  TODO_USE(parameters);
  promptTemplatesPrepared = new Array(promptTemplates.length);
  return [4 /*yield*/, forEachAsync(promptTemplates, { maxParallelCount: maxParallelCount /* <- TODO: [🪂] When there are subtasks, this maximul limit can be broken */ }, function (template, index) { return __awaiter(_this, void 0, void 0, function () {
@@ -3563,7 +3560,7 @@ function prepareTemplates(pipeline, options) {
  /**
  * TODO: [🧠] Add context to each template (if missing)
  * TODO: [🧠] What is better name `prepareTemplate` or `prepareTemplateAndParameters`
- * TODO: !!!!! Index the samples and maybe templates
+ * TODO: [♨] !!! Prepare index the samples and maybe templates
  * TODO: [🔼] !!! Export via `@promptbook/core`
  * TODO: Write tests for `preparePipeline`
  * TODO: [๐Ÿ] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
@@ -3642,7 +3639,7 @@ function preparePipeline(pipeline, options) {
  * TODO: Write tests for `preparePipeline`
  * TODO: [๐Ÿ] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
  * TODO: [🧊] In future one preparation can take data from previous preparation and save tokens and time
- * TODO: [🎐] !!!! Use here countTotalUsage
+ * TODO: [🎐] !!!!! Use here countTotalUsage
  * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
  */

@@ -5955,7 +5952,6 @@ function listAllFiles(path, isRecursive) {
  });
  }
  /**
- * TODO: !!!! [🧠] Library precompilation and do not mix markdown and json pipelines
  * Note: [🟢] This code should never be published outside of `@pipeline/node`
  */

@@ -6238,7 +6234,7 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
  */
  AnthropicClaudeExecutionTools.prototype.callChatModel = function (prompt) {
  return __awaiter(this, void 0, void 0, function () {
- var content, parameters, modelRequirements, modelName, rawRequest, start, complete, rawResponse, resultContent, usage;
+ var content, parameters, modelRequirements, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
  return __generator(this, function (_a) {
  switch (_a.label) {
  case 0:
@@ -6251,6 +6247,7 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
  throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
  }
  modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
+ rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
  rawRequest = {
  model: modelRequirements.modelName || this.getDefaultChatModel().modelName,
  max_tokens: modelRequirements.maxTokens || 4096,
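
This hunk, like its OpenAI counterparts further down, hoists the `replaceParameters(...)` call into a single `rawPromptContent` variable instead of inlining it where the request is built. For orientation, a minimal sketch of what such a parameter-substitution helper does, assuming `{name}` placeholder syntax; the function body and error handling below are illustrative assumptions, not the library's actual implementation:

function replaceParametersSketch(template: string, parameters: Record<string, string>): string {
    // Fill every {parameterName} placeholder from the given parameters
    return template.replace(/{(\w+)}/g, (match, name: string) => {
        const value = parameters[name];
        if (value === undefined) {
            // The real helper reports missing parameters with its own error type
            throw new Error(`Parameter ${match} is not defined`);
        }
        return value;
    });
}

// Usage mirroring the diff: modelName is merged into the parameters,
// so a prompt template may reference {modelName} like any other parameter
const rawPromptContent = replaceParametersSketch('Hello {name}, answered by {modelName}', {
    name: 'Alice',
    modelName: 'claude-3-opus-20240229',
});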
@@ -6262,7 +6259,7 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
  messages: [
  {
  role: 'user',
- content: replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName })),
+ content: rawPromptContent,
  },
  ],
  // TODO: Is here some equivalent of user identification?> user: this.options.user,
@@ -6299,8 +6296,10 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
  complete: complete,
  },
  usage: usage,
+ rawPromptContent: rawPromptContent,
+ rawRequest: rawRequest,
  rawResponse: rawResponse,
- // <- [🤹‍♂️]
+ // <- [🗯]
  }];
  }
  });
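
Paired with the 15 new lines in `PromptResult.d.ts` listed above, the hunks in this region make every call result carry the templated prompt and the exact provider payload next to the raw response. A compact sketch of the resulting flow, using simplified stand-in types rather than the package's real `PromptResult` typings:

// Simplified stand-in shapes; the real result also carries timing and usage
type ChatResultSketch = {
    content: string;
    modelName: string;
    rawPromptContent: string; // the prompt after {parameter} substitution
    rawRequest: unknown; // the exact payload sent to the provider
    rawResponse: unknown; // the provider's unmodified answer
};

async function callChatModelSketch(
    send: (request: { model: string; messages: Array<{ role: string; content: string }> }) => Promise<{ text: string }>,
    content: string,
    parameters: Record<string, string>,
    modelName: string,
): Promise<ChatResultSketch> {
    // Template once, then reuse the same string for the request and the result
    const rawPromptContent = content.replace(/{(\w+)}/g, (match, name: string) => parameters[name] ?? match);
    const rawRequest = { model: modelName, messages: [{ role: 'user', content: rawPromptContent }] };
    const rawResponse = await send(rawRequest);
    return { content: rawResponse.text, modelName, rawPromptContent, rawRequest, rawResponse };
}

Because the request object that was actually sent is returned verbatim, callers can log or persist it for debugging without re-deriving the prompt.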
@@ -6333,7 +6332,7 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {

  const rawRequest: xxxx.Completions.CompletionCreateParamsNonStreaming = {
  ...modelSettings,
- prompt: replaceParameters(content, { ...parameters, modelName }),
+ prompt: rawPromptContent,
  user: this.options.user,
  };
  const start: string_date_iso8601 = getCurrentIsoDate();
@@ -6372,7 +6371,7 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
  },
  usage,
  rawResponse,
- // <- [🤹‍♂️]
+ // <- [🗯]
  };
  }
  */
@@ -6411,7 +6410,7 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
  return AnthropicClaudeExecutionTools;
  }());
  /**
- * TODO: !!!! [๐Ÿ†] JSON mode
+ * TODO: [๐Ÿ†] JSON mode
  * TODO: [🧠] Maybe handle errors via transformAnthropicError (like transformAzureError)
  * TODO: Maybe Create some common util for callChatModel and callCompletionModel
  * TODO: Maybe make custom OpenaiError
@@ -6840,7 +6839,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
  */
  OpenAiExecutionTools.prototype.callChatModel = function (prompt) {
  return __awaiter(this, void 0, void 0, function () {
- var content, parameters, modelRequirements, expectFormat, modelName, modelSettings, rawRequest, start, complete, rawResponse, resultContent, usage;
+ var content, parameters, modelRequirements, expectFormat, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
  return __generator(this, function (_a) {
  switch (_a.label) {
  case 0:
@@ -6866,6 +6865,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
  type: 'json_object',
  };
  }
+ rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
  rawRequest = __assign(__assign({}, modelSettings), { messages: __spreadArray(__spreadArray([], __read((modelRequirements.systemMessage === undefined
  ? []
  : [
@@ -6876,7 +6876,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
  ])), false), [
  {
  role: 'user',
- content: replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName })),
+ content: rawPromptContent,
  },
  ], false), user: this.options.user });
  start = getCurrentIsoDate();
@@ -6911,8 +6911,10 @@ var OpenAiExecutionTools = /** @class */ (function () {
  complete: complete,
  },
  usage: usage,
+ rawPromptContent: rawPromptContent,
+ rawRequest: rawRequest,
  rawResponse: rawResponse,
- // <- [🤹‍♂️]
+ // <- [🗯]
  }];
  }
  });
@@ -6923,7 +6925,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
  */
  OpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
  return __awaiter(this, void 0, void 0, function () {
- var content, parameters, modelRequirements, modelName, modelSettings, rawRequest, start, complete, rawResponse, resultContent, usage;
+ var content, parameters, modelRequirements, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
  return __generator(this, function (_a) {
  switch (_a.label) {
  case 0:
@@ -6944,7 +6946,8 @@ var OpenAiExecutionTools = /** @class */ (function () {
  // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
  // <- Note: [🧆]
  };
- rawRequest = __assign(__assign({}, modelSettings), { prompt: replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName })), user: this.options.user });
+ rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
+ rawRequest = __assign(__assign({}, modelSettings), { prompt: rawPromptContent, user: this.options.user });
  start = getCurrentIsoDate();
  if (this.options.isVerbose) {
  console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
@@ -6974,8 +6977,10 @@ var OpenAiExecutionTools = /** @class */ (function () {
  complete: complete,
  },
  usage: usage,
+ rawPromptContent: rawPromptContent,
+ rawRequest: rawRequest,
  rawResponse: rawResponse,
- // <- [🤹‍♂️]
+ // <- [🗯]
  }];
  }
  });
@@ -6986,7 +6991,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
  */
  OpenAiExecutionTools.prototype.callEmbeddingModel = function (prompt) {
  return __awaiter(this, void 0, void 0, function () {
- var content, parameters, modelRequirements, modelName, rawRequest, start, complete, rawResponse, resultContent, usage;
+ var content, parameters, modelRequirements, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
  return __generator(this, function (_a) {
  switch (_a.label) {
  case 0:
@@ -6999,10 +7004,10 @@ var OpenAiExecutionTools = /** @class */ (function () {
  throw new PipelineExecutionError('Use embed only for EMBEDDING variant');
  }
  modelName = modelRequirements.modelName || this.getDefaultEmbeddingModel().modelName;
+ rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
  rawRequest = {
- input: replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName })),
+ input: rawPromptContent,
  model: modelName,
- // TODO: !!!! Test model 3 and dimensions
  };
  start = getCurrentIsoDate();
  if (this.options.isVerbose) {
@@ -7029,8 +7034,10 @@ var OpenAiExecutionTools = /** @class */ (function () {
  complete: complete,
  },
  usage: usage,
+ rawPromptContent: rawPromptContent,
+ rawRequest: rawRequest,
  rawResponse: rawResponse,
- // <- [🤹‍♂️]
+ // <- [🗯]
  }];
  }
  });
@@ -7343,7 +7350,7 @@ function stringifyPipelineJson(pipeline) {
  return pipelineJsonStringified;
  }
  /**
- * TODO: !!!! Not Working propperly @see https://promptbook.studio/samples/mixed-knowledge.ptbk.md
+ * TODO: [๐Ÿ] Not Working propperly @see https://promptbook.studio/samples/mixed-knowledge.ptbk.md
  * TODO: [🧠][0] Maybe rename to `stringifyPipelineJson`, `stringifyIndexedJson`,...
  * TODO: [🧠] Maybe more elegant solution than replacing via regex
  * TODO: [🍙] Make some standart order of json properties