@promptbook/node 0.61.0-26 → 0.61.0-28

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. package/esm/index.es.js +51 -44
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/collection/constructors/createCollectionFromDirectory.d.ts +0 -1
  4. package/esm/typings/src/conversion/utils/stringifyPipelineJson.d.ts +1 -1
  5. package/esm/typings/src/conversion/validation/validatePipeline.d.ts +6 -6
  6. package/esm/typings/src/execution/PipelineExecutor.d.ts +1 -1
  7. package/esm/typings/src/execution/PromptResult.d.ts +15 -0
  8. package/esm/typings/src/execution/createPipelineExecutor.d.ts +3 -3
  9. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +1 -1
  10. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +1 -1
  11. package/esm/typings/src/llm-providers/remote/startRemoteServer.d.ts +2 -2
  12. package/esm/typings/src/prepare/isPipelinePrepared.d.ts +3 -3
  13. package/esm/typings/src/prepare/preparePipeline.d.ts +1 -1
  14. package/esm/typings/src/prepare/prepareTemplates.d.ts +1 -1
  15. package/esm/typings/src/types/ModelRequirements.d.ts +1 -1
  16. package/esm/typings/src/utils/extractParameterNames.d.ts +0 -3
  17. package/package.json +2 -2
  18. package/umd/index.umd.js +51 -44
  19. package/umd/index.umd.js.map +1 -1
  20. package/umd/typings/src/collection/constructors/createCollectionFromDirectory.d.ts +0 -1
  21. package/umd/typings/src/conversion/utils/stringifyPipelineJson.d.ts +1 -1
  22. package/umd/typings/src/conversion/validation/validatePipeline.d.ts +6 -6
  23. package/umd/typings/src/execution/PipelineExecutor.d.ts +1 -1
  24. package/umd/typings/src/execution/PromptResult.d.ts +15 -0
  25. package/umd/typings/src/execution/createPipelineExecutor.d.ts +3 -3
  26. package/umd/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +1 -1
  27. package/umd/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +1 -1
  28. package/umd/typings/src/llm-providers/remote/startRemoteServer.d.ts +2 -2
  29. package/umd/typings/src/prepare/isPipelinePrepared.d.ts +3 -3
  30. package/umd/typings/src/prepare/preparePipeline.d.ts +1 -1
  31. package/umd/typings/src/prepare/prepareTemplates.d.ts +1 -1
  32. package/umd/typings/src/types/ModelRequirements.d.ts +1 -1
  33. package/umd/typings/src/utils/extractParameterNames.d.ts +0 -3
package/esm/index.es.js CHANGED
@@ -654,7 +654,7 @@ function forEachAsync(array, options, callbackfunction) {
  });
  }
 
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-25",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-25",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0-25",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-25",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0-25",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, 
write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-25",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0-25",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-25",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-27",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-27",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0-27",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-27",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0-27",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, 
write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-27",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0-27",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-27",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
 
  /**
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
@@ -732,7 +732,7 @@ function isValidPromptbookVersion(version) {
  if ( /* version === '1.0.0' || */version === '2.0.0' || version === '3.0.0') {
  return false;
  }
- // <- TODO: !!!! Check isValidPromptbookVersion against PROMPTBOOK_VERSIONS
+ // <- TODO: !!! Check isValidPromptbookVersion against PROMPTBOOK_VERSIONS
  return true;
  }
 
@@ -1072,12 +1072,12 @@ function validatePipeline(pipeline) {
  * > ex port function validatePipeline(promptbook: really_unknown): asserts promptbook is PipelineJson {
  */
  /**
- * TODO: [🧠][🐣] !!!! Validate that all samples match expectations
- * TODO: [🧠][🐣] !!!! Validate that knowledge is valid (non-void)
- * TODO: [🧠][🐣] !!!! Validate that persona can be used only with CHAT variant
- * TODO: !!!! Validate that parameter with reserved name not used RESERVED_PARAMETER_NAMES
- * TODO: !!!! Validate that reserved parameter is not used as joker
- * TODO: [🧠] !!! Validationg not only logic itself but imports around - files and websites and rerefenced pipelines exists
+ * TODO: [🐣] !!!! Validate that all samples match expectations
+ * TODO: [🐣][๐Ÿ] !!!! Validate that knowledge is valid (non-void)
+ * TODO: [🐣] !!!! Validate that persona can be used only with CHAT variant
+ * TODO: [🐣] !!!! Validate that parameter with reserved name not used RESERVED_PARAMETER_NAMES
+ * TODO: [🐣] !!!! Validate that reserved parameter is not used as joker
+ * TODO: [🧠] Validation not only logic itself but imports around - files and websites and rerefenced pipelines exists
  * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
  */
 
@@ -1135,9 +1135,6 @@ function extractParameterNames(template) {
  }
  return parameterNames;
  }
- /**
- * TODO: !!!!! Rename to extractParameterNames
- */

  /**
  * Unprepare just strips the preparation data of the pipeline
@@ -1778,7 +1775,7 @@ var ExpectError = /** @class */ (function (_super) {
  /**
  * Function isValidJsonString will tell you if the string is valid JSON or not
  */
- function isValidJsonString(value /* <-[👨‍⚖️] */) {
+ function isValidJsonString(value /* <- [👨‍⚖️] */) {
  try {
  JSON.parse(value);
  return true;
@@ -2062,9 +2059,9 @@ function isPipelinePrepared(pipeline) {
  * TODO: [🔼] Export via core or utils
  * TODO: [🧊] Pipeline can be partially prepared, this should return true ONLY if fully prepared
  * TODO: [🧿] Maybe do same process with same granularity and subfinctions as `preparePipeline`
- * - Is context in each template
- * - Are samples prepared
- * - Are templates prepared
+ * - [๐Ÿ] ? Is context in each template
+ * - [♨] Are samples prepared
+ * - [♨] Are templates prepared
  */

  /**
@@ -2271,7 +2268,7 @@ function union() {
  /**
  * The version of the Promptbook library
  */
- var PROMPTBOOK_VERSION = '0.61.0-25';
+ var PROMPTBOOK_VERSION = '0.61.0-27';
  // TODO: !!!! List here all the versions and annotate + put into script

  /**
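The constant above is what stamps every prepared pipeline in `PipelineCollection` (see the `promptbookVersion:"0.61.0-27"` fields in the first hunk). A minimal sketch of comparing the two, assuming `PROMPTBOOK_VERSION` is re-exported from the package entry point the way the bundled `var PROMPTBOOK_VERSION` suggests:

```typescript
// Minimal sketch; the import path is an assumption based on the bundle above.
import { PROMPTBOOK_VERSION } from '@promptbook/node';

// Each pipeline in PipelineCollection records the library version that
// prepared it, so a consumer can detect a preparation made by an older build:
function isPreparedByCurrentVersion(pipeline: { promptbookVersion: string }): boolean {
    return pipeline.promptbookVersion === PROMPTBOOK_VERSION;
}
```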
@@ -2401,7 +2398,7 @@ function createPipelineExecutor(options) {
  pipeline = rawPipeline;
  }
  else {
- // TODO: !!!! This should be maybe warning in report
+ // TODO: !!!!! This should be maybe warning in report
  console.warn(spaceTrim$1("\n Pipeline ".concat(rawPipeline.pipelineUrl || rawPipeline.sourceFile || rawPipeline.title, " is not prepared\n\n ").concat(rawPipeline.sourceFile, "\n\n It will be prepared ad-hoc before the first execution\n But it is recommended to prepare the pipeline during collection preparation\n\n @see more at https://ptbk.io/prepare-pipeline\n ")));
  }
  var pipelineExecutor = function (inputParameters, onProgress) { return __awaiter(_this, void 0, void 0, function () {
@@ -2411,7 +2408,7 @@ function createPipelineExecutor(options) {
  return __awaiter(this, void 0, void 0, function () {
  return __generator(this, function (_a) {
  TODO_USE(template);
- return [2 /*return*/, RESERVED_PARAMETER_MISSING_VALUE /* <- TODO: !!!! Implement */];
+ return [2 /*return*/, RESERVED_PARAMETER_MISSING_VALUE /* <- TODO: [๐Ÿ] Implement */];
  });
  });
  }
@@ -2419,7 +2416,7 @@ function createPipelineExecutor(options) {
  template) {
  return __awaiter(this, void 0, void 0, function () {
  return __generator(this, function (_a) {
- // TODO: !!!! Implement Better - use real index and keyword search
+ // TODO: [♨] Implement Better - use real index and keyword search
  TODO_USE(template);
  return [2 /*return*/, pipeline.knowledgePieces.map(function (_a) {
  var content = _a.content;
@@ -2432,9 +2429,9 @@ function createPipelineExecutor(options) {
  template) {
  return __awaiter(this, void 0, void 0, function () {
  return __generator(this, function (_a) {
- // TODO: !!!! Implement Better - use real index and keyword search
+ // TODO: [♨] Implement Better - use real index and keyword search
  TODO_USE(template);
- return [2 /*return*/, RESERVED_PARAMETER_MISSING_VALUE /* <- TODO: !!!! Implement */];
+ return [2 /*return*/, RESERVED_PARAMETER_MISSING_VALUE /* <- TODO: [♨] Implement */];
  });
  });
  }
@@ -3160,8 +3157,8 @@ function createPipelineExecutor(options) {
  return pipelineExecutor;
  }
  /**
- * TODO: !!!! return `preparedPipeline` from execution
- * TODO: !!!! `isNotPreparedWarningSupressed`
+ * TODO: !!!!! return `preparedPipeline` from execution
+ * TODO: !!!!! `isNotPreparedWarningSupressed`
  * TODO: Use isVerbose here (not only pass to `preparePipeline`)
  * TODO: [🪂] Use maxParallelCount here (not only pass to `preparePipeline`)
  * TODO: [♈] Probbably move expectations from templates to parameters
@@ -3170,7 +3167,7 @@ function createPipelineExecutor(options) {
  * Note: CreatePipelineExecutorOptions are just connected to PipelineExecutor so do not extract to types folder
  * TODO: [🧠][3] transparent = (report intermediate parameters) / opaque execution = (report only output parameters) progress reporting mode
  * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
- * TODO: [💷] !!!! `assertsExecutionSuccessful` should be the method of `PipelineExecutor` result
+ * TODO: [🧠][💷] `assertsExecutionSuccessful` should be the method of `PipelineExecutor` result BUT maybe NOT to preserve pure JSON object
  */

  /**
@@ -3446,7 +3443,7 @@ function prepareTemplates(pipeline, options) {
  case 0:
  _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? MAX_PARALLEL_COUNT : _a;
  promptTemplates = pipeline.promptTemplates, parameters = pipeline.parameters, knowledgePiecesCount = pipeline.knowledgePiecesCount;
- // TODO: !!!! Apply samples to each template (if missing and is for the template defined)
+ // TODO: !!!!! Apply samples to each template (if missing and is for the template defined)
  TODO_USE(parameters);
  promptTemplatesPrepared = new Array(promptTemplates.length);
  return [4 /*yield*/, forEachAsync(promptTemplates, { maxParallelCount: maxParallelCount /* <- TODO: [🪂] When there are subtasks, this maximul limit can be broken */ }, function (template, index) { return __awaiter(_this, void 0, void 0, function () {
@@ -3476,7 +3473,7 @@ function prepareTemplates(pipeline, options) {
  /**
  * TODO: [🧠] Add context to each template (if missing)
  * TODO: [🧠] What is better name `prepareTemplate` or `prepareTemplateAndParameters`
- * TODO: !!!!! Index the samples and maybe templates
+ * TODO: [♨] !!! Prepare index the samples and maybe templates
  * TODO: [🔼] !!! Export via `@promptbook/core`
  * TODO: Write tests for `preparePipeline`
  * TODO: [๐Ÿ] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
@@ -3555,7 +3552,7 @@ function preparePipeline(pipeline, options) {
  * TODO: Write tests for `preparePipeline`
  * TODO: [๐Ÿ] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
  * TODO: [🧊] In future one preparation can take data from previous preparation and save tokens and time
- * TODO: [๐ŸŽ] !!!! Use here countTotalUsage
+ * TODO: [๐ŸŽ] !!!!! Use here countTotalUsage
  * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
  */
 
@@ -5884,7 +5881,6 @@ function listAllFiles(path, isRecursive) {
  });
  }
  /**
- * TODO: !!!! [🧠] Library precompilation and do not mix markdown and json pipelines
  * Note: [🟢] This code should never be published outside of `@pipeline/node`
  */
 
@@ -6062,7 +6058,7 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
  */
  AnthropicClaudeExecutionTools.prototype.callChatModel = function (prompt) {
  return __awaiter(this, void 0, void 0, function () {
- var content, parameters, modelRequirements, modelName, rawRequest, start, complete, rawResponse, resultContent, usage;
+ var content, parameters, modelRequirements, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
  return __generator(this, function (_a) {
  switch (_a.label) {
  case 0:
@@ -6075,6 +6071,7 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
  throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
  }
  modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
+ rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
  rawRequest = {
  model: modelRequirements.modelName || this.getDefaultChatModel().modelName,
  max_tokens: modelRequirements.maxTokens || 4096,
@@ -6086,7 +6083,7 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
  messages: [
  {
  role: 'user',
- content: replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName })),
+ content: rawPromptContent,
  },
  ],
  // TODO: Is here some equivalent of user identification?> user: this.options.user,
@@ -6123,8 +6120,10 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
  complete: complete,
  },
  usage: usage,
+ rawPromptContent: rawPromptContent,
+ rawRequest: rawRequest,
  rawResponse: rawResponse,
- // <- [🤹‍♂️]
+ // <- [🗯]
  }];
  }
  });
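This hunk shows the pattern repeated in every provider method below: the `replaceParameters(...)` call is hoisted into a `rawPromptContent` variable, reused when building `rawRequest`, and both are now reported on the result next to `rawResponse` (presumably the 15 lines added to `PromptResult.d.ts`). A simplified sketch of the refactored shape, not the library's actual code; `replaceParameters` here is a stand-in for the promptbook utility of the same name:

```typescript
// Simplified stand-in for promptbook's `replaceParameters` utility.
function replaceParameters(template: string, parameters: Record<string, string>): string {
    return template.replace(/{(\w+)}/g, (_match: string, name: string) => parameters[name] ?? `{${name}}`);
}

async function callChatModel(content: string, parameters: Record<string, string>) {
    const modelName = 'claude-3-opus-20240229';

    // The resolved prompt text is now computed once and kept in a variable...
    const rawPromptContent = replaceParameters(content, { ...parameters, modelName });

    // ...reused when building the provider request...
    const rawRequest = {
        model: modelName,
        max_tokens: 4096,
        messages: [{ role: 'user' as const, content: rawPromptContent }],
    };

    const rawResponse = { content: '(model output)' }; // stand-in for the real API call

    // ...and surfaced on the result next to `rawResponse`, so callers can see
    // exactly what was sent, not only what came back.
    return { content: rawResponse.content, usage: {}, rawPromptContent, rawRequest, rawResponse };
}
```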
@@ -6157,7 +6156,7 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
 
  const rawRequest: xxxx.Completions.CompletionCreateParamsNonStreaming = {
  ...modelSettings,
- prompt: replaceParameters(content, { ...parameters, modelName }),
+ prompt: rawPromptContent,
  user: this.options.user,
  };
  const start: string_date_iso8601 = getCurrentIsoDate();
@@ -6196,7 +6195,7 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
  },
  usage,
  rawResponse,
- // <- [🤹‍♂️]
+ // <- [🗯]
  };
  }
  */
@@ -6235,7 +6234,7 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
  return AnthropicClaudeExecutionTools;
  }());
  /**
- * TODO: !!!! [๐Ÿ†] JSON mode
+ * TODO: [๐Ÿ†] JSON mode
  * TODO: [🧠] Maybe handle errors via transformAnthropicError (like transformAzureError)
  * TODO: Maybe Create some common util for callChatModel and callCompletionModel
  * TODO: Maybe make custom OpenaiError
@@ -6664,7 +6663,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
  */
  OpenAiExecutionTools.prototype.callChatModel = function (prompt) {
  return __awaiter(this, void 0, void 0, function () {
- var content, parameters, modelRequirements, expectFormat, modelName, modelSettings, rawRequest, start, complete, rawResponse, resultContent, usage;
+ var content, parameters, modelRequirements, expectFormat, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
  return __generator(this, function (_a) {
  switch (_a.label) {
  case 0:
@@ -6690,6 +6689,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
  type: 'json_object',
  };
  }
+ rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
  rawRequest = __assign(__assign({}, modelSettings), { messages: __spreadArray(__spreadArray([], __read((modelRequirements.systemMessage === undefined
  ? []
  : [
@@ -6700,7 +6700,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
  ])), false), [
  {
  role: 'user',
- content: replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName })),
+ content: rawPromptContent,
  },
  ], false), user: this.options.user });
  start = getCurrentIsoDate();
@@ -6735,8 +6735,10 @@ var OpenAiExecutionTools = /** @class */ (function () {
  complete: complete,
  },
  usage: usage,
+ rawPromptContent: rawPromptContent,
+ rawRequest: rawRequest,
  rawResponse: rawResponse,
- // <- [🤹‍♂️]
+ // <- [🗯]
  }];
  }
  });
@@ -6747,7 +6749,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
  */
  OpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
  return __awaiter(this, void 0, void 0, function () {
- var content, parameters, modelRequirements, modelName, modelSettings, rawRequest, start, complete, rawResponse, resultContent, usage;
+ var content, parameters, modelRequirements, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
  return __generator(this, function (_a) {
  switch (_a.label) {
  case 0:
@@ -6768,7 +6770,8 @@ var OpenAiExecutionTools = /** @class */ (function () {
  // <- TODO: [๐Ÿˆ] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
  // <- Note: [🧆]
  };
- rawRequest = __assign(__assign({}, modelSettings), { prompt: replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName })), user: this.options.user });
+ rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
+ rawRequest = __assign(__assign({}, modelSettings), { prompt: rawPromptContent, user: this.options.user });
  start = getCurrentIsoDate();
  if (this.options.isVerbose) {
  console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
@@ -6798,8 +6801,10 @@ var OpenAiExecutionTools = /** @class */ (function () {
  complete: complete,
  },
  usage: usage,
+ rawPromptContent: rawPromptContent,
+ rawRequest: rawRequest,
  rawResponse: rawResponse,
- // <- [🤹‍♂️]
+ // <- [🗯]
  }];
  }
  });
@@ -6810,7 +6815,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
  */
  OpenAiExecutionTools.prototype.callEmbeddingModel = function (prompt) {
  return __awaiter(this, void 0, void 0, function () {
- var content, parameters, modelRequirements, modelName, rawRequest, start, complete, rawResponse, resultContent, usage;
+ var content, parameters, modelRequirements, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
  return __generator(this, function (_a) {
  switch (_a.label) {
  case 0:
@@ -6823,10 +6828,10 @@ var OpenAiExecutionTools = /** @class */ (function () {
  throw new PipelineExecutionError('Use embed only for EMBEDDING variant');
  }
  modelName = modelRequirements.modelName || this.getDefaultEmbeddingModel().modelName;
+ rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
  rawRequest = {
- input: replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName })),
+ input: rawPromptContent,
  model: modelName,
- // TODO: !!!! Test model 3 and dimensions
  };
  start = getCurrentIsoDate();
  if (this.options.isVerbose) {
@@ -6853,8 +6858,10 @@ var OpenAiExecutionTools = /** @class */ (function () {
  complete: complete,
  },
  usage: usage,
+ rawPromptContent: rawPromptContent,
+ rawRequest: rawRequest,
  rawResponse: rawResponse,
- // <- [🤹‍♂️]
+ // <- [🗯]
  }];
  }
  });
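The same three additions (`rawPromptContent`, `rawRequest`, and the `[🗯]` marker) land in `callChatModel`, `callCompletionModel`, and `callEmbeddingModel` alike. From the consumer side, a hedged sketch of what the enriched result makes possible; `executeChatPrompt` is a hypothetical stand-in, only the `raw*` field names come from this diff:

```typescript
// Illustrative only: the three `raw*` fields are what this release adds to
// prompt results; everything else here is a simplified assumption.
type PromptResult = {
    content: string;
    usage: unknown;
    rawPromptContent: string; // resolved prompt text, new in this release
    rawRequest: object;       // exact provider payload, new in this release
    rawResponse: object;
};

async function debugPromptCall(executeChatPrompt: () => Promise<PromptResult>): Promise<string> {
    const result = await executeChatPrompt();

    // The new fields expose the full request/response round-trip, which is
    // useful for execution reports and debugging prompt-parameter substitution:
    console.log('Prompt sent:', result.rawPromptContent);
    console.log('Request payload:', JSON.stringify(result.rawRequest, null, 2));
    console.log('Raw response:', JSON.stringify(result.rawResponse, null, 2));

    return result.content;
}
```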