@promptbook/node 0.61.0-26 → 0.61.0-27

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27)
  1. package/esm/index.es.js +25 -30
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/collection/constructors/createCollectionFromDirectory.d.ts +0 -1
  4. package/esm/typings/src/conversion/utils/stringifyPipelineJson.d.ts +1 -1
  5. package/esm/typings/src/conversion/validation/validatePipeline.d.ts +6 -6
  6. package/esm/typings/src/execution/PipelineExecutor.d.ts +1 -1
  7. package/esm/typings/src/execution/createPipelineExecutor.d.ts +3 -3
  8. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +1 -1
  9. package/esm/typings/src/prepare/isPipelinePrepared.d.ts +3 -3
  10. package/esm/typings/src/prepare/preparePipeline.d.ts +1 -1
  11. package/esm/typings/src/prepare/prepareTemplates.d.ts +1 -1
  12. package/esm/typings/src/types/ModelRequirements.d.ts +1 -1
  13. package/esm/typings/src/utils/extractParameterNames.d.ts +0 -3
  14. package/package.json +2 -2
  15. package/umd/index.umd.js +25 -30
  16. package/umd/index.umd.js.map +1 -1
  17. package/umd/typings/src/collection/constructors/createCollectionFromDirectory.d.ts +0 -1
  18. package/umd/typings/src/conversion/utils/stringifyPipelineJson.d.ts +1 -1
  19. package/umd/typings/src/conversion/validation/validatePipeline.d.ts +6 -6
  20. package/umd/typings/src/execution/PipelineExecutor.d.ts +1 -1
  21. package/umd/typings/src/execution/createPipelineExecutor.d.ts +3 -3
  22. package/umd/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +1 -1
  23. package/umd/typings/src/prepare/isPipelinePrepared.d.ts +3 -3
  24. package/umd/typings/src/prepare/preparePipeline.d.ts +1 -1
  25. package/umd/typings/src/prepare/prepareTemplates.d.ts +1 -1
  26. package/umd/typings/src/types/ModelRequirements.d.ts +1 -1
  27. package/umd/typings/src/utils/extractParameterNames.d.ts +0 -3
package/esm/index.es.js CHANGED
@@ -654,7 +654,7 @@ function forEachAsync(array, options, callbackfunction) {
654
654
  });
655
655
  }
656
656
 
657
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-25",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-25",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0-25",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by 
comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-25",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0-25",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> 
{knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-25",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0-25",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. 
The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-25",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
657
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-26",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-26",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0-26",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by 
comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-26",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0-26",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> 
{knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-26",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0-26",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. 
The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-26",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
658
658
 
659
659
  /**
660
660
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
@@ -732,7 +732,7 @@ function isValidPromptbookVersion(version) {
732
732
  if ( /* version === '1.0.0' || */version === '2.0.0' || version === '3.0.0') {
733
733
  return false;
734
734
  }
735
- // <- TODO: !!!! Check isValidPromptbookVersion against PROMPTBOOK_VERSIONS
735
+ // <- TODO: !!! Check isValidPromptbookVersion against PROMPTBOOK_VERSIONS
736
736
  return true;
737
737
  }
738
738
 
@@ -1072,12 +1072,12 @@ function validatePipeline(pipeline) {
1072
1072
  * > ex port function validatePipeline(promptbook: really_unknown): asserts promptbook is PipelineJson {
1073
1073
  */
1074
1074
  /**
1075
- * TODO: [🧠][🐣] !!!! Validate that all samples match expectations
1076
- * TODO: [🧠][🐣] !!!! Validate that knowledge is valid (non-void)
1077
- * TODO: [🧠][🐣] !!!! Validate that persona can be used only with CHAT variant
1078
- * TODO: !!!! Validate that parameter with reserved name not used RESERVED_PARAMETER_NAMES
1079
- * TODO: !!!! Validate that reserved parameter is not used as joker
1080
- * TODO: [🧠] !!! Validationg not only logic itself but imports around - files and websites and rerefenced pipelines exists
1075
+ * TODO: [🐣] !!!! Validate that all samples match expectations
1076
+ * TODO: [🐣][🐝] !!!! Validate that knowledge is valid (non-void)
1077
+ * TODO: [🐣] !!!! Validate that persona can be used only with CHAT variant
1078
+ * TODO: [🐣] !!!! Validate that parameter with reserved name not used RESERVED_PARAMETER_NAMES
1079
+ * TODO: [🐣] !!!! Validate that reserved parameter is not used as joker
1080
+ * TODO: [🧠] Validation not only logic itself but imports around - files and websites and rerefenced pipelines exists
1081
1081
  * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
1082
1082
  */
1083
1083
 
@@ -1135,9 +1135,6 @@ function extractParameterNames(template) {
1135
1135
  }
1136
1136
  return parameterNames;
1137
1137
  }
1138
- /**
1139
- * TODO: !!!!! Rename to extractParameterNames
1140
- */
1141
1138
 
1142
1139
  /**
1143
1140
  * Unprepare just strips the preparation data of the pipeline
@@ -1778,7 +1775,7 @@ var ExpectError = /** @class */ (function (_super) {
1778
1775
  /**
1779
1776
  * Function isValidJsonString will tell you if the string is valid JSON or not
1780
1777
  */
1781
- function isValidJsonString(value /* <-[👨‍⚖️] */) {
1778
+ function isValidJsonString(value /* <- [👨‍⚖️] */) {
1782
1779
  try {
1783
1780
  JSON.parse(value);
1784
1781
  return true;
@@ -2062,9 +2059,9 @@ function isPipelinePrepared(pipeline) {
2062
2059
  * TODO: [🔼] Export via core or utils
2063
2060
  * TODO: [🧊] Pipeline can be partially prepared, this should return true ONLY if fully prepared
2064
2061
  * TODO: [🧿] Maybe do same process with same granularity and subfinctions as `preparePipeline`
2065
- * - Is context in each template
2066
- * - Are samples prepared
2067
- * - Are templates prepared
2062
+ * - [🏍] ? Is context in each template
2063
+ * - [♨] Are samples prepared
2064
+ * - [♨] Are templates prepared
2068
2065
  */
2069
2066
 
2070
2067
  /**
@@ -2271,7 +2268,7 @@ function union() {
2271
2268
  /**
2272
2269
  * The version of the Promptbook library
2273
2270
  */
2274
- var PROMPTBOOK_VERSION = '0.61.0-25';
2271
+ var PROMPTBOOK_VERSION = '0.61.0-26';
2275
2272
  // TODO: !!!! List here all the versions and annotate + put into script
2276
2273
 
2277
2274
  /**
@@ -2401,7 +2398,7 @@ function createPipelineExecutor(options) {
2401
2398
  pipeline = rawPipeline;
2402
2399
  }
2403
2400
  else {
2404
- // TODO: !!!! This should be maybe warning in report
2401
+ // TODO: !!!!! This should be maybe warning in report
2405
2402
  console.warn(spaceTrim$1("\n Pipeline ".concat(rawPipeline.pipelineUrl || rawPipeline.sourceFile || rawPipeline.title, " is not prepared\n\n ").concat(rawPipeline.sourceFile, "\n\n It will be prepared ad-hoc before the first execution\n But it is recommended to prepare the pipeline during collection preparation\n\n @see more at https://ptbk.io/prepare-pipeline\n ")));
2406
2403
  }
2407
2404
  var pipelineExecutor = function (inputParameters, onProgress) { return __awaiter(_this, void 0, void 0, function () {
@@ -2411,7 +2408,7 @@ function createPipelineExecutor(options) {
2411
2408
  return __awaiter(this, void 0, void 0, function () {
2412
2409
  return __generator(this, function (_a) {
2413
2410
  TODO_USE(template);
2414
- return [2 /*return*/, RESERVED_PARAMETER_MISSING_VALUE /* <- TODO: !!!! Implement */];
2411
+ return [2 /*return*/, RESERVED_PARAMETER_MISSING_VALUE /* <- TODO: [🏍] Implement */];
2415
2412
  });
2416
2413
  });
2417
2414
  }
@@ -2419,7 +2416,7 @@ function createPipelineExecutor(options) {
2419
2416
  template) {
2420
2417
  return __awaiter(this, void 0, void 0, function () {
2421
2418
  return __generator(this, function (_a) {
2422
- // TODO: !!!! Implement Better - use real index and keyword search
2419
+ // TODO: [♨] Implement Better - use real index and keyword search
2423
2420
  TODO_USE(template);
2424
2421
  return [2 /*return*/, pipeline.knowledgePieces.map(function (_a) {
2425
2422
  var content = _a.content;
@@ -2432,9 +2429,9 @@ function createPipelineExecutor(options) {
2432
2429
  template) {
2433
2430
  return __awaiter(this, void 0, void 0, function () {
2434
2431
  return __generator(this, function (_a) {
2435
- // TODO: !!!! Implement Better - use real index and keyword search
2432
+ // TODO: [♨] Implement Better - use real index and keyword search
2436
2433
  TODO_USE(template);
2437
- return [2 /*return*/, RESERVED_PARAMETER_MISSING_VALUE /* <- TODO: !!!! Implement */];
2434
+ return [2 /*return*/, RESERVED_PARAMETER_MISSING_VALUE /* <- TODO: [♨] Implement */];
2438
2435
  });
2439
2436
  });
2440
2437
  }
@@ -3160,8 +3157,8 @@ function createPipelineExecutor(options) {
3160
3157
  return pipelineExecutor;
3161
3158
  }
3162
3159
  /**
3163
- * TODO: !!!! return `preparedPipeline` from execution
3164
- * TODO: !!!! `isNotPreparedWarningSupressed`
3160
+ * TODO: !!!!! return `preparedPipeline` from execution
3161
+ * TODO: !!!!! `isNotPreparedWarningSupressed`
3165
3162
  * TODO: Use isVerbose here (not only pass to `preparePipeline`)
3166
3163
  * TODO: [🪂] Use maxParallelCount here (not only pass to `preparePipeline`)
3167
3164
  * TODO: [♈] Probbably move expectations from templates to parameters
@@ -3170,7 +3167,7 @@ function createPipelineExecutor(options) {
3170
3167
  * Note: CreatePipelineExecutorOptions are just connected to PipelineExecutor so do not extract to types folder
3171
3168
  * TODO: [🧠][3] transparent = (report intermediate parameters) / opaque execution = (report only output parameters) progress reporting mode
3172
3169
  * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
3173
- * TODO: [💷] !!!! `assertsExecutionSuccessful` should be the method of `PipelineExecutor` result
3170
+ * TODO: [🧠][💷] `assertsExecutionSuccessful` should be the method of `PipelineExecutor` result BUT maybe NOT to preserve pure JSON object
3174
3171
  */
3175
3172
 
3176
3173
  /**
@@ -3446,7 +3443,7 @@ function prepareTemplates(pipeline, options) {
3446
3443
  case 0:
3447
3444
  _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? MAX_PARALLEL_COUNT : _a;
3448
3445
  promptTemplates = pipeline.promptTemplates, parameters = pipeline.parameters, knowledgePiecesCount = pipeline.knowledgePiecesCount;
3449
- // TODO: !!!! Apply samples to each template (if missing and is for the template defined)
3446
+ // TODO: !!!!!! Apply samples to each template (if missing and is for the template defined)
3450
3447
  TODO_USE(parameters);
3451
3448
  promptTemplatesPrepared = new Array(promptTemplates.length);
3452
3449
  return [4 /*yield*/, forEachAsync(promptTemplates, { maxParallelCount: maxParallelCount /* <- TODO: [🪂] When there are subtasks, this maximul limit can be broken */ }, function (template, index) { return __awaiter(_this, void 0, void 0, function () {
@@ -3476,7 +3473,7 @@ function prepareTemplates(pipeline, options) {
3476
3473
  /**
3477
3474
  * TODO: [🧠] Add context to each template (if missing)
3478
3475
  * TODO: [🧠] What is better name `prepareTemplate` or `prepareTemplateAndParameters`
3479
- * TODO: !!!!! Index the samples and maybe templates
3476
+ * TODO: [♨] !!! Prepare index the samples and maybe templates
3480
3477
  * TODO: [🔼] !!! Export via `@promptbook/core`
3481
3478
  * TODO: Write tests for `preparePipeline`
3482
3479
  * TODO: [🏏] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
@@ -3555,7 +3552,7 @@ function preparePipeline(pipeline, options) {
3555
3552
  * TODO: Write tests for `preparePipeline`
3556
3553
  * TODO: [🏏] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
3557
3554
  * TODO: [🧊] In future one preparation can take data from previous preparation and save tokens and time
3558
- * TODO: [🎐] !!!! Use here countTotalUsage
3555
+ * TODO: [🎐] !!!!!! Use here countTotalUsage
3559
3556
  * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
3560
3557
  */
3561
3558
 
@@ -5884,7 +5881,6 @@ function listAllFiles(path, isRecursive) {
5884
5881
  });
5885
5882
  }
5886
5883
  /**
5887
- * TODO: !!!! [🧠] Library precompilation and do not mix markdown and json pipelines
5888
5884
  * Note: [🟢] This code should never be published outside of `@pipeline/node`
5889
5885
  */
5890
5886
 
@@ -6235,7 +6231,7 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
6235
6231
  return AnthropicClaudeExecutionTools;
6236
6232
  }());
6237
6233
  /**
6238
- * TODO: !!!! [🍆] JSON mode
6234
+ * TODO: [🍆] JSON mode
6239
6235
  * TODO: [🧠] Maybe handle errors via transformAnthropicError (like transformAzureError)
6240
6236
  * TODO: Maybe Create some common util for callChatModel and callCompletionModel
6241
6237
  * TODO: Maybe make custom OpenaiError
@@ -6826,7 +6822,6 @@ var OpenAiExecutionTools = /** @class */ (function () {
6826
6822
  rawRequest = {
6827
6823
  input: replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName })),
6828
6824
  model: modelName,
6829
- // TODO: !!!! Test model 3 and dimensions
6830
6825
  };
6831
6826
  start = getCurrentIsoDate();
6832
6827
  if (this.options.isVerbose) {