@promptbook/cli 0.69.0-14 → 0.69.0-16

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29) hide show
  1. package/README.md +4 -0
  2. package/esm/index.es.js +91 -49
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/promptbook-collection/index.d.ts +0 -3
  5. package/esm/typings/src/cli/cli-commands/make.d.ts +1 -1
  6. package/esm/typings/src/collection/constructors/createCollectionFromUrl.d.ts +1 -1
  7. package/esm/typings/src/commands/FOREACH/ForeachCommand.d.ts +1 -1
  8. package/esm/typings/src/commands/FOREACH/foreachCommandParser.d.ts +0 -1
  9. package/esm/typings/src/config.d.ts +2 -2
  10. package/esm/typings/src/conversion/pipelineStringToJsonSync.d.ts +1 -1
  11. package/esm/typings/src/conversion/validation/validatePipeline.d.ts +6 -6
  12. package/esm/typings/src/knowledge/prepare-knowledge/markdown/prepareKnowledgeFromMarkdown.d.ts +1 -1
  13. package/esm/typings/src/knowledge/prepare-knowledge/pdf/prepareKnowledgeFromPdf.d.ts +1 -1
  14. package/esm/typings/src/llm-providers/_common/utils/cache/CacheItem.d.ts +1 -1
  15. package/esm/typings/src/llm-providers/anthropic-claude/anthropic-claude-models.d.ts +1 -1
  16. package/esm/typings/src/llm-providers/anthropic-claude/createAnthropicClaudeExecutionTools.d.ts +2 -2
  17. package/esm/typings/src/llm-providers/anthropic-claude/playground/playground.d.ts +2 -2
  18. package/esm/typings/src/llm-providers/openai/playground/playground.d.ts +1 -1
  19. package/esm/typings/src/personas/preparePersona.d.ts +1 -1
  20. package/esm/typings/src/prepare/isPipelinePrepared.d.ts +1 -1
  21. package/esm/typings/src/prepare/prepareTemplates.d.ts +1 -1
  22. package/esm/typings/src/types/PipelineJson/PipelineJson.d.ts +1 -1
  23. package/esm/typings/src/types/typeAliases.d.ts +1 -1
  24. package/esm/typings/src/utils/serialization/checkSerializableAsJson.d.ts +1 -1
  25. package/esm/typings/src/utils/serialization/isSerializableAsJson.d.ts +1 -1
  26. package/package.json +1 -1
  27. package/umd/index.umd.js +91 -49
  28. package/umd/index.umd.js.map +1 -1
  29. /package/esm/typings/src/{personas/preparePersona.test.d.ts → collection/constructors/createCollectionFromDirectory.test.d.ts} +0 -0
package/README.md CHANGED
@@ -103,6 +103,10 @@ This will prettify all promptbooks in `promptbook` directory and adds Mermaid gr
103
103
 
104
104
  Rest of the documentation is common for **entire promptbook ecosystem**:
105
105
 
106
+ # ✨ New Features
107
+
108
+ - ✨ **Support [OpenAI o1 model](https://openai.com/o1/)**
109
+
106
110
  ## 🤍 The Promptbook Whitepaper
107
111
 
108
112
 
package/esm/index.es.js CHANGED
@@ -21,8 +21,8 @@ import OpenAI from 'openai';
21
21
  /**
22
22
  * The version of the Promptbook library
23
23
  */
24
- var PROMPTBOOK_VERSION = '0.69.0-13';
25
- // TODO: !!!! List here all the versions and annotate + put into script
24
+ var PROMPTBOOK_VERSION = '0.69.0-15';
25
+ // TODO:[main] !!!! List here all the versions and annotate + put into script
26
26
 
27
27
  /*! *****************************************************************************
28
28
  Copyright (c) Microsoft Corporation.
@@ -357,7 +357,7 @@ function checkSerializableAsJson(name, value) {
357
357
  }
358
358
  /**
359
359
  * TODO: [🧠][🛣] More elegant way to tracking than passing `name`
360
- * TODO: [🧠] !!! In-memory cache of same values to prevent multiple checks
360
+ * TODO: [🧠][main] !!! In-memory cache of same values to prevent multiple checks
361
361
  * Note: [🐠] This is how `checkSerializableAsJson` + `isSerializableAsJson` together can just return true/false or rich error message
362
362
  */
363
363
 
@@ -682,7 +682,7 @@ function pipelineJsonToString(pipelineJson) {
682
682
  commands.push("PIPELINE URL ".concat(pipelineUrl));
683
683
  }
684
684
  commands.push("PROMPTBOOK VERSION ".concat(promptbookVersion));
685
- // TODO: !!! This increase size of the bundle and is probbably not necessary
685
+ // TODO:[main] !!! This increases the size of the bundle and is probably not necessary
686
686
  pipelineString = prettifyMarkdown(pipelineString);
687
687
  try {
688
688
  for (var _g = __values(parameters.filter(function (_a) {
@@ -830,12 +830,12 @@ function pipelineJsonToString(pipelineJson) {
830
830
  pipelineString += '```' + contentLanguage;
831
831
  pipelineString += '\n';
832
832
  pipelineString += spaceTrim$1(content);
833
- // <- TODO: !!! Escape
833
+ // <- TODO:[main] !!! Escape
834
834
  // <- TODO: [🧠] Some clear strategy how to spaceTrim the blocks
835
835
  pipelineString += '\n';
836
836
  pipelineString += '```';
837
837
  pipelineString += '\n\n';
838
- pipelineString += "`-> {".concat(resultingParameterName, "}`"); // <- TODO: !!! If the parameter here has description, add it and use templateParameterJsonToString
838
+ pipelineString += "`-> {".concat(resultingParameterName, "}`"); // <- TODO:[main] !!! If the parameter here has description, add it and use templateParameterJsonToString
839
839
  }
840
840
  }
841
841
  catch (e_3_1) { e_3 = { error: e_3_1 }; }
@@ -1062,7 +1062,7 @@ function forEachAsync(array, options, callbackfunction) {
1062
1062
  });
1063
1063
  }
1064
1064
 
1065
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.69.0-13",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.69.0-13",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> 
{knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.69.0-13",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.69.0-13",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## 
Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
1065
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> 
{knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the 
JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
1066
1066
 
1067
1067
  /**
1068
1068
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
@@ -1137,7 +1137,7 @@ function isValidPromptbookVersion(version) {
1137
1137
  if ( /* version === '1.0.0' || */version === '2.0.0' || version === '3.0.0') {
1138
1138
  return false;
1139
1139
  }
1140
- // <- TODO: !!! Check isValidPromptbookVersion against PROMPTBOOK_VERSIONS
1140
+ // <- TODO:[main] !!! Check isValidPromptbookVersion against PROMPTBOOK_VERSIONS
1141
1141
  return true;
1142
1142
  }
1143
1143
 
@@ -1306,7 +1306,7 @@ function validatePipelineCore(pipeline) {
1306
1306
  // <- Note: [🚲]
1307
1307
  throw new PipelineLogicError(spaceTrim(function (block) { return "\n Invalid promptbook URL \"".concat(pipeline.pipelineUrl, "\"\n\n ").concat(block(pipelineIdentification), "\n "); }));
1308
1308
  }
1309
- if (!isValidPromptbookVersion(pipeline.promptbookVersion)) {
1309
+ if (pipeline.promptbookVersion !== undefined && !isValidPromptbookVersion(pipeline.promptbookVersion)) {
1310
1310
  // <- Note: [🚲]
1311
1311
  throw new PipelineLogicError(spaceTrim(function (block) { return "\n Invalid Promptbook Version \"".concat(pipeline.promptbookVersion, "\"\n\n ").concat(block(pipelineIdentification), "\n "); }));
1312
1312
  }
@@ -1489,7 +1489,7 @@ function validatePipelineCore(pipeline) {
1489
1489
  }
1490
1490
  }
1491
1491
  /**
1492
- * TODO: !!!!!! [🧞‍♀️] Do not allow joker + foreach
1492
+ * TODO: !!!!! [🧞‍♀️] Do not allow joker + foreach
1493
1493
  * TODO: [🧠] Work with promptbookVersion
1494
1494
  * TODO: Use here some json-schema, Zod or something similar and change it to:
1495
1495
  * > /**
@@ -1501,11 +1501,11 @@ function validatePipelineCore(pipeline) {
1501
1501
  * > ex port function validatePipeline(promptbook: really_unknown): asserts promptbook is PipelineJson {
1502
1502
  */
1503
1503
  /**
1504
- * TODO: [🐣] !!!! Validate that all samples match expectations
1505
- * TODO: [🐣][🐝] !!!! Validate that knowledge is valid (non-void)
1506
- * TODO: [🐣] !!!! Validate that persona can be used only with CHAT variant
1507
- * TODO: [🐣] !!!! Validate that parameter with reserved name not used RESERVED_PARAMETER_NAMES
1508
- * TODO: [🐣] !!!! Validate that reserved parameter is not used as joker
1504
+ * TODO: [🐣][main] !!!! Validate that all samples match expectations
1505
+ * TODO: [🐣][🐝][main] !!!! Validate that knowledge is valid (non-void)
1506
+ * TODO: [🐣][main] !!!! Validate that persona can be used only with CHAT variant
1507
+ * TODO: [🐣][main] !!!! Validate that parameter with reserved name not used RESERVED_PARAMETER_NAMES
1508
+ * TODO: [🐣][main] !!!! Validate that reserved parameter is not used as joker
1509
1509
  * TODO: [🧠] Validation not only logic itself but imports around - files and websites and referenced pipelines exist
1510
1510
  * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
1511
1511
  */
@@ -2233,7 +2233,7 @@ function isPipelinePrepared(pipeline) {
2233
2233
  return true;
2234
2234
  }
2235
2235
  /**
2236
- * TODO: [🔃] !!!!! If the pipeline was prepared with different version or different set of models, prepare it once again
2236
+ * TODO: [🔃][main] !!!!! If the pipeline was prepared with different version or different set of models, prepare it once again
2237
2237
  * TODO: [🐠] Maybe base this on `makeValidator`
2238
2238
  * TODO: [🧊] Pipeline can be partially prepared, this should return true ONLY if fully prepared
2239
2239
  * TODO: [🧿] Maybe do same process with same granularity and subfunctions as `preparePipeline`
@@ -2285,9 +2285,10 @@ var MultipleLlmExecutionTools = /** @class */ (function () {
2285
2285
  });
2286
2286
  Object.defineProperty(MultipleLlmExecutionTools.prototype, "description", {
2287
2287
  get: function () {
2288
- return this.llmExecutionTools
2289
- .map(function (tools, index) { return "".concat(index + 1, ") ").concat(tools.title, " ").concat(tools.description || ''); })
2290
- .join('\n');
2288
+ return this.llmExecutionTools.map(function (_a, index) {
2289
+ var title = _a.title;
2290
+ return "".concat(index + 1, ") `").concat(title, "`");
2291
+ }).join('\n');
2291
2292
  },
2292
2293
  enumerable: false,
2293
2294
  configurable: true
@@ -2485,7 +2486,7 @@ var MultipleLlmExecutionTools = /** @class */ (function () {
2485
2486
  throw new PipelineExecutionError("You have not provided any `LlmExecutionTools`");
2486
2487
  }
2487
2488
  else {
2488
- throw new PipelineExecutionError(spaceTrim$1(function (block) { return "\n You have not provided any `LlmExecutionTools` that support model variant \"".concat(prompt.modelRequirements.modelVariant, "\"\n\n Available `LlmExecutionTools`:\n ").concat(block(_this.llmExecutionTools.map(function (tools) { return "- ".concat(tools.title); }).join('\n')), "\n\n "); }));
2489
+ throw new PipelineExecutionError(spaceTrim$1(function (block) { return "\n You have not provided any `LlmExecutionTools` that support model variant \"".concat(prompt.modelRequirements.modelVariant, "\"\n\n Available `LlmExecutionTools`:\n ").concat(block(_this.description), "\n\n "); }));
2489
2490
  }
2490
2491
  }
2491
2492
  });
@@ -4643,7 +4644,7 @@ function prepareKnowledgeFromMarkdown(knowledgeContent /* <- TODO: [🖖] (?mayb
4643
4644
  outputParameters = result.outputParameters;
4644
4645
  knowledgePiecesRaw = outputParameters.knowledgePieces;
4645
4646
  knowledgeTextPieces = (knowledgePiecesRaw || '').split('\n---\n');
4646
- // <- TODO: !!!!! Smarter split and filter out empty pieces
4647
+ // <- TODO:[main] !!!!! Smarter split and filter out empty pieces
4647
4648
  if (isVerbose) {
4648
4649
  console.info('knowledgeTextPieces:', knowledgeTextPieces);
4649
4650
  }
@@ -4728,7 +4729,7 @@ function prepareKnowledgeFromMarkdown(knowledgeContent /* <- TODO: [🖖] (?mayb
4728
4729
  });
4729
4730
  }
4730
4731
  /**
4731
- * TODO: [🐝][🔼] !!! Export via `@promptbook/markdown`
4732
+ * TODO: [🐝][🔼][main] !!! Export via `@promptbook/markdown`
4732
4733
  * TODO: [🪂] Do it in parallel 11:11
4733
4734
  * Note: No need to aggregate usage here, it is done by intercepting the llmTools
4734
4735
  */
@@ -4752,7 +4753,7 @@ function prepareKnowledgePieces(knowledgeSources, options) {
4752
4753
  var partialPieces, pieces;
4753
4754
  return __generator(this, function (_a) {
4754
4755
  switch (_a.label) {
4755
- case 0: return [4 /*yield*/, prepareKnowledgeFromMarkdown(knowledgeSource.sourceContent, // <- TODO: [🐝] !!! Unhardcode markdown, detect which type it is - BE AWARE of big package size
4756
+ case 0: return [4 /*yield*/, prepareKnowledgeFromMarkdown(knowledgeSource.sourceContent, // <- TODO: [🐝][main] !!! Unhardcode markdown, detect which type it is - BE AWARE of big package size
4756
4757
  options)];
4757
4758
  case 1:
4758
4759
  partialPieces = _a.sent();
@@ -4944,7 +4945,7 @@ function preparePersona(personaDescription, options) {
4944
4945
  });
4945
4946
  }
4946
4947
  /**
4947
- * TODO: [🔃] !!!!! If the persona was prepared with different version or different set of models, prepare it once again
4948
+ * TODO: [🔃][main] !!!!! If the persona was prepared with different version or different set of models, prepare it once again
4948
4949
  * TODO: [🏢] !! Check validity of `modelName` in pipeline
4949
4950
  * TODO: [🏢] !! Check validity of `systemMessage` in pipeline
4950
4951
  * TODO: [🏢] !! Check validity of `temperature` in pipeline
@@ -4993,7 +4994,7 @@ function prepareTemplates(pipeline, options) {
4993
4994
  case 0:
4994
4995
  _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? MAX_PARALLEL_COUNT : _a;
4995
4996
  templates = pipeline.templates, parameters = pipeline.parameters, knowledgePiecesCount = pipeline.knowledgePiecesCount;
4996
- // TODO: !!!!! Apply samples to each template (if missing and is for the template defined)
4997
+ // TODO:[main] !!!!! Apply samples to each template (if missing and is for the template defined)
4997
4998
  TODO_USE(parameters);
4998
4999
  templatesPrepared = new Array(
4999
5000
  // <- TODO: [🧱] Implement in a functional (not new Class) way
@@ -5025,7 +5026,7 @@ function prepareTemplates(pipeline, options) {
5025
5026
  /**
5026
5027
  * TODO: [🧠] Add context to each template (if missing)
5027
5028
  * TODO: [🧠] What is better name `prepareTemplate` or `prepareTemplateAndParameters`
5028
- * TODO: [♨] !!! Prepare index the samples and maybe templates
5029
+ * TODO: [♨][main] !!! Prepare index the samples and maybe templates
5029
5030
  * TODO: Write tests for `preparePipeline`
5030
5031
  * TODO: [🏏] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
5031
5032
  * TODO: [🧊] In future one preparation can take data from previous preparation and save tokens and time
@@ -5197,7 +5198,7 @@ var knowledgeCommandParser = {
5197
5198
  if (sourceContent === '') {
5198
5199
  throw new ParseError("Source is not defined");
5199
5200
  }
5200
- // TODO: !!!! Following checks should be applied every link in the `sourceContent`
5201
+ // TODO:[main] !!!! Following checks should be applied every link in the `sourceContent`
5201
5202
  if (sourceContent.startsWith('http://')) {
5202
5203
  throw new ParseError("Source is not secure");
5203
5204
  }
@@ -5400,7 +5401,7 @@ var templateCommandParser = {
5400
5401
  if (command.templateType === 'KNOWLEDGE') {
5401
5402
  knowledgeCommandParser.$applyToPipelineJson({
5402
5403
  type: 'KNOWLEDGE',
5403
- sourceContent: $templateJson.content, // <- TODO: [🐝] !!! Work with KNOWLEDGE which not referring to the source file or website, but its content itself
5404
+ sourceContent: $templateJson.content, // <- TODO: [🐝][main] !!! Work with KNOWLEDGE which not referring to the source file or website, but its content itself
5404
5405
  }, $pipelineJson);
5405
5406
  $templateJson.isTemplate = false;
5406
5407
  return;
@@ -5918,7 +5919,7 @@ var foreachCommandParser = {
5918
5919
  /**
5919
5920
  * Link to discussion
5920
5921
  */
5921
- documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/@@',
5922
+ documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/148',
5922
5923
  /**
5923
5924
  * Example usages of the FOREACH command
5924
5925
  */
@@ -5926,7 +5927,6 @@ var foreachCommandParser = {
5926
5927
  'FOREACH Text Line `{customers}` -> `{customer}`',
5927
5928
  'FOR Csv Row `{customers}` -> `{firstName}`, `{lastName}`',
5928
5929
  'EACH Csv Cell `{customers}` -> `{cell}`',
5929
- // <- TODO: [🍭] !!!!!! More
5930
5930
  ],
5931
5931
  /**
5932
5932
  * Parses the FOREACH command
@@ -5941,7 +5941,6 @@ var foreachCommandParser = {
5941
5941
  return __spreadArray([formatDefinition.formatName], __read((formatDefinition.aliases || [])), false).includes(formatName);
5942
5942
  });
5943
5943
  if (formatDefinition === undefined) {
5944
- console.info({ args: args, formatName: formatName });
5945
5944
  throw new ParseError(spaceTrim$1(function (block) { return "\n Unsupported format \"".concat(formatName, "\"\n\n Available formats:\n ").concat(block(FORMAT_DEFINITIONS.map(function (formatDefinition) { return formatDefinition.formatName; })
5946
5945
  .map(function (formatName) { return "- ".concat(formatName); })
5947
5946
  .join('\n')), "\n "); }));
@@ -5951,7 +5950,6 @@ var foreachCommandParser = {
5951
5950
  return __spreadArray([subvalueDefinition.subvalueName], __read((subvalueDefinition.aliases || [])), false).includes(cellName);
5952
5951
  });
5953
5952
  if (subvalueDefinition === undefined) {
5954
- console.info({ args: args, cellName: cellName });
5955
5953
  throw new ParseError(spaceTrim$1(function (block) { return "\n Unsupported cell name \"".concat(cellName, "\" for format \"").concat(formatName, "\"\n\n Available cell names for format \"").concat(formatDefinition.formatName, "\":\n ").concat(block(formatDefinition.subvalueDefinitions
5956
5954
  .map(function (subvalueDefinition) { return subvalueDefinition.subvalueName; })
5957
5955
  .map(function (subvalueName) { return "- ".concat(subvalueName); })
@@ -5959,14 +5957,12 @@ var foreachCommandParser = {
5959
5957
  // <- TODO: [🏢] List all supported cell names for the format
5960
5958
  }
5961
5959
  if (assignSign !== '->') {
5962
- console.info({ args: args, assignSign: assignSign });
5963
5960
  throw new ParseError("FOREACH command must have '->' to assign the value to the parameter");
5964
5961
  }
5965
- // TODO: !!!!!! Replace with propper parameter name validation `validateParameterName`
5962
+ // TODO: !!! Replace with proper parameter name validation `validateParameterName`
5966
5963
  if ((parameterNameWrapped === null || parameterNameWrapped === void 0 ? void 0 : parameterNameWrapped.substring(0, 1)) !== '{' ||
5967
5964
  (parameterNameWrapped === null || parameterNameWrapped === void 0 ? void 0 : parameterNameWrapped.substring(parameterNameWrapped.length - 1, parameterNameWrapped.length)) !== '}') {
5968
- console.info({ args: args, parameterNameWrapped: parameterNameWrapped }, parameterNameWrapped === null || parameterNameWrapped === void 0 ? void 0 : parameterNameWrapped.substring(0, 1), parameterNameWrapped === null || parameterNameWrapped === void 0 ? void 0 : parameterNameWrapped.substring(parameterNameWrapped.length - 1, parameterNameWrapped.length));
5969
- throw new ParseError("!!!!!! 1 Here will be error (with rules and precise error) from validateParameterName");
5965
+ throw new ParseError("Invalid parameter name \"".concat(parameterNameWrapped, "\" - must be wrapped in curly brackets: {parameterName}"));
5970
5966
  }
5971
5967
  var parameterName = parameterNameWrapped.substring(1, parameterNameWrapped.length - 1);
5972
5968
  var subparameterNames = args
@@ -5995,7 +5991,7 @@ var foreachCommandParser = {
5995
5991
  // TODO: !!!!!! Detect double use
5996
5992
  // TODO: !!!!!! Detect usage with JOKER and don't allow it
5997
5993
  $templateJson.foreach = { formatName: formatName, cellName: cellName, parameterName: parameterName, subparameterNames: subparameterNames };
5998
- keepUnused($pipelineJson); // <- TODO: !!!!!! BUT Maybe register subparameter from foreach into parameters of the pipeline
5994
+ keepUnused($pipelineJson); // <- TODO: [🧠] Maybe register subparameter from foreach into parameters of the pipeline
5999
5995
  // Note: [🍭] FOREACH apply has some side effects in different places in codebase
6000
5996
  },
6001
5997
  /**
@@ -6018,7 +6014,6 @@ var foreachCommandParser = {
6018
6014
  },
6019
6015
  };
6020
6016
  /**
6021
- * TODO: !!!!!! Remove console logs
6022
6017
  * TODO: [🧠][🦥] Better (less confusing) name for "cell" / "subvalue" / "subparameter"
6023
6018
  * TODO: [🍭] !!!!!! Make .ptbk.md file with examples of the FOREACH command and also with wrong parsing and logic
6024
6019
  */
@@ -6703,6 +6698,7 @@ var promptbookVersionCommandParser = {
6703
6698
  * Note: `$` is used to indicate that this function mutates given `pipelineJson`
6704
6699
  */
6705
6700
  $applyToPipelineJson: function (command, $pipelineJson) {
6701
+ // TODO: Warn if the version is overridden
6706
6702
  $pipelineJson.promptbookVersion = command.promptbookVersion;
6707
6703
  },
6708
6704
  /**
@@ -7523,7 +7519,7 @@ function pipelineStringToJsonSync(pipelineString) {
7523
7519
  var $pipelineJson = {
7524
7520
  title: undefined /* <- Note: [🍙] Putting here placeholder to keep `title` on top at final JSON */,
7525
7521
  pipelineUrl: undefined /* <- Note: Putting here placeholder to keep `pipelineUrl` on top at final JSON */,
7526
- promptbookVersion: PROMPTBOOK_VERSION,
7522
+ promptbookVersion: undefined /* <- Note: By default no explicit version */,
7527
7523
  description: undefined /* <- Note: [🍙] Putting here placeholder to keep `description` on top at final JSON */,
7528
7524
  parameters: [],
7529
7525
  templates: [],
@@ -7814,7 +7810,7 @@ function pipelineStringToJsonSync(pipelineString) {
7814
7810
  return $asDeeplyFrozenSerializableJson('pipelineJson', $pipelineJson);
7815
7811
  }
7816
7812
  /**
7817
- * TODO: !!!! Warn if used only sync version
7813
+ * TODO:[main] !!!! Warn if used only sync version
7818
7814
  * TODO: [🚞] Report here line/column of error
7819
7815
  * TODO: Use spaceTrim more effectively
7820
7816
  * TODO: [🧠] Parameter flags - isInput, isOutput, isInternal
@@ -8319,7 +8315,7 @@ function isSerializableAsJson(value) {
8319
8315
  }
8320
8316
  }
8321
8317
  /**
8322
- * TODO: [🧠] !!! In-memory cache of same values to prevent multiple checks
8318
+ * TODO: [🧠][main] !!! In-memory cache of same values to prevent multiple checks
8323
8319
  * TODO: [🧠][💺] Can be done this on type-level?
8324
8320
  */
8325
8321
 
@@ -9180,7 +9176,7 @@ function initializeMakeCommand(program) {
9180
9176
  });
9181
9177
  }
9182
9178
  /**
9183
- * TODO: [🥃] !!! Allow `ptbk make` without configuring any llm tools
9179
+ * TODO: [🥃][main] !!! Allow `ptbk make` without configuring any llm tools
9184
9180
  * TODO: Maybe remove this command - "about" command should be enough?
9185
9181
  * TODO: [0] DRY Javascript and typescript - Maybe make ONLY typescript and for javascript just remove types
9186
9182
  * Note: [🟡] This code should never be published outside of `@promptbook/cli`
@@ -9598,7 +9594,7 @@ var RemoteLlmExecutionTools = /** @class */ (function () {
9598
9594
  socket.on('connect', function () {
9599
9595
  resolve(socket);
9600
9596
  });
9601
- // TODO: !!!! Better timeout handling
9597
+ // TODO:[main] !!!! Better timeout handling
9602
9598
  setTimeout(function () {
9603
9599
  reject(new Error("Timeout while connecting to ".concat(_this.options.remoteUrl)));
9604
9600
  }, CONNECTION_TIMEOUT_MS);
@@ -9778,11 +9774,11 @@ var ANTHROPIC_CLAUDE_MODELS = $asDeeplyFrozenSerializableJson('ANTHROPIC_CLAUDE_
9778
9774
  output: computeUsage("$2.40 / 1M tokens"),
9779
9775
  },
9780
9776
  },
9781
- // TODO: !!! Claude 1 and 2 has also completion versions - ask Hoagy
9777
+ // TODO:[main] !!! Claude 1 and 2 has also completion versions - ask Hoagy
9782
9778
  ]);
9783
9779
  /**
9784
9780
  * Note: [🤖] Add models of new variant
9785
- * TODO: [🧠] !!! Add embedding models OR Anthropic has only chat+completion models?
9781
+ * TODO: [🧠][main] !!! Add embedding models OR Anthropic has only chat+completion models?
9786
9782
  * TODO: [🧠] Some mechanism to propagate unsureness
9787
9783
  * TODO: [🧠][👮‍♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,...
9788
9784
  * TODO: [🎰] Some mechanism to auto-update available models
@@ -10138,8 +10134,8 @@ var createAnthropicClaudeExecutionTools = Object.assign(function (options) {
10138
10134
  className: 'AnthropicClaudeExecutionTools',
10139
10135
  });
10140
10136
  /**
10141
- * TODO: [🧠] !!!! Make anonymous this with all LLM providers
10142
- * TODO: [🧠][🧱] !!!! Maybe change all `new AnthropicClaudeExecutionTools` -> `createAnthropicClaudeExecutionTools` in manual
10137
+ * TODO: [🧠][main] !!!! Make anonymous this with all LLM providers
10138
+ * TODO: [🧠][🧱][main] !!!! Maybe change all `new AnthropicClaudeExecutionTools` -> `createAnthropicClaudeExecutionTools` in manual
10143
10139
  * TODO: [🧠] Maybe auto-detect usage in browser and determine default value of `isProxied`
10144
10140
  * TODO: [🦺] Is there some way how to put `packageName` and `className` on top and function definition on bottom?
10145
10141
  * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
@@ -10517,6 +10513,7 @@ var OPENAI_MODELS = $asDeeplyFrozenSerializableJson('OPENAI_MODELS', [
10517
10513
  prompt: computeUsage("$5.00 / 1M tokens"),
10518
10514
  output: computeUsage("$15.00 / 1M tokens"),
10519
10515
  },
10516
+ //TODO:[main] !!!!!! Add gpt-4o-mini-2024-07-18 and all others to be up to date
10520
10517
  },
10521
10518
  /**/
10522
10519
  /**/
@@ -10531,6 +10528,51 @@ var OPENAI_MODELS = $asDeeplyFrozenSerializableJson('OPENAI_MODELS', [
10531
10528
  },
10532
10529
  /**/
10533
10530
  /**/
10531
+ {
10532
+ modelVariant: 'CHAT',
10533
+ modelTitle: 'o1-preview',
10534
+ modelName: 'o1-preview',
10535
+ pricing: {
10536
+ prompt: computeUsage("$15.00 / 1M tokens"),
10537
+ output: computeUsage("$60.00 / 1M tokens"),
10538
+ },
10539
+ },
10540
+ /**/
10541
+ /**/
10542
+ {
10543
+ modelVariant: 'CHAT',
10544
+ modelTitle: 'o1-preview-2024-09-12',
10545
+ modelName: 'o1-preview-2024-09-12',
10546
+ // <- TODO:[main] !!!!!! Some better system to organize theese date suffixes and versions
10547
+ pricing: {
10548
+ prompt: computeUsage("$15.00 / 1M tokens"),
10549
+ output: computeUsage("$60.00 / 1M tokens"),
10550
+ },
10551
+ },
10552
+ /**/
10553
+ /**/
10554
+ {
10555
+ modelVariant: 'CHAT',
10556
+ modelTitle: 'o1-mini',
10557
+ modelName: 'o1-mini',
10558
+ pricing: {
10559
+ prompt: computeUsage("$3.00 / 1M tokens"),
10560
+ output: computeUsage("$12.00 / 1M tokens"),
10561
+ },
10562
+ },
10563
+ /**/
10564
+ /**/
10565
+ {
10566
+ modelVariant: 'CHAT',
10567
+ modelTitle: 'o1-mini-2024-09-12',
10568
+ modelName: 'o1-mini-2024-09-12',
10569
+ pricing: {
10570
+ prompt: computeUsage("$3.00 / 1M tokens"),
10571
+ output: computeUsage("$12.00 / 1M tokens"),
10572
+ },
10573
+ },
10574
+ /**/
10575
+ /**/
10534
10576
  {
10535
10577
  modelVariant: 'CHAT',
10536
10578
  modelTitle: 'gpt-3.5-turbo-16k-0613',
@@ -10619,7 +10661,7 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
10619
10661
  AzureOpenAiExecutionTools.prototype.listModels = function () {
10620
10662
  return __awaiter(this, void 0, void 0, function () {
10621
10663
  return __generator(this, function (_a) {
10622
- // TODO: !!! Do here some filtering which models are really available as deployment
10664
+ // TODO:[main] !!! Do here some filtering which models are really available as deployment
10623
10665
  // @see https://management.azure.com/subscriptions/subscriptionId/resourceGroups/resourceGroupName/providers/Microsoft.CognitiveServices/accounts/accountName/deployments?api-version=2023-05-01
10624
10666
  return [2 /*return*/, OPENAI_MODELS.map(function (_a) {
10625
10667
  var modelTitle = _a.modelTitle, modelName = _a.modelName, modelVariant = _a.modelVariant;
@@ -11310,7 +11352,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
11310
11352
  * @public exported from `@promptbook/openai`
11311
11353
  */
11312
11354
  var createOpenAiExecutionTools = Object.assign(function (options) {
11313
- // TODO: [🧠] !!!! If browser, auto add `dangerouslyAllowBrowser`
11355
+ // TODO: [🧠][main] !!!! If browser, auto add `dangerouslyAllowBrowser`
11314
11356
  if (($isRunningInBrowser() || $isRunningInWebWorker()) && !options.dangerouslyAllowBrowser) {
11315
11357
  options = __assign(__assign({}, options), { dangerouslyAllowBrowser: true });
11316
11358
  }