@promptbook/cli 0.68.3 → 0.68.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31)
  1. package/README.md +4 -0
  2. package/esm/index.es.js +83 -36
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/promptbook-collection/index.d.ts +0 -3
  5. package/esm/typings/src/cli/cli-commands/make.d.ts +1 -1
  6. package/esm/typings/src/collection/constructors/createCollectionFromUrl.d.ts +1 -1
  7. package/esm/typings/src/config.d.ts +2 -2
  8. package/esm/typings/src/conversion/pipelineStringToJsonSync.d.ts +1 -1
  9. package/esm/typings/src/conversion/validation/validatePipeline.d.ts +5 -5
  10. package/esm/typings/src/execution/createPipelineExecutor.d.ts +1 -1
  11. package/esm/typings/src/knowledge/prepare-knowledge/markdown/prepareKnowledgeFromMarkdown.d.ts +1 -1
  12. package/esm/typings/src/knowledge/prepare-knowledge/pdf/prepareKnowledgeFromPdf.d.ts +1 -1
  13. package/esm/typings/src/llm-providers/_common/utils/cache/CacheItem.d.ts +1 -1
  14. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +2 -1
  15. package/esm/typings/src/llm-providers/anthropic-claude/anthropic-claude-models.d.ts +1 -1
  16. package/esm/typings/src/llm-providers/anthropic-claude/createAnthropicClaudeExecutionTools.d.ts +2 -2
  17. package/esm/typings/src/llm-providers/anthropic-claude/playground/playground.d.ts +2 -2
  18. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +2 -1
  19. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +2 -1
  20. package/esm/typings/src/llm-providers/openai/playground/playground.d.ts +1 -1
  21. package/esm/typings/src/personas/preparePersona.d.ts +1 -1
  22. package/esm/typings/src/prepare/isPipelinePrepared.d.ts +1 -1
  23. package/esm/typings/src/prepare/prepareTemplates.d.ts +1 -1
  24. package/esm/typings/src/types/PipelineJson/PipelineJson.d.ts +1 -1
  25. package/esm/typings/src/types/typeAliases.d.ts +1 -1
  26. package/esm/typings/src/utils/serialization/checkSerializableAsJson.d.ts +1 -1
  27. package/esm/typings/src/utils/serialization/isSerializableAsJson.d.ts +1 -1
  28. package/package.json +1 -1
  29. package/umd/index.umd.js +83 -36
  30. package/umd/index.umd.js.map +1 -1
  31. package/esm/typings/src/personas/preparePersona.test.d.ts +0 -1
package/README.md CHANGED
@@ -103,6 +103,10 @@ This will prettify all promptbooks in `promptbook` directory and adds Mermaid gr
103
103
 
104
104
  Rest of the documentation is common for **entire promptbook ecosystem**:
105
105
 
106
+ # ✨ New Features
107
+
108
+ - ✨ **Support [OpenAI o1 model](https://openai.com/o1/)**
109
+
106
110
  ## 🤍 The Promptbook Whitepaper
107
111
 
108
112
 
package/esm/index.es.js CHANGED
@@ -20,8 +20,8 @@ import OpenAI from 'openai';
20
20
  /**
21
21
  * The version of the Promptbook library
22
22
  */
23
- var PROMPTBOOK_VERSION = '0.68.2';
24
- // TODO: !!!! List here all the versions and annotate + put into script
23
+ var PROMPTBOOK_VERSION = '0.68.4';
24
+ // TODO:[main] !!!! List here all the versions and annotate + put into script
25
25
 
26
26
  /*! *****************************************************************************
27
27
  Copyright (c) Microsoft Corporation.
@@ -356,7 +356,7 @@ function checkSerializableAsJson(name, value) {
356
356
  }
357
357
  /**
358
358
  * TODO: [🧠][🛣] More elegant way to tracking than passing `name`
359
- * TODO: [🧠] !!! In-memory cache of same values to prevent multiple checks
359
+ * TODO: [🧠][main] !!! In-memory cache of same values to prevent multiple checks
360
360
  * Note: [🐠] This is how `checkSerializableAsJson` + `isSerializableAsJson` together can just retun true/false or rich error message
361
361
  */
362
362
 
@@ -660,7 +660,7 @@ function pipelineJsonToString(pipelineJson) {
660
660
  commands.push("PIPELINE URL ".concat(pipelineUrl));
661
661
  }
662
662
  commands.push("PROMPTBOOK VERSION ".concat(promptbookVersion));
663
- // TODO: !!! This increase size of the bundle and is probbably not necessary
663
+ // TODO:[main] !!! This increase size of the bundle and is probbably not necessary
664
664
  pipelineString = prettifyMarkdown(pipelineString);
665
665
  try {
666
666
  for (var _g = __values(parameters.filter(function (_a) {
@@ -808,12 +808,12 @@ function pipelineJsonToString(pipelineJson) {
808
808
  pipelineString += '```' + contentLanguage;
809
809
  pipelineString += '\n';
810
810
  pipelineString += spaceTrim$1(content);
811
- // <- TODO: !!! Escape
811
+ // <- TODO:[main] !!! Escape
812
812
  // <- TODO: [🧠] Some clear strategy how to spaceTrim the blocks
813
813
  pipelineString += '\n';
814
814
  pipelineString += '```';
815
815
  pipelineString += '\n\n';
816
- pipelineString += "`-> {".concat(resultingParameterName, "}`"); // <- TODO: !!! If the parameter here has description, add it and use templateParameterJsonToString
816
+ pipelineString += "`-> {".concat(resultingParameterName, "}`"); // <- TODO:[main] !!! If the parameter here has description, add it and use templateParameterJsonToString
817
817
  }
818
818
  }
819
819
  catch (e_3_1) { e_3 = { error: e_3_1 }; }
@@ -1040,7 +1040,7 @@ function forEachAsync(array, options, callbackfunction) {
1040
1040
  });
1041
1041
  }
1042
1042
 
1043
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.68.2",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.68.2",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> 
{knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.68.2",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.68.2",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- 
Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
1043
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> 
{knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the 
JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
1044
1044
 
1045
1045
  /**
1046
1046
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
@@ -1115,7 +1115,7 @@ function isValidPromptbookVersion(version) {
1115
1115
  if ( /* version === '1.0.0' || */version === '2.0.0' || version === '3.0.0') {
1116
1116
  return false;
1117
1117
  }
1118
- // <- TODO: !!! Check isValidPromptbookVersion against PROMPTBOOK_VERSIONS
1118
+ // <- TODO:[main] !!! Check isValidPromptbookVersion against PROMPTBOOK_VERSIONS
1119
1119
  return true;
1120
1120
  }
1121
1121
 
@@ -1264,7 +1264,7 @@ function validatePipeline(pipeline) {
1264
1264
  // <- Note: [🚲]
1265
1265
  throw new PipelineLogicError(spaceTrim(function (block) { return "\n Invalid promptbook URL \"".concat(pipeline.pipelineUrl, "\"\n\n ").concat(block(pipelineIdentification), "\n "); }));
1266
1266
  }
1267
- if (!isValidPromptbookVersion(pipeline.promptbookVersion)) {
1267
+ if (pipeline.promptbookVersion !== undefined && !isValidPromptbookVersion(pipeline.promptbookVersion)) {
1268
1268
  // <- Note: [🚲]
1269
1269
  throw new PipelineLogicError(spaceTrim(function (block) { return "\n Invalid Promptbook Version \"".concat(pipeline.promptbookVersion, "\"\n\n ").concat(block(pipelineIdentification), "\n "); }));
1270
1270
  }
@@ -1459,11 +1459,11 @@ function validatePipeline(pipeline) {
1459
1459
  * > ex port function validatePipeline(promptbook: really_unknown): asserts promptbook is PipelineJson {
1460
1460
  */
1461
1461
  /**
1462
- * TODO: [🐣] !!!! Validate that all samples match expectations
1463
- * TODO: [🐣][🐝] !!!! Validate that knowledge is valid (non-void)
1464
- * TODO: [🐣] !!!! Validate that persona can be used only with CHAT variant
1465
- * TODO: [🐣] !!!! Validate that parameter with reserved name not used RESERVED_PARAMETER_NAMES
1466
- * TODO: [🐣] !!!! Validate that reserved parameter is not used as joker
1462
+ * TODO: [🐣][main] !!!! Validate that all samples match expectations
1463
+ * TODO: [🐣][🐝][main] !!!! Validate that knowledge is valid (non-void)
1464
+ * TODO: [🐣][main] !!!! Validate that persona can be used only with CHAT variant
1465
+ * TODO: [🐣][main] !!!! Validate that parameter with reserved name not used RESERVED_PARAMETER_NAMES
1466
+ * TODO: [🐣][main] !!!! Validate that reserved parameter is not used as joker
1467
1467
  * TODO: [🧠] Validation not only logic itself but imports around - files and websites and rerefenced pipelines exists
1468
1468
  * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
1469
1469
  */
@@ -2748,7 +2748,7 @@ function isPipelinePrepared(pipeline) {
2748
2748
  return true;
2749
2749
  }
2750
2750
  /**
2751
- * TODO: [🔃] !!!!! If the pipeline was prepared with different version or different set of models, prepare it once again
2751
+ * TODO: [🔃][main] !!!!! If the pipeline was prepared with different version or different set of models, prepare it once again
2752
2752
  * TODO: [🐠] Maybe base this on `makeValidator`
2753
2753
  * TODO: [🧊] Pipeline can be partially prepared, this should return true ONLY if fully prepared
2754
2754
  * TODO: [🧿] Maybe do same process with same granularity and subfinctions as `preparePipeline`
@@ -3127,7 +3127,7 @@ function createPipelineExecutor(options) {
3127
3127
  console.warn(spaceTrim(function (block) { return "\n Pipeline is not prepared\n\n ".concat(block(pipelineIdentification), "\n\n It will be prepared ad-hoc before the first execution and **returned as `preparedPipeline` in `PipelineExecutorResult`**\n But it is recommended to prepare the pipeline during collection preparation\n\n @see more at https://ptbk.io/prepare-pipeline\n "); }));
3128
3128
  }
3129
3129
  var pipelineExecutor = function (inputParameters, onProgress) { return __awaiter(_this, void 0, void 0, function () {
3130
- // TODO: !!! Extract to separate functions and files - ALL FUNCTIONS BELOW
3130
+ // TODO:[main] !!! Extract to separate functions and files - ALL FUNCTIONS BELOW
3131
3131
  function getContextForTemplate(template) {
3132
3132
  return __awaiter(this, void 0, void 0, function () {
3133
3133
  return __generator(this, function (_a) {
@@ -3946,7 +3946,7 @@ function createPipelineExecutor(options) {
3946
3946
  return pipelineExecutor;
3947
3947
  }
3948
3948
  /**
3949
- * TODO: !!! Identify not only pipeline BUT exact template ${block(pipelineIdentification)}
3949
+ * TODO:[main] !!! Identify not only pipeline BUT exact template ${block(pipelineIdentification)}
3950
3950
  * TODO: Use isVerbose here (not only pass to `preparePipeline`)
3951
3951
  * TODO: [🧠][🌳] Use here `countTotalUsage` and put preparation and prepared pipiline to report
3952
3952
  * TODO: [🪂] Use maxParallelCount here (not only pass to `preparePipeline`)
@@ -4009,7 +4009,7 @@ function prepareKnowledgeFromMarkdown(knowledgeContent /* <- TODO: [🖖] (?mayb
4009
4009
  outputParameters = result.outputParameters;
4010
4010
  knowledgePiecesRaw = outputParameters.knowledgePieces;
4011
4011
  knowledgeTextPieces = (knowledgePiecesRaw || '').split('\n---\n');
4012
- // <- TODO: !!!!! Smarter split and filter out empty pieces
4012
+ // <- TODO:[main] !!!!! Smarter split and filter out empty pieces
4013
4013
  if (isVerbose) {
4014
4014
  console.info('knowledgeTextPieces:', knowledgeTextPieces);
4015
4015
  }
@@ -4089,7 +4089,7 @@ function prepareKnowledgeFromMarkdown(knowledgeContent /* <- TODO: [🖖] (?mayb
4089
4089
  });
4090
4090
  }
4091
4091
  /**
4092
- * TODO: [🐝][🔼] !!! Export via `@promptbook/markdown`
4092
+ * TODO: [🐝][🔼][main] !!! Export via `@promptbook/markdown`
4093
4093
  * TODO: [🪂] Do it in parallel 11:11
4094
4094
  * Note: No need to aggregate usage here, it is done by intercepting the llmTools
4095
4095
  */
@@ -4113,7 +4113,7 @@ function prepareKnowledgePieces(knowledgeSources, options) {
4113
4113
  var partialPieces, pieces;
4114
4114
  return __generator(this, function (_a) {
4115
4115
  switch (_a.label) {
4116
- case 0: return [4 /*yield*/, prepareKnowledgeFromMarkdown(knowledgeSource.sourceContent, // <- TODO: [🐝] !!! Unhardcode markdown, detect which type it is - BE AWARE of big package size
4116
+ case 0: return [4 /*yield*/, prepareKnowledgeFromMarkdown(knowledgeSource.sourceContent, // <- TODO: [🐝][main] !!! Unhardcode markdown, detect which type it is - BE AWARE of big package size
4117
4117
  options)];
4118
4118
  case 1:
4119
4119
  partialPieces = _a.sent();
@@ -4305,7 +4305,7 @@ function preparePersona(personaDescription, options) {
4305
4305
  });
4306
4306
  }
4307
4307
  /**
4308
- * TODO: [🔃] !!!!! If the persona was prepared with different version or different set of models, prepare it once again
4308
+ * TODO: [🔃][main] !!!!! If the persona was prepared with different version or different set of models, prepare it once again
4309
4309
  * TODO: [🏢] !! Check validity of `modelName` in pipeline
4310
4310
  * TODO: [🏢] !! Check validity of `systemMessage` in pipeline
4311
4311
  * TODO: [🏢] !! Check validity of `temperature` in pipeline
@@ -4354,7 +4354,7 @@ function prepareTemplates(pipeline, options) {
4354
4354
  case 0:
4355
4355
  _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? MAX_PARALLEL_COUNT : _a;
4356
4356
  templates = pipeline.templates, parameters = pipeline.parameters, knowledgePiecesCount = pipeline.knowledgePiecesCount;
4357
- // TODO: !!!!! Apply samples to each template (if missing and is for the template defined)
4357
+ // TODO:[main] !!!!! Apply samples to each template (if missing and is for the template defined)
4358
4358
  TODO_USE(parameters);
4359
4359
  templatesPrepared = new Array(
4360
4360
  // <- TODO: [🧱] Implement in a functional (not new Class) way
@@ -4386,7 +4386,7 @@ function prepareTemplates(pipeline, options) {
4386
4386
  /**
4387
4387
  * TODO: [🧠] Add context to each template (if missing)
4388
4388
  * TODO: [🧠] What is better name `prepareTemplate` or `prepareTemplateAndParameters`
4389
- * TODO: [♨] !!! Prepare index the samples and maybe templates
4389
+ * TODO: [♨][main] !!! Prepare index the samples and maybe templates
4390
4390
  * TODO: Write tests for `preparePipeline`
4391
4391
  * TODO: [🏏] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
4392
4392
  * TODO: [🧊] In future one preparation can take data from previous preparation and save tokens and time
@@ -4558,7 +4558,7 @@ var knowledgeCommandParser = {
4558
4558
  if (sourceContent === '') {
4559
4559
  throw new ParseError("Source is not defined");
4560
4560
  }
4561
- // TODO: !!!! Following checks should be applied every link in the `sourceContent`
4561
+ // TODO:[main] !!!! Following checks should be applied every link in the `sourceContent`
4562
4562
  if (sourceContent.startsWith('http://')) {
4563
4563
  throw new ParseError("Source is not secure");
4564
4564
  }
@@ -4761,7 +4761,7 @@ var templateCommandParser = {
4761
4761
  if (command.templateType === 'KNOWLEDGE') {
4762
4762
  knowledgeCommandParser.$applyToPipelineJson({
4763
4763
  type: 'KNOWLEDGE',
4764
- sourceContent: $templateJson.content, // <- TODO: [🐝] !!! Work with KNOWLEDGE which not referring to the source file or website, but its content itself
4764
+ sourceContent: $templateJson.content, // <- TODO: [🐝][main] !!! Work with KNOWLEDGE which not referring to the source file or website, but its content itself
4765
4765
  }, $pipelineJson);
4766
4766
  $templateJson.isTemplate = false;
4767
4767
  return;
@@ -5803,6 +5803,7 @@ var promptbookVersionCommandParser = {
5803
5803
  * Note: `$` is used to indicate that this function mutates given `pipelineJson`
5804
5804
  */
5805
5805
  $applyToPipelineJson: function (command, $pipelineJson) {
5806
+ // TODO: Warn if the version is overridden
5806
5807
  $pipelineJson.promptbookVersion = command.promptbookVersion;
5807
5808
  },
5808
5809
  /**
@@ -6692,7 +6693,7 @@ function pipelineStringToJsonSync(pipelineString) {
6692
6693
  var $pipelineJson = {
6693
6694
  title: undefined /* <- Note: [🍙] Putting here placeholder to keep `title` on top at final JSON */,
6694
6695
  pipelineUrl: undefined /* <- Note: Putting here placeholder to keep `pipelineUrl` on top at final JSON */,
6695
- promptbookVersion: PROMPTBOOK_VERSION,
6696
+ promptbookVersion: undefined /* <- Note: By default no explicit version */,
6696
6697
  description: undefined /* <- Note: [🍙] Putting here placeholder to keep `description` on top at final JSON */,
6697
6698
  parameters: [],
6698
6699
  templates: [],
@@ -6983,7 +6984,7 @@ function pipelineStringToJsonSync(pipelineString) {
6983
6984
  return $asDeeplyFrozenSerializableJson('pipelineJson', $pipelineJson);
6984
6985
  }
6985
6986
  /**
6986
- * TODO: !!!! Warn if used only sync version
6987
+ * TODO:[main] !!!! Warn if used only sync version
6987
6988
  * TODO: [🚞] Report here line/column of error
6988
6989
  * TODO: Use spaceTrim more effectively
6989
6990
  * TODO: [🧠] Parameter flags - isInput, isOutput, isInternal
@@ -7488,7 +7489,7 @@ function isSerializableAsJson(value) {
7488
7489
  }
7489
7490
  }
7490
7491
  /**
7491
- * TODO: [🧠] !!! In-memory cache of same values to prevent multiple checks
7492
+ * TODO: [🧠][main] !!! In-memory cache of same values to prevent multiple checks
7492
7493
  * TODO: [🧠][💺] Can be done this on type-level?
7493
7494
  */
7494
7495
 
@@ -8329,7 +8330,7 @@ function initializeMakeCommand(program) {
8329
8330
  });
8330
8331
  }
8331
8332
  /**
8332
- * TODO: [🥃] !!! Allow `ptbk make` without configuring any llm tools
8333
+ * TODO: [🥃][main] !!! Allow `ptbk make` without configuring any llm tools
8333
8334
  * TODO: Maybe remove this command - "about" command should be enough?
8334
8335
  * TODO: [0] DRY Javascript and typescript - Maybe make ONLY typescript and for javascript just remove types
8335
8336
  * Note: [🟡] This code should never be published outside of `@promptbook/cli`
@@ -8808,7 +8809,7 @@ var RemoteLlmExecutionTools = /** @class */ (function () {
8808
8809
  socket.on('connect', function () {
8809
8810
  resolve(socket);
8810
8811
  });
8811
- // TODO: !!!! Better timeout handling
8812
+ // TODO:[main] !!!! Better timeout handling
8812
8813
  setTimeout(function () {
8813
8814
  reject(new Error("Timeout while connecting to ".concat(_this.options.remoteUrl)));
8814
8815
  }, CONNECTION_TIMEOUT_MS);
@@ -8988,11 +8989,11 @@ var ANTHROPIC_CLAUDE_MODELS = $asDeeplyFrozenSerializableJson('ANTHROPIC_CLAUDE_
8988
8989
  output: computeUsage("$2.40 / 1M tokens"),
8989
8990
  },
8990
8991
  },
8991
- // TODO: !!! Claude 1 and 2 has also completion versions - ask Hoagy
8992
+ // TODO:[main] !!! Claude 1 and 2 has also completion versions - ask Hoagy
8992
8993
  ]);
8993
8994
  /**
8994
8995
  * Note: [🤖] Add models of new variant
8995
- * TODO: [🧠] !!! Add embedding models OR Anthropic has only chat+completion models?
8996
+ * TODO: [🧠][main] !!! Add embedding models OR Anthropic has only chat+completion models?
8996
8997
  * TODO: [🧠] Some mechanism to propagate unsureness
8997
8998
  * TODO: [🧠][👮‍♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,...
8998
8999
  * TODO: [🎰] Some mechanism to auto-update available models
@@ -9348,8 +9349,8 @@ var createAnthropicClaudeExecutionTools = Object.assign(function (options) {
9348
9349
  className: 'AnthropicClaudeExecutionTools',
9349
9350
  });
9350
9351
  /**
9351
- * TODO: [🧠] !!!! Make anonymous this with all LLM providers
9352
- * TODO: [🧠][🧱] !!!! Maybe change all `new AnthropicClaudeExecutionTools` -> `createAnthropicClaudeExecutionTools` in manual
9352
+ * TODO: [🧠][main] !!!! Make anonymous this with all LLM providers
9353
+ * TODO: [🧠][🧱][main] !!!! Maybe change all `new AnthropicClaudeExecutionTools` -> `createAnthropicClaudeExecutionTools` in manual
9353
9354
  * TODO: [🧠] Maybe auto-detect usage in browser and determine default value of `isProxied`
9354
9355
  * TODO: [🦺] Is there some way how to put `packageName` and `className` on top and function definition on bottom?
9355
9356
  * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
@@ -9727,6 +9728,7 @@ var OPENAI_MODELS = $asDeeplyFrozenSerializableJson('OPENAI_MODELS', [
9727
9728
  prompt: computeUsage("$5.00 / 1M tokens"),
9728
9729
  output: computeUsage("$15.00 / 1M tokens"),
9729
9730
  },
9731
+ //TODO:[main] !!!!!! Add gpt-4o-mini-2024-07-18 and all others to be up to date
9730
9732
  },
9731
9733
  /**/
9732
9734
  /**/
@@ -9741,6 +9743,51 @@ var OPENAI_MODELS = $asDeeplyFrozenSerializableJson('OPENAI_MODELS', [
9741
9743
  },
9742
9744
  /**/
9743
9745
  /**/
9746
+ {
9747
+ modelVariant: 'CHAT',
9748
+ modelTitle: 'o1-preview',
9749
+ modelName: 'o1-preview',
9750
+ pricing: {
9751
+ prompt: computeUsage("$15.00 / 1M tokens"),
9752
+ output: computeUsage("$60.00 / 1M tokens"),
9753
+ },
9754
+ },
9755
+ /**/
9756
+ /**/
9757
+ {
9758
+ modelVariant: 'CHAT',
9759
+ modelTitle: 'o1-preview-2024-09-12',
9760
+ modelName: 'o1-preview-2024-09-12',
9761
+ // <- TODO:[main] !!!!!! Some better system to organize theese date suffixes and versions
9762
+ pricing: {
9763
+ prompt: computeUsage("$15.00 / 1M tokens"),
9764
+ output: computeUsage("$60.00 / 1M tokens"),
9765
+ },
9766
+ },
9767
+ /**/
9768
+ /**/
9769
+ {
9770
+ modelVariant: 'CHAT',
9771
+ modelTitle: 'o1-mini',
9772
+ modelName: 'o1-mini',
9773
+ pricing: {
9774
+ prompt: computeUsage("$3.00 / 1M tokens"),
9775
+ output: computeUsage("$12.00 / 1M tokens"),
9776
+ },
9777
+ },
9778
+ /**/
9779
+ /**/
9780
+ {
9781
+ modelVariant: 'CHAT',
9782
+ modelTitle: 'o1-mini-2024-09-12',
9783
+ modelName: 'o1-mini-2024-09-12',
9784
+ pricing: {
9785
+ prompt: computeUsage("$3.00 / 1M tokens"),
9786
+ output: computeUsage("$12.00 / 1M tokens"),
9787
+ },
9788
+ },
9789
+ /**/
9790
+ /**/
9744
9791
  {
9745
9792
  modelVariant: 'CHAT',
9746
9793
  modelTitle: 'gpt-3.5-turbo-16k-0613',
@@ -9829,7 +9876,7 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
9829
9876
  AzureOpenAiExecutionTools.prototype.listModels = function () {
9830
9877
  return __awaiter(this, void 0, void 0, function () {
9831
9878
  return __generator(this, function (_a) {
9832
- // TODO: !!! Do here some filtering which models are really available as deployment
9879
+ // TODO:[main] !!! Do here some filtering which models are really available as deployment
9833
9880
  // @see https://management.azure.com/subscriptions/subscriptionId/resourceGroups/resourceGroupName/providers/Microsoft.CognitiveServices/accounts/accountName/deployments?api-version=2023-05-01
9834
9881
  return [2 /*return*/, OPENAI_MODELS.map(function (_a) {
9835
9882
  var modelTitle = _a.modelTitle, modelName = _a.modelName, modelVariant = _a.modelVariant;
@@ -10518,7 +10565,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
10518
10565
  * @public exported from `@promptbook/openai`
10519
10566
  */
10520
10567
  var createOpenAiExecutionTools = Object.assign(function (options) {
10521
- // TODO: [🧠] !!!! If browser, auto add `dangerouslyAllowBrowser`
10568
+ // TODO: [🧠][main] !!!! If browser, auto add `dangerouslyAllowBrowser`
10522
10569
  if (($isRunningInBrowser() || $isRunningInWebWorker()) && !options.dangerouslyAllowBrowser) {
10523
10570
  options = __assign(__assign({}, options), { dangerouslyAllowBrowser: true });
10524
10571
  }