@promptbook/cli 0.68.3 → 0.68.5

This diff reflects the published contents of the two package versions as they appear in their public registry. It is provided for informational purposes only.
Files changed (31)
  1. package/README.md +4 -0
  2. package/esm/index.es.js +83 -36
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/promptbook-collection/index.d.ts +0 -3
  5. package/esm/typings/src/cli/cli-commands/make.d.ts +1 -1
  6. package/esm/typings/src/collection/constructors/createCollectionFromUrl.d.ts +1 -1
  7. package/esm/typings/src/config.d.ts +2 -2
  8. package/esm/typings/src/conversion/pipelineStringToJsonSync.d.ts +1 -1
  9. package/esm/typings/src/conversion/validation/validatePipeline.d.ts +5 -5
  10. package/esm/typings/src/execution/createPipelineExecutor.d.ts +1 -1
  11. package/esm/typings/src/knowledge/prepare-knowledge/markdown/prepareKnowledgeFromMarkdown.d.ts +1 -1
  12. package/esm/typings/src/knowledge/prepare-knowledge/pdf/prepareKnowledgeFromPdf.d.ts +1 -1
  13. package/esm/typings/src/llm-providers/_common/utils/cache/CacheItem.d.ts +1 -1
  14. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +2 -1
  15. package/esm/typings/src/llm-providers/anthropic-claude/anthropic-claude-models.d.ts +1 -1
  16. package/esm/typings/src/llm-providers/anthropic-claude/createAnthropicClaudeExecutionTools.d.ts +2 -2
  17. package/esm/typings/src/llm-providers/anthropic-claude/playground/playground.d.ts +2 -2
  18. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +2 -1
  19. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +2 -1
  20. package/esm/typings/src/llm-providers/openai/playground/playground.d.ts +1 -1
  21. package/esm/typings/src/personas/preparePersona.d.ts +1 -1
  22. package/esm/typings/src/prepare/isPipelinePrepared.d.ts +1 -1
  23. package/esm/typings/src/prepare/prepareTemplates.d.ts +1 -1
  24. package/esm/typings/src/types/PipelineJson/PipelineJson.d.ts +1 -1
  25. package/esm/typings/src/types/typeAliases.d.ts +1 -1
  26. package/esm/typings/src/utils/serialization/checkSerializableAsJson.d.ts +1 -1
  27. package/esm/typings/src/utils/serialization/isSerializableAsJson.d.ts +1 -1
  28. package/package.json +1 -1
  29. package/umd/index.umd.js +83 -36
  30. package/umd/index.umd.js.map +1 -1
  31. package/esm/typings/src/personas/preparePersona.test.d.ts +0 -1
package/umd/index.umd.js CHANGED
@@ -39,8 +39,8 @@
  /**
  * The version of the Promptbook library
  */
- var PROMPTBOOK_VERSION = '0.68.2';
- // TODO: !!!! List here all the versions and annotate + put into script
+ var PROMPTBOOK_VERSION = '0.68.4';
+ // TODO:[main] !!!! List here all the versions and annotate + put into script

  /*! *****************************************************************************
  Copyright (c) Microsoft Corporation.
@@ -375,7 +375,7 @@
  }
  /**
  * TODO: [🧠][🛣] More elegant way to tracking than passing `name`
- * TODO: [🧠] !!! In-memory cache of same values to prevent multiple checks
+ * TODO: [🧠][main] !!! In-memory cache of same values to prevent multiple checks
  * Note: [🐠] This is how `checkSerializableAsJson` + `isSerializableAsJson` together can just retun true/false or rich error message
  */

@@ -679,7 +679,7 @@
  commands.push("PIPELINE URL ".concat(pipelineUrl));
  }
  commands.push("PROMPTBOOK VERSION ".concat(promptbookVersion));
- // TODO: !!! This increase size of the bundle and is probbably not necessary
+ // TODO:[main] !!! This increase size of the bundle and is probbably not necessary
  pipelineString = prettifyMarkdown(pipelineString);
  try {
  for (var _g = __values(parameters.filter(function (_a) {
@@ -827,12 +827,12 @@
  pipelineString += '```' + contentLanguage;
  pipelineString += '\n';
  pipelineString += spaceTrim__default["default"](content);
- // <- TODO: !!! Escape
+ // <- TODO:[main] !!! Escape
  // <- TODO: [🧠] Some clear strategy how to spaceTrim the blocks
  pipelineString += '\n';
  pipelineString += '```';
  pipelineString += '\n\n';
- pipelineString += "`-> {".concat(resultingParameterName, "}`"); // <- TODO: !!! If the parameter here has description, add it and use templateParameterJsonToString
+ pipelineString += "`-> {".concat(resultingParameterName, "}`"); // <- TODO:[main] !!! If the parameter here has description, add it and use templateParameterJsonToString
  }
  }
  catch (e_3_1) { e_3 = { error: e_3_1 }; }
@@ -1059,7 +1059,7 @@
  });
  }

- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.68.2",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.68.2",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.68.2",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.68.2",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",content:"You are 
experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": 
\"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];

  /**
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
@@ -1134,7 +1134,7 @@
  if ( /* version === '1.0.0' || */version === '2.0.0' || version === '3.0.0') {
  return false;
  }
- // <- TODO: !!! Check isValidPromptbookVersion against PROMPTBOOK_VERSIONS
+ // <- TODO:[main] !!! Check isValidPromptbookVersion against PROMPTBOOK_VERSIONS
  return true;
  }

@@ -1283,7 +1283,7 @@
  // <- Note: [🚲]
  throw new PipelineLogicError(spaceTrim.spaceTrim(function (block) { return "\n Invalid promptbook URL \"".concat(pipeline.pipelineUrl, "\"\n\n ").concat(block(pipelineIdentification), "\n "); }));
  }
- if (!isValidPromptbookVersion(pipeline.promptbookVersion)) {
+ if (pipeline.promptbookVersion !== undefined && !isValidPromptbookVersion(pipeline.promptbookVersion)) {
  // <- Note: [🚲]
  throw new PipelineLogicError(spaceTrim.spaceTrim(function (block) { return "\n Invalid Promptbook Version \"".concat(pipeline.promptbookVersion, "\"\n\n ").concat(block(pipelineIdentification), "\n "); }));
  }
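The guard above makes `promptbookVersion` effectively optional during validation: only a version that is present but invalid is rejected. A minimal sketch of the resulting behavior, assuming `pipelineJson` stands for an otherwise valid pipeline:

```js
// Sketch only - `pipelineJson` is a placeholder for an otherwise valid pipeline
validatePipeline(pipelineJson);                                      // ok: no explicit promptbookVersion
validatePipeline({ ...pipelineJson, promptbookVersion: '0.68.4' });  // ok: valid version
validatePipeline({ ...pipelineJson, promptbookVersion: '2.0.0' });   // throws PipelineLogicError
```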
@@ -1478,11 +1478,11 @@
  * > ex port function validatePipeline(promptbook: really_unknown): asserts promptbook is PipelineJson {
  */
  /**
- * TODO: [🐣] !!!! Validate that all samples match expectations
- * TODO: [🐣][🐝] !!!! Validate that knowledge is valid (non-void)
- * TODO: [🐣] !!!! Validate that persona can be used only with CHAT variant
- * TODO: [🐣] !!!! Validate that parameter with reserved name not used RESERVED_PARAMETER_NAMES
- * TODO: [🐣] !!!! Validate that reserved parameter is not used as joker
+ * TODO: [🐣][main] !!!! Validate that all samples match expectations
+ * TODO: [🐣][🐝][main] !!!! Validate that knowledge is valid (non-void)
+ * TODO: [🐣][main] !!!! Validate that persona can be used only with CHAT variant
+ * TODO: [🐣][main] !!!! Validate that parameter with reserved name not used RESERVED_PARAMETER_NAMES
+ * TODO: [🐣][main] !!!! Validate that reserved parameter is not used as joker
  * TODO: [🧠] Validation not only logic itself but imports around - files and websites and rerefenced pipelines exists
  * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
  */
@@ -2767,7 +2767,7 @@
  return true;
  }
  /**
- * TODO: [🔃] !!!!! If the pipeline was prepared with different version or different set of models, prepare it once again
+ * TODO: [🔃][main] !!!!! If the pipeline was prepared with different version or different set of models, prepare it once again
  * TODO: [🐠] Maybe base this on `makeValidator`
  * TODO: [🧊] Pipeline can be partially prepared, this should return true ONLY if fully prepared
  * TODO: [🧿] Maybe do same process with same granularity and subfinctions as `preparePipeline`
@@ -3146,7 +3146,7 @@
  console.warn(spaceTrim.spaceTrim(function (block) { return "\n Pipeline is not prepared\n\n ".concat(block(pipelineIdentification), "\n\n It will be prepared ad-hoc before the first execution and **returned as `preparedPipeline` in `PipelineExecutorResult`**\n But it is recommended to prepare the pipeline during collection preparation\n\n @see more at https://ptbk.io/prepare-pipeline\n "); }));
  }
  var pipelineExecutor = function (inputParameters, onProgress) { return __awaiter(_this, void 0, void 0, function () {
- // TODO: !!! Extract to separate functions and files - ALL FUNCTIONS BELOW
+ // TODO:[main] !!! Extract to separate functions and files - ALL FUNCTIONS BELOW
  function getContextForTemplate(template) {
  return __awaiter(this, void 0, void 0, function () {
  return __generator(this, function (_a) {
@@ -3965,7 +3965,7 @@
  return pipelineExecutor;
  }
  /**
- * TODO: !!! Identify not only pipeline BUT exact template ${block(pipelineIdentification)}
+ * TODO:[main] !!! Identify not only pipeline BUT exact template ${block(pipelineIdentification)}
  * TODO: Use isVerbose here (not only pass to `preparePipeline`)
  * TODO: [🧠][🌳] Use here `countTotalUsage` and put preparation and prepared pipiline to report
  * TODO: [🪂] Use maxParallelCount here (not only pass to `preparePipeline`)
@@ -4028,7 +4028,7 @@
  outputParameters = result.outputParameters;
  knowledgePiecesRaw = outputParameters.knowledgePieces;
  knowledgeTextPieces = (knowledgePiecesRaw || '').split('\n---\n');
- // <- TODO: !!!!! Smarter split and filter out empty pieces
+ // <- TODO:[main] !!!!! Smarter split and filter out empty pieces
  if (isVerbose) {
  console.info('knowledgeTextPieces:', knowledgeTextPieces);
  }
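The TODO above asks for a smarter split; a hypothetical refinement (not in the released code) would trim each piece and drop empty ones:

```js
// Hypothetical refinement of the split (illustrative only):
var knowledgeTextPieces = (knowledgePiecesRaw || '')
    .split('\n---\n')
    .map(function (piece) { return piece.trim(); })
    .filter(function (piece) { return piece !== ''; });
```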
@@ -4108,7 +4108,7 @@
  });
  }
  /**
- * TODO: [🐝][🔼] !!! Export via `@promptbook/markdown`
+ * TODO: [🐝][🔼][main] !!! Export via `@promptbook/markdown`
  * TODO: [🪂] Do it in parallel 11:11
  * Note: No need to aggregate usage here, it is done by intercepting the llmTools
  */
@@ -4132,7 +4132,7 @@
  var partialPieces, pieces;
  return __generator(this, function (_a) {
  switch (_a.label) {
- case 0: return [4 /*yield*/, prepareKnowledgeFromMarkdown(knowledgeSource.sourceContent, // <- TODO: [🐝] !!! Unhardcode markdown, detect which type it is - BE AWARE of big package size
+ case 0: return [4 /*yield*/, prepareKnowledgeFromMarkdown(knowledgeSource.sourceContent, // <- TODO: [🐝][main] !!! Unhardcode markdown, detect which type it is - BE AWARE of big package size
  options)];
  case 1:
  partialPieces = _a.sent();
@@ -4324,7 +4324,7 @@
  });
  }
  /**
- * TODO: [🔃] !!!!! If the persona was prepared with different version or different set of models, prepare it once again
+ * TODO: [🔃][main] !!!!! If the persona was prepared with different version or different set of models, prepare it once again
  * TODO: [🏢] !! Check validity of `modelName` in pipeline
  * TODO: [🏢] !! Check validity of `systemMessage` in pipeline
  * TODO: [🏢] !! Check validity of `temperature` in pipeline
@@ -4373,7 +4373,7 @@
  case 0:
  _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? MAX_PARALLEL_COUNT : _a;
  templates = pipeline.templates, parameters = pipeline.parameters, knowledgePiecesCount = pipeline.knowledgePiecesCount;
- // TODO: !!!!! Apply samples to each template (if missing and is for the template defined)
+ // TODO:[main] !!!!! Apply samples to each template (if missing and is for the template defined)
  TODO_USE(parameters);
  templatesPrepared = new Array(
  // <- TODO: [🧱] Implement in a functional (not new Class) way
@@ -4405,7 +4405,7 @@
  /**
  * TODO: [🧠] Add context to each template (if missing)
  * TODO: [🧠] What is better name `prepareTemplate` or `prepareTemplateAndParameters`
- * TODO: [♨] !!! Prepare index the samples and maybe templates
+ * TODO: [♨][main] !!! Prepare index the samples and maybe templates
  * TODO: Write tests for `preparePipeline`
  * TODO: [🏏] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
  * TODO: [🧊] In future one preparation can take data from previous preparation and save tokens and time
@@ -4577,7 +4577,7 @@
  if (sourceContent === '') {
  throw new ParseError("Source is not defined");
  }
- // TODO: !!!! Following checks should be applied every link in the `sourceContent`
+ // TODO:[main] !!!! Following checks should be applied every link in the `sourceContent`
  if (sourceContent.startsWith('http://')) {
  throw new ParseError("Source is not secure");
  }
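The TODO above notes that the same scheme checks should also cover links inside `sourceContent`, not just the source itself. One hypothetical way to do that (the regex and loop are illustrative, not part of the package):

```js
// Illustrative only - check every http(s) link found in the source content:
var links = sourceContent.match(/https?:\/\/[^\s)]+/g) || [];
for (var i = 0; i < links.length; i++) {
    if (links[i].startsWith('http://')) {
        throw new ParseError("Linked resource is not secure: " + links[i]);
    }
}
```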
@@ -4780,7 +4780,7 @@
  if (command.templateType === 'KNOWLEDGE') {
  knowledgeCommandParser.$applyToPipelineJson({
  type: 'KNOWLEDGE',
- sourceContent: $templateJson.content, // <- TODO: [🐝] !!! Work with KNOWLEDGE which not referring to the source file or website, but its content itself
+ sourceContent: $templateJson.content, // <- TODO: [🐝][main] !!! Work with KNOWLEDGE which not referring to the source file or website, but its content itself
  }, $pipelineJson);
  $templateJson.isTemplate = false;
  return;
@@ -5822,6 +5822,7 @@
  * Note: `$` is used to indicate that this function mutates given `pipelineJson`
  */
  $applyToPipelineJson: function (command, $pipelineJson) {
+ // TODO: Warn if the version is overridden
  $pipelineJson.promptbookVersion = command.promptbookVersion;
  },
  /**
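The newly added TODO suggests warning when a `PROMPTBOOK VERSION` command overrides an already-set version. A sketch of what that could look like, adapted to a standalone function (hypothetical, not part of the release):

```js
// Hypothetical sketch of the "warn if overridden" TODO:
function $applyVersionCommand(command, $pipelineJson) {
    if ($pipelineJson.promptbookVersion !== undefined &&
        $pipelineJson.promptbookVersion !== command.promptbookVersion) {
        console.warn('promptbookVersion overridden: ' +
            $pipelineJson.promptbookVersion + ' -> ' + command.promptbookVersion);
    }
    $pipelineJson.promptbookVersion = command.promptbookVersion;
}
```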
@@ -6711,7 +6712,7 @@
  var $pipelineJson = {
  title: undefined /* <- Note: [🍙] Putting here placeholder to keep `title` on top at final JSON */,
  pipelineUrl: undefined /* <- Note: Putting here placeholder to keep `pipelineUrl` on top at final JSON */,
- promptbookVersion: PROMPTBOOK_VERSION,
+ promptbookVersion: undefined /* <- Note: By default no explicit version */,
  description: undefined /* <- Note: [🍙] Putting here placeholder to keep `description` on top at final JSON */,
  parameters: [],
  templates: [],
@@ -7002,7 +7003,7 @@
  return $asDeeplyFrozenSerializableJson('pipelineJson', $pipelineJson);
  }
  /**
- * TODO: !!!! Warn if used only sync version
+ * TODO:[main] !!!! Warn if used only sync version
  * TODO: [🚞] Report here line/column of error
  * TODO: Use spaceTrim more effectively
  * TODO: [🧠] Parameter flags - isInput, isOutput, isInternal
@@ -7507,7 +7508,7 @@
  }
  }
  /**
- * TODO: [🧠] !!! In-memory cache of same values to prevent multiple checks
+ * TODO: [🧠][main] !!! In-memory cache of same values to prevent multiple checks
  * TODO: [🧠][💺] Can be done this on type-level?
  */

@@ -8348,7 +8349,7 @@
  });
  }
  /**
- * TODO: [🥃] !!! Allow `ptbk make` without configuring any llm tools
+ * TODO: [🥃][main] !!! Allow `ptbk make` without configuring any llm tools
  * TODO: Maybe remove this command - "about" command should be enough?
  * TODO: [0] DRY Javascript and typescript - Maybe make ONLY typescript and for javascript just remove types
  * Note: [🟡] This code should never be published outside of `@promptbook/cli`
@@ -8827,7 +8828,7 @@
  socket.on('connect', function () {
  resolve(socket);
  });
- // TODO: !!!! Better timeout handling
+ // TODO:[main] !!!! Better timeout handling
  setTimeout(function () {
  reject(new Error("Timeout while connecting to ".concat(_this.options.remoteUrl)));
  }, CONNECTION_TIMEOUT_MS);
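The TODO above asks for better timeout handling; as written, the timer keeps running even after a successful connect. One hypothetical fix (not in the release) is to keep the timer handle and clear it on connect:

```js
// Hypothetical sketch - clear the timer once connected:
var timeout = setTimeout(function () {
    reject(new Error('Timeout while connecting to ' + _this.options.remoteUrl));
}, CONNECTION_TIMEOUT_MS);
socket.on('connect', function () {
    clearTimeout(timeout);
    resolve(socket);
});
```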
@@ -9007,11 +9008,11 @@
  output: computeUsage("$2.40 / 1M tokens"),
  },
  },
- // TODO: !!! Claude 1 and 2 has also completion versions - ask Hoagy
+ // TODO:[main] !!! Claude 1 and 2 has also completion versions - ask Hoagy
  ]);
  /**
  * Note: [🤖] Add models of new variant
- * TODO: [🧠] !!! Add embedding models OR Anthropic has only chat+completion models?
+ * TODO: [🧠][main] !!! Add embedding models OR Anthropic has only chat+completion models?
  * TODO: [🧠] Some mechanism to propagate unsureness
  * TODO: [🧠][👮‍♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,...
  * TODO: [🎰] Some mechanism to auto-update available models
@@ -9367,8 +9368,8 @@
  className: 'AnthropicClaudeExecutionTools',
  });
  /**
- * TODO: [🧠] !!!! Make anonymous this with all LLM providers
- * TODO: [🧠][🧱] !!!! Maybe change all `new AnthropicClaudeExecutionTools` -> `createAnthropicClaudeExecutionTools` in manual
+ * TODO: [🧠][main] !!!! Make anonymous this with all LLM providers
+ * TODO: [🧠][🧱][main] !!!! Maybe change all `new AnthropicClaudeExecutionTools` -> `createAnthropicClaudeExecutionTools` in manual
  * TODO: [🧠] Maybe auto-detect usage in browser and determine default value of `isProxied`
  * TODO: [🦺] Is there some way how to put `packageName` and `className` on top and function definition on bottom?
  * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
@@ -9746,6 +9747,7 @@
  prompt: computeUsage("$5.00 / 1M tokens"),
  output: computeUsage("$15.00 / 1M tokens"),
  },
+ //TODO:[main] !!!!!! Add gpt-4o-mini-2024-07-18 and all others to be up to date
  },
  /**/
  /**/
@@ -9760,6 +9762,51 @@
  },
  /**/
  /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'o1-preview',
+ modelName: 'o1-preview',
+ pricing: {
+ prompt: computeUsage("$15.00 / 1M tokens"),
+ output: computeUsage("$60.00 / 1M tokens"),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'o1-preview-2024-09-12',
+ modelName: 'o1-preview-2024-09-12',
+ // <- TODO:[main] !!!!!! Some better system to organize theese date suffixes and versions
+ pricing: {
+ prompt: computeUsage("$15.00 / 1M tokens"),
+ output: computeUsage("$60.00 / 1M tokens"),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'o1-mini',
+ modelName: 'o1-mini',
+ pricing: {
+ prompt: computeUsage("$3.00 / 1M tokens"),
+ output: computeUsage("$12.00 / 1M tokens"),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'o1-mini-2024-09-12',
+ modelName: 'o1-mini-2024-09-12',
+ pricing: {
+ prompt: computeUsage("$3.00 / 1M tokens"),
+ output: computeUsage("$12.00 / 1M tokens"),
+ },
+ },
+ /**/
+ /**/
  {
  modelVariant: 'CHAT',
  modelTitle: 'gpt-3.5-turbo-16k-0613',
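The new model entries pass human-readable rates like "$15.00 / 1M tokens" to `computeUsage`. That helper's implementation is not part of this diff; a minimal sketch of what such a conversion could do, assuming the format shown above and a per-token USD result:

```js
// Assumed sketch only - the real computeUsage may differ:
function computeUsageSketch(priceString) {
    var match = /^\$([\d.]+) \/ 1M tokens$/.exec(priceString);
    if (match === null) {
        throw new Error('Unexpected price format: ' + priceString);
    }
    return parseFloat(match[1]) / 1000000; // <- USD per single token
}
// computeUsageSketch("$15.00 / 1M tokens") -> 0.000015
```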
@@ -9848,7 +9895,7 @@
  AzureOpenAiExecutionTools.prototype.listModels = function () {
  return __awaiter(this, void 0, void 0, function () {
  return __generator(this, function (_a) {
- // TODO: !!! Do here some filtering which models are really available as deployment
+ // TODO:[main] !!! Do here some filtering which models are really available as deployment
  // @see https://management.azure.com/subscriptions/subscriptionId/resourceGroups/resourceGroupName/providers/Microsoft.CognitiveServices/accounts/accountName/deployments?api-version=2023-05-01
  return [2 /*return*/, OPENAI_MODELS.map(function (_a) {
  var modelTitle = _a.modelTitle, modelName = _a.modelName, modelVariant = _a.modelVariant;
@@ -10537,7 +10584,7 @@
  * @public exported from `@promptbook/openai`
  */
  var createOpenAiExecutionTools = Object.assign(function (options) {
- // TODO: [🧠] !!!! If browser, auto add `dangerouslyAllowBrowser`
+ // TODO: [🧠][main] !!!! If browser, auto add `dangerouslyAllowBrowser`
  if (($isRunningInBrowser() || $isRunningInWebWorker()) && !options.dangerouslyAllowBrowser) {
  options = __assign(__assign({}, options), { dangerouslyAllowBrowser: true });
  }
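The context lines show that the factory auto-enables `dangerouslyAllowBrowser` when running in a browser or web worker. From the caller's side (usage sketch; the API key is a placeholder):

```js
// Usage sketch - in a browser, the flag is injected automatically:
var tools = createOpenAiExecutionTools({
    apiKey: 'sk-...', // <- placeholder
    // dangerouslyAllowBrowser: true  <- no longer needs to be passed explicitly in browsers
});
```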