@promptbook/node 0.69.0-15 → 0.69.0-16

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27)
  1. package/README.md +4 -0
  2. package/esm/index.es.js +27 -26
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/promptbook-collection/index.d.ts +0 -3
  5. package/esm/typings/src/cli/cli-commands/make.d.ts +1 -1
  6. package/esm/typings/src/collection/constructors/createCollectionFromUrl.d.ts +1 -1
  7. package/esm/typings/src/config.d.ts +2 -2
  8. package/esm/typings/src/conversion/pipelineStringToJsonSync.d.ts +1 -1
  9. package/esm/typings/src/conversion/validation/validatePipeline.d.ts +5 -5
  10. package/esm/typings/src/knowledge/prepare-knowledge/markdown/prepareKnowledgeFromMarkdown.d.ts +1 -1
  11. package/esm/typings/src/knowledge/prepare-knowledge/pdf/prepareKnowledgeFromPdf.d.ts +1 -1
  12. package/esm/typings/src/llm-providers/_common/utils/cache/CacheItem.d.ts +1 -1
  13. package/esm/typings/src/llm-providers/anthropic-claude/anthropic-claude-models.d.ts +1 -1
  14. package/esm/typings/src/llm-providers/anthropic-claude/createAnthropicClaudeExecutionTools.d.ts +2 -2
  15. package/esm/typings/src/llm-providers/anthropic-claude/playground/playground.d.ts +2 -2
  16. package/esm/typings/src/llm-providers/openai/playground/playground.d.ts +1 -1
  17. package/esm/typings/src/personas/preparePersona.d.ts +1 -1
  18. package/esm/typings/src/prepare/isPipelinePrepared.d.ts +1 -1
  19. package/esm/typings/src/prepare/prepareTemplates.d.ts +1 -1
  20. package/esm/typings/src/types/PipelineJson/PipelineJson.d.ts +1 -1
  21. package/esm/typings/src/types/typeAliases.d.ts +1 -1
  22. package/esm/typings/src/utils/serialization/checkSerializableAsJson.d.ts +1 -1
  23. package/esm/typings/src/utils/serialization/isSerializableAsJson.d.ts +1 -1
  24. package/package.json +2 -2
  25. package/umd/index.umd.js +27 -26
  26. package/umd/index.umd.js.map +1 -1
  27. package/esm/typings/src/personas/preparePersona.test.d.ts +0 -1
package/README.md CHANGED
@@ -42,6 +42,10 @@ Core of the library for Node.js runtime, it contains the main logic for promptbo
 
  Rest of the documentation is common for **entire promptbook ecosystem**:
 
+ # ✨ New Features
+
+ - ✨ **Support [OpenAI o1 model](https://openai.com/o1/)**
+
  ## 🤍 The Promptbook Whitepaper
 
 
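Note on the README change: the only user-facing addition announced here is OpenAI o1 support. Below is a minimal, hedged sketch of how a persona's model requirements might target the new model, reusing the `modelRequirements` JSON shape from the bundled `prepare-persona.ptbk.md` pipeline; the `modelName` value `o1` is an assumption based on the README note, not something this diff confirms.

```ts
// Hypothetical model requirements in the shape used by the bundled prepare-persona pipeline.
// The identifier 'o1' for the newly supported OpenAI o1 model is an assumption.
const modelRequirements = {
    modelName: 'o1',
    systemMessage: 'You are an experienced AI engineer and helpful assistant.',
    temperature: 0.7, // <- reasoning models like o1 may restrict or ignore temperature
};
```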
package/esm/index.es.js CHANGED
@@ -15,8 +15,8 @@ import * as dotenv from 'dotenv';
  /**
  * The version of the Promptbook library
  */
- var PROMPTBOOK_VERSION = '0.69.0-14';
- // TODO: !!!! List here all the versions and annotate + put into script
+ var PROMPTBOOK_VERSION = '0.69.0-15';
+ // TODO:[main] !!!! List here all the versions and annotate + put into script
 
  /*! *****************************************************************************
  Copyright (c) Microsoft Corporation.
@@ -326,7 +326,7 @@ function checkSerializableAsJson(name, value) {
  }
  /**
  * TODO: [🧠][🛣] More elegant way to tracking than passing `name`
- * TODO: [🧠] !!! In-memory cache of same values to prevent multiple checks
+ * TODO: [🧠][main] !!! In-memory cache of same values to prevent multiple checks
  * Note: [🐠] This is how `checkSerializableAsJson` + `isSerializableAsJson` together can just retun true/false or rich error message
  */
 
@@ -516,7 +516,7 @@ function pipelineJsonToString(pipelineJson) {
  commands.push("PIPELINE URL ".concat(pipelineUrl));
  }
  commands.push("PROMPTBOOK VERSION ".concat(promptbookVersion));
- // TODO: !!! This increase size of the bundle and is probbably not necessary
+ // TODO:[main] !!! This increase size of the bundle and is probbably not necessary
  pipelineString = prettifyMarkdown(pipelineString);
  try {
  for (var _g = __values(parameters.filter(function (_a) {
@@ -664,12 +664,12 @@ function pipelineJsonToString(pipelineJson) {
  pipelineString += '```' + contentLanguage;
  pipelineString += '\n';
  pipelineString += spaceTrim$1(content);
- // <- TODO: !!! Escape
+ // <- TODO:[main] !!! Escape
  // <- TODO: [🧠] Some clear strategy how to spaceTrim the blocks
  pipelineString += '\n';
  pipelineString += '```';
  pipelineString += '\n\n';
- pipelineString += "`-> {".concat(resultingParameterName, "}`"); // <- TODO: !!! If the parameter here has description, add it and use templateParameterJsonToString
+ pipelineString += "`-> {".concat(resultingParameterName, "}`"); // <- TODO:[main] !!! If the parameter here has description, add it and use templateParameterJsonToString
  }
  }
  catch (e_3_1) { e_3 = { error: e_3_1 }; }
@@ -896,7 +896,7 @@ function forEachAsync(array, options, callbackfunction) {
  });
  }
 
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.69.0-14",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.69.0-14",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.69.0-14",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.69.0-14",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",content:"You 
are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": 
\"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
 
  /**
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
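The only change to the bundled `PipelineCollection` above is that each entry drops its `promptbookVersion` field, in line with the one-line change to `PipelineJson.d.ts` listed in the file table. A hedged sketch of what that typings change presumably amounts to (the exact declaration is not shown in this diff):

```ts
// Assumed shape after this release: promptbookVersion becomes optional on PipelineJson.
// `string_semantic_version` is a stand-in for the library's own branded alias.
type string_semantic_version = string;

interface PipelineJsonVersionField {
    promptbookVersion?: string_semantic_version; // <- presumably no longer required on every pipeline
}
```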
@@ -971,7 +971,7 @@ function isValidPromptbookVersion(version) {
  if ( /* version === '1.0.0' || */version === '2.0.0' || version === '3.0.0') {
  return false;
  }
- // <- TODO: !!! Check isValidPromptbookVersion against PROMPTBOOK_VERSIONS
+ // <- TODO:[main] !!! Check isValidPromptbookVersion against PROMPTBOOK_VERSIONS
  return true;
  }
 
@@ -1140,7 +1140,7 @@ function validatePipelineCore(pipeline) {
  // <- Note: [🚲]
  throw new PipelineLogicError(spaceTrim(function (block) { return "\n Invalid promptbook URL \"".concat(pipeline.pipelineUrl, "\"\n\n ").concat(block(pipelineIdentification), "\n "); }));
  }
- if (!isValidPromptbookVersion(pipeline.promptbookVersion)) {
+ if (pipeline.promptbookVersion !== undefined && !isValidPromptbookVersion(pipeline.promptbookVersion)) {
  // <- Note: [🚲]
  throw new PipelineLogicError(spaceTrim(function (block) { return "\n Invalid Promptbook Version \"".concat(pipeline.promptbookVersion, "\"\n\n ").concat(block(pipelineIdentification), "\n "); }));
  }
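The substantive change in this hunk is that `promptbookVersion` becomes optional at validation time: the version check only runs when the field is defined. A minimal sketch of the new guard, with a stand-in for `isValidPromptbookVersion` reduced to the fragment visible in this diff (everything outside that fragment is illustrative):

```ts
// Stand-in for the bundled helper, reduced to the fragment visible in this diff.
function isValidPromptbookVersion(version: string): boolean {
    if (/* version === '1.0.0' || */ version === '2.0.0' || version === '3.0.0') {
        return false;
    }
    // <- the real implementation performs further checks
    return true;
}

// The changed guard: skip the version check entirely when no version is declared.
function checkVersion(pipeline: { promptbookVersion?: string }): void {
    if (pipeline.promptbookVersion !== undefined && !isValidPromptbookVersion(pipeline.promptbookVersion)) {
        throw new Error(`Invalid Promptbook Version "${pipeline.promptbookVersion}"`);
    }
}

checkVersion({}); // <- a pipeline without promptbookVersion now passes this particular check
// checkVersion({ promptbookVersion: '2.0.0' }); // <- a known-invalid version would still throw
```

The other checks in `validatePipelineCore` are unchanged, so a pipeline with no version can still fail validation for other reasons.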
@@ -1335,11 +1335,11 @@ function validatePipelineCore(pipeline) {
  * > ex port function validatePipeline(promptbook: really_unknown): asserts promptbook is PipelineJson {
  */
  /**
- * TODO: [🐣] !!!! Validate that all samples match expectations
- * TODO: [🐣][🐝] !!!! Validate that knowledge is valid (non-void)
- * TODO: [🐣] !!!! Validate that persona can be used only with CHAT variant
- * TODO: [🐣] !!!! Validate that parameter with reserved name not used RESERVED_PARAMETER_NAMES
- * TODO: [🐣] !!!! Validate that reserved parameter is not used as joker
+ * TODO: [🐣][main] !!!! Validate that all samples match expectations
+ * TODO: [🐣][🐝][main] !!!! Validate that knowledge is valid (non-void)
+ * TODO: [🐣][main] !!!! Validate that persona can be used only with CHAT variant
+ * TODO: [🐣][main] !!!! Validate that parameter with reserved name not used RESERVED_PARAMETER_NAMES
+ * TODO: [🐣][main] !!!! Validate that reserved parameter is not used as joker
  * TODO: [🧠] Validation not only logic itself but imports around - files and websites and rerefenced pipelines exists
  * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
  */
@@ -2083,7 +2083,7 @@ function isPipelinePrepared(pipeline) {
  return true;
  }
  /**
- * TODO: [🔃] !!!!! If the pipeline was prepared with different version or different set of models, prepare it once again
+ * TODO: [🔃][main] !!!!! If the pipeline was prepared with different version or different set of models, prepare it once again
  * TODO: [🐠] Maybe base this on `makeValidator`
  * TODO: [🧊] Pipeline can be partially prepared, this should return true ONLY if fully prepared
  * TODO: [🧿] Maybe do same process with same granularity and subfinctions as `preparePipeline`
@@ -4494,7 +4494,7 @@ function prepareKnowledgeFromMarkdown(knowledgeContent /* <- TODO: [🖖] (?mayb
  outputParameters = result.outputParameters;
  knowledgePiecesRaw = outputParameters.knowledgePieces;
  knowledgeTextPieces = (knowledgePiecesRaw || '').split('\n---\n');
- // <- TODO: !!!!! Smarter split and filter out empty pieces
+ // <- TODO:[main] !!!!! Smarter split and filter out empty pieces
  if (isVerbose) {
  console.info('knowledgeTextPieces:', knowledgeTextPieces);
  }
@@ -4579,7 +4579,7 @@ function prepareKnowledgeFromMarkdown(knowledgeContent /* <- TODO: [🖖] (?mayb
  });
  }
  /**
- * TODO: [🐝][🔼] !!! Export via `@promptbook/markdown`
+ * TODO: [🐝][🔼][main] !!! Export via `@promptbook/markdown`
  * TODO: [🪂] Do it in parallel 11:11
  * Note: No need to aggregate usage here, it is done by intercepting the llmTools
  */
@@ -4603,7 +4603,7 @@ function prepareKnowledgePieces(knowledgeSources, options) {
  var partialPieces, pieces;
  return __generator(this, function (_a) {
  switch (_a.label) {
- case 0: return [4 /*yield*/, prepareKnowledgeFromMarkdown(knowledgeSource.sourceContent, // <- TODO: [🐝] !!! Unhardcode markdown, detect which type it is - BE AWARE of big package size
+ case 0: return [4 /*yield*/, prepareKnowledgeFromMarkdown(knowledgeSource.sourceContent, // <- TODO: [🐝][main] !!! Unhardcode markdown, detect which type it is - BE AWARE of big package size
  options)];
  case 1:
  partialPieces = _a.sent();
@@ -4795,7 +4795,7 @@ function preparePersona(personaDescription, options) {
  });
  }
  /**
- * TODO: [🔃] !!!!! If the persona was prepared with different version or different set of models, prepare it once again
+ * TODO: [🔃][main] !!!!! If the persona was prepared with different version or different set of models, prepare it once again
  * TODO: [🏢] !! Check validity of `modelName` in pipeline
  * TODO: [🏢] !! Check validity of `systemMessage` in pipeline
  * TODO: [🏢] !! Check validity of `temperature` in pipeline
@@ -4844,7 +4844,7 @@ function prepareTemplates(pipeline, options) {
  case 0:
  _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? MAX_PARALLEL_COUNT : _a;
  templates = pipeline.templates, parameters = pipeline.parameters, knowledgePiecesCount = pipeline.knowledgePiecesCount;
- // TODO: !!!!! Apply samples to each template (if missing and is for the template defined)
+ // TODO:[main] !!!!! Apply samples to each template (if missing and is for the template defined)
  TODO_USE(parameters);
  templatesPrepared = new Array(
  // <- TODO: [🧱] Implement in a functional (not new Class) way
@@ -4876,7 +4876,7 @@ function prepareTemplates(pipeline, options) {
  /**
  * TODO: [🧠] Add context to each template (if missing)
  * TODO: [🧠] What is better name `prepareTemplate` or `prepareTemplateAndParameters`
- * TODO: [♨] !!! Prepare index the samples and maybe templates
+ * TODO: [♨][main] !!! Prepare index the samples and maybe templates
  * TODO: Write tests for `preparePipeline`
  * TODO: [🏏] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
  * TODO: [🧊] In future one preparation can take data from previous preparation and save tokens and time
@@ -5048,7 +5048,7 @@ var knowledgeCommandParser = {
  if (sourceContent === '') {
  throw new ParseError("Source is not defined");
  }
- // TODO: !!!! Following checks should be applied every link in the `sourceContent`
+ // TODO:[main] !!!! Following checks should be applied every link in the `sourceContent`
  if (sourceContent.startsWith('http://')) {
  throw new ParseError("Source is not secure");
  }
@@ -5251,7 +5251,7 @@ var templateCommandParser = {
  if (command.templateType === 'KNOWLEDGE') {
  knowledgeCommandParser.$applyToPipelineJson({
  type: 'KNOWLEDGE',
- sourceContent: $templateJson.content, // <- TODO: [🐝] !!! Work with KNOWLEDGE which not referring to the source file or website, but its content itself
+ sourceContent: $templateJson.content, // <- TODO: [🐝][main] !!! Work with KNOWLEDGE which not referring to the source file or website, but its content itself
  }, $pipelineJson);
  $templateJson.isTemplate = false;
  return;
@@ -6548,6 +6548,7 @@ var promptbookVersionCommandParser = {
  * Note: `$` is used to indicate that this function mutates given `pipelineJson`
  */
  $applyToPipelineJson: function (command, $pipelineJson) {
+ // TODO: Warn if the version is overridden
  $pipelineJson.promptbookVersion = command.promptbookVersion;
  },
  /**
@@ -7368,7 +7369,7 @@ function pipelineStringToJsonSync(pipelineString) {
  var $pipelineJson = {
  title: undefined /* <- Note: [🍙] Putting here placeholder to keep `title` on top at final JSON */,
  pipelineUrl: undefined /* <- Note: Putting here placeholder to keep `pipelineUrl` on top at final JSON */,
- promptbookVersion: PROMPTBOOK_VERSION,
+ promptbookVersion: undefined /* <- Note: By default no explicit version */,
  description: undefined /* <- Note: [🍙] Putting here placeholder to keep `description` on top at final JSON */,
  parameters: [],
  templates: [],
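Together with the relaxed validator above, `pipelineStringToJsonSync` no longer stamps the library's own `PROMPTBOOK_VERSION` into every compiled pipeline; the field stays `undefined` unless the source contains a `PROMPTBOOK VERSION` command (the `promptbookVersionCommandParser` hunk above is what assigns it). A rough usage sketch, assuming the function is exported from `@promptbook/node` as its typings path suggests:

```ts
import { pipelineStringToJsonSync } from '@promptbook/node'; // <- assumed export path

// `source` stands for any valid .ptbk.md pipeline string; the sketch only looks at
// what happens to `promptbookVersion` in the compiled result.
function compiledVersionOf(source: string): string | undefined {
    const pipelineJson = pipelineStringToJsonSync(source as never /* <- bypass the branded string type for the sketch */);

    // 0.69.0-15: always filled with the library's own PROMPTBOOK_VERSION.
    // 0.69.0-16: stays undefined unless the source declares `PROMPTBOOK VERSION ...`.
    return pipelineJson.promptbookVersion;
}
```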
@@ -7659,7 +7660,7 @@ function pipelineStringToJsonSync(pipelineString) {
  return $asDeeplyFrozenSerializableJson('pipelineJson', $pipelineJson);
  }
  /**
- * TODO: !!!! Warn if used only sync version
+ * TODO:[main] !!!! Warn if used only sync version
  * TODO: [🚞] Report here line/column of error
  * TODO: Use spaceTrim more effectively
  * TODO: [🧠] Parameter flags - isInput, isOutput, isInternal
@@ -8442,7 +8443,7 @@ function isSerializableAsJson(value) {
  }
  }
  /**
- * TODO: [🧠] !!! In-memory cache of same values to prevent multiple checks
+ * TODO: [🧠][main] !!! In-memory cache of same values to prevent multiple checks
  * TODO: [🧠][💺] Can be done this on type-level?
  */