@promptbook/markdown-utils 0.75.0-2 → 0.75.0-4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50) hide show
  1. package/README.md +27 -17
  2. package/esm/index.es.js +46 -45
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/src/_packages/core.index.d.ts +10 -4
  5. package/esm/typings/src/_packages/types.index.d.ts +8 -6
  6. package/esm/typings/src/commands/EXPECT/ExpectCommand.d.ts +1 -1
  7. package/esm/typings/src/commands/SECTION/SectionCommand.d.ts +1 -1
  8. package/esm/typings/src/commands/_common/types/CommandParser.d.ts +0 -2
  9. package/esm/typings/src/config.d.ts +9 -2
  10. package/esm/typings/src/conversion/pipelineJsonToString.d.ts +1 -1
  11. package/esm/typings/src/conversion/prettify/renderPipelineMermaidOptions.d.ts +2 -2
  12. package/esm/typings/src/conversion/utils/extractParameterNamesFromTask.d.ts +3 -3
  13. package/esm/typings/src/conversion/utils/renameParameter.d.ts +2 -2
  14. package/esm/typings/src/dialogs/simple-prompt/SimplePromptInterfaceTools.d.ts +1 -1
  15. package/esm/typings/src/execution/ScriptExecutionTools.d.ts +1 -1
  16. package/esm/typings/src/formfactors/_boilerplate/BoilerplateFormfactorDefinition.d.ts +2 -2
  17. package/esm/typings/src/formfactors/_common/AbstractFormfactorDefinition.d.ts +3 -0
  18. package/esm/typings/src/formfactors/chatbot/ChatbotFormfactorDefinition.d.ts +32 -2
  19. package/esm/typings/src/formfactors/generator/GeneratorFormfactorDefinition.d.ts +14 -0
  20. package/esm/typings/src/formfactors/generic/GenericFormfactorDefinition.d.ts +2 -2
  21. package/esm/typings/src/formfactors/index.d.ts +72 -10
  22. package/esm/typings/src/formfactors/matcher/MatcherFormfactorDefinition.d.ts +2 -2
  23. package/esm/typings/src/formfactors/sheets/SheetsFormfactorDefinition.d.ts +12 -2
  24. package/esm/typings/src/formfactors/translator/TranslatorFormfactorDefinition.d.ts +12 -2
  25. package/esm/typings/src/pipeline/PipelineInterface/PipelineInterface.d.ts +5 -4
  26. package/esm/typings/src/pipeline/PipelineInterface/constants.d.ts +2 -2
  27. package/esm/typings/src/pipeline/PipelineJson/{TaskJsonCommon.d.ts → CommonTaskJson.d.ts} +13 -13
  28. package/esm/typings/src/pipeline/PipelineJson/DialogTaskJson.d.ts +2 -2
  29. package/esm/typings/src/pipeline/PipelineJson/ParameterJson.d.ts +2 -0
  30. package/esm/typings/src/pipeline/PipelineJson/PersonaJson.d.ts +1 -1
  31. package/esm/typings/src/pipeline/PipelineJson/PipelineJson.d.ts +2 -2
  32. package/esm/typings/src/pipeline/PipelineJson/PromptTaskJson.d.ts +2 -2
  33. package/esm/typings/src/pipeline/PipelineJson/ScriptTaskJson.d.ts +2 -2
  34. package/esm/typings/src/pipeline/PipelineJson/SimpleTaskJson.d.ts +2 -2
  35. package/esm/typings/src/pipeline/PipelineJson/TaskJson.d.ts +1 -1
  36. package/esm/typings/src/pipeline/PipelineString.d.ts +1 -1
  37. package/esm/typings/src/prepare/isPipelinePrepared.d.ts +1 -1
  38. package/esm/typings/src/prepare/prepareTasks.d.ts +5 -5
  39. package/esm/typings/src/types/Prompt.d.ts +3 -3
  40. package/esm/typings/src/types/SectionType.d.ts +21 -0
  41. package/esm/typings/src/types/TaskProgress.d.ts +1 -1
  42. package/esm/typings/src/types/TaskType.d.ts +15 -0
  43. package/esm/typings/src/types/typeAliases.d.ts +1 -1
  44. package/esm/typings/src/utils/organization/TODO_remove_as.d.ts +6 -0
  45. package/esm/typings/src/utils/parameters/extractParameterNames.d.ts +1 -1
  46. package/package.json +1 -1
  47. package/umd/index.umd.js +46 -45
  48. package/umd/index.umd.js.map +1 -1
  49. package/esm/typings/src/commands/SECTION/SectionType.d.ts +0 -13
  50. /package/esm/typings/{promptbook-collection → books}/index.d.ts +0 -0
package/README.md CHANGED
@@ -2,7 +2,7 @@
2
2
 
3
3
  # ![Promptbook logo - cube with letters P and B](./other/design/logo-h1.png) Promptbook
4
4
 
5
- Build responsible, controlled and transparent applications on top of LLM models!
5
+ It's time for a paradigm shift! The future of software is in plain English, French or Latin.
6
6
 
7
7
 
8
8
 
@@ -51,25 +51,24 @@ Utility functions used for processing markdown. Its part of the larger [`@prompt
51
51
 
52
52
  Rest of the documentation is common for **entire promptbook ecosystem**:
53
53
 
54
- ## 🤍 The Promptbook Whitepaper
55
54
 
56
- If you have a simple, single prompt for ChatGPT, GPT-4, Anthropic Claude, Google Gemini, Llama 3, or whatever, it doesn't matter how you integrate it. Whether it's calling a REST API directly, using the SDK, hardcoding the prompt into the source code, or importing a text file, the process remains the same.
57
55
 
58
- But often you will struggle with the **limitations of LLMs**, such as **hallucinations, off-topic responses, poor quality output, language and prompt drift, word repetition repetition repetition repetition or misuse, lack of context, or just plain w𝒆𝐢rd resp0nses**. When this happens, you generally have three options:
59
56
 
60
- 1. **Fine-tune** the model to your specifications or even train your own.
61
- 2. **Prompt-engineer** the prompt to the best shape you can achieve.
62
- 3. Orchestrate **multiple prompts** in a [pipeline](https://github.com/webgptorg/promptbook/discussions/64) to get the best result.
57
+ ## 🤍 The Book Abstract
58
+
59
+ > It's time for a paradigm shift! **The future of software is in plain English**, French or Latin.
60
+
61
+ During the computer revolution, we have seen [multiple generations of computer languages](https://github.com/webgptorg/promptbook/discussions/180), from the physical rewiring of the vacuum tubes through low-level machine code to the high-level languages like Python or JavaScript. And now, we're on the edge of the **next revolution**!
62
+
63
+ It's a revolution of writing software in plain human language that is understandable and executable by both humans and machines – and it's going to change everything!
64
+
65
+ The incredible growth in power of microprocessors and Moore's Law have been the driving force behind the ever-more powerful languages, and it's been an amazing journey! Similarly, the large language models (like GPT or Claude) are the next big thing in language technology, and they're set to transform the way we interact with computers.
66
+
67
+ This shift is going to happen, whether we are ready for it or not. Our mission is to make it excellent, not just good.
68
+
69
+ > **Join us in this journey!**
63
70
 
64
- In all of these situations, but especially in 3., the **✨ Promptbook can make your life waaaaaaaaaay easier**.
65
71
 
66
- - [**Separates concerns**](https://github.com/webgptorg/promptbook/discussions/32) between prompt-engineer and programmer, between code files and prompt files, and between prompts and their execution logic. For this purpose, it introduces a new language called [the **💙 Book**](https://github.com/webgptorg/book).
67
- - Book allows you to **focus on the business** logic without having to write code or deal with the technicalities of LLMs.
68
- - **Forget** about **low-level details** like choosing the right model, tokens, context size, `temperature`, `top-k`, `top-p`, or kernel sampling. **Just write your intent** and [**persona**](https://github.com/webgptorg/promptbook/discussions/22) who should be responsible for the task and let the library do the rest.
69
- - We have built-in **orchestration** of [pipeline](https://github.com/webgptorg/promptbook/discussions/64) execution and many tools to make the process easier, more reliable, and more efficient, such as caching, [compilation+preparation](https://github.com/webgptorg/promptbook/discussions/78), [just-in-time fine-tuning](https://github.com/webgptorg/promptbook/discussions/33), [expectation-aware generation](https://github.com/webgptorg/promptbook/discussions/37), [agent adversary expectations](https://github.com/webgptorg/promptbook/discussions/39), and more.
70
- - Sometimes even the best prompts with the best framework like Promptbook `:)` can't avoid the problems. In this case, the library has built-in **[anomaly detection](https://github.com/webgptorg/promptbook/discussions/40) and logging** to help you find and fix the problems.
71
- - Versioning is build in. You can test multiple **A/B versions** of pipelines and see which one works best.
72
- - Promptbook is designed to use [**RAG** (Retrieval-Augmented Generation)](https://github.com/webgptorg/promptbook/discussions/41) and other advanced techniques to bring the context of your business to generic LLM. You can use **knowledge** to improve the quality of the output.
73
72
 
74
73
 
75
74
 
@@ -181,7 +180,9 @@ Reserved words:
181
180
 
182
181
  #### Parameter notation
183
182
 
184
- ### Template
183
+ ### Task
184
+
185
+ ### Task type
185
186
 
186
187
  Todo todo
187
188
 
@@ -242,6 +243,11 @@ Or you can install them separately:
242
243
 
243
244
  ## 📚 Dictionary
244
245
 
246
+
247
+
248
+
249
+
250
+
245
251
  ### 📚 Dictionary
246
252
 
247
253
  The following glossary is used to clarify certain concepts:
@@ -257,6 +263,8 @@ The following glossary is used to clarify certain concepts:
257
263
  - **Retrieval-augmented generation** is a machine learning paradigm where a model generates text by retrieving relevant information from a large database of text. This approach combines the benefits of generative models and retrieval models.
258
264
  - **Longtail** refers to non-common or rare events, items, or entities that are not well-represented in the training data of machine learning models. Longtail items are often challenging for models to predict accurately.
259
265
 
266
+
267
+
260
268
  _Note: This section is not a complete dictionary, but rather a list of general AI / LLM terms that have a connection with Promptbook_
261
269
 
262
270
  #### Promptbook core
@@ -293,7 +301,7 @@ _Note: Thos section is not complete dictionary, more list of general AI / LLM te
293
301
 
294
302
  - [📚 Collection of pipelines](https://github.com/webgptorg/promptbook/discussions/65)
295
303
  - [📯 Pipeline](https://github.com/webgptorg/promptbook/discussions/64)
296
- - [🎺 Pipeline templates](https://github.com/webgptorg/promptbook/discussions/88)
304
+ - [🙇‍♂️ Tasks and pipeline sections](https://github.com/webgptorg/promptbook/discussions/88)
297
305
  - [🤼 Personas](https://github.com/webgptorg/promptbook/discussions/22)
298
306
  - [⭕ Parameters](https://github.com/webgptorg/promptbook/discussions/83)
299
307
  - [🚀 Pipeline execution](https://github.com/webgptorg/promptbook/discussions/84)
@@ -317,6 +325,8 @@ _Note: Thos section is not complete dictionary, more list of general AI / LLM te
317
325
  - [👮 Agent adversary expectations](https://github.com/webgptorg/promptbook/discussions/39)
318
326
  - [view more](https://github.com/webgptorg/promptbook/discussions/categories/concepts)
319
327
 
328
+
329
+
320
330
  ### Terms specific to Promptbook TypeScript implementation
321
331
 
322
332
  - Anonymous mode
package/esm/index.es.js CHANGED
@@ -20,7 +20,7 @@ var BOOK_LANGUAGE_VERSION = '1.0.0';
20
20
  *
21
21
  * @see https://github.com/webgptorg/promptbook
22
22
  */
23
- var PROMPTBOOK_ENGINE_VERSION = '0.75.0-1';
23
+ var PROMPTBOOK_ENGINE_VERSION = '0.75.0-3';
24
24
  /**
25
25
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
26
26
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -269,7 +269,7 @@ function extractAllBlocksFromMarkdown(markdown) {
269
269
  function extractOneBlockFromMarkdown(markdown) {
270
270
  var codeBlocks = extractAllBlocksFromMarkdown(markdown);
271
271
  if (codeBlocks.length !== 1) {
272
- throw new ParseError(spaceTrim(function (block) { return "\n There should be exactly 1 code block in template, found ".concat(codeBlocks.length, " code blocks\n\n ").concat(block(codeBlocks.map(function (block, i) { return "Block ".concat(i + 1, ":\n").concat(block.content); }).join('\n\n\n')), "\n "); }));
272
+ throw new ParseError(spaceTrim(function (block) { return "\n There should be exactly 1 code block in task section, found ".concat(codeBlocks.length, " code blocks\n\n ").concat(block(codeBlocks.map(function (block, i) { return "Block ".concat(i + 1, ":\n").concat(block.content); }).join('\n\n\n')), "\n "); }));
273
273
  }
274
274
  return codeBlocks[0];
275
275
  }
@@ -356,7 +356,7 @@ function extractJsonBlock(markdown) {
356
356
  * TODO: [🏢] Make this logic part of `JsonFormatDefinition` or `isValidJsonString`
357
357
  */
358
358
 
359
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> 
{knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.book.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book.md",formfactorName:"GENERIC",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON 
object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.book.md"}];
359
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./books/prepare-knowledge-from-markdown.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> 
{knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./books/prepare-knowledge-keywords.book.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./books/prepare-knowledge-title.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book.md",formfactorName:"GENERIC",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON 
object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./books/prepare-persona.book.md"}];
360
360
 
361
361
  /**
362
362
  * Prettify the html code
@@ -422,7 +422,7 @@ function pipelineJsonToString(pipelineJson) {
422
422
  return isInput;
423
423
  })), _h = _g.next(); !_h.done; _h = _g.next()) {
424
424
  var parameter = _h.value;
425
- commands.push("INPUT PARAMETER ".concat(templateParameterJsonToString(parameter)));
425
+ commands.push("INPUT PARAMETER ".concat(taskParameterJsonToString(parameter)));
426
426
  }
427
427
  }
428
428
  catch (e_1_1) { e_1 = { error: e_1_1 }; }
@@ -438,7 +438,7 @@ function pipelineJsonToString(pipelineJson) {
438
438
  return isOutput;
439
439
  })), _k = _j.next(); !_k.done; _k = _j.next()) {
440
440
  var parameter = _k.value;
441
- commands.push("OUTPUT PARAMETER ".concat(templateParameterJsonToString(parameter)));
441
+ commands.push("OUTPUT PARAMETER ".concat(taskParameterJsonToString(parameter)));
442
442
  }
443
443
  }
444
444
  catch (e_2_1) { e_2 = { error: e_2_1 }; }
@@ -452,12 +452,12 @@ function pipelineJsonToString(pipelineJson) {
452
452
  pipelineString += commands.map(function (command) { return "- ".concat(command); }).join('\n');
453
453
  try {
454
454
  for (var tasks_1 = __values(tasks), tasks_1_1 = tasks_1.next(); !tasks_1_1.done; tasks_1_1 = tasks_1.next()) {
455
- var template = tasks_1_1.value;
455
+ var task = tasks_1_1.value;
456
456
  var
457
457
  /* Note: Not using:> name, */
458
- title_1 = template.title, description_1 = template.description,
458
+ title_1 = task.title, description_1 = task.description,
459
459
  /* Note: dependentParameterNames, */
460
- jokers = template.jokerParameterNames, taskType = template.taskType, content = template.content, postprocessing = template.postprocessingFunctionNames, expectations = template.expectations, format = template.format, resultingParameterName = template.resultingParameterName;
460
+ jokers = task.jokerParameterNames, taskType = task.taskType, content = task.content, postprocessing = task.postprocessingFunctionNames, expectations = task.expectations, format = task.format, resultingParameterName = task.resultingParameterName;
461
461
  pipelineString += '\n\n';
462
462
  pipelineString += "## ".concat(title_1);
463
463
  if (description_1) {
@@ -467,9 +467,10 @@ function pipelineJsonToString(pipelineJson) {
467
467
  var commands_1 = [];
468
468
  var contentLanguage = 'text';
469
469
  if (taskType === 'PROMPT_TASK') {
470
- var modelRequirements = template.modelRequirements;
470
+ var modelRequirements = task.modelRequirements;
471
471
  var _l = modelRequirements || {}, modelName = _l.modelName, modelVariant = _l.modelVariant;
472
- commands_1.push("EXECUTE PROMPT TEMPLATE");
472
+ // Note: Do nothing, it is default
473
+ // commands.push(`PROMPT`);
473
474
  if (modelVariant) {
474
475
  commands_1.push("MODEL VARIANT ".concat(capitalize(modelVariant)));
475
476
  }
@@ -482,16 +483,16 @@ function pipelineJsonToString(pipelineJson) {
482
483
  // Note: Nothing special here
483
484
  }
484
485
  else if (taskType === 'SCRIPT_TASK') {
485
- commands_1.push("SCRIPT TEMPLATE");
486
- if (template.contentLanguage) {
487
- contentLanguage = template.contentLanguage;
486
+ commands_1.push("SCRIPT");
487
+ if (task.contentLanguage) {
488
+ contentLanguage = task.contentLanguage;
488
489
  }
489
490
  else {
490
491
  contentLanguage = '';
491
492
  }
492
493
  }
493
494
  else if (taskType === 'DIALOG_TASK') {
494
- commands_1.push("DIALOG TEMPLATE");
495
+ commands_1.push("DIALOG");
495
496
  // Note: Nothing special here
496
497
  } // <- }else if([🅱]
497
498
  if (jokers) {
@@ -566,7 +567,7 @@ function pipelineJsonToString(pipelineJson) {
566
567
  pipelineString += '\n';
567
568
  pipelineString += '```';
568
569
  pipelineString += '\n\n';
569
- pipelineString += "`-> {".concat(resultingParameterName, "}`"); // <- TODO: [main] !!! If the parameter here has description, add it and use templateParameterJsonToString
570
+ pipelineString += "`-> {".concat(resultingParameterName, "}`"); // <- TODO: [main] !!! If the parameter here has description, add it and use taskParameterJsonToString
570
571
  }
571
572
  }
572
573
  catch (e_3_1) { e_3 = { error: e_3_1 }; }
@@ -581,8 +582,8 @@ function pipelineJsonToString(pipelineJson) {
581
582
  /**
582
583
  * @private internal utility of `pipelineJsonToString`
583
584
  */
584
- function templateParameterJsonToString(templateParameterJson) {
585
- var name = templateParameterJson.name, description = templateParameterJson.description;
585
+ function taskParameterJsonToString(taskParameterJson) {
586
+ var name = taskParameterJson.name, description = taskParameterJson.description;
586
587
  var parameterString = "{".concat(name, "}");
587
588
  if (description) {
588
589
  parameterString = "".concat(parameterString, " ").concat(description);
@@ -590,7 +591,7 @@ function templateParameterJsonToString(templateParameterJson) {
590
591
  return parameterString;
591
592
  }
592
593
  /**
593
- * TODO: [🛋] Implement new features and commands into `pipelineJsonToString` + `templateParameterJsonToString` , use `stringifyCommand`
594
+ * TODO: [🛋] Implement new features and commands into `pipelineJsonToString` + `taskParameterJsonToString` , use `stringifyCommand`
594
595
  * TODO: [🧠] Is there a way to auto-detect missing features in pipelineJsonToString
595
596
  * TODO: [🏛] Maybe make some markdown builder
596
597
  * TODO: [🏛] Escape all
@@ -1288,20 +1289,20 @@ function validatePipelineCore(pipeline) {
1288
1289
  }
1289
1290
  finally { if (e_3) throw e_3.error; }
1290
1291
  }
1291
- var unresovedTemplates = __spreadArray([], __read(pipeline.tasks), false);
1292
+ var unresovedTasks = __spreadArray([], __read(pipeline.tasks), false);
1292
1293
  var loopLimit = LOOP_LIMIT;
1293
1294
  var _loop_3 = function () {
1294
1295
  if (loopLimit-- < 0) {
1295
1296
  // Note: Really UnexpectedError not LimitReachedError - this should not happen and be caught below
1296
1297
  throw new UnexpectedError(spaceTrim$1(function (block) { return "\n Loop limit reached during detection of circular dependencies in `validatePipeline`\n\n ".concat(block(pipelineIdentification), "\n "); }));
1297
1298
  }
1298
- var currentlyResovedTemplates = unresovedTemplates.filter(function (task) {
1299
+ var currentlyResovedTasks = unresovedTasks.filter(function (task) {
1299
1300
  return task.dependentParameterNames.every(function (name) { return resovedParameters.includes(name); });
1300
1301
  });
1301
- if (currentlyResovedTemplates.length === 0) {
1302
+ if (currentlyResovedTasks.length === 0) {
1302
1303
  throw new PipelineLogicError(
1303
1304
  // TODO: [🐎] DRY
1304
- spaceTrim$1(function (block) { return "\n\n Can not resolve some parameters:\n Either you are using a parameter that is not defined, or there are some circular dependencies.\n\n ".concat(block(pipelineIdentification), "\n\n **Can not resolve:**\n ").concat(block(unresovedTemplates
1305
+ spaceTrim$1(function (block) { return "\n\n Can not resolve some parameters:\n Either you are using a parameter that is not defined, or there are some circular dependencies.\n\n ".concat(block(pipelineIdentification), "\n\n **Can not resolve:**\n ").concat(block(unresovedTasks
1305
1306
  .map(function (_a) {
1306
1307
  var resultingParameterName = _a.resultingParameterName, dependentParameterNames = _a.dependentParameterNames;
1307
1308
  return "- Parameter `{".concat(resultingParameterName, "}` which depends on ").concat(dependentParameterNames
@@ -1320,13 +1321,13 @@ function validatePipelineCore(pipeline) {
1320
1321
  .map(function (name) { return "- Parameter `{".concat(name, "}`"); })
1321
1322
  .join('\n')), "\n\n\n "); }));
1322
1323
  }
1323
- resovedParameters = __spreadArray(__spreadArray([], __read(resovedParameters), false), __read(currentlyResovedTemplates.map(function (_a) {
1324
+ resovedParameters = __spreadArray(__spreadArray([], __read(resovedParameters), false), __read(currentlyResovedTasks.map(function (_a) {
1324
1325
  var resultingParameterName = _a.resultingParameterName;
1325
1326
  return resultingParameterName;
1326
1327
  })), false);
1327
- unresovedTemplates = unresovedTemplates.filter(function (task) { return !currentlyResovedTemplates.includes(task); });
1328
+ unresovedTasks = unresovedTasks.filter(function (task) { return !currentlyResovedTasks.includes(task); });
1328
1329
  };
1329
- while (unresovedTemplates.length > 0) {
1330
+ while (unresovedTasks.length > 0) {
1330
1331
  _loop_3();
1331
1332
  }
1332
1333
  // TODO: !!!!!! Test that pipeline interface implements declared formfactor interface
@@ -1388,7 +1389,7 @@ var PipelineUrlError = /** @class */ (function (_super) {
1388
1389
  /**
1389
1390
  * Parses the task and returns the list of all parameter names
1390
1391
  *
1391
- * @param template the task with parameters in {curly} braces
1392
+ * @param template the string template with parameters in {curly} braces
1392
1393
  * @returns the list of parameter names
1393
1394
  * @public exported from `@promptbook/utils`
1394
1395
  */
@@ -1422,13 +1423,13 @@ function unpreparePipeline(pipeline) {
1422
1423
  var personas = pipeline.personas, knowledgeSources = pipeline.knowledgeSources, tasks = pipeline.tasks;
1423
1424
  personas = personas.map(function (persona) { return (__assign(__assign({}, persona), { modelRequirements: undefined, preparationIds: undefined })); });
1424
1425
  knowledgeSources = knowledgeSources.map(function (knowledgeSource) { return (__assign(__assign({}, knowledgeSource), { preparationIds: undefined })); });
1425
- tasks = tasks.map(function (template) {
1426
- var dependentParameterNames = template.dependentParameterNames;
1427
- var parameterNames = extractParameterNames(template.preparedContent || '');
1426
+ tasks = tasks.map(function (task) {
1427
+ var dependentParameterNames = task.dependentParameterNames;
1428
+ var parameterNames = extractParameterNames(task.preparedContent || '');
1428
1429
  dependentParameterNames = dependentParameterNames.filter(function (dependentParameterName) { return !parameterNames.has(dependentParameterName); });
1429
- var templateUnprepared = __assign(__assign({}, template), { dependentParameterNames: dependentParameterNames });
1430
- delete templateUnprepared.preparedContent;
1431
- return templateUnprepared;
1430
+ var taskUnprepared = __assign(__assign({}, task), { dependentParameterNames: dependentParameterNames });
1431
+ delete taskUnprepared.preparedContent;
1432
+ return taskUnprepared;
1432
1433
  });
1433
1434
  return $asDeeplyFrozenSerializableJson('Unprepared PipelineJson', __assign(__assign({}, pipeline), { tasks: tasks, knowledgeSources: knowledgeSources, knowledgePieces: [], personas: personas, preparations: [] }));
1434
1435
  }
@@ -2142,7 +2143,7 @@ function isPipelinePrepared(pipeline) {
2142
2143
  * TODO: [🐠] Maybe base this on `makeValidator`
2143
2144
  * TODO: [🧊] Pipeline can be partially prepared, this should return true ONLY if fully prepared
2144
2145
  * TODO: [🧿] Maybe do same process with same granularity and subfinctions as `preparePipeline`
2145
- * - [🏍] ? Is context in each template
2146
+ * - [🏍] ? Is context in each task
2146
2147
  * - [♨] Are examples prepared
2147
2148
  * - [♨] Are tasks prepared
2148
2149
  */
@@ -3516,10 +3517,10 @@ function prepareTasks(pipeline, tools, options) {
3516
3517
  _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? DEFAULT_MAX_PARALLEL_COUNT : _a;
3517
3518
  tasks = pipeline.tasks, pipeline.parameters, knowledgePiecesCount = pipeline.knowledgePiecesCount;
3518
3519
  tasksPrepared = new Array(tasks.length);
3519
- return [4 /*yield*/, forEachAsync(tasks, { maxParallelCount: maxParallelCount /* <- TODO: [🪂] When there are subtasks, this maximul limit can be broken */ }, function (template, index) { return __awaiter(_this, void 0, void 0, function () {
3520
- var dependentParameterNames, preparedContent, preparedTemplate;
3520
+ return [4 /*yield*/, forEachAsync(tasks, { maxParallelCount: maxParallelCount /* <- TODO: [🪂] When there are subtasks, this maximul limit can be broken */ }, function (task, index) { return __awaiter(_this, void 0, void 0, function () {
3521
+ var dependentParameterNames, preparedContent, preparedTask;
3521
3522
  return __generator(this, function (_a) {
3522
- dependentParameterNames = template.dependentParameterNames;
3523
+ dependentParameterNames = task.dependentParameterNames;
3523
3524
  preparedContent = undefined;
3524
3525
  if (knowledgePiecesCount > 0 && !dependentParameterNames.includes('knowledge')) {
3525
3526
  preparedContent = spaceTrim$1("\n {content}\n\n ## Knowledge\n\n {knowledge}\n ");
@@ -3528,8 +3529,8 @@ function prepareTasks(pipeline, tools, options) {
3528
3529
  'knowledge',
3529
3530
  ], false);
3530
3531
  }
3531
- preparedTemplate = __assign(__assign({}, template), { dependentParameterNames: dependentParameterNames, preparedContent: preparedContent });
3532
- tasksPrepared[index] = preparedTemplate;
3532
+ preparedTask = __assign(__assign({}, task), { dependentParameterNames: dependentParameterNames, preparedContent: preparedContent });
3533
+ tasksPrepared[index] = preparedTask;
3533
3534
  return [2 /*return*/];
3534
3535
  });
3535
3536
  }); })];
@@ -3541,8 +3542,8 @@ function prepareTasks(pipeline, tools, options) {
3541
3542
  });
3542
3543
  }
3543
3544
  /**
3544
- * TODO: [🧠] Add context to each template (if missing)
3545
- * TODO: [🧠] What is better name `prepareTemplate` or `prepareTemplateAndParameters`
3545
+ * TODO: [🧠] Add context to each task (if missing)
3546
+ * TODO: [🧠] What is better name `prepareTask` or `prepareTaskAndParameters`
3546
3547
  * TODO: [♨][main] !!! Prepare index the examples and maybe tasks
3547
3548
  * TODO: Write tests for `preparePipeline`
3548
3549
  * TODO: [🏏] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
@@ -3628,7 +3629,7 @@ function preparePipeline(pipeline, tools, options) {
3628
3629
  })];
3629
3630
  case 3:
3630
3631
  tasksPrepared = (_c.sent()).tasksPrepared;
3631
- // ----- /Templates preparation -----
3632
+ // ----- /Tasks preparation -----
3632
3633
  // Note: Count total usage
3633
3634
  currentPreparation.usage = llmToolsWithUsage.getTotalUsage();
3634
3635
  return [2 /*return*/, $asDeeplyFrozenSerializableJson('Prepared PipelineJson', __assign(__assign({}, clonePipeline(pipeline)), { tasks: __spreadArray([], __read(tasksPrepared), false),
@@ -3697,16 +3698,16 @@ function extractVariables(script) {
3697
3698
  */
3698
3699
 
3699
3700
  /**
3700
- * Parses the template and returns the set of all used parameters
3701
+ * Parses the task and returns the set of all used parameters
3701
3702
  *
3702
- * @param template the template with used parameters
3703
+ * @param task the task with used parameters
3703
3704
  * @returns the set of parameter names
3704
3705
  * @throws {ParseError} if the script is invalid
3705
3706
  * @public exported from `@promptbook/utils`
3706
3707
  */
3707
- function extractParameterNamesFromTask(template) {
3708
+ function extractParameterNamesFromTask(task) {
3708
3709
  var e_1, _a, e_2, _b, e_3, _c, e_4, _d;
3709
- var title = template.title, description = template.description, taskType = template.taskType, content = template.content, preparedContent = template.preparedContent, jokerParameterNames = template.jokerParameterNames, foreach = template.foreach;
3710
+ var title = task.title, description = task.description, taskType = task.taskType, content = task.content, preparedContent = task.preparedContent, jokerParameterNames = task.jokerParameterNames, foreach = task.foreach;
3710
3711
  var parameterNames = new Set();
3711
3712
  try {
3712
3713
  for (var _e = __values(__spreadArray(__spreadArray(__spreadArray(__spreadArray([], __read(extractParameterNames(title)), false), __read(extractParameterNames(description || '')), false), __read(extractParameterNames(content)), false), __read(extractParameterNames(preparedContent || '')), false)), _f = _e.next(); !_f.done; _f = _e.next()) {