@promptbook/pdf 0.75.0-2 → 0.75.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53) hide show
  1. package/README.md +46 -30
  2. package/esm/index.es.js +45 -44
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/src/_packages/core.index.d.ts +16 -4
  5. package/esm/typings/src/_packages/types.index.d.ts +8 -6
  6. package/esm/typings/src/_packages/utils.index.d.ts +2 -0
  7. package/esm/typings/src/commands/EXPECT/ExpectCommand.d.ts +1 -1
  8. package/esm/typings/src/commands/SECTION/SectionCommand.d.ts +1 -1
  9. package/esm/typings/src/commands/_common/types/CommandParser.d.ts +0 -2
  10. package/esm/typings/src/config.d.ts +34 -2
  11. package/esm/typings/src/conversion/pipelineJsonToString.d.ts +1 -1
  12. package/esm/typings/src/conversion/prettify/renderPipelineMermaidOptions.d.ts +2 -2
  13. package/esm/typings/src/conversion/utils/extractParameterNamesFromTask.d.ts +3 -3
  14. package/esm/typings/src/conversion/utils/renameParameter.d.ts +2 -2
  15. package/esm/typings/src/dialogs/simple-prompt/SimplePromptInterfaceTools.d.ts +1 -1
  16. package/esm/typings/src/execution/ScriptExecutionTools.d.ts +1 -1
  17. package/esm/typings/src/formfactors/_boilerplate/BoilerplateFormfactorDefinition.d.ts +2 -2
  18. package/esm/typings/src/formfactors/_common/AbstractFormfactorDefinition.d.ts +3 -0
  19. package/esm/typings/src/formfactors/chatbot/ChatbotFormfactorDefinition.d.ts +32 -2
  20. package/esm/typings/src/formfactors/generator/GeneratorFormfactorDefinition.d.ts +14 -0
  21. package/esm/typings/src/formfactors/generic/GenericFormfactorDefinition.d.ts +2 -2
  22. package/esm/typings/src/formfactors/index.d.ts +72 -10
  23. package/esm/typings/src/formfactors/matcher/MatcherFormfactorDefinition.d.ts +2 -2
  24. package/esm/typings/src/formfactors/sheets/SheetsFormfactorDefinition.d.ts +12 -2
  25. package/esm/typings/src/formfactors/translator/TranslatorFormfactorDefinition.d.ts +12 -2
  26. package/esm/typings/src/pipeline/PipelineInterface/PipelineInterface.d.ts +5 -4
  27. package/esm/typings/src/pipeline/PipelineInterface/constants.d.ts +2 -2
  28. package/esm/typings/src/pipeline/PipelineJson/{TaskJsonCommon.d.ts → CommonTaskJson.d.ts} +13 -13
  29. package/esm/typings/src/pipeline/PipelineJson/DialogTaskJson.d.ts +2 -2
  30. package/esm/typings/src/pipeline/PipelineJson/ParameterJson.d.ts +2 -0
  31. package/esm/typings/src/pipeline/PipelineJson/PersonaJson.d.ts +1 -1
  32. package/esm/typings/src/pipeline/PipelineJson/PipelineJson.d.ts +2 -2
  33. package/esm/typings/src/pipeline/PipelineJson/PromptTaskJson.d.ts +2 -2
  34. package/esm/typings/src/pipeline/PipelineJson/ScriptTaskJson.d.ts +2 -2
  35. package/esm/typings/src/pipeline/PipelineJson/SimpleTaskJson.d.ts +2 -2
  36. package/esm/typings/src/pipeline/PipelineJson/TaskJson.d.ts +1 -1
  37. package/esm/typings/src/pipeline/PipelineString.d.ts +1 -1
  38. package/esm/typings/src/prepare/isPipelinePrepared.d.ts +1 -1
  39. package/esm/typings/src/prepare/prepareTasks.d.ts +5 -5
  40. package/esm/typings/src/types/Prompt.d.ts +3 -3
  41. package/esm/typings/src/types/SectionType.d.ts +21 -0
  42. package/esm/typings/src/types/TaskProgress.d.ts +1 -1
  43. package/esm/typings/src/types/TaskType.d.ts +15 -0
  44. package/esm/typings/src/types/typeAliases.d.ts +1 -1
  45. package/esm/typings/src/utils/organization/TODO_remove_as.d.ts +6 -0
  46. package/esm/typings/src/utils/organization/spaceTrim.d.ts +11 -0
  47. package/esm/typings/src/utils/parameters/extractParameterNames.d.ts +1 -1
  48. package/esm/typings/src/version.d.ts +1 -1
  49. package/package.json +2 -2
  50. package/umd/index.umd.js +45 -44
  51. package/umd/index.umd.js.map +1 -1
  52. package/esm/typings/src/commands/SECTION/SectionType.d.ts +0 -13
  53. /package/esm/typings/{promptbook-collection → books}/index.d.ts +0 -0
package/README.md CHANGED
@@ -2,8 +2,6 @@
2
2
 
3
3
  # ![Promptbook logo - cube with letters P and B](./other/design/logo-h1.png) Promptbook
4
4
 
5
- Build responsible, controlled and transparent applications on top of LLM models!
6
-
7
5
 
8
6
 
9
7
 
@@ -25,10 +23,6 @@ Build responsible, controlled and transparent applications on top of LLM models!
25
23
 
26
24
 
27
25
 
28
- <blockquote style="color: #ff8811">
29
- <b>⚠ Warning:</b> This is a pre-release version of the library. It is not yet ready for production use. Please look at <a href="https://www.npmjs.com/package/@promptbook/core?activeTab=versions">latest stable release</a>.
30
- </blockquote>
31
-
32
26
  ## 📦 Package `@promptbook/pdf`
33
27
 
34
28
  - Promptbooks are [divided into several](#-packages) packages, all are published from [single monorepo](https://github.com/webgptorg/promptbook).
@@ -53,28 +47,33 @@ Read knowledge from `.pdf` documents
53
47
 
54
48
  Rest of the documentation is common for **entire promptbook ecosystem**:
55
49
 
56
- ## 🤍 The Promptbook Whitepaper
57
50
 
58
- If you have a simple, single prompt for ChatGPT, GPT-4, Anthropic Claude, Google Gemini, Llama 3, or whatever, it doesn't matter how you integrate it. Whether it's calling a REST API directly, using the SDK, hardcoding the prompt into the source code, or importing a text file, the process remains the same.
59
51
 
60
- But often you will struggle with the **limitations of LLMs**, such as **hallucinations, off-topic responses, poor quality output, language and prompt drift, word repetition repetition repetition repetition or misuse, lack of context, or just plain w𝒆𝐢rd resp0nses**. When this happens, you generally have three options:
61
52
 
62
- 1. **Fine-tune** the model to your specifications or even train your own.
63
- 2. **Prompt-engineer** the prompt to the best shape you can achieve.
64
- 3. Orchestrate **multiple prompts** in a [pipeline](https://github.com/webgptorg/promptbook/discussions/64) to get the best result.
53
+ ## 🤍 The Book Abstract
54
+
55
+ **It's time for a paradigm shift! The future of software is in plain English, French or Latin.**
56
+
57
+ During the computer revolution, we have seen [multiple generations of computer languages](https://github.com/webgptorg/promptbook/discussions/180), from the physical rewiring of the vacuum tubes through low-level machine code to the high-level languages like Python or JavaScript. And now, we're on the edge of the **next revolution**!
58
+
59
+ It's a revolution of writing software in plain human language that is understandable and executable by both humans and machines – and it's going to change everything!
60
+
61
+ The incredible growth in power of microprocessors and the Moore's Law have been the driving force behind the ever-more powerful languages, and it's been an amazing journey! Similarly, the large language models (like GPT or Claude) are the next big thing in language technology, and they're set to transform the way we interact with computers.
65
62
 
66
- In all of these situations, but especially in 3., the **✨ Promptbook can make your life waaaaaaaaaay easier**.
63
+ This shift is going to happen, whether we are ready for it or not. Our mission is to make it excellent, not just good.
67
64
 
68
- - [**Separates concerns**](https://github.com/webgptorg/promptbook/discussions/32) between prompt-engineer and programmer, between code files and prompt files, and between prompts and their execution logic. For this purpose, it introduces a new language called [the **💙 Book**](https://github.com/webgptorg/book).
69
- - Book allows you to **focus on the business** logic without having to write code or deal with the technicalities of LLMs.
70
- - **Forget** about **low-level details** like choosing the right model, tokens, context size, `temperature`, `top-k`, `top-p`, or kernel sampling. **Just write your intent** and [**persona**](https://github.com/webgptorg/promptbook/discussions/22) who should be responsible for the task and let the library do the rest.
71
- - We have built-in **orchestration** of [pipeline](https://github.com/webgptorg/promptbook/discussions/64) execution and many tools to make the process easier, more reliable, and more efficient, such as caching, [compilation+preparation](https://github.com/webgptorg/promptbook/discussions/78), [just-in-time fine-tuning](https://github.com/webgptorg/promptbook/discussions/33), [expectation-aware generation](https://github.com/webgptorg/promptbook/discussions/37), [agent adversary expectations](https://github.com/webgptorg/promptbook/discussions/39), and more.
72
- - Sometimes even the best prompts with the best framework like Promptbook `:)` can't avoid the problems. In this case, the library has built-in **[anomaly detection](https://github.com/webgptorg/promptbook/discussions/40) and logging** to help you find and fix the problems.
73
- - Versioning is build in. You can test multiple **A/B versions** of pipelines and see which one works best.
74
- - Promptbook is designed to use [**RAG** (Retrieval-Augmented Generation)](https://github.com/webgptorg/promptbook/discussions/41) and other advanced techniques to bring the context of your business to generic LLM. You can use **knowledge** to improve the quality of the output.
65
+ **Join us in this journey!**
75
66
 
76
67
 
77
68
 
69
+ ## 🚀 Get started
70
+
71
+ Take a look at the simple starter kit with books integrated into the **Hello World** sample applications:
72
+
73
+ - [Hello Book](https://github.com/webgptorg/hello-world)
74
+ - [Hello Book in Node.js](https://github.com/webgptorg/hello-world-node-js)
75
+ - [Hello Book in Next.js](https://github.com/webgptorg/hello-world-next-js)
76
+
78
77
 
79
78
 
80
79
 
@@ -93,7 +92,7 @@ Promptbook project is ecosystem of multiple projects and tools, following is a l
93
92
  <tbody>
94
93
  <tr>
95
94
  <td>Core</td>
96
- <td>Promptbook core is a description and documentation of basic innerworkings how should be Promptbook implemented and defines which fetures must be descriable by book language</td>
95
+ <td>Promptbook Core is a description and documentation of the basic concepts, ideas and inner workings of how Promptbook should be implemented, and defines what features must be describable by book language.</td>
97
96
  <td rowspan=2>https://ptbk.io<br/>https://github.com/webgptorg/book</td>
98
97
  </tr>
99
98
  <tr>
@@ -104,17 +103,23 @@ Promptbook project is ecosystem of multiple projects and tools, following is a l
104
103
  </tr>
105
104
  <tr>
106
105
  <td>Promptbook typescript project</td>
107
- <td>Implementation of Promptbook in TypeScript published into multiple packages to NPM</td>
106
+ <td>Promptbook implementation in TypeScript released as multiple NPM packages</td>
108
107
  <td>https://github.com/webgptorg/promptbook + Multiple packages on NPM</td>
109
108
  </tr>
110
109
  <tr>
111
110
  <td>Promptbook studio</td>
112
- <td>No-code studio to write book without need to write even the markdown</td>
113
- <td rowspan=2>https://promptbook.studio<br/>https://github.com/hejny/promptbook-studio</td>
114
- </tr>
115
- <tr>
116
- <td>Promptbook miniapps</td>
117
- <td>Builder of LLM miniapps from book notation</td>
111
+ <td>Studio to write Books and instantly publish them as miniapps</td>
112
+ <td>
113
+ https://promptbook.studio<br/>
114
+ https://github.com/hejny/promptbook-studio</td>
115
+ </tr><tr>
116
+ <td>Hello World</td>
117
+ <td>Simple starter kit with Books integrated into the sample applications</td>
118
+ <td>
119
+ https://github.com/webgptorg/hello-world<br/>
120
+ https://github.com/webgptorg/hello-world-node-js<br/>
121
+ https://github.com/webgptorg/hello-world-next-js
122
+ </td>
118
123
  </tr>
119
124
  </tbody>
120
125
  </table>
@@ -183,7 +188,9 @@ Reserved words:
183
188
 
184
189
  #### Parameter notation
185
190
 
186
- ### Template
191
+ ### Task
192
+
193
+ ### Task type
187
194
 
188
195
  Todo todo
189
196
 
@@ -244,6 +251,11 @@ Or you can install them separately:
244
251
 
245
252
  ## 📚 Dictionary
246
253
 
254
+
255
+
256
+
257
+
258
+
247
259
  ### 📚 Dictionary
248
260
 
249
261
  The following glossary is used to clarify certain concepts:
@@ -259,6 +271,8 @@ The following glossary is used to clarify certain concepts:
259
271
  - **Retrieval-augmented generation** is a machine learning paradigm where a model generates text by retrieving relevant information from a large database of text. This approach combines the benefits of generative models and retrieval models.
260
272
  - **Longtail** refers to non-common or rare events, items, or entities that are not well-represented in the training data of machine learning models. Longtail items are often challenging for models to predict accurately.
261
273
 
274
+
275
+
262
276
  _Note: This section is not a complete dictionary, but rather a list of general AI / LLM terms that have a connection with Promptbook_
263
277
 
264
278
  #### Promptbook core
@@ -295,7 +309,7 @@ _Note: Thos section is not complete dictionary, more list of general AI / LLM te
295
309
 
296
310
  - [📚 Collection of pipelines](https://github.com/webgptorg/promptbook/discussions/65)
297
311
  - [📯 Pipeline](https://github.com/webgptorg/promptbook/discussions/64)
298
- - [🎺 Pipeline templates](https://github.com/webgptorg/promptbook/discussions/88)
312
+ - [🙇‍♂️ Tasks and pipeline sections](https://github.com/webgptorg/promptbook/discussions/88)
299
313
  - [🤼 Personas](https://github.com/webgptorg/promptbook/discussions/22)
300
314
  - [⭕ Parameters](https://github.com/webgptorg/promptbook/discussions/83)
301
315
  - [🚀 Pipeline execution](https://github.com/webgptorg/promptbook/discussions/84)
@@ -319,6 +333,8 @@ _Note: Thos section is not complete dictionary, more list of general AI / LLM te
319
333
  - [👮 Agent adversary expectations](https://github.com/webgptorg/promptbook/discussions/39)
320
334
  - [view more](https://github.com/webgptorg/promptbook/discussions/categories/concepts)
321
335
 
336
+
337
+
322
338
  ### Terms specific to Promptbook TypeScript implementation
323
339
 
324
340
  - Anonymous mode
package/esm/index.es.js CHANGED
@@ -20,7 +20,7 @@ var BOOK_LANGUAGE_VERSION = '1.0.0';
20
20
  *
21
21
  * @see https://github.com/webgptorg/promptbook
22
22
  */
23
- var PROMPTBOOK_ENGINE_VERSION = '0.75.0-1';
23
+ var PROMPTBOOK_ENGINE_VERSION = '0.75.0';
24
24
  /**
25
25
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
26
26
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -174,7 +174,7 @@ var NotYetImplementedError = /** @class */ (function (_super) {
174
174
  function TODO_USE() {
175
175
  }
176
176
 
177
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> 
{knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.book.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book.md",formfactorName:"GENERIC",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON 
object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.book.md"}];
177
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./books/prepare-knowledge-from-markdown.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> 
{knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./books/prepare-knowledge-keywords.book.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./books/prepare-knowledge-title.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book.md",formfactorName:"GENERIC",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON 
object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./books/prepare-persona.book.md"}];
178
178
 
179
179
  /**
180
180
  * Prettify the html code
@@ -249,7 +249,7 @@ function pipelineJsonToString(pipelineJson) {
249
249
  return isInput;
250
250
  })), _h = _g.next(); !_h.done; _h = _g.next()) {
251
251
  var parameter = _h.value;
252
- commands.push("INPUT PARAMETER ".concat(templateParameterJsonToString(parameter)));
252
+ commands.push("INPUT PARAMETER ".concat(taskParameterJsonToString(parameter)));
253
253
  }
254
254
  }
255
255
  catch (e_1_1) { e_1 = { error: e_1_1 }; }
@@ -265,7 +265,7 @@ function pipelineJsonToString(pipelineJson) {
265
265
  return isOutput;
266
266
  })), _k = _j.next(); !_k.done; _k = _j.next()) {
267
267
  var parameter = _k.value;
268
- commands.push("OUTPUT PARAMETER ".concat(templateParameterJsonToString(parameter)));
268
+ commands.push("OUTPUT PARAMETER ".concat(taskParameterJsonToString(parameter)));
269
269
  }
270
270
  }
271
271
  catch (e_2_1) { e_2 = { error: e_2_1 }; }
@@ -279,12 +279,12 @@ function pipelineJsonToString(pipelineJson) {
279
279
  pipelineString += commands.map(function (command) { return "- ".concat(command); }).join('\n');
280
280
  try {
281
281
  for (var tasks_1 = __values(tasks), tasks_1_1 = tasks_1.next(); !tasks_1_1.done; tasks_1_1 = tasks_1.next()) {
282
- var template = tasks_1_1.value;
282
+ var task = tasks_1_1.value;
283
283
  var
284
284
  /* Note: Not using:> name, */
285
- title_1 = template.title, description_1 = template.description,
285
+ title_1 = task.title, description_1 = task.description,
286
286
  /* Note: dependentParameterNames, */
287
- jokers = template.jokerParameterNames, taskType = template.taskType, content = template.content, postprocessing = template.postprocessingFunctionNames, expectations = template.expectations, format = template.format, resultingParameterName = template.resultingParameterName;
287
+ jokers = task.jokerParameterNames, taskType = task.taskType, content = task.content, postprocessing = task.postprocessingFunctionNames, expectations = task.expectations, format = task.format, resultingParameterName = task.resultingParameterName;
288
288
  pipelineString += '\n\n';
289
289
  pipelineString += "## ".concat(title_1);
290
290
  if (description_1) {
@@ -294,9 +294,10 @@ function pipelineJsonToString(pipelineJson) {
294
294
  var commands_1 = [];
295
295
  var contentLanguage = 'text';
296
296
  if (taskType === 'PROMPT_TASK') {
297
- var modelRequirements = template.modelRequirements;
297
+ var modelRequirements = task.modelRequirements;
298
298
  var _l = modelRequirements || {}, modelName = _l.modelName, modelVariant = _l.modelVariant;
299
- commands_1.push("EXECUTE PROMPT TEMPLATE");
299
+ // Note: Do nothing, it is default
300
+ // commands.push(`PROMPT`);
300
301
  if (modelVariant) {
301
302
  commands_1.push("MODEL VARIANT ".concat(capitalize(modelVariant)));
302
303
  }
@@ -309,16 +310,16 @@ function pipelineJsonToString(pipelineJson) {
309
310
  // Note: Nothing special here
310
311
  }
311
312
  else if (taskType === 'SCRIPT_TASK') {
312
- commands_1.push("SCRIPT TEMPLATE");
313
- if (template.contentLanguage) {
314
- contentLanguage = template.contentLanguage;
313
+ commands_1.push("SCRIPT");
314
+ if (task.contentLanguage) {
315
+ contentLanguage = task.contentLanguage;
315
316
  }
316
317
  else {
317
318
  contentLanguage = '';
318
319
  }
319
320
  }
320
321
  else if (taskType === 'DIALOG_TASK') {
321
- commands_1.push("DIALOG TEMPLATE");
322
+ commands_1.push("DIALOG");
322
323
  // Note: Nothing special here
323
324
  } // <- }else if([🅱]
324
325
  if (jokers) {
@@ -393,7 +394,7 @@ function pipelineJsonToString(pipelineJson) {
393
394
  pipelineString += '\n';
394
395
  pipelineString += '```';
395
396
  pipelineString += '\n\n';
396
- pipelineString += "`-> {".concat(resultingParameterName, "}`"); // <- TODO: [main] !!! If the parameter here has description, add it and use templateParameterJsonToString
397
+ pipelineString += "`-> {".concat(resultingParameterName, "}`"); // <- TODO: [main] !!! If the parameter here has description, add it and use taskParameterJsonToString
397
398
  }
398
399
  }
399
400
  catch (e_3_1) { e_3 = { error: e_3_1 }; }
@@ -408,8 +409,8 @@ function pipelineJsonToString(pipelineJson) {
408
409
  /**
409
410
  * @private internal utility of `pipelineJsonToString`
410
411
  */
411
- function templateParameterJsonToString(templateParameterJson) {
412
- var name = templateParameterJson.name, description = templateParameterJson.description;
412
+ function taskParameterJsonToString(taskParameterJson) {
413
+ var name = taskParameterJson.name, description = taskParameterJson.description;
413
414
  var parameterString = "{".concat(name, "}");
414
415
  if (description) {
415
416
  parameterString = "".concat(parameterString, " ").concat(description);
@@ -417,7 +418,7 @@ function templateParameterJsonToString(templateParameterJson) {
417
418
  return parameterString;
418
419
  }
419
420
  /**
420
- * TODO: [🛋] Implement new features and commands into `pipelineJsonToString` + `templateParameterJsonToString` , use `stringifyCommand`
421
+ * TODO: [🛋] Implement new features and commands into `pipelineJsonToString` + `taskParameterJsonToString` , use `stringifyCommand`
421
422
  * TODO: [🧠] Is there a way to auto-detect missing features in pipelineJsonToString
422
423
  * TODO: [🏛] Maybe make some markdown builder
423
424
  * TODO: [🏛] Escape all
@@ -1121,20 +1122,20 @@ function validatePipelineCore(pipeline) {
1121
1122
  }
1122
1123
  finally { if (e_3) throw e_3.error; }
1123
1124
  }
1124
- var unresovedTemplates = __spreadArray([], __read(pipeline.tasks), false);
1125
+ var unresovedTasks = __spreadArray([], __read(pipeline.tasks), false);
1125
1126
  var loopLimit = LOOP_LIMIT;
1126
1127
  var _loop_3 = function () {
1127
1128
  if (loopLimit-- < 0) {
1128
1129
  // Note: Really UnexpectedError not LimitReachedError - this should not happen and be caught below
1129
1130
  throw new UnexpectedError(spaceTrim(function (block) { return "\n Loop limit reached during detection of circular dependencies in `validatePipeline`\n\n ".concat(block(pipelineIdentification), "\n "); }));
1130
1131
  }
1131
- var currentlyResovedTemplates = unresovedTemplates.filter(function (task) {
1132
+ var currentlyResovedTasks = unresovedTasks.filter(function (task) {
1132
1133
  return task.dependentParameterNames.every(function (name) { return resovedParameters.includes(name); });
1133
1134
  });
1134
- if (currentlyResovedTemplates.length === 0) {
1135
+ if (currentlyResovedTasks.length === 0) {
1135
1136
  throw new PipelineLogicError(
1136
1137
  // TODO: [🐎] DRY
1137
- spaceTrim(function (block) { return "\n\n Can not resolve some parameters:\n Either you are using a parameter that is not defined, or there are some circular dependencies.\n\n ".concat(block(pipelineIdentification), "\n\n **Can not resolve:**\n ").concat(block(unresovedTemplates
1138
+ spaceTrim(function (block) { return "\n\n Can not resolve some parameters:\n Either you are using a parameter that is not defined, or there are some circular dependencies.\n\n ".concat(block(pipelineIdentification), "\n\n **Can not resolve:**\n ").concat(block(unresovedTasks
1138
1139
  .map(function (_a) {
1139
1140
  var resultingParameterName = _a.resultingParameterName, dependentParameterNames = _a.dependentParameterNames;
1140
1141
  return "- Parameter `{".concat(resultingParameterName, "}` which depends on ").concat(dependentParameterNames
@@ -1153,13 +1154,13 @@ function validatePipelineCore(pipeline) {
1153
1154
  .map(function (name) { return "- Parameter `{".concat(name, "}`"); })
1154
1155
  .join('\n')), "\n\n\n "); }));
1155
1156
  }
1156
- resovedParameters = __spreadArray(__spreadArray([], __read(resovedParameters), false), __read(currentlyResovedTemplates.map(function (_a) {
1157
+ resovedParameters = __spreadArray(__spreadArray([], __read(resovedParameters), false), __read(currentlyResovedTasks.map(function (_a) {
1157
1158
  var resultingParameterName = _a.resultingParameterName;
1158
1159
  return resultingParameterName;
1159
1160
  })), false);
1160
- unresovedTemplates = unresovedTemplates.filter(function (task) { return !currentlyResovedTemplates.includes(task); });
1161
+ unresovedTasks = unresovedTasks.filter(function (task) { return !currentlyResovedTasks.includes(task); });
1161
1162
  };
1162
- while (unresovedTemplates.length > 0) {
1163
+ while (unresovedTasks.length > 0) {
1163
1164
  _loop_3();
1164
1165
  }
1165
1166
  // TODO: !!!!!! Test that pipeline interface implements declared formfactor interface
@@ -1221,7 +1222,7 @@ var PipelineUrlError = /** @class */ (function (_super) {
1221
1222
  /**
1222
1223
  * Parses the task and returns the list of all parameter names
1223
1224
  *
1224
- * @param template the task with parameters in {curly} braces
1225
+ * @param template the string template with parameters in {curly} braces
1225
1226
  * @returns the list of parameter names
1226
1227
  * @public exported from `@promptbook/utils`
1227
1228
  */
@@ -1255,13 +1256,13 @@ function unpreparePipeline(pipeline) {
1255
1256
  var personas = pipeline.personas, knowledgeSources = pipeline.knowledgeSources, tasks = pipeline.tasks;
1256
1257
  personas = personas.map(function (persona) { return (__assign(__assign({}, persona), { modelRequirements: undefined, preparationIds: undefined })); });
1257
1258
  knowledgeSources = knowledgeSources.map(function (knowledgeSource) { return (__assign(__assign({}, knowledgeSource), { preparationIds: undefined })); });
1258
- tasks = tasks.map(function (template) {
1259
- var dependentParameterNames = template.dependentParameterNames;
1260
- var parameterNames = extractParameterNames(template.preparedContent || '');
1259
+ tasks = tasks.map(function (task) {
1260
+ var dependentParameterNames = task.dependentParameterNames;
1261
+ var parameterNames = extractParameterNames(task.preparedContent || '');
1261
1262
  dependentParameterNames = dependentParameterNames.filter(function (dependentParameterName) { return !parameterNames.has(dependentParameterName); });
1262
- var templateUnprepared = __assign(__assign({}, template), { dependentParameterNames: dependentParameterNames });
1263
- delete templateUnprepared.preparedContent;
1264
- return templateUnprepared;
1263
+ var taskUnprepared = __assign(__assign({}, task), { dependentParameterNames: dependentParameterNames });
1264
+ delete taskUnprepared.preparedContent;
1265
+ return taskUnprepared;
1265
1266
  });
1266
1267
  return $asDeeplyFrozenSerializableJson('Unprepared PipelineJson', __assign(__assign({}, pipeline), { tasks: tasks, knowledgeSources: knowledgeSources, knowledgePieces: [], personas: personas, preparations: [] }));
1267
1268
  }
@@ -1959,7 +1960,7 @@ function isPipelinePrepared(pipeline) {
1959
1960
  * TODO: [🐠] Maybe base this on `makeValidator`
1960
1961
  * TODO: [🧊] Pipeline can be partially prepared, this should return true ONLY if fully prepared
1961
1962
  * TODO: [🧿] Maybe do same process with same granularity and subfinctions as `preparePipeline`
1962
- * - [🏍] ? Is context in each template
1963
+ * - [🏍] ? Is context in each task
1963
1964
  * - [♨] Are examples prepared
1964
1965
  * - [♨] Are tasks prepared
1965
1966
  */
@@ -3333,10 +3334,10 @@ function prepareTasks(pipeline, tools, options) {
3333
3334
  _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? DEFAULT_MAX_PARALLEL_COUNT : _a;
3334
3335
  tasks = pipeline.tasks, pipeline.parameters, knowledgePiecesCount = pipeline.knowledgePiecesCount;
3335
3336
  tasksPrepared = new Array(tasks.length);
3336
- return [4 /*yield*/, forEachAsync(tasks, { maxParallelCount: maxParallelCount /* <- TODO: [🪂] When there are subtasks, this maximul limit can be broken */ }, function (template, index) { return __awaiter(_this, void 0, void 0, function () {
3337
- var dependentParameterNames, preparedContent, preparedTemplate;
3337
+ return [4 /*yield*/, forEachAsync(tasks, { maxParallelCount: maxParallelCount /* <- TODO: [🪂] When there are subtasks, this maximul limit can be broken */ }, function (task, index) { return __awaiter(_this, void 0, void 0, function () {
3338
+ var dependentParameterNames, preparedContent, preparedTask;
3338
3339
  return __generator(this, function (_a) {
3339
- dependentParameterNames = template.dependentParameterNames;
3340
+ dependentParameterNames = task.dependentParameterNames;
3340
3341
  preparedContent = undefined;
3341
3342
  if (knowledgePiecesCount > 0 && !dependentParameterNames.includes('knowledge')) {
3342
3343
  preparedContent = spaceTrim("\n {content}\n\n ## Knowledge\n\n {knowledge}\n ");
@@ -3345,8 +3346,8 @@ function prepareTasks(pipeline, tools, options) {
3345
3346
  'knowledge',
3346
3347
  ], false);
3347
3348
  }
3348
- preparedTemplate = __assign(__assign({}, template), { dependentParameterNames: dependentParameterNames, preparedContent: preparedContent });
3349
- tasksPrepared[index] = preparedTemplate;
3349
+ preparedTask = __assign(__assign({}, task), { dependentParameterNames: dependentParameterNames, preparedContent: preparedContent });
3350
+ tasksPrepared[index] = preparedTask;
3350
3351
  return [2 /*return*/];
3351
3352
  });
3352
3353
  }); })];
@@ -3358,8 +3359,8 @@ function prepareTasks(pipeline, tools, options) {
3358
3359
  });
3359
3360
  }
3360
3361
  /**
3361
- * TODO: [🧠] Add context to each template (if missing)
3362
- * TODO: [🧠] What is better name `prepareTemplate` or `prepareTemplateAndParameters`
3362
+ * TODO: [🧠] Add context to each task (if missing)
3363
+ * TODO: [🧠] What is better name `prepareTask` or `prepareTaskAndParameters`
3363
3364
  * TODO: [♨][main] !!! Prepare index the examples and maybe tasks
3364
3365
  * TODO: Write tests for `preparePipeline`
3365
3366
  * TODO: [🏏] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
@@ -3445,7 +3446,7 @@ function preparePipeline(pipeline, tools, options) {
3445
3446
  })];
3446
3447
  case 3:
3447
3448
  tasksPrepared = (_c.sent()).tasksPrepared;
3448
- // ----- /Templates preparation -----
3449
+ // ----- /Tasks preparation -----
3449
3450
  // Note: Count total usage
3450
3451
  currentPreparation.usage = llmToolsWithUsage.getTotalUsage();
3451
3452
  return [2 /*return*/, $asDeeplyFrozenSerializableJson('Prepared PipelineJson', __assign(__assign({}, clonePipeline(pipeline)), { tasks: __spreadArray([], __read(tasksPrepared), false),
@@ -3514,16 +3515,16 @@ function extractVariables(script) {
3514
3515
  */
3515
3516
 
3516
3517
  /**
3517
- * Parses the template and returns the set of all used parameters
3518
+ * Parses the task and returns the set of all used parameters
3518
3519
  *
3519
- * @param template the template with used parameters
3520
+ * @param task the task with used parameters
3520
3521
  * @returns the set of parameter names
3521
3522
  * @throws {ParseError} if the script is invalid
3522
3523
  * @public exported from `@promptbook/utils`
3523
3524
  */
3524
- function extractParameterNamesFromTask(template) {
3525
+ function extractParameterNamesFromTask(task) {
3525
3526
  var e_1, _a, e_2, _b, e_3, _c, e_4, _d;
3526
- var title = template.title, description = template.description, taskType = template.taskType, content = template.content, preparedContent = template.preparedContent, jokerParameterNames = template.jokerParameterNames, foreach = template.foreach;
3527
+ var title = task.title, description = task.description, taskType = task.taskType, content = task.content, preparedContent = task.preparedContent, jokerParameterNames = task.jokerParameterNames, foreach = task.foreach;
3527
3528
  var parameterNames = new Set();
3528
3529
  try {
3529
3530
  for (var _e = __values(__spreadArray(__spreadArray(__spreadArray(__spreadArray([], __read(extractParameterNames(title)), false), __read(extractParameterNames(description || '')), false), __read(extractParameterNames(content)), false), __read(extractParameterNames(preparedContent || '')), false)), _f = _e.next(); !_f.done; _f = _e.next()) {