@promptbook/pdf 0.74.0-0 → 0.74.0-12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56)
  1. package/README.md +135 -30
  2. package/esm/index.es.js +235 -287
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/src/_packages/anthropic-claude.index.d.ts +2 -2
  5. package/esm/typings/src/_packages/azure-openai.index.d.ts +2 -2
  6. package/esm/typings/src/_packages/browser.index.d.ts +2 -2
  7. package/esm/typings/src/_packages/cli.index.d.ts +2 -2
  8. package/esm/typings/src/_packages/core.index.d.ts +7 -3
  9. package/esm/typings/src/_packages/documents.index.d.ts +2 -2
  10. package/esm/typings/src/_packages/execute-javascript.index.d.ts +2 -2
  11. package/esm/typings/src/_packages/fake-llm.index.d.ts +2 -2
  12. package/esm/typings/src/_packages/langtail.index.d.ts +2 -2
  13. package/esm/typings/src/_packages/legacy-documents.index.d.ts +2 -2
  14. package/esm/typings/src/_packages/markdown-utils.index.d.ts +2 -2
  15. package/esm/typings/src/_packages/node.index.d.ts +2 -2
  16. package/esm/typings/src/_packages/openai.index.d.ts +2 -2
  17. package/esm/typings/src/_packages/pdf.index.d.ts +2 -2
  18. package/esm/typings/src/_packages/remote-client.index.d.ts +2 -2
  19. package/esm/typings/src/_packages/remote-server.index.d.ts +2 -2
  20. package/esm/typings/src/_packages/utils.index.d.ts +2 -2
  21. package/esm/typings/src/_packages/website-crawler.index.d.ts +2 -2
  22. package/esm/typings/src/cli/cli-commands/make.d.ts +0 -1
  23. package/esm/typings/src/cli/cli-commands/run.d.ts +14 -0
  24. package/esm/typings/src/cli/promptbookCli.d.ts +1 -0
  25. package/esm/typings/src/cli/test/ptbk.d.ts +5 -2
  26. package/esm/typings/src/collection/collectionToJson.test.d.ts +1 -1
  27. package/esm/typings/src/collection/constructors/createCollectionFromDirectory.d.ts +1 -1
  28. package/esm/typings/src/commands/BOOK_VERSION/BookVersionCommand.d.ts +11 -0
  29. package/esm/typings/src/commands/BOOK_VERSION/bookVersionCommandParser.d.ts +9 -0
  30. package/esm/typings/src/commands/FOREACH/foreachCommandParser.d.ts +2 -2
  31. package/esm/typings/src/commands/_BOILERPLATE/boilerplateCommandParser.d.ts +1 -1
  32. package/esm/typings/src/commands/_common/types/CommandParser.d.ts +1 -1
  33. package/esm/typings/src/commands/index.d.ts +1 -1
  34. package/esm/typings/src/config.d.ts +6 -0
  35. package/esm/typings/src/conversion/pipelineJsonToString.d.ts +3 -3
  36. package/esm/typings/src/conversion/pipelineStringToJson.d.ts +2 -2
  37. package/esm/typings/src/conversion/pipelineStringToJsonSync.d.ts +2 -2
  38. package/esm/typings/src/conversion/utils/stringifyPipelineJson.d.ts +1 -1
  39. package/esm/typings/src/conversion/validation/_importPipeline.d.ts +7 -7
  40. package/esm/typings/src/formats/_common/FormatDefinition.d.ts +1 -1
  41. package/esm/typings/src/formats/_common/FormatSubvalueDefinition.d.ts +1 -1
  42. package/esm/typings/src/storage/blackhole/BlackholeStorage.d.ts +33 -0
  43. package/esm/typings/src/storage/memory/MemoryStorage.d.ts +1 -1
  44. package/esm/typings/src/storage/{memory/utils → utils}/PrefixStorage.d.ts +1 -1
  45. package/esm/typings/src/types/PipelineJson/PipelineJson.d.ts +6 -4
  46. package/esm/typings/src/types/PipelineJson/PreparationJson.d.ts +1 -1
  47. package/esm/typings/src/types/Prompt.d.ts +1 -1
  48. package/esm/typings/src/types/typeAliases.d.ts +2 -2
  49. package/esm/typings/src/version.d.ts +13 -2
  50. package/package.json +2 -2
  51. package/umd/index.umd.js +236 -287
  52. package/umd/index.umd.js.map +1 -1
  53. package/esm/typings/src/commands/PROMPTBOOK_VERSION/PromptbookVersionCommand.d.ts +0 -11
  54. package/esm/typings/src/commands/PROMPTBOOK_VERSION/promptbookVersionCommandParser.d.ts +0 -9
  55. package/esm/typings/src/commands/{PROMPTBOOK_VERSION/promptbookVersionCommand.test.d.ts → BOOK_VERSION/bookVersionCommand.test.d.ts} +0 -0
  56. package/esm/typings/src/storage/{memory → local-storage}/utils/makePromptbookStorageFromWebStorage.d.ts +0 -0
package/umd/index.umd.js CHANGED
@@ -12,10 +12,20 @@
 
  // ⚠️ WARNING: This code has been generated so that any manual changes will be overwritten
  /**
- * The version of the Promptbook library
+ * The version of the Book language
+ *
+ * @see https://github.com/webgptorg/book
+ */
+ var BOOK_LANGUAGE_VERSION = '1.0.0';
+ /**
+ * The version of the Promptbook engine
+ *
+ * @see https://github.com/webgptorg/promptbook
+ */
+ var PROMPTBOOK_ENGINE_VERSION = '0.74.0-11';
+ /**
+ * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  */
- var PROMPTBOOK_VERSION = '0.73.0';
- // TODO: [main] !!!! List here all the versions and annotate + put into script
 
  /*! *****************************************************************************
  Copyright (c) Microsoft Corporation.
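The single `PROMPTBOOK_VERSION` constant is split into `BOOK_LANGUAGE_VERSION` and `PROMPTBOOK_ENGINE_VERSION` (see also the changed exports at the end of this file). A minimal migration sketch for consuming code, assuming you read the constants from this package's entry point:

```typescript
// Sketch only — the two constants below are the new named exports shown in this diff.
// import { PROMPTBOOK_VERSION } from '@promptbook/pdf';            // <- before this release
import { BOOK_LANGUAGE_VERSION, PROMPTBOOK_ENGINE_VERSION } from '@promptbook/pdf';

console.info(`Book language version: ${BOOK_LANGUAGE_VERSION}`); // '1.0.0' in this build
console.info(`Promptbook engine version: ${PROMPTBOOK_ENGINE_VERSION}`); // '0.74.0-11' in this build
```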
@@ -163,13 +173,9 @@
  * @private within the repository
  */
  function TODO_USE() {
- var value = [];
- for (var _i = 0; _i < arguments.length; _i++) {
- value[_i] = arguments[_i];
- }
  }
 
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n```json\n{\n\"modelName\": 
\"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book.md",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book.md",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.book.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book.md",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book.md",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n```json\n{\n\"modelName\": 
\"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.book.md"}];
 
  /**
  * Prettify the html code
@@ -217,13 +223,13 @@
  /**
  * Converts promptbook in JSON format to string format
  *
- * @param pipelineJson Promptbook in JSON format (.ptbk.json)
- * @returns Promptbook in string format (.ptbk.md)
+ * @param pipelineJson Promptbook in JSON format (.book.json)
+ * @returns Promptbook in string format (.book.md)
  * @public exported from `@promptbook/core`
  */
  function pipelineJsonToString(pipelineJson) {
  var e_1, _a, e_2, _b, e_3, _c, e_4, _d, e_5, _e, e_6, _f;
- var title = pipelineJson.title, pipelineUrl = pipelineJson.pipelineUrl, promptbookVersion = pipelineJson.promptbookVersion, description = pipelineJson.description, parameters = pipelineJson.parameters, templates = pipelineJson.templates;
+ var title = pipelineJson.title, pipelineUrl = pipelineJson.pipelineUrl, bookVersion = pipelineJson.bookVersion, description = pipelineJson.description, parameters = pipelineJson.parameters, templates = pipelineJson.templates;
  var pipelineString = "# ".concat(title);
  if (description) {
  pipelineString += '\n\n';
@@ -233,8 +239,10 @@
  if (pipelineUrl) {
  commands.push("PIPELINE URL ".concat(pipelineUrl));
  }
- commands.push("PROMPTBOOK VERSION ".concat(promptbookVersion));
- // TODO: [main] !!! This increase size of the bundle and is probbably not necessary
+ if (bookVersion !== "undefined") {
+ commands.push("BOOK VERSION ".concat(bookVersion));
+ }
+ // TODO: [main] !!!!!! This increase size of the bundle and is probbably not necessary
  pipelineString = prettifyMarkdown(pipelineString);
  try {
  for (var _g = __values(parameters.filter(function (_a) {
@@ -414,7 +422,7 @@
  * TODO: [🧠] Is there a way to auto-detect missing features in pipelineJsonToString
  * TODO: [🏛] Maybe make some markdown builder
  * TODO: [🏛] Escape all
- * TODO: [🧠] Should be in generated .ptbk.md file GENERATOR_WARNING
+ * TODO: [🧠] Should be in generated .book.md file GENERATOR_WARNING
  */
 
  /**
@@ -794,7 +802,7 @@
  if ( /* version === '1.0.0' || */version === '2.0.0' || version === '3.0.0') {
  return false;
  }
- // <- TODO: [main] !!! Check isValidPromptbookVersion against PROMPTBOOK_VERSIONS
+ // <- TODO: [main] !!! Check isValidPromptbookVersion against PROMPTBOOK_ENGINE_VERSIONS
  return true;
  }
 
@@ -894,7 +902,7 @@
  if (!url.startsWith('https://')) {
  return false;
  }
- if (!url.endsWith('.ptbk.md')) {
+ if (!(url.endsWith('.book.md') || url.endsWith('.book') || url.endsWith('.book.md') || url.endsWith('.ptbk'))) {
  return false;
  }
  if (url.includes('#')) {
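A hedged sketch of the new suffix rule as written above (the standalone helper name is hypothetical; the bundle keeps this check inline):

```typescript
// Hypothetical helper mirroring the changed condition above, for illustration only.
// Note that the published condition lists '.book.md' twice and accepts bare '.ptbk',
// while the previously accepted '.ptbk.md' suffix is no longer listed.
function hasAcceptedPipelineUrlSuffix(url: string): boolean {
    return url.endsWith('.book.md') || url.endsWith('.book') || url.endsWith('.ptbk');
}

hasAcceptedPipelineUrlSuffix('https://promptbook.studio/promptbook/prepare-persona.book.md'); // true
hasAcceptedPipelineUrlSuffix('https://promptbook.studio/promptbook/prepare-persona.ptbk.md'); // false
```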
@@ -963,9 +971,9 @@
  // <- Note: [🚲]
  throw new PipelineLogicError(spaceTrim.spaceTrim(function (block) { return "\n Invalid promptbook URL \"".concat(pipeline.pipelineUrl, "\"\n\n ").concat(block(pipelineIdentification), "\n "); }));
  }
- if (pipeline.promptbookVersion !== undefined && !isValidPromptbookVersion(pipeline.promptbookVersion)) {
+ if (pipeline.bookVersion !== undefined && !isValidPromptbookVersion(pipeline.bookVersion)) {
  // <- Note: [🚲]
- throw new PipelineLogicError(spaceTrim.spaceTrim(function (block) { return "\n Invalid Promptbook Version \"".concat(pipeline.promptbookVersion, "\"\n\n ").concat(block(pipelineIdentification), "\n "); }));
+ throw new PipelineLogicError(spaceTrim.spaceTrim(function (block) { return "\n Invalid Promptbook Version \"".concat(pipeline.bookVersion, "\"\n\n ").concat(block(pipelineIdentification), "\n "); }));
  }
  // TODO: [🧠] Maybe do here some propper JSON-schema / ZOD checking
  if (!Array.isArray(pipeline.parameters)) {
@@ -2085,6 +2093,188 @@
  },
  });
 
+ /**
+ * @@@
+ *
+ * @public exported from `@promptbook/utils`
+ */
+ function deepClone(objectValue) {
+ return JSON.parse(JSON.stringify(objectValue));
+ /*
+ TODO: [🧠] Is there a better implementation?
+ > const propertyNames = Object.getOwnPropertyNames(objectValue);
+ > for (const propertyName of propertyNames) {
+ > const value = (objectValue as really_any)[propertyName];
+ > if (value && typeof value === 'object') {
+ > deepClone(value);
+ > }
+ > }
+ > return Object.assign({}, objectValue);
+ */
+ }
+ /**
+ * TODO: [🧠] Is there a way how to meaningfully test this utility
+ */
+
+ /**
+ * Function `addUsage` will add multiple usages into one
+ *
+ * Note: If you provide 0 values, it returns ZERO_USAGE
+ *
+ * @public exported from `@promptbook/core`
+ */
+ function addUsage() {
+ var usageItems = [];
+ for (var _i = 0; _i < arguments.length; _i++) {
+ usageItems[_i] = arguments[_i];
+ }
+ return usageItems.reduce(function (acc, item) {
+ var e_1, _a, e_2, _b;
+ var _c;
+ acc.price.value += ((_c = item.price) === null || _c === void 0 ? void 0 : _c.value) || 0;
+ try {
+ for (var _d = __values(Object.keys(acc.input)), _e = _d.next(); !_e.done; _e = _d.next()) {
+ var key = _e.value;
+ // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+ //@ts-ignore
+ if (item.input[key]) {
+ // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+ //@ts-ignore
+ acc.input[key].value += item.input[key].value || 0;
+ // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+ //@ts-ignore
+ if (item.input[key].isUncertain) {
+ // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+ //@ts-ignore
+ acc.input[key].isUncertain = true;
+ }
+ }
+ }
+ }
+ catch (e_1_1) { e_1 = { error: e_1_1 }; }
+ finally {
+ try {
+ if (_e && !_e.done && (_a = _d.return)) _a.call(_d);
+ }
+ finally { if (e_1) throw e_1.error; }
+ }
+ try {
+ for (var _f = __values(Object.keys(acc.output)), _g = _f.next(); !_g.done; _g = _f.next()) {
+ var key = _g.value;
+ // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+ //@ts-ignore
+ if (item.output[key]) {
+ // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+ //@ts-ignore
+ acc.output[key].value += item.output[key].value || 0;
+ // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+ //@ts-ignore
+ if (item.output[key].isUncertain) {
+ // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+ //@ts-ignore
+ acc.output[key].isUncertain = true;
+ }
+ }
+ }
+ }
+ catch (e_2_1) { e_2 = { error: e_2_1 }; }
+ finally {
+ try {
+ if (_g && !_g.done && (_b = _f.return)) _b.call(_f);
+ }
+ finally { if (e_2) throw e_2.error; }
+ }
+ return acc;
+ }, deepClone(ZERO_USAGE));
+ }
+
+ /**
+ * Intercepts LLM tools and counts total usage of the tools
+ *
+ * @param llmTools LLM tools to be intercepted with usage counting
+ * @returns LLM tools with same functionality with added total cost counting
+ * @public exported from `@promptbook/core`
+ */
+ function countTotalUsage(llmTools) {
+ var _this = this;
+ var totalUsage = ZERO_USAGE;
+ var proxyTools = {
+ get title() {
+ // TODO: [🧠] Maybe put here some suffix
+ return llmTools.title;
+ },
+ get description() {
+ // TODO: [🧠] Maybe put here some suffix
+ return llmTools.description;
+ },
+ checkConfiguration: function () {
+ return __awaiter(this, void 0, void 0, function () {
+ return __generator(this, function (_a) {
+ return [2 /*return*/, /* not await */ llmTools.checkConfiguration()];
+ });
+ });
+ },
+ listModels: function () {
+ return /* not await */ llmTools.listModels();
+ },
+ getTotalUsage: function () {
+ // <- Note: [🥫] Not using getter `get totalUsage` but `getTotalUsage` to allow this object to be proxied
+ return totalUsage;
+ },
+ };
+ if (llmTools.callChatModel !== undefined) {
+ proxyTools.callChatModel = function (prompt) { return __awaiter(_this, void 0, void 0, function () {
+ var promptResult;
+ return __generator(this, function (_a) {
+ switch (_a.label) {
+ case 0: return [4 /*yield*/, llmTools.callChatModel(prompt)];
+ case 1:
+ promptResult = _a.sent();
+ totalUsage = addUsage(totalUsage, promptResult.usage);
+ return [2 /*return*/, promptResult];
+ }
+ });
+ }); };
+ }
+ if (llmTools.callCompletionModel !== undefined) {
+ proxyTools.callCompletionModel = function (prompt) { return __awaiter(_this, void 0, void 0, function () {
+ var promptResult;
+ return __generator(this, function (_a) {
+ switch (_a.label) {
+ case 0: return [4 /*yield*/, llmTools.callCompletionModel(prompt)];
+ case 1:
+ promptResult = _a.sent();
+ totalUsage = addUsage(totalUsage, promptResult.usage);
+ return [2 /*return*/, promptResult];
+ }
+ });
+ }); };
+ }
+ if (llmTools.callEmbeddingModel !== undefined) {
+ proxyTools.callEmbeddingModel = function (prompt) { return __awaiter(_this, void 0, void 0, function () {
+ var promptResult;
+ return __generator(this, function (_a) {
+ switch (_a.label) {
+ case 0: return [4 /*yield*/, llmTools.callEmbeddingModel(prompt)];
+ case 1:
+ promptResult = _a.sent();
+ totalUsage = addUsage(totalUsage, promptResult.usage);
+ return [2 /*return*/, promptResult];
+ }
+ });
+ }); };
+ }
+ // <- Note: [🤖]
+ return proxyTools;
+ }
+ /**
+ * TODO: [🧠][💸] Maybe make some common abstraction `interceptLlmTools` and use here (or use javascript Proxy?)
+ * TODO: [🧠] Is there some meaningfull way how to test this util
+ * TODO: [🧠][🌯] Maybe a way how to hide ability to `get totalUsage`
+ * > const [llmToolsWithUsage,getUsage] = countTotalUsage(llmTools);
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
+ */
+
  /**
  * Multiple LLM Execution Tools is a proxy server that uses multiple execution tools internally and exposes the executor interface externally.
  *
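The block added above (`deepClone`, `addUsage`, `countTotalUsage`) wraps an existing LLM tools object and accumulates `promptResult.usage` across chat, completion and embedding calls. A hedged usage sketch; constructing the underlying tools is outside this diff:

```typescript
import { countTotalUsage } from '@promptbook/core';

// `llmTools` stands for whatever LLM execution tools object you already build
// elsewhere (OpenAI, Anthropic, remote, …); its construction is not part of this diff.
declare const llmTools: Parameters<typeof countTotalUsage>[0];

const llmToolsWithUsage = countTotalUsage(llmTools);

// ...run prompts or pipelines through `llmToolsWithUsage` as usual...

// Every intercepted call adds its usage via `addUsage`, starting from ZERO_USAGE:
console.log(llmToolsWithUsage.getTotalUsage());
```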
@@ -2377,188 +2567,6 @@
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
  */
 
- /**
- * @@@
- *
- * @public exported from `@promptbook/utils`
- */
- function deepClone(objectValue) {
- return JSON.parse(JSON.stringify(objectValue));
- /*
- TODO: [🧠] Is there a better implementation?
- > const propertyNames = Object.getOwnPropertyNames(objectValue);
- > for (const propertyName of propertyNames) {
- > const value = (objectValue as really_any)[propertyName];
- > if (value && typeof value === 'object') {
- > deepClone(value);
- > }
- > }
- > return Object.assign({}, objectValue);
- */
- }
- /**
- * TODO: [🧠] Is there a way how to meaningfully test this utility
- */
-
- /**
- * Function `addUsage` will add multiple usages into one
- *
- * Note: If you provide 0 values, it returns ZERO_USAGE
- *
- * @public exported from `@promptbook/core`
- */
- function addUsage() {
- var usageItems = [];
- for (var _i = 0; _i < arguments.length; _i++) {
- usageItems[_i] = arguments[_i];
- }
- return usageItems.reduce(function (acc, item) {
- var e_1, _a, e_2, _b;
- var _c;
- acc.price.value += ((_c = item.price) === null || _c === void 0 ? void 0 : _c.value) || 0;
- try {
- for (var _d = __values(Object.keys(acc.input)), _e = _d.next(); !_e.done; _e = _d.next()) {
- var key = _e.value;
- // eslint-disable-next-line @typescript-eslint/ban-ts-comment
- //@ts-ignore
- if (item.input[key]) {
- // eslint-disable-next-line @typescript-eslint/ban-ts-comment
- //@ts-ignore
- acc.input[key].value += item.input[key].value || 0;
- // eslint-disable-next-line @typescript-eslint/ban-ts-comment
- //@ts-ignore
- if (item.input[key].isUncertain) {
- // eslint-disable-next-line @typescript-eslint/ban-ts-comment
- //@ts-ignore
- acc.input[key].isUncertain = true;
- }
- }
- }
- }
- catch (e_1_1) { e_1 = { error: e_1_1 }; }
- finally {
- try {
- if (_e && !_e.done && (_a = _d.return)) _a.call(_d);
- }
- finally { if (e_1) throw e_1.error; }
- }
- try {
- for (var _f = __values(Object.keys(acc.output)), _g = _f.next(); !_g.done; _g = _f.next()) {
- var key = _g.value;
- // eslint-disable-next-line @typescript-eslint/ban-ts-comment
- //@ts-ignore
- if (item.output[key]) {
- // eslint-disable-next-line @typescript-eslint/ban-ts-comment
- //@ts-ignore
- acc.output[key].value += item.output[key].value || 0;
- // eslint-disable-next-line @typescript-eslint/ban-ts-comment
- //@ts-ignore
- if (item.output[key].isUncertain) {
- // eslint-disable-next-line @typescript-eslint/ban-ts-comment
- //@ts-ignore
- acc.output[key].isUncertain = true;
- }
- }
- }
- }
- catch (e_2_1) { e_2 = { error: e_2_1 }; }
- finally {
- try {
- if (_g && !_g.done && (_b = _f.return)) _b.call(_f);
- }
- finally { if (e_2) throw e_2.error; }
- }
- return acc;
- }, deepClone(ZERO_USAGE));
- }
-
- /**
- * Intercepts LLM tools and counts total usage of the tools
- *
- * @param llmTools LLM tools to be intercepted with usage counting
- * @returns LLM tools with same functionality with added total cost counting
- * @public exported from `@promptbook/core`
- */
- function countTotalUsage(llmTools) {
- var _this = this;
- var totalUsage = ZERO_USAGE;
- var proxyTools = {
- get title() {
- // TODO: [🧠] Maybe put here some suffix
- return llmTools.title;
- },
- get description() {
- // TODO: [🧠] Maybe put here some suffix
- return llmTools.description;
- },
- checkConfiguration: function () {
- return __awaiter(this, void 0, void 0, function () {
- return __generator(this, function (_a) {
- return [2 /*return*/, /* not await */ llmTools.checkConfiguration()];
- });
- });
- },
- listModels: function () {
- return /* not await */ llmTools.listModels();
- },
- getTotalUsage: function () {
- // <- Note: [🥫] Not using getter `get totalUsage` but `getTotalUsage` to allow this object to be proxied
- return totalUsage;
- },
- };
- if (llmTools.callChatModel !== undefined) {
- proxyTools.callChatModel = function (prompt) { return __awaiter(_this, void 0, void 0, function () {
- var promptResult;
- return __generator(this, function (_a) {
- switch (_a.label) {
- case 0: return [4 /*yield*/, llmTools.callChatModel(prompt)];
- case 1:
- promptResult = _a.sent();
- totalUsage = addUsage(totalUsage, promptResult.usage);
- return [2 /*return*/, promptResult];
- }
- });
- }); };
- }
- if (llmTools.callCompletionModel !== undefined) {
- proxyTools.callCompletionModel = function (prompt) { return __awaiter(_this, void 0, void 0, function () {
- var promptResult;
- return __generator(this, function (_a) {
- switch (_a.label) {
- case 0: return [4 /*yield*/, llmTools.callCompletionModel(prompt)];
- case 1:
- promptResult = _a.sent();
- totalUsage = addUsage(totalUsage, promptResult.usage);
- return [2 /*return*/, promptResult];
- }
- });
- }); };
- }
- if (llmTools.callEmbeddingModel !== undefined) {
- proxyTools.callEmbeddingModel = function (prompt) { return __awaiter(_this, void 0, void 0, function () {
- var promptResult;
- return __generator(this, function (_a) {
- switch (_a.label) {
- case 0: return [4 /*yield*/, llmTools.callEmbeddingModel(prompt)];
- case 1:
- promptResult = _a.sent();
- totalUsage = addUsage(totalUsage, promptResult.usage);
- return [2 /*return*/, promptResult];
- }
- });
- }); };
- }
- // <- Note: [🤖]
- return proxyTools;
- }
- /**
- * TODO: [🧠][💸] Maybe make some common abstraction `interceptLlmTools` and use here (or use javascript Proxy?)
- * TODO: [🧠] Is there some meaningfull way how to test this util
- * TODO: [🧠][🌯] Maybe a way how to hide ability to `get totalUsage`
- * > const [llmToolsWithUsage,getUsage] = countTotalUsage(llmTools);
- * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
- */
-
  /**
  * Takes an item or an array of items and returns an array of items
  *
@@ -2598,7 +2606,7 @@
  collection = createCollectionFromJson.apply(void 0, __spreadArray([], __read(PipelineCollection), false));
  _b = createPipelineExecutor;
  _c = {};
- return [4 /*yield*/, collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.ptbk.md')];
+ return [4 /*yield*/, collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book.md')];
  case 1:
  preparePersonaExecutor = _b.apply(void 0, [(_c.pipeline = _d.sent(),
  _c.tools = tools,
@@ -3010,16 +3018,13 @@
  function makeKnowledgeSourceHandler(knowledgeSource, tools, options) {
  var _a;
  return __awaiter(this, void 0, void 0, function () {
- var sourceContent, name, _b, _c, rootDirname, _d,
- // <- TODO: process.cwd() if running in Node.js
- isVerbose, url, response_1, mimeType, filename_1, fileExtension, mimeType;
+ var sourceContent, name, _b, _c, rootDirname, url, response_1, mimeType, filename_1, fileExtension, mimeType;
  return __generator(this, function (_e) {
  switch (_e.label) {
  case 0:
  sourceContent = knowledgeSource.sourceContent;
  name = knowledgeSource.name;
- _b = options || {}, _c = _b.rootDirname, rootDirname = _c === void 0 ? null : _c, _d = _b.isVerbose, isVerbose = _d === void 0 ? DEFAULT_IS_VERBOSE : _d;
- TODO_USE(isVerbose);
+ _b = options || {}, _c = _b.rootDirname, rootDirname = _c === void 0 ? null : _c, _b.isVerbose;
  if (!name) {
  name = sourceContentToName(sourceContent);
  }
@@ -3273,12 +3278,12 @@
  */
  function clonePipeline(pipeline) {
  // Note: Not using spread operator (...) because @@@
- var pipelineUrl = pipeline.pipelineUrl, sourceFile = pipeline.sourceFile, title = pipeline.title, promptbookVersion = pipeline.promptbookVersion, description = pipeline.description, parameters = pipeline.parameters, templates = pipeline.templates, knowledgeSources = pipeline.knowledgeSources, knowledgePieces = pipeline.knowledgePieces, personas = pipeline.personas, preparations = pipeline.preparations;
+ var pipelineUrl = pipeline.pipelineUrl, sourceFile = pipeline.sourceFile, title = pipeline.title, bookVersion = pipeline.bookVersion, description = pipeline.description, parameters = pipeline.parameters, templates = pipeline.templates, knowledgeSources = pipeline.knowledgeSources, knowledgePieces = pipeline.knowledgePieces, personas = pipeline.personas, preparations = pipeline.preparations;
  return {
  pipelineUrl: pipelineUrl,
  sourceFile: sourceFile,
  title: title,
- promptbookVersion: promptbookVersion,
+ bookVersion: bookVersion,
  description: description,
  parameters: parameters,
  templates: templates,
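The destructuring in `clonePipeline` above doubles as a list of the fields a prepared pipeline carries after the rename. An illustrative sketch with made-up values:

```typescript
// Illustrative only — field names follow the destructuring above, values are invented.
const pipelineJson = {
    pipelineUrl: 'https://promptbook.studio/promptbook/prepare-knowledge-title.book.md',
    sourceFile: './promptbook-collection/prepare-knowledge-title.book.md',
    title: 'Prepare Title',
    bookVersion: '1.0.0', // <- was `promptbookVersion` before this release
    description: undefined,
    parameters: [],
    templates: [],
    knowledgeSources: [],
    knowledgePieces: [],
    personas: [],
    preparations: [],
};
```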
@@ -3299,15 +3304,13 @@
  */
  function prepareTemplates(pipeline, tools, options) {
  return __awaiter(this, void 0, void 0, function () {
- var _a, maxParallelCount, templates, parameters, knowledgePiecesCount, templatesPrepared;
+ var _a, maxParallelCount, templates, knowledgePiecesCount, templatesPrepared;
  var _this = this;
  return __generator(this, function (_b) {
  switch (_b.label) {
  case 0:
  _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? DEFAULT_MAX_PARALLEL_COUNT : _a;
- templates = pipeline.templates, parameters = pipeline.parameters, knowledgePiecesCount = pipeline.knowledgePiecesCount;
- // TODO: [main] !! Apply examples to each template (if missing and is for the template defined)
- TODO_USE(parameters);
+ templates = pipeline.templates, pipeline.parameters, knowledgePiecesCount = pipeline.knowledgePiecesCount;
  templatesPrepared = new Array(templates.length);
  return [4 /*yield*/, forEachAsync(templates, { maxParallelCount: maxParallelCount /* <- TODO: [🪂] When there are subtasks, this maximul limit can be broken */ }, function (template, index) { return __awaiter(_this, void 0, void 0, function () {
  var dependentParameterNames, preparedContent, preparedTemplate;
@@ -3377,7 +3380,7 @@
  currentPreparation = {
  id: 1,
  // TODO: [🍥]> date: $currentDate(),
- promptbookVersion: PROMPTBOOK_VERSION,
+ promptbookVersion: PROMPTBOOK_ENGINE_VERSION,
  usage: ZERO_USAGE,
  };
  preparations = [
@@ -3701,22 +3704,12 @@
  formatName: 'CSV',
  aliases: ['SPREADSHEET', 'TABLE'],
  isValid: function (value, settings, schema) {
- // TODO: Implement CSV validation
- TODO_USE(value /* <- TODO: Use value here */);
- TODO_USE(settings /* <- TODO: Use settings here */);
- TODO_USE(schema /* <- TODO: Use schema here */);
  return true;
  },
  canBeValid: function (partialValue, settings, schema) {
- TODO_USE(partialValue /* <- TODO: Use partialValue here */);
- TODO_USE(settings /* <- TODO: Use settings here */);
- TODO_USE(schema /* <- TODO: Use schema here */);
  return true;
  },
  heal: function (value, settings, schema) {
- TODO_USE(value /* <- TODO: Use partialValue here */);
- TODO_USE(settings /* <- TODO: Use settings here */);
- TODO_USE(schema /* <- TODO: Use schema here */);
  throw new Error('Not implemented');
  },
  subvalueDefinitions: [
@@ -3835,20 +3828,12 @@
  formatName: 'JSON',
  mimeType: 'application/json',
  isValid: function (value, settings, schema) {
- TODO_USE(schema /* <- TODO: Use schema here */);
- TODO_USE(settings /* <- TODO: Use settings here */);
  return isValidJsonString(value);
  },
  canBeValid: function (partialValue, settings, schema) {
- TODO_USE(partialValue /* <- TODO: Use partialValue here */);
- TODO_USE(settings /* <- TODO: Use settings here */);
- TODO_USE(schema /* <- TODO: Use schema here */);
  return true;
  },
  heal: function (value, settings, schema) {
- TODO_USE(value /* <- TODO: Use partialValue here */);
- TODO_USE(settings /* <- TODO: Use settings here */);
- TODO_USE(schema /* <- TODO: Use schema here */);
  throw new Error('Not implemented');
  },
  subvalueDefinitions: [],
@@ -3930,21 +3915,12 @@
  formatName: 'XML',
  mimeType: 'application/xml',
  isValid: function (value, settings, schema) {
- TODO_USE(value /* <- TODO: Use value here */);
- TODO_USE(settings /* <- TODO: Use settings here */);
- TODO_USE(schema /* <- TODO: Use schema here */);
  return true;
  },
  canBeValid: function (partialValue, settings, schema) {
- TODO_USE(partialValue /* <- TODO: Use partialValue here */);
- TODO_USE(settings /* <- TODO: Use settings here */);
- TODO_USE(schema /* <- TODO: Use schema here */);
  return true;
  },
  heal: function (value, settings, schema) {
- TODO_USE(value /* <- TODO: Use partialValue here */);
- TODO_USE(settings /* <- TODO: Use settings here */);
- TODO_USE(schema /* <- TODO: Use schema here */);
  throw new Error('Not implemented');
  },
  subvalueDefinitions: [],
@@ -4158,27 +4134,6 @@
  * TODO: [🏢] Make this logic part of `JsonFormatDefinition` or `isValidJsonString`
  */
 
- /**
- * Just says that the variable is not used but should be kept
- * No side effects.
- *
- * Note: It can be usefull for:
- *
- * 1) Suppressing eager optimization of unused imports
- * 2) Suppressing eslint errors of unused variables in the tests
- * 3) Keeping the type of the variable for type testing
- *
- * @param value any values
- * @returns void
- * @private within the repository
- */
- function keepUnused() {
- var valuesToKeep = [];
- for (var _i = 0; _i < arguments.length; _i++) {
- valuesToKeep[_i] = arguments[_i];
- }
- }
-
  /**
  * Replaces parameters in template with values from parameters object
  *
@@ -4291,10 +4246,12 @@
  * @public exported from `@promptbook/utils`
  */
  function countPages(text) {
- var sentencesPerPage = 5; // Assuming each page has 5 sentences
- var sentences = text.split(/[.!?]+/).filter(function (sentence) { return sentence.trim() !== ''; });
- var pageCount = Math.ceil(sentences.length / sentencesPerPage);
- return pageCount;
+ if (text === '') {
+ return 0;
+ }
+ var pagesByLinesCount = Math.ceil(countLines(text) / 44);
+ var pagesByCharactersCount = Math.ceil(countCharacters(text) / 2772);
+ return Math.max(pagesByLinesCount, pagesByCharactersCount);
  }
 
  /**
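`countPages` drops the 5-sentences-per-page guess in favour of a line/character heuristic: 44 lines or 2772 characters per page, whichever yields more pages. A hedged sketch of the same arithmetic with the helper calls inlined:

```typescript
// Sketch of the heuristic introduced above; `split('\n')` and `.length` stand in
// for the bundle's `countLines` / `countCharacters` utilities.
function estimatePages(text: string): number {
    if (text === '') {
        return 0;
    }
    const lines = text.split('\n').length;
    const characters = text.length;
    return Math.max(Math.ceil(lines / 44), Math.ceil(characters / 2772));
}

// 100 lines of 30 characters each (3099 characters with newlines):
// max(ceil(100 / 44), ceil(3099 / 2772)) = max(3, 2) = 3
estimatePages(Array(100).fill('x'.repeat(30)).join('\n')); // 3
```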
@@ -4574,7 +4531,7 @@
  promptTitle: template.title,
  promptMessage: replaceParameters(template.description || '', parameters),
  defaultValue: replaceParameters(preparedContent, parameters),
- // TODO: [🧠] !! Figure out how to define placeholder in .ptbk.md file
+ // TODO: [🧠] !! Figure out how to define placeholder in .book.md file
  placeholder: undefined,
  priority: priority,
  }))];
@@ -4672,7 +4629,6 @@
  $ongoingTemplateResult.$resultString = extractJsonBlock($ongoingTemplateResult.$resultString || '');
  }
  catch (error) {
- keepUnused(error);
  throw new ExpectError(spaceTrim.spaceTrim(function (block) { return "\n Expected valid JSON string\n\n ".concat(block(
  /*<- Note: No need for `pipelineIdentification`, it will be catched and added later */ ''), "\n "); }));
  }
@@ -4850,7 +4806,6 @@
  function getContextForTemplate(template) {
  return __awaiter(this, void 0, void 0, function () {
  return __generator(this, function (_a) {
- TODO_USE(template);
  return [2 /*return*/, RESERVED_PARAMETER_MISSING_VALUE /* <- TODO: [🏍] Implement */];
  });
  });
@@ -4863,11 +4818,9 @@
  */
  function getKnowledgeForTemplate(options) {
  return __awaiter(this, void 0, void 0, function () {
- var preparedPipeline, template;
+ var preparedPipeline;
  return __generator(this, function (_a) {
- preparedPipeline = options.preparedPipeline, template = options.template;
- // TODO: [♨] Implement Better - use real index and keyword search from `template` and {examples}
- TODO_USE(template);
+ preparedPipeline = options.preparedPipeline, options.template;
  return [2 /*return*/, preparedPipeline.knowledgePieces.map(function (_a) {
  var content = _a.content;
  return "- ".concat(content);
@@ -4884,8 +4837,6 @@
  function getExamplesForTemplate(template) {
  return __awaiter(this, void 0, void 0, function () {
  return __generator(this, function (_a) {
- // TODO: [♨] Implement Better - use real index and keyword search
- TODO_USE(template);
  return [2 /*return*/, RESERVED_PARAMETER_MISSING_VALUE /* <- TODO: [♨] Implement */];
  });
  });
@@ -4904,13 +4855,13 @@
  switch (_b.label) {
  case 0:
  preparedPipeline = options.preparedPipeline, template = options.template, pipelineIdentification = options.pipelineIdentification;
- return [4 /*yield*/, getContextForTemplate(template)];
+ return [4 /*yield*/, getContextForTemplate()];
  case 1:
  context = _b.sent();
  return [4 /*yield*/, getKnowledgeForTemplate({ preparedPipeline: preparedPipeline, template: template })];
  case 2:
  knowledge = _b.sent();
- return [4 /*yield*/, getExamplesForTemplate(template)];
+ return [4 /*yield*/, getExamplesForTemplate()];
  case 3:
  examples = _b.sent();
  currentDate = new Date().toISOString();
@@ -5145,8 +5096,8 @@
  executionReport = {
  pipelineUrl: preparedPipeline.pipelineUrl,
  title: preparedPipeline.title,
- promptbookUsedVersion: PROMPTBOOK_VERSION,
- promptbookRequestedVersion: preparedPipeline.promptbookVersion,
+ promptbookUsedVersion: PROMPTBOOK_ENGINE_VERSION,
+ promptbookRequestedVersion: preparedPipeline.bookVersion,
  description: preparedPipeline.description,
  promptExecutions: [],
  };
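With the rename, an execution report distinguishes the engine that actually ran the pipeline from the Book version the pipeline requested. A hedged sketch of the affected fields (the type name is hypothetical; the fields mirror the assignment above):

```typescript
// Hypothetical type name — the fields mirror the `executionReport` object built above.
type ExecutionReportHeaderSketch = {
    pipelineUrl?: string;
    title: string;
    promptbookUsedVersion: string; // filled from PROMPTBOOK_ENGINE_VERSION, e.g. '0.74.0-11'
    promptbookRequestedVersion?: string; // filled from `pipeline.bookVersion`
    description?: string;
    promptExecutions: Array<unknown>;
};
```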
@@ -5525,13 +5476,13 @@
  */
  MarkdownScraper.prototype.scrape = function (source) {
  return __awaiter(this, void 0, void 0, function () {
- var _a, _b, maxParallelCount, _c, isVerbose, llm, _llms, llmTools, collection, prepareKnowledgeFromMarkdownExecutor, _d, prepareTitleExecutor, _e, prepareKeywordsExecutor, _f, knowledgeContent, result, outputParameters, knowledgePiecesRaw, knowledgeTextPieces, knowledge;
+ var _a, _c, isVerbose, llm, _llms, llmTools, collection, prepareKnowledgeFromMarkdownExecutor, _d, prepareTitleExecutor, _e, prepareKeywordsExecutor, _f, knowledgeContent, result, outputParameters, knowledgePiecesRaw, knowledgeTextPieces, knowledge;
  var _g, _h, _j;
  var _this = this;
  return __generator(this, function (_k) {
  switch (_k.label) {
  case 0:
- _a = this.options, _b = _a.maxParallelCount, maxParallelCount = _b === void 0 ? DEFAULT_MAX_PARALLEL_COUNT : _b, _c = _a.isVerbose, isVerbose = _c === void 0 ? DEFAULT_IS_VERBOSE : _c;
+ _a = this.options, _a.maxParallelCount, _c = _a.isVerbose, isVerbose = _c === void 0 ? DEFAULT_IS_VERBOSE : _c;
  llm = this.tools.llm;
  if (llm === undefined) {
  throw new MissingToolsError('LLM tools are required for scraping external files');
@@ -5539,11 +5490,10 @@
  }
  _llms = arrayableToArray(llm);
  llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools.apply(void 0, __spreadArray([], __read(_llms), false));
- TODO_USE(maxParallelCount); // <- [🪂]
  collection = createCollectionFromJson.apply(void 0, __spreadArray([], __read(PipelineCollection), false));
  _d = createPipelineExecutor;
  _g = {};
- return [4 /*yield*/, collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md')];
+ return [4 /*yield*/, collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book.md')];
  case 1:
  prepareKnowledgeFromMarkdownExecutor = _d.apply(void 0, [(_g.pipeline = _k.sent(),
  _g.tools = {
@@ -5552,7 +5502,7 @@
  _g)]);
  _e = createPipelineExecutor;
  _h = {};
- return [4 /*yield*/, collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md')];
+ return [4 /*yield*/, collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-knowledge-title.book.md')];
  case 2:
  prepareTitleExecutor = _e.apply(void 0, [(_h.pipeline = _k.sent(),
  _h.tools = {
@@ -5561,7 +5511,7 @@
  _h)]);
  _f = createPipelineExecutor;
  _j = {};
- return [4 /*yield*/, collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md')];
+ return [4 /*yield*/, collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-knowledge-keywords.book.md')];
  case 3:
  prepareKeywordsExecutor = _f.apply(void 0, [(_j.pipeline = _k.sent(),
  _j.tools = {
@@ -5722,7 +5672,6 @@
  PdfScraper.prototype.$convert = function (source) {
  return __awaiter(this, void 0, void 0, function () {
  return __generator(this, function (_a) {
- TODO_USE(source);
  TODO_USE(this.options);
  throw new NotYetImplementedError('PDF conversion not yet implemented');
  });
@@ -5734,7 +5683,6 @@
  PdfScraper.prototype.scrape = function (source) {
  return __awaiter(this, void 0, void 0, function () {
  return __generator(this, function (_a) {
- TODO_USE(source);
  TODO_USE(this.options);
  /*
  const {
@@ -5780,7 +5728,8 @@
  * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
  */
 
- exports.PROMPTBOOK_VERSION = PROMPTBOOK_VERSION;
+ exports.BOOK_LANGUAGE_VERSION = BOOK_LANGUAGE_VERSION;
+ exports.PROMPTBOOK_ENGINE_VERSION = PROMPTBOOK_ENGINE_VERSION;
  exports.PdfScraper = PdfScraper;
  exports._PdfScraperRegistration = _PdfScraperRegistration;
  exports.createPdfScraper = createPdfScraper;