@promptbook/cli 0.61.0-10 → 0.61.0-12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (207)
  1. package/esm/index.es.js +760 -475
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/promptbook-collection/index.d.ts +83 -2
  4. package/esm/typings/src/_packages/core.index.d.ts +2 -2
  5. package/esm/typings/src/_packages/types.index.d.ts +2 -2
  6. package/esm/typings/src/_packages/utils.index.d.ts +3 -1
  7. package/esm/typings/src/cli/cli-commands/hello.d.ts +3 -0
  8. package/esm/typings/src/cli/cli-commands/make.d.ts +3 -0
  9. package/esm/typings/src/cli/cli-commands/prettify.d.ts +3 -0
  10. package/esm/typings/src/cli/promptbookCli.d.ts +1 -0
  11. package/esm/typings/src/collection/PipelineCollection.d.ts +1 -1
  12. package/esm/typings/src/collection/SimplePipelineCollection.d.ts +1 -1
  13. package/esm/typings/src/collection/constructors/createCollectionFromDirectory.d.ts +1 -0
  14. package/esm/typings/src/commands/_BOILERPLATE/boilerplateCommandParser.d.ts +1 -0
  15. package/esm/typings/src/commands/_common/types/CommandParser.d.ts +4 -1
  16. package/esm/typings/src/config.d.ts +4 -0
  17. package/esm/typings/src/conversion/pipelineJsonToString.d.ts +1 -1
  18. package/esm/typings/src/conversion/pipelineStringToJson.d.ts +1 -0
  19. package/esm/typings/src/conversion/pipelineStringToJsonSync.d.ts +2 -0
  20. package/esm/typings/src/conversion/validation/validatePipeline.d.ts +6 -0
  21. package/esm/typings/src/errors/EnvironmentMismatchError.d.ts +7 -0
  22. package/{umd/typings/src/errors/VersionMismatch.d.ts → esm/typings/src/errors/VersionMismatchError.d.ts} +1 -1
  23. package/esm/typings/src/execution/CommonExecutionToolsOptions.d.ts +1 -0
  24. package/esm/typings/src/execution/LlmExecutionTools.d.ts +3 -1
  25. package/esm/typings/src/execution/PipelineExecutor.d.ts +4 -3
  26. package/esm/typings/src/execution/PromptResult.d.ts +12 -0
  27. package/esm/typings/src/execution/ScriptExecutionTools.d.ts +4 -2
  28. package/esm/typings/src/execution/createPipelineExecutor.d.ts +2 -0
  29. package/esm/typings/src/formats/csv/ListFormatDefinition.d.ts +1 -0
  30. package/esm/typings/src/formats/json/JsonFormatDefinition.d.ts +1 -0
  31. package/esm/typings/src/formats/list/ListFormatDefinition.d.ts +1 -0
  32. package/esm/typings/src/formats/xml/XmlFormatDefinition.d.ts +1 -0
  33. package/esm/typings/src/knowledge/dialogs/simple-prompt/SimplePromptInterfaceTools.d.ts +3 -0
  34. package/esm/typings/src/knowledge/prepare-knowledge/_common/prepareKnowledgePieces.d.ts +1 -1
  35. package/esm/typings/src/knowledge/prepare-knowledge/markdown/prepareKnowledgeFromMarkdown.d.ts +1 -0
  36. package/esm/typings/src/knowledge/prepare-knowledge/markdown/prepareKnowledgeFromMarkdown.test.d.ts +3 -0
  37. package/esm/typings/src/knowledge/prepare-knowledge/pdf/prepareKnowledgeFromPdf.d.ts +1 -0
  38. package/esm/typings/src/llm-providers/_common/createLlmToolsFromEnv.d.ts +22 -0
  39. package/esm/typings/src/llm-providers/_common/getLlmToolsForCli.d.ts +10 -0
  40. package/esm/typings/src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground.d.ts +10 -0
  41. package/esm/typings/src/llm-providers/_common/utils/cache/CacheItem.d.ts +29 -0
  42. package/esm/typings/src/llm-providers/_common/utils/cache/CacheLlmToolsOptions.d.ts +10 -0
  43. package/esm/typings/src/llm-providers/_common/utils/cache/cacheLlmTools.d.ts +16 -0
  44. package/esm/typings/src/llm-providers/_common/utils/count-total-cost/LlmExecutionToolsWithTotalCost.d.ts +11 -0
  45. package/esm/typings/src/llm-providers/_common/utils/count-total-cost/countTotalCost.d.ts +14 -0
  46. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +1 -0
  47. package/esm/typings/src/llm-providers/anthropic-claude/anthropic-claude-models.d.ts +1 -1
  48. package/esm/typings/src/llm-providers/anthropic-claude/playground/playground.d.ts +3 -0
  49. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +1 -0
  50. package/esm/typings/src/llm-providers/azure-openai/playground/playground.d.ts +3 -0
  51. package/esm/typings/src/llm-providers/langtail/LangtailExecutionTools.d.ts +3 -0
  52. package/esm/typings/src/llm-providers/langtail/LangtailExecutionToolsOptions.d.ts +1 -1
  53. package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +1 -1
  54. package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +1 -1
  55. package/esm/typings/src/llm-providers/multiple/MultipleLlmExecutionTools.d.ts +3 -1
  56. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +1 -0
  57. package/esm/typings/src/llm-providers/openai/computeUsage.d.ts +2 -2
  58. package/esm/typings/src/llm-providers/openai/openai-models.d.ts +1 -0
  59. package/esm/typings/src/llm-providers/openai/playground/playground.d.ts +3 -0
  60. package/esm/typings/src/personas/preparePersona.d.ts +4 -2
  61. package/esm/typings/src/prepare/preparePipeline.d.ts +3 -1
  62. package/esm/typings/src/prepare/unpreparePipeline.d.ts +1 -0
  63. package/esm/typings/src/storage/_common/PromptbookStorage.d.ts +25 -0
  64. package/esm/typings/src/storage/_common/PromptbookStorage.test-type.d.ts +5 -0
  65. package/esm/typings/src/storage/files-storage/FilesStorage.d.ts +30 -0
  66. package/esm/typings/src/storage/files-storage/FilesStorageOptions.d.ts +13 -0
  67. package/esm/typings/src/storage/files-storage/utils/nameToSubfolderPath.d.ts +7 -0
  68. package/esm/typings/src/storage/files-storage/utils/nameToSubfolderPath.test.d.ts +1 -0
  69. package/esm/typings/src/storage/local-storage/getLocalStorage.d.ts +9 -0
  70. package/esm/typings/src/storage/local-storage/getSessionStorage.d.ts +9 -0
  71. package/esm/typings/src/storage/memory/MemoryStorage.d.ts +34 -0
  72. package/esm/typings/src/storage/utils/PrefixStorage.d.ts +26 -0
  73. package/esm/typings/src/storage/utils/makePromptbookStorageFromWebStorage.d.ts +11 -0
  74. package/esm/typings/src/types/ModelRequirements.d.ts +21 -1
  75. package/esm/typings/src/types/Parameters.d.ts +0 -1
  76. package/esm/typings/src/types/PipelineJson/PipelineJson.d.ts +1 -2
  77. package/esm/typings/src/types/PipelineJson/PreparationJson.d.ts +1 -5
  78. package/esm/typings/src/types/PipelineJson/PromptTemplateJsonCommon.d.ts +1 -0
  79. package/esm/typings/src/types/PipelineJson/PromptTemplateParameterJson.d.ts +11 -2
  80. package/esm/typings/src/types/PipelineString.d.ts +1 -1
  81. package/esm/typings/src/types/Prompt.d.ts +5 -2
  82. package/esm/typings/src/types/execution-report/countWorkingDuration.d.ts +1 -1
  83. package/esm/typings/src/types/typeAliases.d.ts +35 -1
  84. package/esm/typings/src/utils/emojis.d.ts +2 -2
  85. package/esm/typings/src/utils/formatNumber.d.ts +1 -1
  86. package/esm/typings/src/utils/isRunningInWhatever.d.ts +3 -0
  87. package/esm/typings/src/utils/markdown/addAutoGeneratedSection.d.ts +1 -1
  88. package/esm/typings/src/utils/markdown/createMarkdownChart.d.ts +1 -1
  89. package/esm/typings/src/utils/markdown/createMarkdownTable.d.ts +1 -1
  90. package/esm/typings/src/utils/markdown/extractAllBlocksFromMarkdown-real.test.d.ts +1 -0
  91. package/esm/typings/src/utils/markdown/extractAllBlocksFromMarkdown.d.ts +7 -0
  92. package/esm/typings/src/utils/markdown/extractOneBlockFromMarkdown.d.ts +1 -1
  93. package/esm/typings/src/utils/organization/TODO.d.ts +2 -0
  94. package/esm/typings/src/utils/organization/TODO_USE.d.ts +1 -0
  95. package/esm/typings/src/utils/organization/___.d.ts +2 -0
  96. package/esm/typings/src/utils/organization/just.d.ts +1 -0
  97. package/esm/typings/src/utils/organization/keepImported.d.ts +12 -0
  98. package/esm/typings/src/utils/organization/notUsing.d.ts +12 -0
  99. package/esm/typings/src/utils/organization/really_any.d.ts +2 -0
  100. package/esm/typings/src/utils/random/randomSeed.d.ts +7 -0
  101. package/esm/typings/src/version.d.ts +3 -0
  102. package/package.json +2 -3
  103. package/umd/index.umd.js +763 -477
  104. package/umd/index.umd.js.map +1 -1
  105. package/umd/typings/promptbook-collection/index.d.ts +83 -2
  106. package/umd/typings/src/_packages/core.index.d.ts +2 -2
  107. package/umd/typings/src/_packages/types.index.d.ts +2 -2
  108. package/umd/typings/src/_packages/utils.index.d.ts +3 -1
  109. package/umd/typings/src/cli/cli-commands/hello.d.ts +3 -0
  110. package/umd/typings/src/cli/cli-commands/make.d.ts +3 -0
  111. package/umd/typings/src/cli/cli-commands/prettify.d.ts +3 -0
  112. package/umd/typings/src/cli/promptbookCli.d.ts +1 -0
  113. package/umd/typings/src/collection/PipelineCollection.d.ts +1 -1
  114. package/umd/typings/src/collection/SimplePipelineCollection.d.ts +1 -1
  115. package/umd/typings/src/collection/constructors/createCollectionFromDirectory.d.ts +1 -0
  116. package/umd/typings/src/commands/_BOILERPLATE/boilerplateCommandParser.d.ts +1 -0
  117. package/umd/typings/src/commands/_common/types/CommandParser.d.ts +4 -1
  118. package/umd/typings/src/config.d.ts +4 -0
  119. package/umd/typings/src/conversion/pipelineJsonToString.d.ts +1 -1
  120. package/umd/typings/src/conversion/pipelineStringToJson.d.ts +1 -0
  121. package/umd/typings/src/conversion/pipelineStringToJsonSync.d.ts +2 -0
  122. package/umd/typings/src/conversion/validation/validatePipeline.d.ts +6 -0
  123. package/umd/typings/src/errors/EnvironmentMismatchError.d.ts +7 -0
  124. package/{esm/typings/src/errors/VersionMismatch.d.ts → umd/typings/src/errors/VersionMismatchError.d.ts} +1 -1
  125. package/umd/typings/src/execution/CommonExecutionToolsOptions.d.ts +1 -0
  126. package/umd/typings/src/execution/LlmExecutionTools.d.ts +3 -1
  127. package/umd/typings/src/execution/PipelineExecutor.d.ts +4 -3
  128. package/umd/typings/src/execution/PromptResult.d.ts +12 -0
  129. package/umd/typings/src/execution/ScriptExecutionTools.d.ts +4 -2
  130. package/umd/typings/src/execution/createPipelineExecutor.d.ts +2 -0
  131. package/umd/typings/src/formats/csv/ListFormatDefinition.d.ts +1 -0
  132. package/umd/typings/src/formats/json/JsonFormatDefinition.d.ts +1 -0
  133. package/umd/typings/src/formats/list/ListFormatDefinition.d.ts +1 -0
  134. package/umd/typings/src/formats/xml/XmlFormatDefinition.d.ts +1 -0
  135. package/umd/typings/src/knowledge/dialogs/simple-prompt/SimplePromptInterfaceTools.d.ts +3 -0
  136. package/umd/typings/src/knowledge/prepare-knowledge/_common/prepareKnowledgePieces.d.ts +1 -1
  137. package/umd/typings/src/knowledge/prepare-knowledge/markdown/prepareKnowledgeFromMarkdown.d.ts +1 -0
  138. package/umd/typings/src/knowledge/prepare-knowledge/markdown/prepareKnowledgeFromMarkdown.test.d.ts +3 -0
  139. package/umd/typings/src/knowledge/prepare-knowledge/pdf/prepareKnowledgeFromPdf.d.ts +1 -0
  140. package/umd/typings/src/llm-providers/_common/createLlmToolsFromEnv.d.ts +22 -0
  141. package/umd/typings/src/llm-providers/_common/getLlmToolsForCli.d.ts +10 -0
  142. package/umd/typings/src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground.d.ts +10 -0
  143. package/umd/typings/src/llm-providers/_common/utils/cache/CacheItem.d.ts +29 -0
  144. package/umd/typings/src/llm-providers/_common/utils/cache/CacheLlmToolsOptions.d.ts +10 -0
  145. package/umd/typings/src/llm-providers/_common/utils/cache/cacheLlmTools.d.ts +16 -0
  146. package/umd/typings/src/llm-providers/_common/utils/count-total-cost/LlmExecutionToolsWithTotalCost.d.ts +11 -0
  147. package/umd/typings/src/llm-providers/_common/utils/count-total-cost/countTotalCost.d.ts +14 -0
  148. package/umd/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +1 -0
  149. package/umd/typings/src/llm-providers/anthropic-claude/anthropic-claude-models.d.ts +1 -1
  150. package/umd/typings/src/llm-providers/anthropic-claude/playground/playground.d.ts +3 -0
  151. package/umd/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +1 -0
  152. package/umd/typings/src/llm-providers/azure-openai/playground/playground.d.ts +3 -0
  153. package/umd/typings/src/llm-providers/langtail/LangtailExecutionTools.d.ts +3 -0
  154. package/umd/typings/src/llm-providers/langtail/LangtailExecutionToolsOptions.d.ts +1 -1
  155. package/umd/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +1 -1
  156. package/umd/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +1 -1
  157. package/umd/typings/src/llm-providers/multiple/MultipleLlmExecutionTools.d.ts +3 -1
  158. package/umd/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +1 -0
  159. package/umd/typings/src/llm-providers/openai/computeUsage.d.ts +2 -2
  160. package/umd/typings/src/llm-providers/openai/openai-models.d.ts +1 -0
  161. package/umd/typings/src/llm-providers/openai/playground/playground.d.ts +3 -0
  162. package/umd/typings/src/personas/preparePersona.d.ts +4 -2
  163. package/umd/typings/src/prepare/preparePipeline.d.ts +3 -1
  164. package/umd/typings/src/prepare/unpreparePipeline.d.ts +1 -0
  165. package/umd/typings/src/storage/_common/PromptbookStorage.d.ts +25 -0
  166. package/umd/typings/src/storage/_common/PromptbookStorage.test-type.d.ts +5 -0
  167. package/umd/typings/src/storage/files-storage/FilesStorage.d.ts +30 -0
  168. package/umd/typings/src/storage/files-storage/FilesStorageOptions.d.ts +13 -0
  169. package/umd/typings/src/storage/files-storage/utils/nameToSubfolderPath.d.ts +7 -0
  170. package/umd/typings/src/storage/files-storage/utils/nameToSubfolderPath.test.d.ts +1 -0
  171. package/umd/typings/src/storage/local-storage/getLocalStorage.d.ts +9 -0
  172. package/umd/typings/src/storage/local-storage/getSessionStorage.d.ts +9 -0
  173. package/umd/typings/src/storage/memory/MemoryStorage.d.ts +34 -0
  174. package/umd/typings/src/storage/utils/PrefixStorage.d.ts +26 -0
  175. package/umd/typings/src/storage/utils/makePromptbookStorageFromWebStorage.d.ts +11 -0
  176. package/umd/typings/src/types/ModelRequirements.d.ts +21 -1
  177. package/umd/typings/src/types/Parameters.d.ts +0 -1
  178. package/umd/typings/src/types/PipelineJson/PipelineJson.d.ts +1 -2
  179. package/umd/typings/src/types/PipelineJson/PreparationJson.d.ts +1 -5
  180. package/umd/typings/src/types/PipelineJson/PromptTemplateJsonCommon.d.ts +1 -0
  181. package/umd/typings/src/types/PipelineJson/PromptTemplateParameterJson.d.ts +11 -2
  182. package/umd/typings/src/types/PipelineString.d.ts +1 -1
  183. package/umd/typings/src/types/Prompt.d.ts +5 -2
  184. package/umd/typings/src/types/execution-report/countWorkingDuration.d.ts +1 -1
  185. package/umd/typings/src/types/typeAliases.d.ts +35 -1
  186. package/umd/typings/src/utils/emojis.d.ts +2 -2
  187. package/umd/typings/src/utils/formatNumber.d.ts +1 -1
  188. package/umd/typings/src/utils/isRunningInWhatever.d.ts +3 -0
  189. package/umd/typings/src/utils/markdown/addAutoGeneratedSection.d.ts +1 -1
  190. package/umd/typings/src/utils/markdown/createMarkdownChart.d.ts +1 -1
  191. package/umd/typings/src/utils/markdown/createMarkdownTable.d.ts +1 -1
  192. package/umd/typings/src/utils/markdown/extractAllBlocksFromMarkdown-real.test.d.ts +1 -0
  193. package/umd/typings/src/utils/markdown/extractAllBlocksFromMarkdown.d.ts +7 -0
  194. package/umd/typings/src/utils/markdown/extractOneBlockFromMarkdown.d.ts +1 -1
  195. package/umd/typings/src/utils/organization/TODO.d.ts +2 -0
  196. package/umd/typings/src/utils/organization/TODO_USE.d.ts +1 -0
  197. package/umd/typings/src/utils/organization/___.d.ts +2 -0
  198. package/umd/typings/src/utils/organization/just.d.ts +1 -0
  199. package/umd/typings/src/utils/organization/keepImported.d.ts +12 -0
  200. package/umd/typings/src/utils/organization/notUsing.d.ts +12 -0
  201. package/umd/typings/src/utils/organization/really_any.d.ts +2 -0
  202. package/umd/typings/src/utils/random/randomSeed.d.ts +7 -0
  203. package/umd/typings/src/version.d.ts +3 -0
  204. package/esm/typings/src/collection/constructors/justTestFsImport.d.ts +0 -7
  205. package/esm/typings/src/knowledge/prepare-knowledge/_common/utils/getLlmToolsForTests.d.ts +0 -7
  206. package/umd/typings/src/collection/constructors/justTestFsImport.d.ts +0 -7
  207. package/umd/typings/src/knowledge/prepare-knowledge/_common/utils/getLlmToolsForTests.d.ts +0 -7
package/umd/index.umd.js CHANGED
@@ -1,8 +1,8 @@
1
1
  (function (global, factory) {
2
- typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('commander'), require('spacetrim'), require('colors'), require('waitasecond'), require('fs/promises'), require('path'), require('prettier'), require('prettier/parser-html'), require('@anthropic-ai/sdk'), require('lorem-ipsum'), require('openai'), require('glob-promise')) :
3
- typeof define === 'function' && define.amd ? define(['exports', 'commander', 'spacetrim', 'colors', 'waitasecond', 'fs/promises', 'path', 'prettier', 'prettier/parser-html', '@anthropic-ai/sdk', 'lorem-ipsum', 'openai', 'glob-promise'], factory) :
4
- (global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global["promptbook-cli"] = {}, global.commander, global.spaceTrim, global.colors, global.waitasecond, global.promises, global.path, global.prettier, global.parserHtml, global.Anthropic, global.loremIpsum, global.OpenAI, global.glob));
5
- })(this, (function (exports, commander, spaceTrim, colors, waitasecond, promises, path, prettier, parserHtml, Anthropic, loremIpsum, OpenAI, glob) { 'use strict';
2
+ typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('commander'), require('spacetrim'), require('colors'), require('waitasecond'), require('fs/promises'), require('path'), require('prettier'), require('prettier/parser-html'), require('crypto-js/enc-hex'), require('crypto-js/sha256'), require('@anthropic-ai/sdk'), require('openai'), require('glob-promise')) :
3
+ typeof define === 'function' && define.amd ? define(['exports', 'commander', 'spacetrim', 'colors', 'waitasecond', 'fs/promises', 'path', 'prettier', 'prettier/parser-html', 'crypto-js/enc-hex', 'crypto-js/sha256', '@anthropic-ai/sdk', 'openai', 'glob-promise'], factory) :
4
+ (global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global["promptbook-cli"] = {}, global.commander, global.spaceTrim, global.colors, global.waitasecond, global.promises, global.path, global.prettier, global.parserHtml, global.hexEncoder, global.sha256, global.Anthropic, global.OpenAI, global.glob));
5
+ })(this, (function (exports, commander, spaceTrim, colors, waitasecond, promises, path, prettier, parserHtml, hexEncoder, sha256, Anthropic, OpenAI, glob) { 'use strict';
6
6
 
7
7
  function _interopDefaultLegacy (e) { return e && typeof e === 'object' && 'default' in e ? e : { 'default': e }; }
8
8
 
@@ -10,6 +10,8 @@
10
10
  var spaceTrim__default = /*#__PURE__*/_interopDefaultLegacy(spaceTrim);
11
11
  var colors__default = /*#__PURE__*/_interopDefaultLegacy(colors);
12
12
  var parserHtml__default = /*#__PURE__*/_interopDefaultLegacy(parserHtml);
13
+ var hexEncoder__default = /*#__PURE__*/_interopDefaultLegacy(hexEncoder);
14
+ var sha256__default = /*#__PURE__*/_interopDefaultLegacy(sha256);
13
15
  var Anthropic__default = /*#__PURE__*/_interopDefaultLegacy(Anthropic);
14
16
  var OpenAI__default = /*#__PURE__*/_interopDefaultLegacy(OpenAI);
15
17
  var glob__default = /*#__PURE__*/_interopDefaultLegacy(glob);
@@ -145,12 +147,18 @@
145
147
  * Detects if the code is running in a web worker
146
148
  */
147
149
  new Function("\n try {\n if (typeof WorkerGlobalScope !== 'undefined' && self instanceof WorkerGlobalScope) {\n return true;\n } else {\n return false;\n }\n } catch (e) {\n return false;\n }\n");
150
+ /**
151
+ * TODO: [🔼] !!! Export via `@promptbook/utils`
152
+ */
148
153
 
149
154
  /**
150
155
  * The version of the Promptbook library
151
156
  */
152
- var PROMPTBOOK_VERSION = '0.61.0-9';
157
+ var PROMPTBOOK_VERSION = '0.61.0-11';
153
158
  // TODO: !!!! List here all the versions and annotate + put into script
159
+ /**
160
+ * TODO: [🔼] !!! Export via `@promptbook/code`
161
+ */
154
162
 
155
163
  /**
156
164
  * Initializes testing `hello` command for Promptbook CLI utilities
@@ -181,6 +189,9 @@
181
189
  });
182
190
  });
183
191
  }
192
+ /**
193
+ * Note: [🟡] This code should never be published outside of `@promptbook/cli`
194
+ */
184
195
 
185
196
  /**
186
197
  * Converts PipelineCollection to serialized JSON
@@ -211,14 +222,14 @@
211
222
  * The maximum number of iterations for a loops
212
223
  */
213
224
  var LOOP_LIMIT = 1000;
214
- /**
215
- * The maximum number of iterations for a loops which adds characters one by one
216
- */
217
- var CHARACTER_LOOP_LIMIT = 100000;
218
225
  /**
219
226
  * The maximum number of (LLM) tasks running in parallel
220
227
  */
221
228
  var MAX_PARALLEL_COUNT = 5;
229
+ /**
230
+ * The maximum length of the (generated) filename
231
+ */
232
+ var MAX_FILENAME_LENGTH = 30;
222
233
  /**
223
234
  * The name of the builded pipeline collection made by CLI `ptbk make` and for lookup in `createCollectionFromDirectory`
224
235
  */
@@ -231,6 +242,10 @@
231
242
  // <- TODO: Add more like 'date', 'modelName',...
232
243
  // <- TODO: Add [emoji] + instructions ACRY when adding new reserved parameter
233
244
  ];
245
+ /*
246
+ TODO: !!! Just testing false-negative detection of [🟡][🟢][🔵][⚪] leak
247
+ */
248
+ // [🟡][🟢][🔵][⚪]
234
249
 
235
250
  /**
236
251
  * Function `addUsage` will add multiple usages into one
@@ -397,7 +412,7 @@
397
412
  });
398
413
  }
399
414
 
400
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-9",parameters:[{name:"content",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledge",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{name:"knowledge",title:"Knowledge",dependentParameterNames:["content"],blockType:"PROMPT_TEMPLATE",personaName:null,modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {content}",resultingParameterName:"knowledge"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,date:"2024-07-29T00:04:31.966Z",promptbookVersion:"0.61.0-9",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0-9",parameters:[{name:"content",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by 
comma",isInput:false,isOutput:true}],promptTemplates:[{name:"knowledge",title:"Knowledge",dependentParameterNames:["content"],blockType:"PROMPT_TEMPLATE",personaName:null,modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {content}",resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,date:"2024-07-29T00:04:31.970Z",promptbookVersion:"0.61.0-9",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0-9",parameters:[{name:"content",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{name:"knowledge",title:"Knowledge",dependentParameterNames:["content"],blockType:"PROMPT_TEMPLATE",expectations:{words:{min:1,max:8}},personaName:null,modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> 
{content}",resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,date:"2024-07-29T00:04:31.976Z",promptbookVersion:"0.61.0-9",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"}];
415
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-11",parameters:[{name:"content",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledge",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{name:"knowledge",title:"Knowledge",dependentParameterNames:["content"],blockType:"PROMPT_TEMPLATE",personaName:null,modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {content}",resultingParameterName:"knowledge"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-11",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0-11",parameters:[{name:"content",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by 
comma",isInput:false,isOutput:true}],promptTemplates:[{name:"knowledge",title:"Knowledge",dependentParameterNames:["content"],blockType:"PROMPT_TEMPLATE",personaName:null,modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {content}",resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-11",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0-11",parameters:[{name:"content",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{name:"knowledge",title:"Knowledge",dependentParameterNames:["content"],blockType:"PROMPT_TEMPLATE",expectations:{words:{min:1,max:8}},personaName:null,modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> 
{content}",resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-11",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0-11",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{name:"make-model-requirements",title:"Make modelRequirements",dependentParameterNames:["availableModelNames","personaDescription"],blockType:"PROMPT_TEMPLATE",expectFormat:"JSON",personaName:null,modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. 
The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-11",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
401
416
 
402
417
  /**
403
418
  * Prettify the html code
@@ -637,7 +652,7 @@
637
652
  return parameterString;
638
653
  }
639
654
  /**
640
- * TODO: !!!!! Implement new features and commands into `promptTemplateParameterJsonToString`
655
+ * TODO: !!!! Implement new features and commands into `promptTemplateParameterJsonToString`
641
656
  * TODO: [🧠] Is there a way to auto-detect missing features in pipelineJsonToString
642
657
  * TODO: Escape all
643
658
  */
@@ -1014,6 +1029,12 @@
1014
1029
  * > * - ...
1015
1030
  * > ex port function validatePipeline(promptbook: unknown): asserts promptbook is PipelineJson {
1016
1031
  */
1032
+ /**
1033
+ * TODO: [🧠][🐣] !!!! Validate that all samples match expectations
1034
+ * TODO: [🧠][🐣] !!!! Validate that knowledge is valid (non-void)
1035
+ * TODO: [🧠] !!! Validationg not only logic itself but imports around - files and websites and rerefenced pipelines exists
1036
+ * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
1037
+ */
1017
1038
 
1018
1039
  /**
1019
1040
  * This error indicates that promptbook not found in the collection
@@ -1054,7 +1075,7 @@
1054
1075
  /**
1055
1076
  * Constructs a pipeline collection from promptbooks
1056
1077
  *
1057
- * @param promptbooks !!!
1078
+ * @param promptbooks @@@
1058
1079
  *
1059
1080
  * @private Use instead `createCollectionFromJson`
1060
1081
  * Note: During the construction logic of all promptbooks are validated
@@ -1695,7 +1716,7 @@
1695
1716
  };
1696
1717
  /**
1697
1718
  * List all available models that can be used
1698
- * This liost is a combination of all available models from all execution tools
1719
+ * This lists is a combination of all available models from all execution tools
1699
1720
  */
1700
1721
  MultipleLlmExecutionTools.prototype.listModels = function () {
1701
1722
  return __awaiter(this, void 0, void 0, function () {
@@ -1741,6 +1762,8 @@
1741
1762
  }());
1742
1763
  /**
1743
1764
  * TODO: [🧠][🎛] Aggregating multiple models - have result not only from one first aviable model BUT all of them
1765
+ * TODO: [🏖] If no llmTools have for example not defined `callCompletionModel` this will still return object with defined `callCompletionModel` which just throws `PipelineExecutionError`, make it undefined instead
1766
+ * Look how `countTotalUsage` (and `cacheLlmTools`) implements it
1744
1767
  */
1745
1768
 
1746
1769
  /**
@@ -1914,27 +1937,6 @@
1914
1937
  finally { if (e_1) throw e_1.error; }
1915
1938
  }
1916
1939
  }
1917
- /**
1918
- * Function checkExpectations will check if the expectations on given value are met
1919
- *
1920
- * Note: There are two simmilar functions:
1921
- * - `checkExpectations` which throws an error if the expectations are not met
1922
- * - `isPassingExpectations` which returns a boolean
1923
- *
1924
- * @returns {boolean} True if the expectations are met
1925
- */
1926
- function isPassingExpectations(expectations, value) {
1927
- try {
1928
- checkExpectations(expectations, value);
1929
- return true;
1930
- }
1931
- catch (error) {
1932
- if (!(error instanceof ExpectError)) {
1933
- throw error;
1934
- }
1935
- return false;
1936
- }
1937
- }
1938
1940
  /**
1939
1941
  * TODO: [💝] Unite object for expecting amount and format
1940
1942
  */
@@ -2009,7 +2011,6 @@
2009
2011
  var _this = this;
2010
2012
  var pipeline = options.pipeline, tools = options.tools, _a = options.settings, settings = _a === void 0 ? {} : _a;
2011
2013
  var _b = settings.maxExecutionAttempts, maxExecutionAttempts = _b === void 0 ? 3 : _b;
2012
- // TODO: !!!!! Implement new commands
2013
2014
  validatePipeline(pipeline);
2014
2015
  var llmTools = joinLlmExecutionTools.apply(void 0, __spreadArray([], __read(arrayableToArray(tools.llm)), false));
2015
2016
  var pipelineExecutor = function (inputParameters, onProgress) { return __awaiter(_this, void 0, void 0, function () {
@@ -2085,7 +2086,10 @@
2085
2086
  : 'anonymous' /* <- TODO: [🧠] How to deal with anonymous pipelines, do here some auto-url like SHA-256 based ad-hoc identifier? */, "#").concat(currentTemplate.name),
2086
2087
  parameters: parametersToPass,
2087
2088
  content: replaceParameters(currentTemplate.content, parametersToPass) /* <- [2] */,
2089
+ // <- TODO: !!!!! Apply {context} and knowledges
2090
+ // <- TODO: !!!!! Apply samples
2088
2091
  modelRequirements: currentTemplate.modelRequirements,
2092
+ // <- TODO: !!!!! Apply persona
2089
2093
  expectations: currentTemplate.expectations,
2090
2094
  expectFormat: currentTemplate.expectFormat,
2091
2095
  postprocessing: (currentTemplate.postprocessing || []).map(function (functionName) { return function (result) { return __awaiter(_this, void 0, void 0, function () {
@@ -2532,10 +2536,12 @@
2532
2536
  }
2533
2537
  /**
2534
2538
  * TODO: [🪂] Pass maxParallelCount here
2539
+ * TODO: [♈] Probbably move expectations from templates to parameters
2535
2540
  * TODO: [🧠] When not meet expectations in PROMPT_DIALOG, make some way to tell the user
2536
2541
  * TODO: [👧] Strongly type the executors to avoid need of remove nullables whtn noUncheckedIndexedAccess in tsconfig.json
2537
2542
  * Note: CreatePipelineExecutorOptions are just connected to PipelineExecutor so do not extract to types folder
2538
2543
  * TODO: [🧠][3] transparent = (report intermediate parameters) / opaque execution = (report only output parameters) progress reporting mode
2544
+ * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
2539
2545
  */
2540
2546
 
2541
2547
  /**
@@ -2546,6 +2552,7 @@
2546
2552
  *
2547
2553
  * @param value any values
2548
2554
  * @returns void
2555
+ * @private within the repository
2549
2556
  */
2550
2557
  function TODO_USE() {
2551
2558
  var value = [];
@@ -2553,7 +2560,6 @@
2553
2560
  value[_i] = arguments[_i];
2554
2561
  }
2555
2562
  }
2556
- // TODO: !!!! Find ACRY all just(...) and replace with TODO_USE
2557
2563
 
2558
2564
  /**
2559
2565
  * @@@
@@ -2682,6 +2688,7 @@
2682
2688
  });
2683
2689
  }
2684
2690
  /**
2691
+ * TODO: [🔼] !!! Export via `@promptbook/markdown`
2685
2692
  * TODO: [🪂] Do it in parallel 11:11
2686
2693
  * Note: No need to aggregate usage here, it is done by intercepting the llmTools
2687
2694
  */
@@ -2690,7 +2697,6 @@
2690
2697
  * Prepares the knowle
2691
2698
  *
2692
2699
  * @see https://github.com/webgptorg/promptbook/discussions/41
2693
- * @private within the package
2694
2700
  */
2695
2701
  function prepareKnowledgePieces(knowledgeSources, options) {
2696
2702
  return __awaiter(this, void 0, void 0, function () {
@@ -2705,7 +2711,7 @@
2705
2711
  var partialPieces, pieces;
2706
2712
  return __generator(this, function (_a) {
2707
2713
  switch (_a.label) {
2708
- case 0: return [4 /*yield*/, prepareKnowledgeFromMarkdown(knowledgeSource.source, // <- TODO: !!!!! Unhardcode markdown, detect which type it is
2714
+ case 0: return [4 /*yield*/, prepareKnowledgeFromMarkdown(knowledgeSource.source, // <- TODO: [🐝] !!!!! Unhardcode markdown, detect which type it is
2709
2715
  options)];
2710
2716
  case 1:
2711
2717
  partialPieces = _a.sent();
@@ -2743,6 +2749,7 @@
2743
2749
  > ):
2744
2750
  */
2745
2751
  /**
2752
+ * TODO: [🔼] !!! Export via `@promptbook/core`
2746
2753
  * TODO: [🧊] In future one preparation can take data from previous preparation and save tokens and time
2747
2754
  * Put `knowledgePieces` into `PrepareKnowledgeOptions`
2748
2755
  * TODO: [🪂] More than max things can run in parallel by acident [1,[2a,2b,_],[3a,3b,_]]
@@ -2755,36 +2762,65 @@
2755
2762
  * Prepares the persona for the pipeline
2756
2763
  *
2757
2764
  * @see https://github.com/webgptorg/promptbook/discussions/22
2758
- * @private within the package
2759
2765
  */
2760
2766
  function preparePersona(personaDescription, options) {
2761
2767
  return __awaiter(this, void 0, void 0, function () {
2762
- var llmTools, _a, maxParallelCount, _b, isVerbose;
2763
- return __generator(this, function (_c) {
2764
- llmTools = options.llmTools, _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? MAX_PARALLEL_COUNT : _a, _b = options.isVerbose, isVerbose = _b === void 0 ? false : _b;
2765
- TODO_USE(maxParallelCount); // <- [🪂]
2766
- TODO_USE(personaDescription); // <- !!!!!
2767
- TODO_USE(llmTools); // <- !!!!!
2768
- TODO_USE(isVerbose); // <- !!!!!
2769
- return [2 /*return*/, {
2770
- modelVariant: 'CHAT',
2771
- modelName: 'gpt-4',
2772
- }];
2768
+ var llmTools, _a, isVerbose, collection, preparePersonaExecutor, _b, availableModels, availableModelNames, result, outputParameters, modelRequirementsRaw, modelRequirements, modelName, systemMessage, temperature;
2769
+ var _c;
2770
+ return __generator(this, function (_d) {
2771
+ switch (_d.label) {
2772
+ case 0:
2773
+ llmTools = options.llmTools, _a = options.isVerbose, isVerbose = _a === void 0 ? false : _a;
2774
+ collection = createCollectionFromJson.apply(void 0, __spreadArray([], __read(PipelineCollection), false));
2775
+ _b = createPipelineExecutor;
2776
+ _c = {};
2777
+ return [4 /*yield*/, collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.ptbk.md')];
2778
+ case 1:
2779
+ preparePersonaExecutor = _b.apply(void 0, [(_c.pipeline = _d.sent(),
2780
+ _c.tools = {
2781
+ llm: llmTools,
2782
+ },
2783
+ _c)]);
2784
+ return [4 /*yield*/, llmTools.listModels()];
2785
+ case 2:
2786
+ availableModels = _d.sent();
2787
+ availableModelNames = availableModels
2788
+ .filter(function (_a) {
2789
+ var modelVariant = _a.modelVariant;
2790
+ return modelVariant === 'CHAT';
2791
+ })
2792
+ .map(function (_a) {
2793
+ var modelName = _a.modelName;
2794
+ return modelName;
2795
+ })
2796
+ .join(',');
2797
+ return [4 /*yield*/, preparePersonaExecutor({ availableModelNames: availableModelNames, personaDescription: personaDescription })];
2798
+ case 3:
2799
+ result = _d.sent();
2800
+ assertsExecutionSuccessful(result);
2801
+ outputParameters = result.outputParameters;
2802
+ modelRequirementsRaw = outputParameters.modelRequirements;
2803
+ modelRequirements = JSON.parse(modelRequirementsRaw);
2804
+ if (isVerbose) {
2805
+ console.info("PERSONA ".concat(personaDescription), modelRequirements);
2806
+ }
2807
+ modelName = modelRequirements.modelName, systemMessage = modelRequirements.systemMessage, temperature = modelRequirements.temperature;
2808
+ return [2 /*return*/, {
2809
+ modelVariant: 'CHAT',
2810
+ modelName: modelName,
2811
+ systemMessage: systemMessage,
2812
+ temperature: temperature,
2813
+ }];
2814
+ }
2773
2815
  });
2774
2816
  });
2775
2817
  }
2776
2818
  /**
2777
- * TODO: [🪂] Do it in parallel
2778
- */
2779
-
2780
- /**
2781
- * Simple wrapper `new Date().toISOString()`
2782
- *
2783
- * @returns string_date branded type
2819
+ * TODO: [🔼] !!! Export via `@promptbook/core`
2820
+ * TODO: [🏢] !! Check validity of `modelName` in pipeline
2821
+ * TODO: [🏢] !! Check validity of `systemMessage` in pipeline
2822
+ * TODO: [🏢] !! Check validity of `temperature` in pipeline
2784
2823
  */
2785
- function $currentDate() {
2786
- return new Date().toISOString();
2787
- }
2788
2824
 
2789
2825
  /**
2790
2826
  * Prepare pipeline from string (markdown) format to JSON format
@@ -2808,7 +2844,7 @@
2808
2844
  knowledgeSources = pipeline.knowledgeSources, personas = pipeline.personas;
2809
2845
  currentPreparation = {
2810
2846
  id: 1,
2811
- date: $currentDate(),
2847
+ // TODO: [🍥]> date: $currentDate(),
2812
2848
  promptbookVersion: PROMPTBOOK_VERSION,
2813
2849
  modelUsage: addUsage(),
2814
2850
  };
@@ -2817,8 +2853,8 @@
2817
2853
  // <- TODO: [🧊]
2818
2854
  currentPreparation,
2819
2855
  ];
2820
- preparedPersonas = [];
2821
- return [4 /*yield*/, forEachAsync(personas, { maxParallelCount: maxParallelCount /* <- TODO: [🪂] When there are subtasks, this maximul limit can be broken */ }, function (persona) { return __awaiter(_this, void 0, void 0, function () {
2856
+ preparedPersonas = new Array(personas.length);
2857
+ return [4 /*yield*/, forEachAsync(personas, { maxParallelCount: maxParallelCount /* <- TODO: [🪂] When there are subtasks, this maximul limit can be broken */ }, function (persona, index) { return __awaiter(_this, void 0, void 0, function () {
2822
2858
  var modelRequirements, preparedPersona;
2823
2859
  return __generator(this, function (_a) {
2824
2860
  switch (_a.label) {
@@ -2826,7 +2862,7 @@
2826
2862
  case 1:
2827
2863
  modelRequirements = _a.sent();
2828
2864
  preparedPersona = __assign(__assign({}, persona), { modelRequirements: modelRequirements, preparationIds: [/* TODO: [🧊] -> */ currentPreparation.id] });
2829
- preparedPersonas.push(preparedPersona);
2865
+ preparedPersonas[index] = preparedPersona;
2830
2866
  return [2 /*return*/];
2831
2867
  }
2832
2868
  });
@@ -2845,11 +2881,186 @@
2845
2881
  });
2846
2882
  }
2847
2883
  /**
2884
+ * TODO: [🔼] !!! Export via `@promptbook/core`
2848
2885
  * TODO: Write tests for `preparePipeline`
2849
2886
  * TODO: [🏏] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
2850
2887
  * TODO: [🧊] In future one preparation can take data from previous preparation and save tokens and time
2851
- * TODO: [💸] Make utilities `interceptLlmTools` and `costLlmTools` to compute cost and DO put this counting logic in `prepareKnowledge` or `preparePersona`
2888
+ * TODO: [🎐] !!!! Use here countTotalUsage
2889
+ * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
2890
+ */
2891
+
2892
+ /**
2893
+ * Tests if given string is valid URL.
2894
+ *
2895
+ * Note: This does not check if the file exists only if the path is valid
2896
+ */
2897
+ function isValidFilePath(filePath) {
2898
+ if (typeof filePath !== 'string') {
2899
+ return false;
2900
+ }
2901
+ var filePathSlashes = filePath.split('\\').join('/');
2902
+ // Absolute Unix path: /hello.txt
2903
+ if (/^(\/)/i.test(filePathSlashes)) {
2904
+ return true;
2905
+ }
2906
+ // Absolute Windows path: /hello.txt
2907
+ if (/^([A-Z]{1,2}:\/?)\//i.test(filePathSlashes)) {
2908
+ return true;
2909
+ }
2910
+ // Relative path: ./hello.txt
2911
+ if (/^(\.\.?\/)+/i.test(filePathSlashes)) {
2912
+ return true;
2913
+ }
2914
+ return false;
2915
+ }
2916
+
2917
+ /**
2918
+ * Parses the knowledge command
2919
+ *
2920
+ * @see ./KNOWLEDGE-README.md for more details
2921
+ * @private within the commands folder
2922
+ */
2923
+ var knowledgeCommandParser = {
2924
+ /**
2925
+ * Name of the command
2926
+ */
2927
+ name: 'KNOWLEDGE',
2928
+ /**
2929
+ * BOILERPLATE command can be used in:
2930
+ */
2931
+ usagePlaces: ['PIPELINE_HEAD'],
2932
+ /**
2933
+ * Description of the KNOWLEDGE command
2934
+ */
2935
+ description: "Tells promptbook which external knowledge to use",
2936
+ /**
2937
+ * Link to discussion
2938
+ */
2939
+ discussionUrl: 'https://github.com/webgptorg/promptbook/discussions/41',
2940
+ /**
2941
+ * Example usages of the KNOWLEDGE command
2942
+ */
2943
+ examples: [
2944
+ 'KNOWLEDGE https://www.pavolhejny.com/',
2945
+ 'KNOWLEDGE ./hejny-cv.txt',
2946
+ 'KNOWLEDGE ./hejny-cv.md',
2947
+ 'KNOWLEDGE ./hejny-cv.pdf',
2948
+ 'KNOWLEDGE ./hejny-cv.docx',
2949
+ ],
2950
+ /**
2951
+ * Parses the KNOWLEDGE command
2952
+ */
2953
+ parse: function (input) {
2954
+ var args = input.args;
2955
+ var source = args[0];
2956
+ if (source === undefined) {
2957
+ throw new ParsingError("Source is not defined");
2958
+ }
2959
+ if (source.startsWith('http://')) {
2960
+ throw new ParsingError("Source is not secure");
2961
+ }
2962
+ if (!(isValidFilePath(source) || isValidUrl(source))) {
2963
+ throw new ParsingError("Source not valid");
2964
+ }
2965
+ if (source.startsWith('../') || source.startsWith('/') || /^[A-Z]:[\\/]+/i.test(source)) {
2966
+ throw new ParsingError("Source cannot be outside of the .ptbk.md folder");
2967
+ }
2968
+ return {
2969
+ type: 'KNOWLEDGE',
2970
+ source: source,
2971
+ };
2972
+ },
2973
+ /**
2974
+ * Note: Prototype of [🍧] (remove this comment after full implementation)
2975
+ */
2976
+ applyToPipelineJson: function (pipelineJson, personaCommand) {
2977
+ var source = personaCommand.source;
2978
+ var name = titleToName(source);
2979
+ pipelineJson.knowledgeSources.push({
2980
+ name: name,
2981
+ source: source,
2982
+ });
2983
+ },
2984
+ };
2985
+
2986
+ /**
2987
+ * Parses the persona command
2988
+ *
2989
+ * @see ./PERSONA-README.md for more details
2990
+ * @private within the commands folder
2852
2991
  */
2992
+ var personaCommandParser = {
2993
+ /**
2994
+ * Name of the command
2995
+ */
2996
+ name: 'PERSONA',
2997
+ /**
2998
+ * Aliases for the PERSONA command
2999
+ */
3000
+ aliasNames: ['PERSON'],
3001
+ /**
3002
+ * PERSONA command can be used in:
3003
+ */
3004
+ usagePlaces: ['PIPELINE_HEAD', 'PIPELINE_TEMPLATE'],
3005
+ /**
3006
+ * Description of the PERSONA command
3007
+ */
3008
+ description: "Persona command is used to specify who the system is, it will be transformed into system message, top_t,...",
3009
+ /**
3010
+ * Link to discussion
3011
+ */
3012
+ discussionUrl: 'https://github.com/webgptorg/promptbook/discussions/22',
3013
+ /**
3014
+ * Example usages of the PERSONA command
3015
+ */
3016
+ examples: ['PERSONA Jane, skilled copywriter', 'PERSONA Joe, male 28 years old, programmer'],
3017
+ /**
3018
+ * Parses the PERSONA command
3019
+ */
3020
+ parse: function (input) {
3021
+ var rawArgs = input.rawArgs;
3022
+ var _a = __read(rawArgs.split(/[,;:]/, 2), 2), personaNameRaw = _a[0], personaDescriptionRaw = _a[1];
3023
+ var personaName = (personaNameRaw || '').trim();
3024
+ if (personaName === '') {
3025
+ throw new ParsingError("You must set name for the persona");
3026
+ }
3027
+ var personaDescription = (personaDescriptionRaw || '').trim();
3028
+ if (personaDescription === '') {
3029
+ personaDescription = null;
3030
+ }
3031
+ return {
3032
+ type: 'PERSONA',
3033
+ personaName: personaName,
3034
+ personaDescription: personaDescription,
3035
+ };
3036
+ },
3037
+ /**
3038
+ * Note: Prototype of [🍧] (remove this comment after full implementation)
3039
+ */
3040
+ applyToPipelineJson: function (pipelineJson, personaCommand) {
3041
+ var personaName = personaCommand.personaName, personaDescription = personaCommand.personaDescription;
3042
+ var persona = pipelineJson.personas.find(function (persona) { return persona.name === personaName; });
3043
+ if (persona === undefined) {
3044
+ pipelineJson.personas.push({
3045
+ name: personaName,
3046
+ description: personaDescription || '',
3047
+ });
3048
+ return;
3049
+ }
3050
+ if (persona.description === personaDescription) {
3051
+ return;
3052
+ }
3053
+ if (personaDescription === null) {
3054
+ return;
3055
+ }
3056
+ if (persona.description === '') {
3057
+ persona.description = personaDescription;
3058
+ return;
3059
+ }
3060
+ console.warn(spaceTrim__default["default"]("\n\n Persona \"".concat(personaName, "\" is defined multiple times with different description:\n\n First definition:\n ").concat(persona.description, "\n\n Second definition:\n ").concat(personaDescription, "\n\n ")));
3061
+ persona.description += spaceTrim__default["default"]('\n\n' + personaDescription);
3062
+ },
3063
+ };
2853
3064
 
2854
3065
  /**
2855
3066
  * Removes Markdown formatting tags from a string.
@@ -3006,7 +3217,7 @@
3006
3217
  'Knowledge BLOCK',
3007
3218
  // 'Knowledge', // <- Note: [⛱] For execution blocks which are also separate commands shortcut does not work
3008
3219
  //---
3009
- /* TODO: !!!! Not implemented block types will be in examples in future -> */
3220
+ /* Note: Not implemented block types will be in examples in future -> */
3010
3221
  'Instrument BLOCK',
3011
3222
  // 'Instrument', // <- Note: [⛱]
3012
3223
  'Action BLOCK',
@@ -3022,13 +3233,12 @@
3022
3233
  normalized = normalized.split('EXAMPLE').join('SAMPLE');
3023
3234
  var blockTypes = BlockTypes.filter(function (blockType) { return normalized.includes(blockType); });
3024
3235
  if (blockTypes.length !== 1) {
3025
- // console.log('!!!', { blockType });
3026
3236
  throw new ParsingError(spaceTrim__default["default"](function (block) { return "\n Unknown block type in BLOCK command\n\n Supported block types are:\n ".concat(block(BlockTypes.join(', ')), "\n "); }));
3027
3237
  }
3028
- // TODO: !!!! Not supported yet
3238
+ var blockType = blockTypes[0];
3029
3239
  return {
3030
3240
  type: 'BLOCK',
3031
- blockType: blockTypes[0],
3241
+ blockType: blockType,
3032
3242
  };
3033
3243
  },
3034
3244
  };
@@ -3271,116 +3481,36 @@
3271
3481
  },
3272
3482
  };
3273
3483
 
3484
+ var MODEL_VARIANTS = ['COMPLETION', 'CHAT', 'EMBEDDING' /* <- TODO [🏳] */ /* <- [🤖] */];
3274
3485
  /**
3275
- * Tests if given string is valid URL.
3276
- *
3277
- * Note: This does not check if the file exists only if the path is valid
3486
+ * TODO: [🧠][🈁] `seed` should maybe be somewhere else (not in `ModelRequirements`) (simmilar that `user` identification is not here)
3487
+ * TODO: [🧠][💱] Add more model options: `stop_token`, `logit_bias`, `logprobs` (`top_logprobs`), `top_k`, `top_p`, `presence_penalty`, `frequency_penalty`, `bestOf`, `logitBias`, `logitBiasType`,...
3488
+ * [💱] Probbably keep using just `temperature` in Promptbook (not `top_k` and `top_p`)
3489
+ * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
3490
+ * TODO: Maybe figure out better word than "variant"
3491
+ * TODO: Add here more requirement options like max context size, max tokens, etc.
3492
+ * TODO: [💕][🧠] Just selecting gpt3 or gpt4 level of model
3278
3493
  */
3279
- function isValidFilePath(filePath) {
3280
- if (typeof filePath !== 'string') {
3281
- return false;
3282
- }
3283
- var filePathSlashes = filePath.split('\\').join('/');
3284
- // Absolute Unix path: /hello.txt
3285
- if (/^(\/)/i.test(filePathSlashes)) {
3286
- return true;
3287
- }
3288
- // Absolute Windows path: /hello.txt
3289
- if (/^([A-Z]{1,2}:\/?)\//i.test(filePathSlashes)) {
3290
- return true;
3291
- }
3292
- // Relative path: ./hello.txt
3293
- if (/^(\.\.?\/)+/i.test(filePathSlashes)) {
3294
- return true;
3295
- }
3296
- return false;
3297
- }
3298
3494
 
3299
3495
  /**
3300
- * Parses the knowledge command
3496
+ * Parses the model command
3301
3497
  *
3302
- * @see ./KNOWLEDGE-README.md for more details
3498
+ * @see ./MODEL-README.md for more details
3303
3499
  * @private within the commands folder
3304
3500
  */
3305
- var knowledgeCommandParser = {
3501
+ var modelCommandParser = {
3306
3502
  /**
3307
3503
  * Name of the command
3308
3504
  */
3309
- name: 'KNOWLEDGE',
3505
+ name: 'MODEL',
3310
3506
  /**
3311
3507
  * BOILERPLATE command can be used in:
3312
3508
  */
3313
- usagePlaces: ['PIPELINE_HEAD'],
3509
+ usagePlaces: ['PIPELINE_HEAD', 'PIPELINE_TEMPLATE'],
3314
3510
  /**
3315
- * Description of the KNOWLEDGE command
3511
+ * Description of the MODEL command
3316
3512
  */
3317
- description: "Tells promptbook which external knowledge to use",
3318
- /**
3319
- * Link to discussion
3320
- */
3321
- discussionUrl: 'https://github.com/webgptorg/promptbook/discussions/41',
3322
- /**
3323
- * Example usages of the KNOWLEDGE command
3324
- */
3325
- examples: [
3326
- 'KNOWLEDGE https://www.pavolhejny.com/',
3327
- 'KNOWLEDGE ./hejny-cv.txt',
3328
- 'KNOWLEDGE ./hejny-cv.md',
3329
- 'KNOWLEDGE ./hejny-cv.pdf',
3330
- 'KNOWLEDGE ./hejny-cv.docx',
3331
- ],
3332
- /**
3333
- * Parses the KNOWLEDGE command
3334
- */
3335
- parse: function (input) {
3336
- var args = input.args;
3337
- var source = args[0];
3338
- if (source === undefined) {
3339
- throw new ParsingError("Source is not defined");
3340
- }
3341
- if (source.startsWith('http://')) {
3342
- throw new ParsingError("Source is not secure");
3343
- }
3344
- if (!(isValidFilePath(source) || isValidUrl(source))) {
3345
- throw new ParsingError("Source not valid");
3346
- }
3347
- if (source.startsWith('../') || source.startsWith('/') || /^[A-Z]:[\\/]+/i.test(source)) {
3348
- throw new ParsingError("Source cannot be outside of the .ptbk.md folder");
3349
- }
3350
- return {
3351
- type: 'KNOWLEDGE',
3352
- source: source,
3353
- };
3354
- },
3355
- };
3356
-
3357
- var MODEL_VARIANTS = ['COMPLETION', 'CHAT', 'EMBEDDING' /* <- TODO [🏳] */ /* <- [🤖] */];
3358
- /**
3359
- * TODO: !!!!! Add and use systemMessage, temprerature, top_k, top_p, presencePenalty, frequencyPenalty, bestOf, n, logitBias, logitBiasType, stop, ... to ModelRequirements
3360
- * TODO: Maybe figure out better word than "variant"
3361
- * TODO: Add here more requirement options like max context size, max tokens, etc.
3362
- * TODO: [💕][🧠] Just selecting gpt3 or gpt4 level of model
3363
- */
3364
-
3365
- /**
3366
- * Parses the model command
3367
- *
3368
- * @see ./MODEL-README.md for more details
3369
- * @private within the commands folder
3370
- */
3371
- var modelCommandParser = {
3372
- /**
3373
- * Name of the command
3374
- */
3375
- name: 'MODEL',
3376
- /**
3377
- * BOILERPLATE command can be used in:
3378
- */
3379
- usagePlaces: ['PIPELINE_HEAD', 'PIPELINE_TEMPLATE'],
3380
- /**
3381
- * Description of the MODEL command
3382
- */
3383
- description: "Tells which model and modelRequirements to use for the prompt template execution",
3513
+ description: "Tells which model and modelRequirements to use for the prompt template execution",
3384
3514
  /**
3385
3515
  * Link to discussion
3386
3516
  */
@@ -3498,59 +3628,6 @@
3498
3628
  },
3499
3629
  };
3500
3630
 
3501
- /**
3502
- * Parses the persona command
3503
- *
3504
- * @see ./PERSONA-README.md for more details
3505
- * @private within the commands folder
3506
- */
3507
- var personaCommandParser = {
3508
- /**
3509
- * Name of the command
3510
- */
3511
- name: 'PERSONA',
3512
- /**
3513
- * Aliases for the PERSONA command
3514
- */
3515
- aliasNames: ['PERSON'],
3516
- /**
3517
- * PERSONA command can be used in:
3518
- */
3519
- usagePlaces: ['PIPELINE_HEAD', 'PIPELINE_TEMPLATE'],
3520
- /**
3521
- * Description of the PERSONA command
3522
- */
3523
- description: "Persona command is used to specify who the system is, it will be transformed into system message, top_t,...",
3524
- /**
3525
- * Link to discussion
3526
- */
3527
- discussionUrl: 'https://github.com/webgptorg/promptbook/discussions/22',
3528
- /**
3529
- * Example usages of the PERSONA command
3530
- */
3531
- examples: ['PERSONA Jane, skilled copywriter', 'PERSONA Joe, male 28 years old, programmer'],
3532
- /**
3533
- * Parses the PERSONA command
3534
- */
3535
- parse: function (input) {
3536
- var rawArgs = input.rawArgs;
3537
- var _a = __read(rawArgs.split(/[,;:]/, 2), 2), personaNameRaw = _a[0], personaDescriptionRaw = _a[1];
3538
- var personaName = (personaNameRaw || '').trim();
3539
- if (personaName === '') {
3540
- throw new ParsingError("You must set name for the persona");
3541
- }
3542
- var personaDescription = (personaDescriptionRaw || '').trim();
3543
- if (personaDescription === '') {
3544
- personaDescription = null;
3545
- }
3546
- return {
3547
- type: 'PERSONA',
3548
- personaName: personaName,
3549
- personaDescription: personaDescription,
3550
- };
3551
- },
3552
- };
3553
-
3554
3631
  function isValidJavascriptName(javascriptName) {
3555
3632
  if (typeof javascriptName !== 'string') {
3556
3633
  return false;
@@ -3768,7 +3845,8 @@
3768
3845
  * Parses the ACTION command
3769
3846
  */
3770
3847
  parse: function (input) {
3771
- input.args;
3848
+ var args = input.args;
3849
+ TODO_USE(args);
3772
3850
  return {
3773
3851
  type: 'ACTION',
3774
3852
  };
@@ -3806,7 +3884,8 @@
3806
3884
  * Parses the INSTRUMENT command
3807
3885
  */
3808
3886
  parse: function (input) {
3809
- input.args;
3887
+ var args = input.args;
3888
+ TODO_USE(args);
3810
3889
  return {
3811
3890
  type: 'INSTRUMENT',
3812
3891
  };
@@ -3864,6 +3943,7 @@
3864
3943
  };
3865
3944
  /**
3866
3945
  * TODO: [💐] Implement BOILERPLATE command into `pipelineStringToJsonSync` function
3946
+ * Note: [⚪] This should never be in any released package
3867
3947
  */
3868
3948
 
3869
3949
  /**
@@ -3882,7 +3962,7 @@
3882
3962
  actionCommandParser,
3883
3963
  instrumentCommandParser,
3884
3964
  personaCommandParser,
3885
- boilerplateCommandParser, // <- TODO: !!!! Only in development, remove in production
3965
+ boilerplateCommandParser, // <- TODO: !! Only in development, remove in production
3886
3966
  ];
3887
3967
 
3888
3968
  /**
@@ -3968,7 +4048,7 @@
3968
4048
  }));
3969
4049
  }
3970
4050
  /**
3971
- * !!!
4051
+ * @@@
3972
4052
  */
3973
4053
  function getSupportedCommandsMessage() {
3974
4054
  return COMMANDS.flatMap(function (_a) {
@@ -3979,7 +4059,7 @@
3979
4059
  }).join('\n');
3980
4060
  }
3981
4061
  /**
3982
- * !!!
4062
+ * @@@
3983
4063
  */
3984
4064
  function parseCommandVariant(input) {
3985
4065
  var e_1, _a;
@@ -3988,7 +4068,6 @@
3988
4068
  var _loop_1 = function (commandParser) {
3989
4069
  var name_1 = commandParser.name, aliasNames = commandParser.aliasNames, deprecatedNames = commandParser.deprecatedNames, parse = commandParser.parse;
3990
4070
  var names = __spreadArray(__spreadArray([name_1], __read((aliasNames || [])), false), __read((deprecatedNames || [])), false);
3991
- // console.log('!!!', { commandName, names });
3992
4071
  if (names.includes(commandName)) {
3993
4072
  try {
3994
4073
  return { value: parse({ usagePlace: usagePlace, raw: raw, rawArgs: rawArgs, normalized: normalized, args: args }) };
@@ -4098,25 +4177,42 @@
4098
4177
  var e_1, _a;
4099
4178
  var codeBlocks = [];
4100
4179
  var lines = markdown.split('\n');
4180
+ // Note: [0] Ensure that the last block notated by gt > will be closed
4181
+ lines.push('');
4101
4182
  var currentCodeBlock = null;
4102
4183
  try {
4103
4184
  for (var lines_1 = __values(lines), lines_1_1 = lines_1.next(); !lines_1_1.done; lines_1_1 = lines_1.next()) {
4104
4185
  var line = lines_1_1.value;
4186
+ if (line.startsWith('> ') || line === '>') {
4187
+ if (currentCodeBlock === null) {
4188
+ currentCodeBlock = { blockNotation: '>', language: null, content: '' };
4189
+ } /* not else */
4190
+ if (currentCodeBlock.blockNotation === '>') {
4191
+ if (currentCodeBlock.content !== '') {
4192
+ currentCodeBlock.content += '\n';
4193
+ }
4194
+ currentCodeBlock.content += line.slice(2);
4195
+ }
4196
+ }
4197
+ else if (currentCodeBlock !== null && currentCodeBlock.blockNotation === '>' /* <- Note: [0] */) {
4198
+ codeBlocks.push(currentCodeBlock);
4199
+ currentCodeBlock = null;
4200
+ }
4201
+ /* not else */
4105
4202
  if (line.startsWith('```')) {
4106
4203
  var language = line.slice(3).trim() || null;
4107
4204
  if (currentCodeBlock === null) {
4108
- currentCodeBlock = { language: language, content: '' };
4205
+ currentCodeBlock = { blockNotation: '```', language: language, content: '' };
4109
4206
  }
4110
4207
  else {
4111
4208
  if (language !== null) {
4112
- // [🌻]
4113
- throw new Error("".concat(capitalize(currentCodeBlock.language || 'the'), " code block was not closed and already opening new ").concat(language, " code block"));
4209
+ throw new ParsingError("".concat(capitalize(currentCodeBlock.language || 'the'), " code block was not closed and already opening new ").concat(language, " code block"));
4114
4210
  }
4115
4211
  codeBlocks.push(currentCodeBlock);
4116
4212
  currentCodeBlock = null;
4117
4213
  }
4118
4214
  }
4119
- else if (currentCodeBlock !== null) {
4215
+ else if (currentCodeBlock !== null && currentCodeBlock.blockNotation === '```') {
4120
4216
  if (currentCodeBlock.content !== '') {
4121
4217
  currentCodeBlock.content += '\n';
4122
4218
  }
@@ -4132,11 +4228,13 @@
4132
4228
  finally { if (e_1) throw e_1.error; }
4133
4229
  }
4134
4230
  if (currentCodeBlock !== null) {
4135
- // [🌻]
4136
- throw new Error("".concat(capitalize(currentCodeBlock.language || 'the'), " code block was not closed at the end of the markdown"));
4231
+ throw new ParsingError("".concat(capitalize(currentCodeBlock.language || 'the'), " code block was not closed at the end of the markdown"));
4137
4232
  }
4138
4233
  return codeBlocks;
4139
4234
  }
4235
+ /**
4236
+ * TODO: Maybe name for `blockNotation` instead of '```' and '>'
4237
+ */
4140
4238
 
4141
4239
  /**
4142
4240
  * Extracts exactly ONE code block from markdown.
@@ -4154,13 +4252,12 @@
4154
4252
  function extractOneBlockFromMarkdown(markdown) {
4155
4253
  var codeBlocks = extractAllBlocksFromMarkdown(markdown);
4156
4254
  if (codeBlocks.length !== 1) {
4157
- // TODO: Report more specific place where the error happened
4158
- throw new Error(/* <- [🌻] */ 'There should be exactly one code block in the markdown');
4255
+ throw new ParsingError(spaceTrim__default["default"](function (block) { return "\n There should be exactly 1 code block, found ".concat(codeBlocks.length, " code blocks\n\n ").concat(block(codeBlocks.map(function (block, i) { return "Block ".concat(i + 1, ":\n").concat(block.content); }).join('\n\n\n')), "\n "); }));
4159
4256
  }
4160
4257
  return codeBlocks[0];
4161
4258
  }
4162
4259
  /***
4163
- * TODO: [🍓][🌻] !!! Decide of this is internal util, external util OR validator/postprocessor
4260
+ * TODO: [🍓][🌻] Decide of this is internal util, external util OR validator/postprocessor
4164
4261
  */
4165
4262
 
4166
4263
  /**
@@ -4170,13 +4267,13 @@
4170
4267
  var _a, _b;
4171
4268
  var lines = value.split('\n');
4172
4269
  if (!lines[0].startsWith('#')) {
4173
- throw new Error('Markdown section must start with heading');
4270
+ throw new ParsingError('Markdown section must start with heading');
4174
4271
  }
4175
4272
  var title = lines[0].replace(/^#+\s*/, '');
4176
4273
  var level = (_b = (_a = lines[0].match(/^#+/)) === null || _a === void 0 ? void 0 : _a[0].length) !== null && _b !== void 0 ? _b : 0;
4177
4274
  var content = spaceTrim__default["default"](lines.slice(1).join('\n'));
4178
4275
  if (level < 1 || level > 6) {
4179
- throw new Error('Markdown section must have heading level between 1 and 6');
4276
+ throw new ParsingError('Markdown section must have heading level between 1 and 6');
4180
4277
  }
4181
4278
  return { title: title, level: level, content: content };
4182
4279
  }
@@ -4558,7 +4655,7 @@
4558
4655
  existingParameter.description &&
4559
4656
  existingParameter.description !== parameterDescription &&
4560
4657
  parameterDescription) {
4561
- throw new ParsingError(spaceTrim.spaceTrim(function (block) { return "\n Parameter {".concat(parameterName, "} is defined multiple times with different description.\n\n First definition:\n ").concat(block(existingParameter.description || '[undefined]'), "\n\n Second definition:\n ").concat(block(parameterDescription || '[undefined]'), "\n "); }));
4658
+ throw new ParsingError(spaceTrim.spaceTrim(function (block) { return "\n Parameter {".concat(parameterName, "} is defined multiple times with different description:\n\n First definition:\n ").concat(block(existingParameter.description || '[undefined]'), "\n\n Second definition:\n ").concat(block(parameterDescription || '[undefined]'), "\n "); }));
4562
4659
  }
4563
4660
  if (existingParameter) {
4564
4661
  if (parameterDescription) {
@@ -4577,11 +4674,12 @@
4577
4674
  // =============================================================
4578
4675
  // Note: 3️⃣ Process pipeline head
4579
4676
  pipelineJson.title = pipelineHead.title;
4580
- // TODO: [1] DRY description
4677
+ // TODO: [🎾][1] DRY description
4581
4678
  var description = pipelineHead.content;
4582
- // Note: Remove codeblocks - TODO: Maybe put this into util (exported from `@promptbool/utils`)
4679
+ // Note: Remove codeblocks - TODO: [🎾] Make util removeAllBlocksFromMarkdown (exported from `@promptbool/utils`)
4583
4680
  description = description.split(/^```.*^```/gms).join('');
4584
- //Note: Remove lists and return statement - TODO: Maybe put this into util (exported from `@promptbool/utils`)
4681
+ description = description.split(/^>.*$/gm).join('');
4682
+ //Note: Remove lists and return statement - TODO: [🎾] Make util (exported from `@promptbool/utils`)
4585
4683
  description = description.split(/^(?:(?:-)|(?:\d\))|(?:`?->))\s+.*$/gm).join('');
4586
4684
  description = spaceTrim.spaceTrim(description);
4587
4685
  if (description === '') {
@@ -4609,7 +4707,7 @@
4609
4707
  pipelineJson.pipelineUrl = command.pipelineUrl.href;
4610
4708
  break;
4611
4709
  case 'KNOWLEDGE':
4612
- console.error(new NotYetImplementedError('Knowledge is not implemented yet'));
4710
+ knowledgeCommandParser.applyToPipelineJson(pipelineJson, command);
4613
4711
  break;
4614
4712
  case 'ACTION':
4615
4713
  console.error(new NotYetImplementedError('Actions are not implemented yet'));
@@ -4618,7 +4716,8 @@
4618
4716
  console.error(new NotYetImplementedError('Instruments are not implemented yet'));
4619
4717
  break;
4620
4718
  case 'PERSONA':
4621
- console.error(new NotYetImplementedError('Personas are not implemented yet'));
4719
+ personaCommandParser.applyToPipelineJson(pipelineJson, command);
4720
+ // <- Note: Prototype of [🍧] (remove this comment after full implementation)
4622
4721
  break;
4623
4722
  case 'BOILERPLATE':
4624
4723
  throw new ParsingError('BOILERPLATE command is only for testing purposes and should not be used in the .ptbk.md file'); // <- TODO: [🚞]
@@ -4647,7 +4746,27 @@
4647
4746
  var postprocessing = [];
4648
4747
  var expectAmount = {};
4649
4748
  var expectFormat = undefined;
4650
- var isBlockTypeChanged = false;
4749
+ var isBlockTypeSet = false;
4750
+ var lastLine = section.content.split('\n').pop();
4751
+ var resultingParameterNameMatch = /^->\s*\{(?<resultingParamName>[a-z0-9_]+)\}/im.exec(lastLine);
4752
+ var resultingParameterName = null;
4753
+ if (resultingParameterNameMatch &&
4754
+ resultingParameterNameMatch.groups !== undefined &&
4755
+ resultingParameterNameMatch.groups.resultingParamName !== undefined) {
4756
+ resultingParameterName = resultingParameterNameMatch.groups.resultingParamName;
4757
+ }
4758
+ var expectResultingParameterName = function () {
4759
+ if (resultingParameterName !== null) {
4760
+ return resultingParameterName;
4761
+ }
4762
+ throw new ParsingError(spaceTrim.spaceTrim(function (block) { return "\n Template section must end with -> {parameterName}\n\n Invalid section:\n ".concat(block(
4763
+ // TODO: Show code of invalid sections each time + DRY
4764
+ section.content
4765
+ .split('\n')
4766
+ .map(function (line) { return " | ".concat(line); } /* <- TODO: [🚞] */)
4767
+ .join('\n')), "\n "); }));
4768
+ };
4769
+ var _e = extractOneBlockFromMarkdown(section.content), language = _e.language, content = _e.content;
4651
4770
  try {
4652
4771
  for (var listItems_2 = (e_3 = void 0, __values(listItems_3)), listItems_2_1 = listItems_2.next(); !listItems_2_1.done; listItems_2_1 = listItems_2.next()) {
4653
4772
  var listItem = listItems_2_1.value;
@@ -4655,15 +4774,24 @@
4655
4774
  switch (command.type) {
4656
4775
  // TODO: [🍧] Use here applyToPipelineJson and remove switch statement
4657
4776
  case 'BLOCK':
4658
- if (isBlockTypeChanged) {
4777
+ if (isBlockTypeSet) {
4659
4778
  throw new ParsingError('Block type is already defined in the prompt template. It can be defined only once.');
4660
4779
  }
4661
4780
  if (command.blockType === 'SAMPLE') {
4662
- console.error(new NotYetImplementedError('Block type SAMPLE is not implemented yet'));
4781
+ expectResultingParameterName();
4782
+ var parameter = pipelineJson.parameters.find(function (param) { return param.name === resultingParameterName; });
4783
+ if (parameter === undefined) {
4784
+ throw new UnexpectedError("Can not find parameter {".concat(resultingParameterName, "} to assign sample value"));
4785
+ }
4786
+ parameter.sampleValues = parameter.sampleValues || [];
4787
+ parameter.sampleValues.push(content);
4663
4788
  return "continue-templates";
4664
4789
  }
4665
4790
  if (command.blockType === 'KNOWLEDGE') {
4666
- console.error(new NotYetImplementedError('Knowledge is not implemented yet'));
4791
+ knowledgeCommandParser.applyToPipelineJson(pipelineJson, {
4792
+ type: 'KNOWLEDGE',
4793
+ source: content, // <- TODO: [🐝] !!!! Work with KNOWLEDGE which not referring to the source file/wweb, but its content itself
4794
+ });
4667
4795
  return "continue-templates";
4668
4796
  }
4669
4797
  if (command.blockType === 'ACTION') {
@@ -4674,8 +4802,9 @@
4674
4802
  console.error(new NotYetImplementedError('Instruments are not implemented yet'));
4675
4803
  return "continue-templates";
4676
4804
  }
4805
+ expectResultingParameterName();
4677
4806
  blockType = command.blockType;
4678
- isBlockTypeChanged = true;
4807
+ isBlockTypeSet = true;
4679
4808
  break;
4680
4809
  case 'EXPECT_AMOUNT':
4681
4810
  // eslint-disable-next-line no-case-declarations
@@ -4715,16 +4844,20 @@
4715
4844
  postprocessing.push(command.functionName);
4716
4845
  break;
4717
4846
  case 'KNOWLEDGE':
4718
- console.error(new NotYetImplementedError('Knowledge is not implemented yet'));
4847
+ // TODO: [👙] The knowledge is maybe relevant for just this template
4848
+ knowledgeCommandParser.applyToPipelineJson(pipelineJson, command);
4719
4849
  break;
4720
4850
  case 'ACTION':
4851
+ // TODO: [👙] The action is maybe relevant for just this template
4721
4852
  console.error(new NotYetImplementedError('Actions are not implemented yet'));
4722
4853
  break;
4723
4854
  case 'INSTRUMENT':
4855
+ // TODO: [👙] The instrument is maybe relevant for just this template
4724
4856
  console.error(new NotYetImplementedError('Instruments are not implemented yet'));
4725
4857
  break;
4726
4858
  case 'PERSONA':
4727
- console.error(new NotYetImplementedError('Personas are not implemented yet'));
4859
+ personaCommandParser.applyToPipelineJson(pipelineJson, command);
4860
+ // <- Note: Prototype of [🍧] (remove this comment after full implementation)
4728
4861
  break;
4729
4862
  case 'BOILERPLATE':
4730
4863
  console.error(new ParsingError('BOILERPLATE command is only for testing purposes and should not be used in the .ptbk.md file'));
@@ -4742,7 +4875,6 @@
4742
4875
  }
4743
4876
  finally { if (e_3) throw e_3.error; }
4744
4877
  }
4745
- var _e = extractOneBlockFromMarkdown(section.content), language = _e.language, content = _e.content;
4746
4878
  if (blockType === 'SCRIPT') {
4747
4879
  if (!language) {
4748
4880
  throw new ParsingError('You must specify the language of the script in the prompt template');
@@ -4751,22 +4883,12 @@
4751
4883
  throw new ParsingError(spaceTrim.spaceTrim(function (block) { return "\n Script language ".concat(language, " is not supported.\n\n Supported languages are:\n ").concat(block(SUPPORTED_SCRIPT_LANGUAGES.join(', ')), "\n\n "); }));
4752
4884
  }
4753
4885
  }
4754
- var lastLine = section.content.split('\n').pop();
4755
- var match = /^->\s*\{(?<resultingParamName>[a-z0-9_]+)\}/im.exec(lastLine);
4756
- if (!match || match.groups === undefined || match.groups.resultingParamName === undefined) {
4757
- throw new ParsingError(spaceTrim.spaceTrim(function (block) { return "\n Each section must end with -> {parameterName}\n\n Invalid section:\n ".concat(block(
4758
- // TODO: Show code of invalid sections each time + DRY
4759
- section.content
4760
- .split('\n')
4761
- .map(function (line) { return " | ".concat(line); } /* <- TODO: [🚞] */)
4762
- .join('\n')), "\n "); }));
4763
- }
4764
- var resultingParameterName = match.groups.resultingParamName;
4765
- // TODO: [1] DRY description
4886
+ // TODO: [🎾][1] DRY description
4766
4887
  var description_1 = section.content;
4767
- // Note: Remove codeblocks
4888
+ // Note: Remove codeblocks - TODO: [🎾]
4768
4889
  description_1 = description_1.split(/^```.*^```/gms).join('');
4769
- //Note: Remove lists and return statement
4890
+ description_1 = description_1.split(/^>.*$/gm).join('');
4891
+ //Note: Remove lists and return statement - TODO: [🎾]
4770
4892
  description_1 = description_1.split(/^(?:(?:-)|(?:\d\))|(?:`?->))\s+.*$/gm).join('');
4771
4893
  description_1 = spaceTrim.spaceTrim(description_1);
4772
4894
  if (description_1 === '') {
@@ -4800,13 +4922,13 @@
4800
4922
  modelRequirements: templateModelRequirements,
4801
4923
  contentLanguage: blockType === 'SCRIPT' ? language : undefined,
4802
4924
  content: content,
4803
- resultingParameterName: resultingParameterName,
4925
+ resultingParameterName: expectResultingParameterName( /* <- Note: This is once more redundant */),
4804
4926
  };
4805
4927
  if (blockType !== 'PROMPT_TEMPLATE') {
4806
4928
  delete template.modelRequirements;
4807
4929
  }
4808
4930
  // TODO: [🍧] What actually about preparation and pushing the block into `promptTemplates`
4809
- pipelineJson.promptTemplates.push(template /* <- !!! */);
4931
+ pipelineJson.promptTemplates.push(template);
4810
4932
  };
4811
4933
  try {
4812
4934
  // =============================================================
@@ -4873,6 +4995,8 @@
4873
4995
  * TODO: Use spaceTrim more effectively
4874
4996
  * TODO: [🧠] Parameter flags - isInput, isOutput, isInternal
4875
4997
  * TODO: [🥞] Not optimal parsing because `splitMarkdownIntoSections` is executed twice with same string, once through `flattenMarkdown` and second directly here
4998
+ * TODO: [♈] Probbably move expectations from templates to parameters
4999
+ * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
4876
5000
  */
4877
5001
 
4878
5002
  /**
@@ -4912,6 +5036,7 @@
4912
5036
  }
4913
5037
  /**
4914
5038
  * TODO: [🏏] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
5039
+ * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
4915
5040
  */
4916
5041
 
4917
5042
  /**
@@ -5045,6 +5170,7 @@
5045
5170
  else {
5046
5171
  colors__default["default"].green("(In future, not implemented yet) Using your prebuild pipeline collection ".concat(makedLibraryFilePath));
5047
5172
  // TODO: !! Implement;
5173
+ // TODO: [🌗]
5048
5174
  }
5049
5175
  _a = options || {}, _b = _a.isRecursive, isRecursive = _b === void 0 ? true : _b, _c = _a.isVerbose, isVerbose = _c === void 0 ? false : _c, _d = _a.isLazyLoaded, isLazyLoaded = _d === void 0 ? false : _d, _e = _a.isCrashedOnError, isCrashedOnError = _e === void 0 ? true : _e;
5050
5176
  collection = createCollectionFromPromise(function () { return __awaiter(_this, void 0, void 0, function () {
@@ -5089,6 +5215,7 @@
5089
5215
  case 5:
5090
5216
  // TODO: Handle non-valid JSON files
5091
5217
  promptbook = _d.apply(_c, [_e.sent()]);
5218
+ // TODO: [🌗]
5092
5219
  promptbook = __assign(__assign({}, promptbook), { sourceFile: sourceFile });
5093
5220
  return [3 /*break*/, 7];
5094
5221
  case 6:
@@ -5235,6 +5362,126 @@
5235
5362
  }
5236
5363
  /**
5237
5364
  * TODO: !!!! [🧠] Library precompilation and do not mix markdown and json promptbooks
5365
+ * Note: [🟢] This code should never be published outside of `@promptbook/node`
5366
+ */
5367
+
5368
+ /**
5369
+ * This error type indicates that you try to use a feature that is not available in the current environment
5370
+ */
5371
+ var EnvironmentMismatchError = /** @class */ (function (_super) {
5372
+ __extends(EnvironmentMismatchError, _super);
5373
+ function EnvironmentMismatchError(message) {
5374
+ var _this = _super.call(this, message) || this;
5375
+ _this.name = 'EnvironmentMismatchError';
5376
+ Object.setPrototypeOf(_this, EnvironmentMismatchError.prototype);
5377
+ return _this;
5378
+ }
5379
+ return EnvironmentMismatchError;
5380
+ }(Error));
5381
+
5382
+ /**
5383
+ * @@@
5384
+ *
5385
+ * @private for `FilesStorage`
5386
+ */
5387
+ function nameToSubfolderPath(name) {
5388
+ return [name.substr(0, 1).toLowerCase(), name.substr(1, 1).toLowerCase()];
5389
+ }
5390
+
5391
+ /**
5392
+ * @@@
5393
+ */
5394
+ var FilesStorage = /** @class */ (function () {
5395
+ function FilesStorage(options) {
5396
+ this.options = options;
5397
+ if (!isRunningInNode()) {
5398
+ throw new EnvironmentMismatchError("FilesStorage works only in Node.js environment");
5399
+ }
5400
+ }
5401
+ /**
5402
+ * @@@
5403
+ */
5404
+ FilesStorage.prototype.getFilenameForKey = function (key) {
5405
+ var name = titleToName(key);
5406
+ var hash = sha256__default["default"](hexEncoder__default["default"].parse(name)).toString( /* hex */);
5407
+ return path.join.apply(void 0, __spreadArray(__spreadArray([this.options.cacheFolderPath], __read(nameToSubfolderPath(hash /* <- TODO: [🎎] Maybe add some SHA256 prefix */)), false), ["".concat(name.substring(0, MAX_FILENAME_LENGTH), ".json")], false));
5408
+ };
5409
+ /**
5410
+ * @@@ Returns the current value associated with the given key, or null if the given key does not exist in the list associated with the object.
5411
+ */
5412
+ FilesStorage.prototype.getItem = function (key) {
5413
+ return __awaiter(this, void 0, void 0, function () {
5414
+ var filename, isFileExisting, fileContent, value;
5415
+ return __generator(this, function (_a) {
5416
+ switch (_a.label) {
5417
+ case 0:
5418
+ filename = this.getFilenameForKey(key);
5419
+ return [4 /*yield*/, promises.stat(filename)
5420
+ .then(function (fileStat) { return fileStat.isFile(); })
5421
+ .catch(function () { return false; })];
5422
+ case 1:
5423
+ isFileExisting = _a.sent();
5424
+ if (!isFileExisting) {
5425
+ return [2 /*return*/, null];
5426
+ }
5427
+ return [4 /*yield*/, promises.readFile(filename, 'utf-8')];
5428
+ case 2:
5429
+ fileContent = _a.sent();
5430
+ value = JSON.parse(fileContent);
5431
+ // TODO: [🌗]
5432
+ return [2 /*return*/, value];
5433
+ }
5434
+ });
5435
+ });
5436
+ };
5437
+ /**
5438
+ * @@@ Sets the value of the pair identified by key to value, creating a new key/value pair if none existed for key previously.
5439
+ */
5440
+ FilesStorage.prototype.setItem = function (key, value) {
5441
+ return __awaiter(this, void 0, void 0, function () {
5442
+ var filename, fileContent;
5443
+ return __generator(this, function (_a) {
5444
+ switch (_a.label) {
5445
+ case 0:
5446
+ filename = this.getFilenameForKey(key);
5447
+ fileContent = JSON.stringify(value, null, 4);
5448
+ return [4 /*yield*/, promises.mkdir(path.dirname(filename), { recursive: true })];
5449
+ case 1:
5450
+ _a.sent(); // <- [0]
5451
+ return [4 /*yield*/, promises.writeFile(filename, fileContent, 'utf-8')];
5452
+ case 2:
5453
+ _a.sent();
5454
+ return [2 /*return*/];
5455
+ }
5456
+ });
5457
+ });
5458
+ };
5459
+ /**
5460
+ * @@@ Removes the key/value pair with the given key from the list associated with the object, if a key/value pair with the given key exists.
5461
+ */
5462
+ FilesStorage.prototype.removeItem = function (key) {
5463
+ return __awaiter(this, void 0, void 0, function () {
5464
+ var filename;
5465
+ return __generator(this, function (_a) {
5466
+ switch (_a.label) {
5467
+ case 0:
5468
+ filename = this.getFilenameForKey(key);
5469
+ // TODO: [🧠] What to use `unlink` or `rm`
5470
+ return [4 /*yield*/, promises.unlink(filename)];
5471
+ case 1:
5472
+ // TODO: [🧠] What to use `unlink` or `rm`
5473
+ _a.sent();
5474
+ return [2 /*return*/];
5475
+ }
5476
+ });
5477
+ });
5478
+ };
5479
+ return FilesStorage;
5480
+ }());
5481
+ /**
5482
+ * TODO: [🔼] !!! Export via `@promptbook/node`
5483
+ * TODO: [🌗] Maybe some checkers, not all valid JSONs are desired and valid values
5484
+ * Note: [🟢] This code should never be published outside of `@promptbook/node`
5238
5485
  */
5239
5486
 
5240
5487
  /**
@@ -5282,7 +5529,7 @@
5282
5529
  /**
5283
5530
  * Function computeUsage will create price per one token based on the string value found on openai page
5284
5531
  *
5285
- * @private within the package, used only as internal helper for `OPENAI_MODELS`
5532
+ * @private within the repository, used only as internal helper for `OPENAI_MODELS`
5286
5533
  */
5287
5534
  function computeUsage(value) {
5288
5535
  var _a = __read(value.split(' / '), 2), price = _a[0], tokens = _a[1];
@@ -5355,7 +5602,7 @@
5355
5602
  ];
5356
5603
  /**
5357
5604
  * Note: [🤖] Add models of new variant
5358
- * TODO: !!!! Add embedding models OR Anthropic has only chat+completion models?
5605
+ * TODO: [🧠] !!! Add embedding models OR Anthropic has only chat+completion models?
5359
5606
  * TODO: [🧠] Some mechanism to propagate unsureness
5360
5607
  * TODO: [🧠][👮‍♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,...
5361
5608
  * TODO: [🕚] Make this list dynamic - dynamically can be listed modelNames but not modelVariant, legacy status, context length and pricing
@@ -5413,6 +5660,7 @@
5413
5660
  model: modelRequirements.modelName || this.getDefaultChatModel().modelName,
5414
5661
  max_tokens: modelRequirements.maxTokens || 4096,
5415
5662
  // <- TODO: Make some global max cap for maxTokens
5663
+ // <- TODO: !!!!! Use here `systemMessage`, `temperature` and `seed`
5416
5664
  messages: [
5417
5665
  {
5418
5666
  role: 'user',
@@ -5482,6 +5730,7 @@
5482
5730
  model: rawResponse.model || model,
5483
5731
  max_tokens: modelRequirements.maxTokens || 2000, // <- Note: 2000 is for lagacy reasons
5484
5732
  // <- TODO: Make some global max cap for maxTokens
5733
+ // <- TODO: Use here `systemMessage`, `temperature` and `seed`
5485
5734
  };
5486
5735
 
5487
5736
  const rawRequest: xxxx.Completions.CompletionCreateParamsNonStreaming = {
@@ -5568,201 +5817,7 @@
5568
5817
  * TODO: [🧠] Maybe handle errors via transformAnthropicError (like transformAzureError)
5569
5818
  * TODO: Maybe Create some common util for callChatModel and callCompletionModel
5570
5819
  * TODO: Maybe make custom OpenaiError
5571
- */
5572
-
5573
- /**
5574
- * Gets the expectations and creates a fake text that meets the expectations
5575
- *
5576
- * Note: You can provide postprocessing functions to modify the text before checking the expectations
5577
- * The result will be the text BEFORE the postprocessing
5578
- *
5579
- * @private internal util for MockedFackedLlmExecutionTools
5580
- */
5581
- function $fakeTextToExpectations(expectations, postprocessing) {
5582
- return __awaiter(this, void 0, void 0, function () {
5583
- var lorem, loremText, text, loopLimit, textToCheck, _a, _b, func, e_1_1;
5584
- var e_1, _c;
5585
- return __generator(this, function (_d) {
5586
- switch (_d.label) {
5587
- case 0:
5588
- lorem = new loremIpsum.LoremIpsum({
5589
- wordsPerSentence: { min: 5, max: 15 },
5590
- sentencesPerParagraph: { min: 5, max: 15 },
5591
- });
5592
- loremText = '';
5593
- text = '';
5594
- loopLimit = CHARACTER_LOOP_LIMIT;
5595
- _d.label = 1;
5596
- case 1:
5597
- if (!(loopLimit-- > 0)) return [3 /*break*/, 11];
5598
- textToCheck = text;
5599
- _d.label = 2;
5600
- case 2:
5601
- _d.trys.push([2, 7, 8, 9]);
5602
- _a = (e_1 = void 0, __values(postprocessing || [])), _b = _a.next();
5603
- _d.label = 3;
5604
- case 3:
5605
- if (!!_b.done) return [3 /*break*/, 6];
5606
- func = _b.value;
5607
- return [4 /*yield*/, func(textToCheck)];
5608
- case 4:
5609
- textToCheck = _d.sent();
5610
- _d.label = 5;
5611
- case 5:
5612
- _b = _a.next();
5613
- return [3 /*break*/, 3];
5614
- case 6: return [3 /*break*/, 9];
5615
- case 7:
5616
- e_1_1 = _d.sent();
5617
- e_1 = { error: e_1_1 };
5618
- return [3 /*break*/, 9];
5619
- case 8:
5620
- try {
5621
- if (_b && !_b.done && (_c = _a.return)) _c.call(_a);
5622
- }
5623
- finally { if (e_1) throw e_1.error; }
5624
- return [7 /*endfinally*/];
5625
- case 9:
5626
- if (isPassingExpectations(expectations, textToCheck)) {
5627
- return [2 /*return*/, text]; // <- Note: Returning the text because the postprocessing
5628
- }
5629
- if (loremText === '') {
5630
- loremText = lorem.generateParagraphs(1) + '\n\n';
5631
- }
5632
- text += loremText.substring(0, 1);
5633
- loremText = loremText.substring(1);
5634
- _d.label = 10;
5635
- case 10: return [3 /*break*/, 1];
5636
- case 11: throw new Error(spaceTrim.spaceTrim(function (block) { return "\n Can not generate fake text to met the expectations\n\n Loop limit reached\n The expectations:\n ".concat(block(JSON.stringify(expectations, null, 4)), "\n\n The draft text:\n ").concat(block(text), "\n\n "); }));
5637
- }
5638
- });
5639
- });
5640
- }
5641
- /**
5642
- * TODO: [💝] Unite object for expecting amount and format - use here also a format
5643
- */
5644
-
5645
- /**
5646
- * Mocked execution Tools for just faking expected responses for testing purposes
5647
- */
5648
- var MockedFackedLlmExecutionTools = /** @class */ (function () {
5649
- function MockedFackedLlmExecutionTools(options) {
5650
- if (options === void 0) { options = {}; }
5651
- this.options = options;
5652
- }
5653
- Object.defineProperty(MockedFackedLlmExecutionTools.prototype, "title", {
5654
- get: function () {
5655
- return 'Mocked facked';
5656
- },
5657
- enumerable: false,
5658
- configurable: true
5659
- });
5660
- Object.defineProperty(MockedFackedLlmExecutionTools.prototype, "description", {
5661
- get: function () {
5662
- return 'Use faked lorem ipsum data - just for testing';
5663
- },
5664
- enumerable: false,
5665
- configurable: true
5666
- });
5667
- /**
5668
- * Fakes chat model
5669
- */
5670
- MockedFackedLlmExecutionTools.prototype.callChatModel = function (prompt) {
5671
- return __awaiter(this, void 0, void 0, function () {
5672
- var content, result;
5673
- return __generator(this, function (_a) {
5674
- switch (_a.label) {
5675
- case 0:
5676
- if (this.options.isVerbose) {
5677
- console.info('💬 Mocked faked prompt', prompt);
5678
- }
5679
- return [4 /*yield*/, $fakeTextToExpectations(prompt.expectations || {
5680
- sentences: { min: 1, max: 1 },
5681
- }, prompt.postprocessing)];
5682
- case 1:
5683
- content = _a.sent();
5684
- result = {
5685
- content: content,
5686
- modelName: 'mocked-facked',
5687
- timing: {
5688
- start: getCurrentIsoDate(),
5689
- complete: getCurrentIsoDate(),
5690
- },
5691
- usage: addUsage( /* <- TODO: [🧠] Compute here at least words, characters,... etc */),
5692
- rawResponse: {
5693
- note: 'This is mocked echo',
5694
- },
5695
- // <- [🤹‍♂️]
5696
- };
5697
- if (this.options.isVerbose) {
5698
- console.info('💬 Mocked faked result', result);
5699
- }
5700
- return [2 /*return*/, result];
5701
- }
5702
- });
5703
- });
5704
- };
5705
- /**
5706
- * Fakes completion model
5707
- */
5708
- MockedFackedLlmExecutionTools.prototype.callCompletionModel = function (prompt) {
5709
- return __awaiter(this, void 0, void 0, function () {
5710
- return __generator(this, function (_a) {
5711
- return [2 /*return*/, this.callChatModel(prompt)];
5712
- });
5713
- });
5714
- };
5715
- /**
5716
- * Fakes embedding model
5717
- */
5718
- MockedFackedLlmExecutionTools.prototype.callEmbeddingModel = function (prompt) {
5719
- return __awaiter(this, void 0, void 0, function () {
5720
- var content, result;
5721
- return __generator(this, function (_a) {
5722
- content = new Array(25).fill(0).map(function () { return Math.random() * 2 - 1; });
5723
- result = {
5724
- content: content,
5725
- modelName: 'mocked-facked',
5726
- timing: {
5727
- start: getCurrentIsoDate(),
5728
- complete: getCurrentIsoDate(),
5729
- },
5730
- usage: addUsage( /* <- TODO: [🧠] Compute here at least words, characters,... etc */),
5731
- rawResponse: {
5732
- note: 'This is mocked embedding',
5733
- },
5734
- // <- [🤹‍♂️]
5735
- };
5736
- if (this.options.isVerbose) {
5737
- console.info('💬 Mocked faked result', result);
5738
- }
5739
- return [2 /*return*/, result];
5740
- });
5741
- });
5742
- };
5743
- // <- Note: [🤖] callXxxModel
5744
- /**
5745
- * List all available fake-models that can be used
5746
- */
5747
- MockedFackedLlmExecutionTools.prototype.listModels = function () {
5748
- return [
5749
- {
5750
- modelTitle: 'Fake chat',
5751
- modelName: 'mocked-echo',
5752
- modelVariant: 'CHAT',
5753
- },
5754
- {
5755
- modelTitle: 'Fake completion',
5756
- modelName: 'mocked-echo',
5757
- modelVariant: 'COMPLETION',
5758
- },
5759
- // <- Note: [🤖]
5760
- ];
5761
- };
5762
- return MockedFackedLlmExecutionTools;
5763
- }());
5764
- /**
5765
- * TODO: [🕵️‍♀️] Maybe just remove
5820
+ * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
5766
5821
  */
5767
5822
 
5768
5823
  /**
@@ -6112,6 +6167,7 @@
6112
6167
  * @see /other/playground/playground.ts
6113
6168
  * TODO: [🍓] Make better
6114
6169
  * TODO: Change model titles to human eg: "gpt-4-turbo-2024-04-09" -> "GPT-4 Turbo (2024-04-09)"
6170
+ * TODO: [🚸] Not all models are compatible with JSON mode, add this information here and use it
6115
6171
  */
6116
6172
 
6117
6173
  /**
@@ -6203,6 +6259,7 @@
6203
6259
  model: model,
6204
6260
  max_tokens: modelRequirements.maxTokens,
6205
6261
  // <- TODO: Make some global max cap for maxTokens
6262
+ // <- TODO: !!!!! Use here `systemMessage`, `temperature` and `seed`
6206
6263
  };
6207
6264
  if (expectFormat === 'JSON') {
6208
6265
  modelSettings.response_format = {
@@ -6276,6 +6333,7 @@
6276
6333
  model: model,
6277
6334
  max_tokens: modelRequirements.maxTokens || 2000, // <- Note: 2000 is for lagacy reasons
6278
6335
  // <- TODO: Make some global max cap for maxTokens
6336
+ // <- TODO: !!!!! Use here `systemMessage`, `temperature` and `seed`
6279
6337
  };
6280
6338
  rawRequest = __assign(__assign({}, modelSettings), { prompt: content, user: this.options.user });
6281
6339
  start = getCurrentIsoDate();
@@ -6426,6 +6484,236 @@
6426
6484
  * TODO: [🧠][🧙‍♂️] Maybe there can be some wizzard for thoose who want to use just OpenAI
6427
6485
  * TODO: Maybe Create some common util for callChatModel and callCompletionModel
6428
6486
  * TODO: Maybe make custom OpenaiError
6487
+ * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
6488
+ */
6489
+
6490
+ /**
6491
+ * @@@
6492
+ *
6493
+ * Note: This function is not cached, every call creates new instance of LlmExecutionTools
6494
+ *
6495
+ * It looks for environment variables:
6496
+ * - `process.env.OPENAI_API_KEY`
6497
+ * - `process.env.ANTHROPIC_CLAUDE_API_KEY`
6498
+ *
6499
+ * @returns @@@
6500
+ */
6501
+ function createLlmToolsFromEnv() {
6502
+ if (!isRunningInNode()) {
6503
+ throw new EnvironmentMismatchError('Function `createLlmToolsFromEnv` works only in Node.js environment');
6504
+ }
6505
+ var llmTools = [];
6506
+ if (typeof process.env.OPENAI_API_KEY === 'string') {
6507
+ llmTools.push(new OpenAiExecutionTools({
6508
+ isVerbose: true,
6509
+ apiKey: process.env.OPENAI_API_KEY,
6510
+ }));
6511
+ }
6512
+ if (typeof process.env.ANTHROPIC_CLAUDE_API_KEY === 'string') {
6513
+ llmTools.push(new AnthropicClaudeExecutionTools({
6514
+ isVerbose: true,
6515
+ apiKey: process.env.ANTHROPIC_CLAUDE_API_KEY,
6516
+ }));
6517
+ }
6518
+ if (llmTools.length === 0) {
6519
+ throw new Error(spaceTrim__default["default"]("\n No LLM tools found in the environment\n\n Please set one of environment variables:\n - OPENAI_API_KEY\n - ANTHROPIC_CLAUDE_API_KEY\n "));
6520
+ }
6521
+ else if (llmTools.length === 1) {
6522
+ return llmTools[0];
6523
+ }
6524
+ else {
6525
+ return joinLlmExecutionTools.apply(void 0, __spreadArray([], __read(llmTools), false));
6526
+ }
6527
+ }
6528
+ /**
6529
+ * TODO: [🔼] !!! Export via `@promptbook/node`
6530
+ * TODO: @@@ write discussion about this - wizzard
6531
+ * TODO: Add Azure
6532
+ * TODO: [🧠] Which name is better `createLlmToolsFromEnv` or `createLlmToolsFromEnvironment`?
6533
+ * TODO: [🧠] Is there some meaningfull way how to test this util
6534
+ * TODO: [🧠] Maybe pass env as argument
6535
+ * Note: [🟢] This code should never be published outside of `@promptbook/node`
6536
+ */
6537
+
6538
/**
 * Stores key/value pairs in memory
 *
 * Note: Implements a `Storage`-like interface (`length`, `clear`, `getItem`,
 *       `key`, `setItem`, `removeItem`), but values can be of any type.
 */
var MemoryStorage = /** @class */ (function () {
    function MemoryStorage() {
        // Note: Backed by a `Map` (not a plain object) so that stored falsy values
        //       and prototype-colliding keys like "__proto__" behave correctly
        this.storage = new Map();
    }
    Object.defineProperty(MemoryStorage.prototype, "length", {
        /**
         * Returns the number of key/value pairs currently present in the storage.
         */
        get: function () {
            return this.storage.size;
        },
        enumerable: false,
        configurable: true
    });
    /**
     * Empties the storage of all key/value pairs, if there are any.
     */
    MemoryStorage.prototype.clear = function () {
        this.storage.clear();
    };
    /**
     * Returns the current value associated with the given key, or null if the given key does not exist in the storage.
     */
    MemoryStorage.prototype.getItem = function (key) {
        // Note: Explicit existence check so that stored falsy values (like '' or 0) are returned as-is
        return this.storage.has(key) ? this.storage.get(key) : null;
    };
    /**
     * Returns the name of the nth key in insertion order, or null if n is greater than or equal to the number of key/value pairs.
     */
    MemoryStorage.prototype.key = function (index) {
        var keys = Array.from(this.storage.keys());
        return keys[index] !== undefined ? keys[index] : null;
    };
    /**
     * Sets the value of the pair identified by key to value, creating a new key/value pair if none existed for key previously.
     */
    MemoryStorage.prototype.setItem = function (key, value) {
        this.storage.set(key, value);
    };
    /**
     * Removes the key/value pair with the given key from the storage, if a key/value pair with the given key exists.
     */
    MemoryStorage.prototype.removeItem = function (key) {
        this.storage["delete"](key);
    };
    return MemoryStorage;
}());
6587
+ /**
6588
+ * TODO: [🔼] !!! Export via `@promptbook/core`
6589
+ */
6590
+
6591
/**
 * Returns the current moment formatted as an ISO 8601 string
 *
 * Simple wrapper around `new Date().toISOString()`
 *
 * @returns string_date branded type
 */
function $currentDate() {
    var now = new Date();
    return now.toISOString();
}
6599
+
6600
/**
 * Intercepts LLM tools and caches their results in the given storage
 *
 * A cache key is derived from the prompt title plus a sha256 hash of the prompt
 * parameters; on a hit the stored `promptResult` is returned and the underlying
 * tools are not called at all.
 *
 * Note: The body below is TypeScript-compiled async code (`__awaiter`/`__generator`
 *       state machines), left byte-identical on purpose.
 *
 * @param llmTools LLM tools to be intercepted with result caching
 * @param options optional `storage` used to persist results (defaults to a fresh in-memory `MemoryStorage`)
 * @returns LLM tools with same functionality with added caching of results
 */
function cacheLlmTools(llmTools, options) {
    var _this = this;
    if (options === void 0) { options = {}; }
    // Default to an in-memory cache when no storage implementation is supplied
    var _a = options.storage, storage = _a === void 0 ? new MemoryStorage() : _a;
    var proxyTools = {
        get title() {
            // TODO: [🧠] Maybe put here some suffix
            return llmTools.title;
        },
        get description() {
            // TODO: [🧠] Maybe put here some suffix
            return llmTools.description;
        },
        listModels: function () {
            // TODO: [🧠] Should be model listing also cached?
            return /* not await */ llmTools.listModels();
        },
    };
    // Shared cache-aware dispatcher used by all three model-call proxies below
    var callCommonModel = function (prompt) { return __awaiter(_this, void 0, void 0, function () {
        var key, cacheItem, promptResult, _a;
        return __generator(this, function (_b) {
            switch (_b.label) {
                case 0:
                    // Cache key: truncated, name-ified title + sha256 of the serialized parameters
                    // NOTE(review): `hexEncoder.parse` treats the JSON string as hex digits —
                    // presumably a Utf8 encoder was intended; changing it would invalidate
                    // existing caches, so it is left as-is. TODO confirm upstream.
                    key = titleToName(prompt.title.substring(0, MAX_FILENAME_LENGTH - 10) +
                        '-' +
                        sha256__default["default"](hexEncoder__default["default"].parse(JSON.stringify(prompt.parameters))).toString( /* hex */));
                    return [4 /*yield*/, storage.getItem(key)];
                case 1:
                    cacheItem = _b.sent();
                    // Cache hit -> return the stored result without calling the model
                    if (cacheItem) {
                        return [2 /*return*/, cacheItem.promptResult];
                    }
                    // Cache miss -> dispatch to the matching model variant
                    _a = prompt.modelRequirements.modelVariant;
                    switch (_a) {
                        case 'CHAT': return [3 /*break*/, 2];
                        case 'COMPLETION': return [3 /*break*/, 4];
                        case 'EMBEDDING': return [3 /*break*/, 6];
                    }
                    return [3 /*break*/, 8];
                case 2: return [4 /*yield*/, llmTools.callChatModel(prompt)];
                case 3:
                    promptResult = _b.sent();
                    return [3 /*break*/, 9];
                case 4: return [4 /*yield*/, llmTools.callCompletionModel(prompt)];
                case 5:
                    promptResult = _b.sent();
                    return [3 /*break*/, 9];
                case 6: return [4 /*yield*/, llmTools.callEmbeddingModel(prompt)];
                case 7:
                    promptResult = _b.sent();
                    return [3 /*break*/, 9];
                case 8: throw new PipelineExecutionError("Unknown model variant \"".concat(prompt.modelRequirements.modelVariant, "\""));
                case 9: return [4 /*yield*/, storage.setItem(key, {
                        // Persist provenance alongside the result for later inspection
                        date: $currentDate(),
                        promptbookVersion: PROMPTBOOK_VERSION,
                        prompt: prompt,
                        promptResult: promptResult,
                    })];
                case 10:
                    _b.sent();
                    return [2 /*return*/, promptResult];
            }
        });
    }); };
    // Only proxy the call methods the wrapped tools actually implement,
    // so the proxy advertises the same capabilities as the original
    if (llmTools.callChatModel !== undefined) {
        proxyTools.callChatModel = function (prompt) { return __awaiter(_this, void 0, void 0, function () {
            return __generator(this, function (_a) {
                return [2 /*return*/, /* not await */ callCommonModel(prompt)];
            });
        }); };
    }
    if (llmTools.callCompletionModel !== undefined) {
        proxyTools.callCompletionModel = function (prompt) { return __awaiter(_this, void 0, void 0, function () {
            return __generator(this, function (_a) {
                return [2 /*return*/, /* not await */ callCommonModel(prompt)];
            });
        }); };
    }
    if (llmTools.callEmbeddingModel !== undefined) {
        proxyTools.callEmbeddingModel = function (prompt) { return __awaiter(_this, void 0, void 0, function () {
            return __generator(this, function (_a) {
                return [2 /*return*/, /* not await */ callCommonModel(prompt)];
            });
        }); };
    }
    // <- Note: [🤖]
    return proxyTools;
}
6694
+ /**
6695
+ * TODO: [🔼] !!! Export via `@promptbook/core`
6696
+ * TODO: @@@ write discussion about this and storages
6697
+ * write how to combine multiple interceptors
6698
+ * TODO: [🧠][💸] Maybe make some common abstraction `interceptLlmTools` and use here (or use javascript Proxy?)
6699
+ * TODO: [🧠] Is there some meaningfull way how to test this util
6700
+ */
6701
+
6702
/**
 * Returns LLM tools for CLI
 *
 * Combines all LLM providers configured via environment variables and wraps
 * them in a file-based cache under `.promptbook/executions-cache` in the
 * current working directory.
 *
 * @private within the repository - for CLI utils
 * @returns cached LLM execution tools
 * @throws {EnvironmentMismatchError} when called outside of Node.js
 */
function getLlmToolsForCli() {
    if (!isRunningInNode()) {
        // Note: The message previously referenced `getLlmToolsForTestingAndScriptsAndPlayground`
        //       (copy-paste error); it now names this function
        throw new EnvironmentMismatchError('Function `getLlmToolsForCli` works only in Node.js environment');
    }
    return cacheLlmTools(createLlmToolsFromEnv(), {
        storage: new FilesStorage({ cacheFolderPath: path.join(process.cwd(), '/.promptbook/executions-cache') }),
    });
}
6715
+ /**
6716
+ * Note: [🟡] This code should never be published outside of `@promptbook/cli`
6429
6717
  */
6430
6718
 
6431
6719
  /**
@@ -6444,7 +6732,6 @@
6444
6732
  helloCommand.option('--validation', "Types of validations separated by comma (options \"logic\",\"imports\")", 'logic,imports');
6445
6733
  helloCommand.option('--verbose', "Is verbose", false);
6446
6734
  helloCommand.option('-o, --out-file <path>', spaceTrim__default["default"]("\n Where to save the builded collection\n\n Note: If you keep it \"".concat(PIPELINE_COLLECTION_BASE_FILENAME, "\" it will be saved in the root of the promptbook directory\n If you set it to a path, it will be saved in that path\n BUT you can use only one format and set correct extension\n ")), PIPELINE_COLLECTION_BASE_FILENAME);
6447
- // TODO: !!! Auto-detect AI api keys + explicit api keys as argv
6448
6735
  helloCommand.action(function (path$1, _a) {
6449
6736
  var projectName = _a.projectName, format = _a.format, validation = _a.validation, verbose = _a.verbose, outFile = _a.outFile;
6450
6737
  return __awaiter(_this, void 0, void 0, function () {
@@ -6454,7 +6741,6 @@
6454
6741
  return __generator(this, function (_f) {
6455
6742
  switch (_f.label) {
6456
6743
  case 0:
6457
- console.info('!!!', { projectName: projectName, path: path$1, format: format, validation: validation, verbose: verbose, outFile: outFile });
6458
6744
  isVerbose = verbose;
6459
6745
  formats = (format || '')
6460
6746
  .split(',')
@@ -6468,17 +6754,7 @@
6468
6754
  console.error(colors__default["default"].red("You can use only one format when saving to a file"));
6469
6755
  process.exit(1);
6470
6756
  }
6471
- llmTools = joinLlmExecutionTools(
6472
- // TODO: !!!! Remove mocked
6473
- new MockedFackedLlmExecutionTools({
6474
- isVerbose: isVerbose,
6475
- }), new AnthropicClaudeExecutionTools({
6476
- isVerbose: isVerbose,
6477
- apiKey: process.env.ANTHROPIC_CLAUDE_API_KEY,
6478
- }), new OpenAiExecutionTools({
6479
- isVerbose: isVerbose,
6480
- apiKey: process.env.OPENAI_API_KEY,
6481
- }));
6757
+ llmTools = getLlmToolsForCli();
6482
6758
  return [4 /*yield*/, createCollectionFromDirectory(path$1, {
6483
6759
  llmTools: llmTools,
6484
6760
  isVerbose: isVerbose,
@@ -6595,11 +6871,14 @@
6595
6871
  });
6596
6872
  });
6597
6873
  }
6874
+ /**
6875
+ * Note: [🟡] This code should never be published outside of `@promptbook/cli`
6876
+ */
6598
6877
 
6599
6878
  /**
6600
6879
  * Add or modify an auto-generated section in a markdown file
6601
6880
  *
6602
- * @private within the package
6881
+ * @private within the repository
6603
6882
  */
6604
6883
  function addAutoGeneratedSection(content, options) {
6605
6884
  var sectionName = options.sectionName, sectionContent = options.sectionContent;
@@ -6611,7 +6890,10 @@
6611
6890
  }
6612
6891
  var placeForSection = removeContentComments(content).match(/^##.*$/im);
6613
6892
  if (!placeForSection) {
6614
- throw new Error("No place where to put the section <!--".concat(sectionName, "-->"));
6893
+ throw new ParsingError(
6894
+ // <- [🧠] Maybe something better than `ParsingError`
6895
+ "No place where to put the section <!--".concat(sectionName, "-->"));
6896
+ // <- [🚞]
6615
6897
  }
6616
6898
  var _a = __read(placeForSection, 1), heading = _a[0];
6617
6899
  return content.replace(heading, "<!--".concat(sectionName, "-->\n").concat(warningLine, "\n").concat(sectionContent, "\n<!--/").concat(sectionName, "-->\n\n").concat(heading));
@@ -6851,6 +7133,9 @@
6851
7133
  });
6852
7134
  });
6853
7135
  }
7136
+ /**
7137
+ * Note: [🟡] This code should never be published outside of `@promptbook/cli`
7138
+ */
6854
7139
 
6855
7140
  /**
6856
7141
  * Runs CLI utilities of Promptbook package
@@ -6860,7 +7145,7 @@
6860
7145
  var program;
6861
7146
  return __generator(this, function (_a) {
6862
7147
  if (!isRunningInNode()) {
6863
- throw new Error(spaceTrim.spaceTrim("\n Function promptbookCli is initiator of CLI script and should be run in Node.js environment.\n\n - In browser use function exported from `@promptbook/utils` or `@promptbook/core` directly, for example `prettifyPipelineString`.\n\n "));
7148
+ throw new EnvironmentMismatchError(spaceTrim.spaceTrim("\n Function promptbookCli is initiator of CLI script and should be run in Node.js environment.\n\n - In browser use function exported from `@promptbook/utils` or `@promptbook/core` directly, for example `prettifyPipelineString`.\n\n "));
6864
7149
  }
6865
7150
  program = new commander__default["default"].Command();
6866
7151
  program.name('promptbook');
@@ -6878,6 +7163,7 @@
6878
7163
  * TODO: [🥠] Do not export to utils directly, its just for CLI script
6879
7164
  * TODO: [🕌] When more functionalities, rename
6880
7165
  * Note: 11:11
7166
+ * Note: [🟡] This code should never be published outside of `@promptbook/cli`
6881
7167
  */
6882
7168
 
6883
7169
  // @promptbook/cli