@promptbook/legacy-documents 0.84.0-21 → 0.84.0-9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. package/README.md +5 -21
  2. package/esm/index.es.js +64 -162
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/src/_packages/cli.index.d.ts +0 -4
  5. package/esm/typings/src/_packages/core.index.d.ts +2 -12
  6. package/esm/typings/src/_packages/types.index.d.ts +0 -2
  7. package/esm/typings/src/_packages/utils.index.d.ts +0 -2
  8. package/esm/typings/src/_packages/wizzard.index.d.ts +0 -4
  9. package/esm/typings/src/cli/cli-commands/about.d.ts +1 -3
  10. package/esm/typings/src/cli/cli-commands/hello.d.ts +1 -3
  11. package/esm/typings/src/cli/cli-commands/list-models.d.ts +1 -3
  12. package/esm/typings/src/cli/cli-commands/make.d.ts +1 -3
  13. package/esm/typings/src/cli/cli-commands/prettify.d.ts +1 -3
  14. package/esm/typings/src/cli/cli-commands/run.d.ts +1 -3
  15. package/esm/typings/src/cli/cli-commands/runInteractiveChatbot.d.ts +1 -1
  16. package/esm/typings/src/cli/cli-commands/test-command.d.ts +1 -3
  17. package/esm/typings/src/config.d.ts +1 -27
  18. package/esm/typings/src/conversion/compilePipelineOnRemoteServer.d.ts +1 -1
  19. package/esm/typings/src/execution/FilesystemTools.d.ts +1 -1
  20. package/esm/typings/src/execution/assertsExecutionSuccessful.d.ts +1 -3
  21. package/esm/typings/src/pipeline/book-notation.d.ts +2 -3
  22. package/esm/typings/src/pipeline/prompt-notation.d.ts +5 -18
  23. package/esm/typings/src/prepare/preparePipelineOnRemoteServer.d.ts +1 -1
  24. package/esm/typings/src/remote-server/socket-types/_subtypes/PromptbookServer_Identification.d.ts +2 -5
  25. package/esm/typings/src/wizzard/wizzard.d.ts +1 -7
  26. package/package.json +16 -9
  27. package/umd/index.umd.js +67 -165
  28. package/umd/index.umd.js.map +1 -1
  29. package/esm/typings/src/_packages/deepseek.index.d.ts +0 -8
  30. package/esm/typings/src/cli/cli-commands/list-scrapers.d.ts +0 -13
  31. package/esm/typings/src/llm-providers/deepseek/DeepseekExecutionToolsOptions.d.ts +0 -9
  32. package/esm/typings/src/llm-providers/deepseek/createDeepseekExecutionTools.d.ts +0 -14
  33. package/esm/typings/src/llm-providers/deepseek/register-configuration.d.ts +0 -14
  34. package/esm/typings/src/llm-providers/deepseek/register-constructor.d.ts +0 -15
  35. package/esm/typings/src/utils/editable/edit-pipeline-string/deflatePipeline.test.d.ts +0 -1
  36. package/esm/typings/src/utils/editable/utils/isFlatPipeline.test.d.ts +0 -1
  37. package/esm/typings/src/utils/files/mimeTypeToExtension.d.ts +0 -10
  38. package/esm/typings/src/utils/files/mimeTypeToExtension.test.d.ts +0 -1
package/README.md CHANGED
@@ -1,6 +1,6 @@
  <!-- ⚠️ WARNING: This code has been generated so that any manual changes will be overwritten -->
 
- # Promptbook
+ # Promptbook
 
 
 
@@ -14,13 +14,12 @@
 
 
 
- ## 🌟 New Features
+ ## New Features
 
- - 💫 Support of [`o3-mini` model by OpenAI](https://openai.com/index/openai-o3-mini/)
- - 🐋 **Support of [DeepSeek models](https://www.npmjs.com/package/@promptbook/deepseek)**
  - 💙 Working [the **Book** language v1.0.0](https://github.com/webgptorg/book)
  - 🖤 Run books from CLI - `npx ptbk run path/to/your/book`
- - 📚 Support of `.docx`, `.doc` and `.pdf` documents as knowledge
+ - 📚 Support of `.docx`, `.doc` and `.pdf` documents
+ - ✨ **Support of [OpenAI o1 model](https://openai.com/o1/)**
 
 
 
@@ -266,9 +265,8 @@ Or you can install them separately:
  - **[@promptbook/anthropic-claude](https://www.npmjs.com/package/@promptbook/anthropic-claude)** - Execution tools for Anthropic Claude API, wrapper around Anthropic Claude SDK
  - **[@promptbook/vercel](https://www.npmjs.com/package/@promptbook/vercel)** - Adapter for Vercel functionalities
  - **[@promptbook/google](https://www.npmjs.com/package/@promptbook/google)** - Integration with Google's Gemini API
- - **[@promptbook/deepseek](https://www.npmjs.com/package/@promptbook/deepseek)** - Integration with [DeepSeek API](https://www.deepseek.com/)
  - **[@promptbook/azure-openai](https://www.npmjs.com/package/@promptbook/azure-openai)** - Execution tools for Azure OpenAI API
-
+ - **[@promptbook/langtail](https://www.npmjs.com/package/@promptbook/langtail)** - Execution tools for Langtail API, wrapper around Langtail SDK
  - **[@promptbook/fake-llm](https://www.npmjs.com/package/@promptbook/fake-llm)** - Mocked execution tools for testing the library and saving the tokens
  - **[@promptbook/remote-client](https://www.npmjs.com/package/@promptbook/remote-client)** - Remote client for remote execution of promptbooks
  - **[@promptbook/remote-server](https://www.npmjs.com/package/@promptbook/remote-server)** - Remote server for remote execution of promptbooks
@@ -438,20 +436,6 @@ See [TODO.md](./TODO.md)
 
 
 
- ## 🤝 Partners
-
- <div style="display: flex; align-items: center; gap: 20px;">
-
- <a href="https://promptbook.studio/">
- <img src="./other/design/promptbook-studio-logo.png" alt="Partner 3" height="100">
- </a>
-
- <a href="https://technologickainkubace.org/en/about-technology-incubation/about-the-project/">
- <img src="./other/partners/CI-Technology-Incubation.png" alt="Technology Incubation" height="100">
- </a>
-
- </div>
-
  ## 🖋️ Contributing
 
  I am open to pull requests, feedback, and suggestions. Or if you like this utility, you can [☕ buy me a coffee](https://www.buymeacoffee.com/hejny) or [donate via cryptocurrencies](https://github.com/hejny/hejny/blob/main/documents/crypto.md).
package/esm/index.es.js CHANGED
@@ -8,8 +8,7 @@ import { SHA256 } from 'crypto-js';
  import hexEncoder from 'crypto-js/enc-hex';
  import { format } from 'prettier';
  import parserHtml from 'prettier/parser-html';
- import sha256 from 'crypto-js/sha256';
- import { lookup, extension } from 'mime-types';
+ import { lookup } from 'mime-types';
  import { unparse, parse } from 'papaparse';
 
  // ⚠️ WARNING: This code has been generated so that any manual changes will be overwritten
@@ -26,7 +25,7 @@ var BOOK_LANGUAGE_VERSION = '1.0.0';
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
- var PROMPTBOOK_ENGINE_VERSION = '0.84.0-20';
+ var PROMPTBOOK_ENGINE_VERSION = '0.84.0-8';
  /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -197,12 +196,6 @@ var ADMIN_GITHUB_NAME = 'hejny';
  * @public exported from `@promptbook/core`
  */
  var DEFAULT_BOOK_TITLE = "\u2728 Untitled Book";
- /**
- * Maximum file size limit
- *
- * @public exported from `@promptbook/core`
- */
- var DEFAULT_MAX_FILE_SIZE = 100 * 1024 * 1024; // 100MB
  // <- TODO: [🧠] Better system for generator warnings - not always "code" and "by `@promptbook/cli`"
  /**
  * The maximum number of iterations for a loops
@@ -236,12 +229,6 @@ var SMALL_NUMBER = 0.001;
  * @private within the repository - too low-level in comparison with other `MAX_...`
  */
  var IMMEDIATE_TIME = 10;
- /**
- * The maximum length of the (generated) filename
- *
- * @public exported from `@promptbook/core`
- */
- var MAX_FILENAME_LENGTH = 30;
  /**
  * Strategy for caching the intermediate results for knowledge sources
  *
@@ -261,15 +248,6 @@ var DEFAULT_MAX_PARALLEL_COUNT = 5; // <- TODO: [🤹‍♂️]
  * @public exported from `@promptbook/core`
  */
  var DEFAULT_MAX_EXECUTION_ATTEMPTS = 3; // <- TODO: [🤹‍♂️]
- // <- TODO: [🕝] Make also `BOOKS_DIRNAME_ALTERNATIVES`
- /**
- * Where to store the temporary downloads
- *
- * Note: When the folder does not exist, it is created recursively
- *
- * @public exported from `@promptbook/core`
- */
- var DEFAULT_DOWNLOAD_CACHE_DIRNAME = './.promptbook/download-cache';
  /**
  * Where to store the scrape cache
  *
@@ -1139,18 +1117,24 @@ function getScraperIntermediateSource(source, options) {
  var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book.md`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book.md`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book.md"},{title:"Prepare Knowledge-piece 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book.md`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book.md"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book.md",formfactorName:"GENERIC",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book.md`\n- INPUT PARAMETER `{availableModelNames}` List of available model names separated by comma (,)\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n\\`\\`\\`json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelRequirements}`\n"}],sourceFile:"./books/prepare-persona.book.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book.md",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book.md`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book.md"}];
 
  /**
- * Checks if value is valid email
+ * Function isValidJsonString will tell you if the string is valid JSON or not
  *
  * @public exported from `@promptbook/utils`
  */
- function isValidEmail(email) {
- if (typeof email !== 'string') {
- return false;
+ function isValidJsonString(value /* <- [👨‍⚖️] */) {
+ try {
+ JSON.parse(value);
+ return true;
  }
- if (email.split('\n').length > 1) {
+ catch (error) {
+ if (!(error instanceof Error)) {
+ throw error;
+ }
+ if (error.message.includes('Unexpected token')) {
+ return false;
+ }
  return false;
  }
- return /^.+@.+\..+$/.test(email);
  }
 
  /**
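For orientation, a minimal usage sketch of the `isValidJsonString` helper shown on the `+` side of the hunk above, assuming it is consumed via the `@promptbook/utils` export named in its JSDoc; the sample inputs are made up:

```ts
import { isValidJsonString } from '@promptbook/utils';

// Anything JSON.parse accepts is reported as valid…
console.log(isValidJsonString('{"modelName":"gpt-4o","temperature":0.7}')); // true

// …and anything that makes JSON.parse throw is reported as invalid.
console.log(isValidJsonString('# Prepare Knowledge from Markdown')); // false
```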
@@ -1172,27 +1156,6 @@ var ParseError = /** @class */ (function (_super) {
  * TODO: Maybe split `ParseError` and `ApplyError`
  */
 
- /**
- * Function isValidJsonString will tell you if the string is valid JSON or not
- *
- * @public exported from `@promptbook/utils`
- */
- function isValidJsonString(value /* <- [👨‍⚖️] */) {
- try {
- JSON.parse(value);
- return true;
- }
- catch (error) {
- if (!(error instanceof Error)) {
- throw error;
- }
- if (error.message.includes('Unexpected token')) {
- return false;
- }
- return false;
- }
- }
-
  /**
  * Function `validatePipelineString` will validate the if the string is a valid pipeline string
  * It does not check if the string is fully logically correct, but if it is a string that can be a pipeline string or the string looks completely different.
@@ -1206,15 +1169,6 @@ function validatePipelineString(pipelineString) {
  if (isValidJsonString(pipelineString)) {
  throw new ParseError('Expected a book, but got a JSON string');
  }
- else if (isValidUrl(pipelineString)) {
- throw new ParseError("Expected a book, but got just the URL \"".concat(pipelineString, "\""));
- }
- else if (isValidFilePath(pipelineString)) {
- throw new ParseError("Expected a book, but got just the file path \"".concat(pipelineString, "\""));
- }
- else if (isValidEmail(pipelineString)) {
- throw new ParseError("Expected a book, but got just the email \"".concat(pipelineString, "\""));
- }
  // <- TODO: Implement the validation + add tests when the pipeline logic considered as invalid
  return pipelineString;
  }
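Illustrative calls (hypothetical inputs, not from the package) showing which strings each side of the hunk above rejects; the `-` side is 0.84.0-21, the `+` side is 0.84.0-9:

```ts
validatePipelineString('# My Book\n\n- PERSONA Joe, a copywriter'); // returned unchanged on both sides
validatePipelineString('{"title":"My Book"}');                      // ParseError on both sides
validatePipelineString('https://promptbook.studio/my.book.md');     // ParseError only on the `-` side
validatePipelineString('./books/my.book.md');                       // ParseError only on the `-` side
validatePipelineString('joe@example.com');                          // ParseError only on the `-` side
```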
@@ -2580,28 +2534,12 @@ function deserializeError(error) {
  /**
  * Asserts that the execution of a Promptbook is successful
  *
- * Note: If there are only warnings, the execution is still successful but the warnings are logged in the console
-
  * @param executionResult - The partial result of the Promptbook execution
  * @throws {PipelineExecutionError} If the execution is not successful or if multiple errors occurred
  * @public exported from `@promptbook/core`
  */
  function assertsExecutionSuccessful(executionResult) {
- var e_1, _a;
- var isSuccessful = executionResult.isSuccessful, errors = executionResult.errors, warnings = executionResult.warnings;
- try {
- for (var warnings_1 = __values(warnings), warnings_1_1 = warnings_1.next(); !warnings_1_1.done; warnings_1_1 = warnings_1.next()) {
- var warning = warnings_1_1.value;
- console.warn(warning.message);
- }
- }
- catch (e_1_1) { e_1 = { error: e_1_1 }; }
- finally {
- try {
- if (warnings_1_1 && !warnings_1_1.done && (_a = warnings_1.return)) _a.call(warnings_1);
- }
- finally { if (e_1) throw e_1.error; }
- }
+ var isSuccessful = executionResult.isSuccessful, errors = executionResult.errors;
  if (isSuccessful === true) {
  return;
  }
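A small usage sketch of `assertsExecutionSuccessful` (exported from `@promptbook/core` per the JSDoc above); the result object here is hand-made and only uses the properties read in the diff. The practical difference: the `-` side (0.84.0-21) also iterates `executionResult.warnings` and logs each message via `console.warn` before the success check, while the `+` side (0.84.0-9) only reads `isSuccessful` and `errors`.

```ts
import { assertsExecutionSuccessful } from '@promptbook/core';

// Hypothetical execution result; in practice this comes from running a pipeline.
const executionResult = {
    isSuccessful: false,
    errors: [new Error('Model request failed')],
    warnings: [{ message: 'Knowledge source was empty' }],
};

// Throws PipelineExecutionError because `isSuccessful` is not true;
// on the `-` side the warning message would additionally be logged first.
assertsExecutionSuccessful(executionResult);
```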
@@ -3238,7 +3176,7 @@ var MultipleLlmExecutionTools = /** @class */ (function () {
  if (!(error_1 instanceof Error) || error_1 instanceof UnexpectedError) {
  throw error_1;
  }
- errors.push({ llmExecutionTools: llmExecutionTools, error: error_1 });
+ errors.push(error_1);
  return [3 /*break*/, 13];
  case 13:
  _b = _a.next();
@@ -3265,10 +3203,7 @@ var MultipleLlmExecutionTools = /** @class */ (function () {
  // 2) AnthropicClaude throw PipelineExecutionError: Parameter `{knowledge}` is not defined
  // 3) ...
  spaceTrim$1(function (block) { return "\n All execution tools failed:\n\n ".concat(block(errors
- .map(function (_a, i) {
- var error = _a.error, llmExecutionTools = _a.llmExecutionTools;
- return "".concat(i + 1, ") **").concat(llmExecutionTools.title, "** thrown **").concat(error.name || 'Error', ":** ").concat(error.message);
- })
+ .map(function (error, i) { return "".concat(i + 1, ") **").concat(error.name || 'Error', ":** ").concat(error.message); })
  .join('\n')), "\n\n "); }));
  }
  else if (this.llmExecutionTools.length === 0) {
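Restated outside the transpiled boilerplate (illustrative sketch only): the `-` side (0.84.0-21) collects `{ llmExecutionTools, error }` pairs so the aggregated "All execution tools failed" message can name the tool that threw, while the `+` side (0.84.0-9) collects bare `Error` objects.

```ts
type ToolFailure = { llmExecutionTools: { title: string }; error: Error };

// 0.84.0-21 (`-` side): each entry remembers which execution tools threw.
const summaryWithTitles = (failures: ToolFailure[]) =>
    failures
        .map(({ error, llmExecutionTools }, i) =>
            `${i + 1}) **${llmExecutionTools.title}** thrown **${error.name || 'Error'}:** ${error.message}`)
        .join('\n');

// 0.84.0-9 (`+` side): only the bare errors are available for the summary.
const summaryPlain = (failures: Error[]) =>
    failures
        .map((error, i) => `${i + 1}) **${error.name || 'Error'}:** ${error.message}`)
        .join('\n');
```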
@@ -3740,17 +3675,6 @@ function extensionToMimeType(value) {
  return lookup(value) || 'application/octet-stream';
  }
 
- /**
- * Convert mime type to file extension
- *
- * Note: If the mime type is invalid, `null` is returned
- *
- * @private within the repository
- */
- function mimeTypeToExtension(value) {
- return extension(value) || null;
- }
-
  /**
  * The built-in `fetch' function with a lightweight error handling wrapper as default fetch function used in Promptbook scrapers
  *
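Both helpers are thin wrappers over the `mime-types` package; a quick sketch of the mapping they rely on (return values shown as comments; the fallbacks mirror the code in the hunk above):

```ts
import { lookup, extension } from 'mime-types';

lookup('pdf');                // 'application/pdf'
lookup('unknown-ext');        // false -> extensionToMimeType falls back to 'application/octet-stream'

extension('application/pdf'); // 'pdf'
extension('not/a-real-type'); // false -> the removed mimeTypeToExtension falls back to null
```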
@@ -3786,9 +3710,9 @@ var scraperFetch = function (url, init) { return __awaiter(void 0, void 0, void
  function makeKnowledgeSourceHandler(knowledgeSource, tools, options) {
  var _a;
  return __awaiter(this, void 0, void 0, function () {
- var _b, fetch, knowledgeSourceContent, name, _c, _d, rootDirname, url, response_1, mimeType, basename, hash, rootDirname_1, filepath, fileContent, _f, _g, filename_1, fileExtension, mimeType;
- return __generator(this, function (_h) {
- switch (_h.label) {
+ var _b, fetch, knowledgeSourceContent, name, _c, _d, rootDirname, url, response_1, mimeType, filename_1, fileExtension, mimeType;
+ return __generator(this, function (_f) {
+ switch (_f.label) {
  case 0:
  _b = tools.fetch, fetch = _b === void 0 ? scraperFetch : _b;
  knowledgeSourceContent = knowledgeSource.knowledgeSourceContent;
@@ -3797,76 +3721,54 @@ function makeKnowledgeSourceHandler(knowledgeSource, tools, options) {
  if (!name) {
  name = knowledgeSourceContentToName(knowledgeSourceContent);
  }
- if (!isValidUrl(knowledgeSourceContent)) return [3 /*break*/, 5];
+ if (!isValidUrl(knowledgeSourceContent)) return [3 /*break*/, 2];
  url = knowledgeSourceContent;
  return [4 /*yield*/, fetch(url)];
  case 1:
- response_1 = _h.sent();
+ response_1 = _f.sent();
  mimeType = ((_a = response_1.headers.get('content-type')) === null || _a === void 0 ? void 0 : _a.split(';')[0]) || 'text/html';
- if (tools.fs === undefined || !url.endsWith('.pdf' /* <- TODO: [💵] */)) {
- return [2 /*return*/, {
- source: name,
- filename: null,
- url: url,
- mimeType: mimeType,
- /*
- TODO: [🥽]
- > async asBlob() {
- > // TODO: [👨🏻‍🤝‍👨🏻] This can be called multiple times BUT when called second time, response in already consumed
- > const content = await response.blob();
- > return content;
- > },
- */
- asJson: function () {
- return __awaiter(this, void 0, void 0, function () {
- var content;
- return __generator(this, function (_a) {
- switch (_a.label) {
- case 0: return [4 /*yield*/, response_1.json()];
- case 1:
- content = _a.sent();
- return [2 /*return*/, content];
- }
- });
+ return [2 /*return*/, {
+ source: name,
+ filename: null,
+ url: url,
+ mimeType: mimeType,
+ /*
+ TODO: [🥽]
+ > async asBlob() {
+ > // TODO: [👨🏻‍🤝‍👨🏻] This can be called multiple times BUT when called second time, response in already consumed
+ > const content = await response.blob();
+ > return content;
+ > },
+ */
+ asJson: function () {
+ return __awaiter(this, void 0, void 0, function () {
+ var content;
+ return __generator(this, function (_a) {
+ switch (_a.label) {
+ case 0: return [4 /*yield*/, response_1.json()];
+ case 1:
+ content = _a.sent();
+ return [2 /*return*/, content];
+ }
  });
- },
- asText: function () {
- return __awaiter(this, void 0, void 0, function () {
- var content;
- return __generator(this, function (_a) {
- switch (_a.label) {
- case 0: return [4 /*yield*/, response_1.text()];
- case 1:
- content = _a.sent();
- return [2 /*return*/, content];
- }
- });
+ });
+ },
+ asText: function () {
+ return __awaiter(this, void 0, void 0, function () {
+ var content;
+ return __generator(this, function (_a) {
+ switch (_a.label) {
+ case 0: return [4 /*yield*/, response_1.text()];
+ case 1:
+ content = _a.sent();
+ return [2 /*return*/, content];
+ }
  });
- },
- }];
- }
- basename = url.split('/').pop() || titleToName(url);
- hash = sha256(hexEncoder.parse(url)).toString( /* hex */);
- rootDirname_1 = join(process.cwd(), DEFAULT_DOWNLOAD_CACHE_DIRNAME);
- filepath = join.apply(void 0, __spreadArray(__spreadArray([], __read(nameToSubfolderPath(hash /* <- TODO: [🎎] Maybe add some SHA256 prefix */)), false), ["".concat(basename.substring(0, MAX_FILENAME_LENGTH), ".").concat(mimeTypeToExtension(mimeType))], false));
- return [4 /*yield*/, tools.fs.mkdir(dirname(join(rootDirname_1, filepath)), { recursive: true })];
+ });
+ },
+ }];
  case 2:
- _h.sent();
- _g = (_f = Buffer).from;
- return [4 /*yield*/, response_1.arrayBuffer()];
- case 3:
- fileContent = _g.apply(_f, [_h.sent()]);
- if (fileContent.length > DEFAULT_MAX_FILE_SIZE /* <- TODO: Allow to pass different value to remote server */) {
- throw new LimitReachedError("File is too large (".concat(Math.round(fileContent.length / 1024 / 1024), "MB). Maximum allowed size is ").concat(Math.round(DEFAULT_MAX_FILE_SIZE / 1024 / 1024), "MB."));
- }
- return [4 /*yield*/, tools.fs.writeFile(join(rootDirname_1, filepath), fileContent)];
- case 4:
- _h.sent();
- // TODO: [💵] Check the file security
- // TODO: [🧹][🧠] Delete the file after the scraping is done
- return [2 /*return*/, makeKnowledgeSourceHandler({ name: name, knowledgeSourceContent: filepath }, tools, __assign(__assign({}, options), { rootDirname: rootDirname_1 }))];
- case 5:
- if (!isValidFilePath(knowledgeSourceContent)) return [3 /*break*/, 7];
+ if (!isValidFilePath(knowledgeSourceContent)) return [3 /*break*/, 4];
  if (tools.fs === undefined) {
  throw new EnvironmentMismatchError('Can not import file knowledge without filesystem tools');
  // <- TODO: [🧠] What is the best error type here`
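For readability, a rough sketch in modern syntax (not the transpiled generator code) of the extra branch that exists only on the `-` side (0.84.0-21) of the hunk above: when filesystem tools are available and the URL ends with `.pdf`, the response is downloaded into `DEFAULT_DOWNLOAD_CACHE_DIRNAME`, size-checked against `DEFAULT_MAX_FILE_SIZE`, and the handler then recurses with the cached file path. All names are taken from the diff; the condensation is mine.

```ts
// Sketch of the 0.84.0-21-only download-cache branch, condensed from the diff above.
if (tools.fs !== undefined && url.endsWith('.pdf')) {
    const basename = url.split('/').pop() || titleToName(url);
    const hash = sha256(hexEncoder.parse(url)).toString();
    const rootDirname = join(process.cwd(), DEFAULT_DOWNLOAD_CACHE_DIRNAME);
    const filepath = join(
        ...nameToSubfolderPath(hash),
        `${basename.substring(0, MAX_FILENAME_LENGTH)}.${mimeTypeToExtension(mimeType)}`,
    );

    await tools.fs.mkdir(dirname(join(rootDirname, filepath)), { recursive: true });

    const fileContent = Buffer.from(await response.arrayBuffer());
    if (fileContent.length > DEFAULT_MAX_FILE_SIZE) {
        throw new LimitReachedError(
            `File is too large (${Math.round(fileContent.length / 1024 / 1024)}MB). ` +
                `Maximum allowed size is ${Math.round(DEFAULT_MAX_FILE_SIZE / 1024 / 1024)}MB.`,
        );
    }
    await tools.fs.writeFile(join(rootDirname, filepath), fileContent);

    // Re-enter the handler with the cached local file instead of the remote URL.
    return makeKnowledgeSourceHandler(
        { name, knowledgeSourceContent: filepath },
        tools,
        { ...options, rootDirname },
    );
}
```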
@@ -3879,8 +3781,8 @@ function makeKnowledgeSourceHandler(knowledgeSource, tools, options) {
  fileExtension = getFileExtension(filename_1);
  mimeType = extensionToMimeType(fileExtension || '');
  return [4 /*yield*/, isFileExisting(filename_1, tools.fs)];
- case 6:
- if (!(_h.sent())) {
+ case 3:
+ if (!(_f.sent())) {
  throw new NotFoundError(spaceTrim$1(function (block) { return "\n Can not make source handler for file which does not exist:\n\n File:\n ".concat(block(knowledgeSourceContent), "\n\n Full file path:\n ").concat(block(filename_1), "\n "); }));
  }
  // TODO: [🧠][😿] Test security file - file is scoped to the project (BUT maybe do this in `filesystemTools`)
@@ -3926,7 +3828,7 @@ function makeKnowledgeSourceHandler(knowledgeSource, tools, options) {
  });
  },
  }];
- case 7: return [2 /*return*/, {
+ case 4: return [2 /*return*/, {
  source: name,
  filename: null,
  url: null,