@promptbook/cli 0.94.0 → 0.95.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. package/README.md +2 -10
  2. package/esm/index.es.js +100 -100
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/src/_packages/types.index.d.ts +2 -2
  5. package/esm/typings/src/_packages/{wizzard.index.d.ts → wizard.index.d.ts} +2 -2
  6. package/esm/typings/src/cli/cli-commands/prettify.d.ts +1 -1
  7. package/esm/typings/src/cli/cli-commands/test-command.d.ts +1 -1
  8. package/esm/typings/src/conversion/archive/loadArchive.d.ts +1 -1
  9. package/esm/typings/src/conversion/archive/saveArchive.d.ts +2 -2
  10. package/esm/typings/src/conversion/prettify/renderPipelineMermaidOptions.d.ts +1 -1
  11. package/esm/typings/src/dialogs/callback/CallbackInterfaceTools.d.ts +1 -1
  12. package/esm/typings/src/execution/AbstractTaskResult.d.ts +2 -2
  13. package/esm/typings/src/execution/createPipelineExecutor/00-CreatePipelineExecutorOptions.d.ts +1 -1
  14. package/esm/typings/src/execution/execution-report/ExecutionPromptReportJson.d.ts +2 -2
  15. package/esm/typings/src/execution/translation/automatic-translate/translateMessages.d.ts +1 -1
  16. package/esm/typings/src/llm-providers/_common/register/{$provideLlmToolsForWizzardOrCli.d.ts → $provideLlmToolsForWizardOrCli.d.ts} +2 -2
  17. package/esm/typings/src/llm-providers/anthropic-claude/register-configuration.d.ts +1 -1
  18. package/esm/typings/src/llm-providers/anthropic-claude/register-constructor.d.ts +1 -1
  19. package/esm/typings/src/llm-providers/azure-openai/register-configuration.d.ts +1 -1
  20. package/esm/typings/src/llm-providers/azure-openai/register-constructor.d.ts +1 -1
  21. package/esm/typings/src/llm-providers/deepseek/register-configuration.d.ts +1 -1
  22. package/esm/typings/src/llm-providers/deepseek/register-constructor.d.ts +1 -1
  23. package/esm/typings/src/llm-providers/google/register-configuration.d.ts +1 -1
  24. package/esm/typings/src/llm-providers/google/register-constructor.d.ts +1 -1
  25. package/esm/typings/src/llm-providers/ollama/register-configuration.d.ts +1 -1
  26. package/esm/typings/src/llm-providers/ollama/register-constructor.d.ts +1 -1
  27. package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionTools.d.ts +1 -1
  28. package/esm/typings/src/llm-providers/openai/register-configuration.d.ts +2 -2
  29. package/esm/typings/src/llm-providers/openai/register-constructor.d.ts +2 -2
  30. package/esm/typings/src/remote-server/socket-types/listModels/PromptbookServer_ListModels_Request.d.ts +1 -1
  31. package/esm/typings/src/scrapers/_boilerplate/createBoilerplateScraper.d.ts +1 -1
  32. package/esm/typings/src/scrapers/_boilerplate/register-constructor.d.ts +1 -1
  33. package/esm/typings/src/scrapers/_boilerplate/register-metadata.d.ts +2 -2
  34. package/esm/typings/src/scrapers/_common/prepareKnowledgePieces.d.ts +1 -1
  35. package/esm/typings/src/scrapers/_common/register/ScraperAndConverterMetadata.d.ts +1 -1
  36. package/esm/typings/src/scrapers/document/createDocumentScraper.d.ts +1 -1
  37. package/esm/typings/src/scrapers/document/register-constructor.d.ts +1 -1
  38. package/esm/typings/src/scrapers/document/register-metadata.d.ts +2 -2
  39. package/esm/typings/src/scrapers/document-legacy/createLegacyDocumentScraper.d.ts +1 -1
  40. package/esm/typings/src/scrapers/document-legacy/register-constructor.d.ts +1 -1
  41. package/esm/typings/src/scrapers/document-legacy/register-metadata.d.ts +2 -2
  42. package/esm/typings/src/scrapers/markdown/createMarkdownScraper.d.ts +1 -4
  43. package/esm/typings/src/scrapers/markdown/register-constructor.d.ts +1 -1
  44. package/esm/typings/src/scrapers/markdown/register-metadata.d.ts +2 -2
  45. package/esm/typings/src/scrapers/markitdown/createMarkitdownScraper.d.ts +1 -1
  46. package/esm/typings/src/scrapers/markitdown/register-constructor.d.ts +1 -1
  47. package/esm/typings/src/scrapers/markitdown/register-metadata.d.ts +2 -2
  48. package/esm/typings/src/scrapers/pdf/createPdfScraper.d.ts +1 -1
  49. package/esm/typings/src/scrapers/pdf/register-constructor.d.ts +1 -1
  50. package/esm/typings/src/scrapers/pdf/register-metadata.d.ts +2 -2
  51. package/esm/typings/src/scrapers/website/createWebsiteScraper.d.ts +1 -1
  52. package/esm/typings/src/scrapers/website/register-constructor.d.ts +1 -1
  53. package/esm/typings/src/scrapers/website/register-metadata.d.ts +2 -2
  54. package/esm/typings/src/types/typeAliases.d.ts +1 -1
  55. package/esm/typings/src/utils/files/listAllFiles.d.ts +1 -1
  56. package/esm/typings/src/version.d.ts +1 -1
  57. package/esm/typings/src/{wizzard → wizard}/$getCompiledBook.d.ts +2 -2
  58. package/esm/typings/src/{wizzard/wizzard.d.ts → wizard/wizard.d.ts} +6 -6
  59. package/package.json +1 -13
  60. package/umd/index.umd.js +100 -100
  61. package/umd/index.umd.js.map +1 -1
package/umd/index.umd.js CHANGED
@@ -57,7 +57,7 @@
57
57
  * @generated
58
58
  * @see https://github.com/webgptorg/promptbook
59
59
  */
60
- const PROMPTBOOK_ENGINE_VERSION = '0.94.0';
60
+ const PROMPTBOOK_ENGINE_VERSION = '0.95.0';
61
61
  /**
62
62
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
63
63
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -315,11 +315,11 @@
315
315
  /*
316
316
  TODO: [🌃]
317
317
  /**
318
- * Id of application for the wizzard when using remote server
318
+ * Id of application for the wizard when using remote server
319
319
  *
320
320
  * @public exported from `@promptbook/core`
321
321
  * /
322
- ex-port const WIZZARD_APP_ID: string_app_id = 'wizzard';
322
+ ex-port const WIZARD_APP_ID: string_app_id = 'wizard';
323
323
  */
324
324
  /**
325
325
  * The name of the builded pipeline collection made by CLI `ptbk make` and for lookup in `createCollectionFromDirectory`
@@ -1443,7 +1443,7 @@
1443
1443
  else {
1444
1444
  for (const [subName, subValue] of Object.entries(value)) {
1445
1445
  if (subValue === undefined) {
1446
- // Note: undefined in object is serializable - it is just omited
1446
+ // Note: undefined in object is serializable - it is just omitted
1447
1447
  continue;
1448
1448
  }
1449
1449
  checkSerializableAsJson({ name: `${name}.${subName}`, value: subValue, message });
@@ -2785,12 +2785,12 @@
2785
2785
  get title() {
2786
2786
  return `${llmTools.title} (cached)`;
2787
2787
  // <- TODO: [🧈] Maybe standartize the suffix when wrapping `LlmExecutionTools` up
2788
- // <- TODO: [🧈][🧠] Does it make sence to suffix "(cached)"?
2788
+ // <- TODO: [🧈][🧠] Does it make sense to suffix "(cached)"?
2789
2789
  },
2790
2790
  get description() {
2791
2791
  return `${llmTools.description} (cached)`;
2792
2792
  // <- TODO: [🧈] Maybe standartize the suffix when wrapping `LlmExecutionTools` up
2793
- // <- TODO: [🧈][🧠] Does it make sence to suffix "(cached)"?
2793
+ // <- TODO: [🧈][🧠] Does it make sense to suffix "(cached)"?
2794
2794
  },
2795
2795
  listModels() {
2796
2796
  // TODO: [🧠] Should be model listing also cached?
@@ -3020,12 +3020,12 @@
3020
3020
  get title() {
3021
3021
  return `${llmTools.title} (+usage)`;
3022
3022
  // <- TODO: [🧈] Maybe standartize the suffix when wrapping `LlmExecutionTools` up
3023
- // <- TODO: [🧈][🧠] Does it make sence to suffix "(+usage)"?
3023
+ // <- TODO: [🧈][🧠] Does it make sense to suffix "(+usage)"?
3024
3024
  },
3025
3025
  get description() {
3026
3026
  return `${llmTools.description} (+usage)`;
3027
3027
  // <- TODO: [🧈] Maybe standartize the suffix when wrapping `LlmExecutionTools` up
3028
- // <- TODO: [🧈][🧠] Does it make sence to suffix "(+usage)"?
3028
+ // <- TODO: [🧈][🧠] Does it make sense to suffix "(+usage)"?
3029
3029
  },
3030
3030
  checkConfiguration() {
3031
3031
  return /* not await */ llmTools.checkConfiguration();
@@ -3284,13 +3284,13 @@
3284
3284
 
3285
3285
  Technically, it's not an error, but it's probably not what you want because it does not make sense to use Promptbook without language models.
3286
3286
  `);
3287
- // TODO: [🟥] Detect browser / node and make it colorfull
3287
+ // TODO: [🟥] Detect browser / node and make it colorful
3288
3288
  console.warn(warningMessage);
3289
3289
  // <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
3290
3290
  /*
3291
3291
  return {
3292
3292
  async listModels() {
3293
- // TODO: [🟥] Detect browser / node and make it colorfull
3293
+ // TODO: [🟥] Detect browser / node and make it colorful
3294
3294
  console.warn(
3295
3295
  spaceTrim(
3296
3296
  (block) => `
@@ -3442,9 +3442,9 @@
3442
3442
  *
3443
3443
  * @private within the repository - for CLI utils
3444
3444
  */
3445
- async function $provideLlmToolsForWizzardOrCli(options) {
3445
+ async function $provideLlmToolsForWizardOrCli(options) {
3446
3446
  if (!$isRunningInNode()) {
3447
- throw new EnvironmentMismatchError('Function `$provideLlmToolsForWizzardOrCli` works only in Node.js environment');
3447
+ throw new EnvironmentMismatchError('Function `$provideLlmToolsForWizardOrCli` works only in Node.js environment');
3448
3448
  }
3449
3449
  options = options !== null && options !== void 0 ? options : { strategy: 'BRING_YOUR_OWN_KEYS' };
3450
3450
  const { isLoginloaded, strategy, isCacheReloaded } = options;
@@ -3478,7 +3478,7 @@
3478
3478
  llmExecutionTools = await $provideLlmToolsFromEnv();
3479
3479
  }
3480
3480
  else {
3481
- throw new UnexpectedError(`\`$provideLlmToolsForWizzardOrCli\` wrong strategy "${strategy}"`);
3481
+ throw new UnexpectedError(`\`$provideLlmToolsForWizardOrCli\` wrong strategy "${strategy}"`);
3482
3482
  }
3483
3483
  return cacheLlmTools(countUsage(
3484
3484
  // <- TODO: [🌯] We dont use countUsage at all, maybe just unwrap it
@@ -3565,7 +3565,7 @@
3565
3565
  if (isLoginloaded) {
3566
3566
  throw new UnexpectedError(`\`$provideLlmToolsForCli\` isLoginloaded is not supported for strategy "BRING_YOUR_OWN_KEYS"`);
3567
3567
  }
3568
- const llm = await $provideLlmToolsForWizzardOrCli({ strategy, ...options });
3568
+ const llm = await $provideLlmToolsForWizardOrCli({ strategy, ...options });
3569
3569
  return { strategy, llm };
3570
3570
  }
3571
3571
  else if (strategy === 'REMOTE_SERVER') {
@@ -3574,7 +3574,7 @@
3574
3574
  process.exit(1);
3575
3575
  }
3576
3576
  const remoteServerUrl = remoteServerUrlRaw.endsWith('/') ? remoteServerUrlRaw.slice(0, -1) : remoteServerUrlRaw;
3577
- const llm = await $provideLlmToolsForWizzardOrCli({
3577
+ const llm = await $provideLlmToolsForWizardOrCli({
3578
3578
  isLoginloaded,
3579
3579
  strategy,
3580
3580
  appId: CLI_APP_ID,
@@ -4113,17 +4113,17 @@
4113
4113
  * Mixes registered scrapers from $scrapersMetadataRegister and $scrapersRegister
4114
4114
  */
4115
4115
  const all = [];
4116
- for (const { packageName, className, mimeTypes, documentationUrl, isAvilableInBrowser, } of $scrapersMetadataRegister.list()) {
4116
+ for (const { packageName, className, mimeTypes, documentationUrl, isAvailableInBrowser, } of $scrapersMetadataRegister.list()) {
4117
4117
  if (all.some((item) => item.packageName === packageName && item.className === className)) {
4118
4118
  continue;
4119
4119
  }
4120
- all.push({ packageName, className, mimeTypes, documentationUrl, isAvilableInBrowser });
4120
+ all.push({ packageName, className, mimeTypes, documentationUrl, isAvailableInBrowser });
4121
4121
  }
4122
- for (const { packageName, className, mimeTypes, documentationUrl, isAvilableInBrowser, } of $scrapersRegister.list()) {
4122
+ for (const { packageName, className, mimeTypes, documentationUrl, isAvailableInBrowser, } of $scrapersRegister.list()) {
4123
4123
  if (all.some((item) => item.packageName === packageName && item.className === className)) {
4124
4124
  continue;
4125
4125
  }
4126
- all.push({ packageName, className, mimeTypes, documentationUrl, isAvilableInBrowser });
4126
+ all.push({ packageName, className, mimeTypes, documentationUrl, isAvailableInBrowser });
4127
4127
  }
4128
4128
  for (const { metadata } of availableScrapers) {
4129
4129
  all.push(metadata);
@@ -4135,8 +4135,8 @@
4135
4135
  const isInstalled = $scrapersRegister
4136
4136
  .list()
4137
4137
  .find(({ packageName, className }) => metadata.packageName === packageName && metadata.className === className);
4138
- const isAvilableInTools = availableScrapers.some(({ metadata: { packageName, className } }) => metadata.packageName === packageName && metadata.className === className);
4139
- return { ...metadata, isMetadataAviailable, isInstalled, isAvilableInTools };
4138
+ const isAvailableInTools = availableScrapers.some(({ metadata: { packageName, className } }) => metadata.packageName === packageName && metadata.className === className);
4139
+ return { ...metadata, isMetadataAviailable, isInstalled, isAvailableInTools };
4140
4140
  });
4141
4141
  if (metadata.length === 0) {
4142
4142
  return spaceTrim__default["default"](`
@@ -4149,7 +4149,7 @@
4149
4149
  return spaceTrim__default["default"]((block) => `
4150
4150
  Available scrapers are:
4151
4151
  ${block(metadata
4152
- .map(({ packageName, className, isMetadataAviailable, isInstalled, mimeTypes, isAvilableInBrowser, isAvilableInTools, }, i) => {
4152
+ .map(({ packageName, className, isMetadataAviailable, isInstalled, mimeTypes, isAvailableInBrowser, isAvailableInTools, }, i) => {
4153
4153
  const more = [];
4154
4154
  // TODO: [🧠] Maybe use `documentationUrl`
4155
4155
  if (isMetadataAviailable) {
@@ -4158,16 +4158,16 @@
4158
4158
  if (isInstalled) {
4159
4159
  more.push(`🟩 Installed`);
4160
4160
  } // not else
4161
- if (isAvilableInTools) {
4161
+ if (isAvailableInTools) {
4162
4162
  more.push(`🟦 Available in tools`);
4163
4163
  } // not else
4164
4164
  if (!isMetadataAviailable && isInstalled) {
4165
4165
  more.push(`When no metadata registered but scraper is installed, it is an unexpected behavior`);
4166
4166
  } // not else
4167
- if (!isInstalled && isAvilableInTools) {
4167
+ if (!isInstalled && isAvailableInTools) {
4168
4168
  more.push(`When the scraper is not installed but available in tools, it is an unexpected compatibility behavior`);
4169
4169
  } // not else
4170
- if (!isAvilableInBrowser) {
4170
+ if (!isAvailableInBrowser) {
4171
4171
  more.push(`Not usable in browser`);
4172
4172
  }
4173
4173
  const moreText = more.length === 0 ? '' : ` *(${more.join('; ')})*`;
@@ -4645,7 +4645,7 @@
4645
4645
  * @param fs Filesystem tools
4646
4646
  * @returns Pipelines loaded from the archive
4647
4647
  *
4648
- * @private utility of Prompbook
4648
+ * @private utility of Promptbook
4649
4649
  */
4650
4650
  async function loadArchive(filePath, fs) {
4651
4651
  if (!filePath.endsWith('.bookc')) {
@@ -4667,7 +4667,7 @@
4667
4667
  * Note: [🟢] Code in this file should never be never released in packages that could be imported into browser environment
4668
4668
  */
4669
4669
 
4670
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> 
{knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just 
title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModels",description:"List of available model names together with their descriptions as JSON",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelsRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n```json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n```\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting 
models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n```json\n{availableModels}\n```\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelsRequirements",format:"JSON",dependentParameterNames:["availableModels","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModels}` List of available model names together with their descriptions as JSON\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelsRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n\\`\\`\\`json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are 
the available models:\n\n\\`\\`\\`json\n{availableModels}\n\\`\\`\\`\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelsRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- 
Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
4670
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> 
{knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just 
title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModels",description:"List of available model names together with their descriptions as JSON",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelsRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n```json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpful assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n```\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models 
first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n```json\n{availableModels}\n```\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelsRequirements",format:"JSON",dependentParameterNames:["availableModels","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModels}` List of available model names together with their descriptions as JSON\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelsRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n\\`\\`\\`json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpful assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are 
the available models:\n\n\\`\\`\\`json\n{availableModels}\n\\`\\`\\`\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelsRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- 
Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
4671
4671
 
4672
4672
  /**
4673
4673
  * Function isValidJsonString will tell you if the string is valid JSON or not
@@ -4745,7 +4745,7 @@
4745
4745
  });
4746
4746
  }
4747
4747
  catch (error) {
4748
- // TODO: [🟥] Detect browser / node and make it colorfull
4748
+ // TODO: [🟥] Detect browser / node and make it colorful
4749
4749
  console.error('There was an error with prettifying the markdown, using the original as the fallback', {
4750
4750
  error,
4751
4751
  html: content,
@@ -4993,7 +4993,7 @@
4993
4993
 
4994
4994
  Note: You have probably forgotten to run "ptbk make" to update the collection
4995
4995
  Note: Pipelines with the same URL are not allowed
4996
- Only exepction is when the pipelines are identical
4996
+ Only exception is when the pipelines are identical
4997
4997
 
4998
4998
  `));
4999
4999
  }
@@ -6790,10 +6790,10 @@
6790
6790
  */
6791
6791
  async function getKnowledgeForTask(options) {
6792
6792
  const { tools, preparedPipeline, task, parameters } = options;
6793
- const firstKnowlegePiece = preparedPipeline.knowledgePieces[0];
6794
- const firstKnowlegeIndex = firstKnowlegePiece === null || firstKnowlegePiece === void 0 ? void 0 : firstKnowlegePiece.index[0];
6793
+ const firstKnowledgePiece = preparedPipeline.knowledgePieces[0];
6794
+ const firstKnowledgeIndex = firstKnowledgePiece === null || firstKnowledgePiece === void 0 ? void 0 : firstKnowledgePiece.index[0];
6795
6795
  // <- TODO: Do not use just first knowledge piece and first index to determine embedding model, use also keyword search
6796
- if (firstKnowlegePiece === undefined || firstKnowlegeIndex === undefined) {
6796
+ if (firstKnowledgePiece === undefined || firstKnowledgeIndex === undefined) {
6797
6797
  return ''; // <- Note: Np knowledge present, return empty string
6798
6798
  }
6799
6799
  try {
@@ -6804,7 +6804,7 @@
6804
6804
  title: 'Knowledge Search',
6805
6805
  modelRequirements: {
6806
6806
  modelVariant: 'EMBEDDING',
6807
- modelName: firstKnowlegeIndex.modelName,
6807
+ modelName: firstKnowledgeIndex.modelName,
6808
6808
  },
6809
6809
  content: task.content,
6810
6810
  parameters,
@@ -6812,7 +6812,7 @@
6812
6812
  const taskEmbeddingResult = await llmTools.callEmbeddingModel(taskEmbeddingPrompt);
6813
6813
  const knowledgePiecesWithRelevance = preparedPipeline.knowledgePieces.map((knowledgePiece) => {
6814
6814
  const { index } = knowledgePiece;
6815
- const knowledgePieceIndex = index.find((i) => i.modelName === firstKnowlegeIndex.modelName);
6815
+ const knowledgePieceIndex = index.find((i) => i.modelName === firstKnowledgeIndex.modelName);
6816
6816
  // <- TODO: Do not use just first knowledge piece and first index to determine embedding model
6817
6817
  if (knowledgePieceIndex === undefined) {
6818
6818
  return {
@@ -6833,8 +6833,8 @@
6833
6833
  task,
6834
6834
  taskEmbeddingPrompt,
6835
6835
  taskEmbeddingResult,
6836
- firstKnowlegePiece,
6837
- firstKnowlegeIndex,
6836
+ firstKnowledgePiece,
6837
+ firstKnowledgeIndex,
6838
6838
  knowledgePiecesWithRelevance,
6839
6839
  knowledgePiecesSorted,
6840
6840
  knowledgePiecesLimited,
@@ -6903,7 +6903,7 @@
6903
6903
  * @private internal utility of `createPipelineExecutor`
6904
6904
  */
6905
6905
  async function executeTask(options) {
6906
- const { currentTask, preparedPipeline, parametersToPass, tools, onProgress, $executionReport, pipelineIdentification, maxExecutionAttempts, maxParallelCount, csvSettings, isVerbose, rootDirname, cacheDirname, intermediateFilesStrategy, isAutoInstalled, isNotPreparedWarningSupressed, } = options;
6906
+ const { currentTask, preparedPipeline, parametersToPass, tools, onProgress, $executionReport, pipelineIdentification, maxExecutionAttempts, maxParallelCount, csvSettings, isVerbose, rootDirname, cacheDirname, intermediateFilesStrategy, isAutoInstalled, isNotPreparedWarningSuppressed, } = options;
6907
6907
  const priority = preparedPipeline.tasks.length - preparedPipeline.tasks.indexOf(currentTask);
6908
6908
  // Note: Check consistency of used and dependent parameters which was also done in `validatePipeline`, but it’s good to doublecheck
6909
6909
  const usedParameterNames = extractParameterNamesFromTask(currentTask);
@@ -6991,7 +6991,7 @@
6991
6991
  cacheDirname,
6992
6992
  intermediateFilesStrategy,
6993
6993
  isAutoInstalled,
6994
- isNotPreparedWarningSupressed,
6994
+ isNotPreparedWarningSuppressed,
6995
6995
  });
6996
6996
  await onProgress({
6997
6997
  outputParameters: {
@@ -7086,7 +7086,7 @@
7086
7086
  }
7087
7087
  return exportJson({
7088
7088
  name: `executionReport`,
7089
- message: `Unuccessful PipelineExecutorResult (with missing parameter {${parameter.name}}) PipelineExecutorResult`,
7089
+ message: `Unsuccessful PipelineExecutorResult (with missing parameter {${parameter.name}}) PipelineExecutorResult`,
7090
7090
  order: [],
7091
7091
  value: {
7092
7092
  isSuccessful: false,
@@ -7123,7 +7123,7 @@
7123
7123
  return exportJson({
7124
7124
  name: 'pipelineExecutorResult',
7125
7125
  message: spaceTrim.spaceTrim((block) => `
7126
- Unuccessful PipelineExecutorResult (with extra parameter {${parameter.name}}) PipelineExecutorResult
7126
+ Unsuccessful PipelineExecutorResult (with extra parameter {${parameter.name}}) PipelineExecutorResult
7127
7127
 
7128
7128
  ${block(pipelineIdentification)}
7129
7129
  `),
@@ -7264,7 +7264,7 @@
7264
7264
  }
7265
7265
  return exportJson({
7266
7266
  name: 'pipelineExecutorResult',
7267
- message: `Unuccessful PipelineExecutorResult (with misc errors) PipelineExecutorResult`,
7267
+ message: `Unsuccessful PipelineExecutorResult (with misc errors) PipelineExecutorResult`,
7268
7268
  order: [],
7269
7269
  value: {
7270
7270
  isSuccessful: false,
@@ -7315,7 +7315,7 @@
7315
7315
  * @public exported from `@promptbook/core`
7316
7316
  */
7317
7317
  function createPipelineExecutor(options) {
7318
- const { pipeline, tools, maxExecutionAttempts = DEFAULT_MAX_EXECUTION_ATTEMPTS, maxParallelCount = DEFAULT_MAX_PARALLEL_COUNT, csvSettings = DEFAULT_CSV_SETTINGS, isVerbose = DEFAULT_IS_VERBOSE, isNotPreparedWarningSupressed = false, cacheDirname = DEFAULT_SCRAPE_CACHE_DIRNAME, intermediateFilesStrategy = DEFAULT_INTERMEDIATE_FILES_STRATEGY, isAutoInstalled = DEFAULT_IS_AUTO_INSTALLED, rootDirname = null, } = options;
7318
+ const { pipeline, tools, maxExecutionAttempts = DEFAULT_MAX_EXECUTION_ATTEMPTS, maxParallelCount = DEFAULT_MAX_PARALLEL_COUNT, csvSettings = DEFAULT_CSV_SETTINGS, isVerbose = DEFAULT_IS_VERBOSE, isNotPreparedWarningSuppressed = false, cacheDirname = DEFAULT_SCRAPE_CACHE_DIRNAME, intermediateFilesStrategy = DEFAULT_INTERMEDIATE_FILES_STRATEGY, isAutoInstalled = DEFAULT_IS_AUTO_INSTALLED, rootDirname = null, } = options;
7319
7319
  validatePipeline(pipeline);
7320
7320
  const pipelineIdentification = (() => {
7321
7321
  // Note: This is a 😐 implementation of [🚞]
@@ -7332,7 +7332,7 @@
7332
7332
  if (isPipelinePrepared(pipeline)) {
7333
7333
  preparedPipeline = pipeline;
7334
7334
  }
7335
- else if (isNotPreparedWarningSupressed !== true) {
7335
+ else if (isNotPreparedWarningSuppressed !== true) {
7336
7336
  console.warn(spaceTrim.spaceTrim((block) => `
7337
7337
  Pipeline is not prepared
7338
7338
 
@@ -7365,7 +7365,7 @@
7365
7365
  maxParallelCount,
7366
7366
  csvSettings,
7367
7367
  isVerbose,
7368
- isNotPreparedWarningSupressed,
7368
+ isNotPreparedWarningSuppressed,
7369
7369
  rootDirname,
7370
7370
  cacheDirname,
7371
7371
  intermediateFilesStrategy,
@@ -7374,7 +7374,7 @@
7374
7374
  assertsError(error);
7375
7375
  return exportJson({
7376
7376
  name: 'pipelineExecutorResult',
7377
- message: `Unuccessful PipelineExecutorResult, last catch`,
7377
+ message: `Unsuccessful PipelineExecutorResult, last catch`,
7378
7378
  order: [],
7379
7379
  value: {
7380
7380
  isSuccessful: false,
@@ -7786,7 +7786,7 @@
7786
7786
  /**
7787
7787
  * TODO: [🧊] In future one preparation can take data from previous preparation and save tokens and time
7788
7788
  * Put `knowledgePieces` into `PrepareKnowledgeOptions`
7789
- * TODO: [🪂] More than max things can run in parallel by acident [1,[2a,2b,_],[3a,3b,_]]
7789
+ * TODO: [🪂] More than max things can run in parallel by accident [1,[2a,2b,_],[3a,3b,_]]
7790
7790
  * TODO: [🧠][❎] Do here proper M:N mapping
7791
7791
  * [x] One source can make multiple pieces
7792
7792
  * [ ] One piece can have multiple sources
@@ -8674,7 +8674,7 @@
8674
8674
  $taskJson.expectations[unit] = $taskJson.expectations[unit] || {};
8675
8675
  if (command.sign === 'MINIMUM' || command.sign === 'EXACTLY') {
8676
8676
  if ($taskJson.expectations[unit].min !== undefined) {
8677
- throw new ParseError(`Already defined minumum ${$taskJson.expectations[unit].min} ${command.unit.toLowerCase()}, now trying to redefine it to ${command.amount}`);
8677
+ throw new ParseError(`Already defined minimum ${$taskJson.expectations[unit].min} ${command.unit.toLowerCase()}, now trying to redefine it to ${command.amount}`);
8678
8678
  }
8679
8679
  $taskJson.expectations[unit].min = command.amount;
8680
8680
  } /* not else */
@@ -12136,7 +12136,7 @@
12136
12136
  return fileNames;
12137
12137
  }
12138
12138
  /**
12139
- * TODO: [😶] Unite floder listing
12139
+ * TODO: [😶] Unite folder listing
12140
12140
  * Note: Not [~🟢~] because it is not directly dependent on `fs
12141
12141
  * TODO: [🖇] What about symlinks?
12142
12142
  */
@@ -12283,7 +12283,7 @@
12283
12283
  if (isCrashedOnError) {
12284
12284
  throw new CollectionError(wrappedErrorMessage);
12285
12285
  }
12286
- // TODO: [🟥] Detect browser / node and make it colorfull
12286
+ // TODO: [🟥] Detect browser / node and make it colorful
12287
12287
  console.error(wrappedErrorMessage);
12288
12288
  }
12289
12289
  }
@@ -12350,7 +12350,7 @@
12350
12350
 
12351
12351
  Note: You have probably forgotten to run "ptbk make" to update the collection
12352
12352
  Note: Pipelines with the same URL are not allowed
12353
- Only exepction is when the pipelines are identical
12353
+ Only exception is when the pipelines are identical
12354
12354
 
12355
12355
  `));
12356
12356
  }
@@ -12374,7 +12374,7 @@
12374
12374
  if (isCrashedOnError) {
12375
12375
  throw new CollectionError(wrappedErrorMessage);
12376
12376
  }
12377
- // TODO: [🟥] Detect browser / node and make it colorfull
12377
+ // TODO: [🟥] Detect browser / node and make it colorful
12378
12378
  console.error(wrappedErrorMessage);
12379
12379
  }
12380
12380
  }
@@ -12397,7 +12397,7 @@
12397
12397
  * @param books Pipelines to be saved in the archive
12398
12398
  * @param fs Filesystem tools
12399
12399
  *
12400
- * @private utility of Prompbook
12400
+ * @private utility of Promptbook
12401
12401
  */
12402
12402
  async function saveArchive(filePath, collectionJson, fs) {
12403
12403
  if (!filePath.endsWith('.bookc')) {
@@ -12881,7 +12881,7 @@
12881
12881
  }));
12882
12882
  }
12883
12883
  /**
12884
- * TODO: [😶] Unite floder listing
12884
+ * TODO: [😶] Unite folder listing
12885
12885
  * Note: [💞] Ignore a discrepancy between file name and entity name
12886
12886
  * Note: [🟡] Code in this file should never be published outside of `@promptbook/cli`
12887
12887
  * TODO: [🖇] What about symlinks? Maybe flag --follow-symlinks
@@ -13198,9 +13198,9 @@
13198
13198
  */
13199
13199
 
13200
13200
  /**
13201
- * @see ./wizzard.ts `getPipeline` method
13201
+ * @see ./wizard.ts `getPipeline` method
13202
13202
  *
13203
- * @private usable through `ptbk run` and `@prompbook/wizzard`
13203
+ * @private usable through `ptbk run` and `@promptbook/wizard`
13204
13204
  */
13205
13205
  async function $getCompiledBook(tools, pipelineSource, options) {
13206
13206
  const { fs, fetch } = tools;
@@ -13415,7 +13415,7 @@
13415
13415
  }
13416
13416
  catch (error) {
13417
13417
  assertsError(error);
13418
- // TODO: Allow to ressurect the chatbot after an error - prompt the user to continue
13418
+ // TODO: Allow to resurrect the chatbot after an error - prompt the user to continue
13419
13419
  console.error(colors__default["default"].red(error.stack || error.message));
13420
13420
  return process.exit(1);
13421
13421
  }
@@ -13560,7 +13560,7 @@
13560
13560
  const pipelineExecutor = createPipelineExecutor({
13561
13561
  pipeline,
13562
13562
  tools,
13563
- isNotPreparedWarningSupressed: true,
13563
+ isNotPreparedWarningSuppressed: true,
13564
13564
  maxExecutionAttempts: DEFAULT_MAX_EXECUTION_ATTEMPTS,
13565
13565
  // <- TODO: Why "LLM execution failed undefinedx"
13566
13566
  maxParallelCount: 1, // <- TODO: Pass CLI argument
@@ -14807,7 +14807,7 @@
14807
14807
  }
14808
14808
  const url = !rawUrl ? null : new URL(rawUrl);
14809
14809
  if (url !== null && url.port !== port.toString()) {
14810
- console.warn(colors__default["default"].yellow(`Port in --url is different from --port which the server will listen on, this is ok only if you proxy from one port to another, for exaple via nginx or docker`));
14810
+ console.warn(colors__default["default"].yellow(`Port in --url is different from --port which the server will listen on, this is ok only if you proxy from one port to another, for example via nginx or docker`));
14811
14811
  // <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
14812
14812
  }
14813
14813
  let rootUrl = undefined;
@@ -14948,7 +14948,7 @@
14948
14948
  }));
14949
14949
  }
14950
14950
  /**
14951
- * TODO: [😶] Unite floder listing
14951
+ * TODO: [😶] Unite folder listing
14952
14952
  * Note: [💞] Ignore a discrepancy between file name and entity name
14953
14953
  * Note: [🟡] Code in this file should never be published outside of `@promptbook/cli`
14954
14954
  * TODO: [🖇] What about symlinks? Maybe flag --follow-symlinks
@@ -15037,7 +15037,7 @@
15037
15037
  * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available LLM tools
15038
15038
  *
15039
15039
  * @public exported from `@promptbook/core`
15040
- * @public exported from `@promptbook/wizzard`
15040
+ * @public exported from `@promptbook/wizard`
15041
15041
  * @public exported from `@promptbook/cli`
15042
15042
  */
15043
15043
  const _AnthropicClaudeMetadataRegistration = $llmToolsMetadataRegister.register({
@@ -15561,7 +15561,7 @@
15561
15561
  * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available LLM tools
15562
15562
  *
15563
15563
  * @public exported from `@promptbook/anthropic-claude`
15564
- * @public exported from `@promptbook/wizzard`
15564
+ * @public exported from `@promptbook/wizard`
15565
15565
  * @public exported from `@promptbook/cli`
15566
15566
  *
15567
15567
  */
@@ -15577,7 +15577,7 @@
15577
15577
  * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available LLM tools
15578
15578
  *
15579
15579
  * @public exported from `@promptbook/core`
15580
- * @public exported from `@promptbook/wizzard`
15580
+ * @public exported from `@promptbook/wizard`
15581
15581
  * @public exported from `@promptbook/cli`
15582
15582
  */
15583
15583
  const _AzureOpenAiMetadataRegistration = $llmToolsMetadataRegister.register({
@@ -16255,7 +16255,7 @@
16255
16255
  rawResponse: {
16256
16256
  ...rawResponse,
16257
16257
  created: rawResponse.created.toISOString(),
16258
- // <- TODO: Put `created` at begining
16258
+ // <- TODO: Put `created` at beginning
16259
16259
  },
16260
16260
  // <- [🗯]
16261
16261
  },
@@ -16350,7 +16350,7 @@
16350
16350
  rawResponse: {
16351
16351
  ...rawResponse,
16352
16352
  created: rawResponse.created.toISOString(),
16353
- // <- TODO: Put `created` at begining
16353
+ // <- TODO: Put `created` at beginning
16354
16354
  },
16355
16355
  // <- [🗯]
16356
16356
  },
@@ -16420,7 +16420,7 @@
16420
16420
  * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available LLM tools
16421
16421
  *
16422
16422
  * @public exported from `@promptbook/azure-openai`
16423
- * @public exported from `@promptbook/wizzard`
16423
+ * @public exported from `@promptbook/wizard`
16424
16424
  * @public exported from `@promptbook/cli`
16425
16425
  */
16426
16426
  const _AzureOpenAiRegistration = $llmToolsRegister.register(createAzureOpenAiExecutionTools);
@@ -16435,7 +16435,7 @@
16435
16435
  * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available LLM tools
16436
16436
  *
16437
16437
  * @public exported from `@promptbook/core`
16438
- * @public exported from `@promptbook/wizzard`
16438
+ * @public exported from `@promptbook/wizard`
16439
16439
  * @public exported from `@promptbook/cli`
16440
16440
  */
16441
16441
  const _DeepseekMetadataRegistration = $llmToolsMetadataRegister.register({
@@ -16835,7 +16835,7 @@
16835
16835
  * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available LLM tools
16836
16836
  *
16837
16837
  * @public exported from `@promptbook/deepseek`
16838
- * @public exported from `@promptbook/wizzard`
16838
+ * @public exported from `@promptbook/wizard`
16839
16839
  * @public exported from `@promptbook/cli`
16840
16840
  */
16841
16841
  const _DeepseekRegistration = $llmToolsRegister.register(createDeepseekExecutionTools);
@@ -16850,7 +16850,7 @@
16850
16850
  * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available LLM tools
16851
16851
  *
16852
16852
  * @public exported from `@promptbook/core`
16853
- * @public exported from `@promptbook/wizzard`
16853
+ * @public exported from `@promptbook/wizard`
16854
16854
  * @public exported from `@promptbook/cli`
16855
16855
  */
16856
16856
  const _GoogleMetadataRegistration = $llmToolsMetadataRegister.register({
@@ -17120,7 +17120,7 @@
17120
17120
  * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available LLM tools
17121
17121
  *
17122
17122
  * @public exported from `@promptbook/google`
17123
- * @public exported from `@promptbook/wizzard`
17123
+ * @public exported from `@promptbook/wizard`
17124
17124
  * @public exported from `@promptbook/cli`
17125
17125
  */
17126
17126
  const _GoogleRegistration = $llmToolsRegister.register(createGoogleExecutionTools);
@@ -17142,7 +17142,7 @@
17142
17142
  * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available LLM tools
17143
17143
  *
17144
17144
  * @public exported from `@promptbook/core`
17145
- * @public exported from `@promptbook/wizzard`
17145
+ * @public exported from `@promptbook/wizard`
17146
17146
  * @public exported from `@promptbook/cli`
17147
17147
  */
17148
17148
  const _OllamaMetadataRegistration = $llmToolsMetadataRegister.register({
@@ -17877,7 +17877,7 @@
17877
17877
  * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available LLM tools
17878
17878
  *
17879
17879
  * @public exported from `@promptbook/ollama`
17880
- * @public exported from `@promptbook/wizzard`
17880
+ * @public exported from `@promptbook/wizard`
17881
17881
  * @public exported from `@promptbook/cli`
17882
17882
  */
17883
17883
  const _OllamaRegistration = $llmToolsRegister.register(createOllamaExecutionTools);
@@ -17892,7 +17892,7 @@
17892
17892
  * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available LLM tools
17893
17893
  *
17894
17894
  * @public exported from `@promptbook/core`
17895
- * @public exported from `@promptbook/wizzard`
17895
+ * @public exported from `@promptbook/wizard`
17896
17896
  * @public exported from `@promptbook/cli`
17897
17897
  */
17898
17898
  const _OpenAiMetadataRegistration = $llmToolsMetadataRegister.register({
@@ -17934,7 +17934,7 @@
17934
17934
  * Note: [🏐] Configurations registrations are done in the metadata registration section, but the constructor registration is handled separately.
17935
17935
  *
17936
17936
  * @public exported from `@promptbook/core`
17937
- * @public exported from `@promptbook/wizzard`
17937
+ * @public exported from `@promptbook/wizard`
17938
17938
  * @public exported from `@promptbook/cli`
17939
17939
  */
17940
17940
  const _OpenAiAssistantMetadataRegistration = $llmToolsMetadataRegister.register({
@@ -18114,7 +18114,7 @@
18114
18114
  });
18115
18115
  const rawRequest = {
18116
18116
  // TODO: [👨‍👨‍👧‍👧] ...modelSettings,
18117
- // TODO: [👨‍👨‍👧‍👧][🧠] What about system message for assistants, does it make sence - combination of OpenAI assistants with Promptbook Personas
18117
+ // TODO: [👨‍👨‍👧‍👧][🧠] What about system message for assistants, does it make sense - combination of OpenAI assistants with Promptbook Personas
18118
18118
  assistant_id: this.assistantId,
18119
18119
  thread: {
18120
18120
  messages: [
@@ -18202,7 +18202,7 @@
18202
18202
  }
18203
18203
  }
18204
18204
  /**
18205
- * TODO: [🧠][🧙‍♂️] Maybe there can be some wizzard for thoose who want to use just OpenAI
18205
+ * TODO: [🧠][🧙‍♂️] Maybe there can be some wizard for those who want to use just OpenAI
18206
18206
  * TODO: Maybe make custom OpenAiError
18207
18207
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
18208
18208
  * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
@@ -18255,7 +18255,7 @@
18255
18255
  * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available LLM tools
18256
18256
  *
18257
18257
  * @public exported from `@promptbook/openai`
18258
- * @public exported from `@promptbook/wizzard`
18258
+ * @public exported from `@promptbook/wizard`
18259
18259
  * @public exported from `@promptbook/cli`
18260
18260
  */
18261
18261
  const _OpenAiRegistration = $llmToolsRegister.register(createOpenAiExecutionTools);
@@ -18265,7 +18265,7 @@
18265
18265
  * Note: [🏐] Configurations registrations are done in register-constructor.ts BUT constructor register-constructor.ts
18266
18266
  *
18267
18267
  * @public exported from `@promptbook/openai`
18268
- * @public exported from `@promptbook/wizzard`
18268
+ * @public exported from `@promptbook/wizard`
18269
18269
  * @public exported from `@promptbook/cli`
18270
18270
  */
18271
18271
  const _OpenAiAssistantRegistration = $llmToolsRegister.register(createOpenAiAssistantExecutionTools);
@@ -18338,7 +18338,7 @@
18338
18338
  className: 'MarkdownScraper',
18339
18339
  mimeTypes: ['text/markdown', 'text/plain'],
18340
18340
  documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/@@',
18341
- isAvilableInBrowser: true,
18341
+ isAvailableInBrowser: true,
18342
18342
  // <- Note: [🌏] This is the only scraper which makes sense to be available in the browser, for scraping non-markdown sources in the browser use a remote server
18343
18343
  requiredExecutables: [],
18344
18344
  }); /* <- Note: [🤛] */
@@ -18348,7 +18348,7 @@
18348
18348
  * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available known scrapers
18349
18349
  *
18350
18350
  * @public exported from `@promptbook/core`
18351
- * @public exported from `@promptbook/wizzard`
18351
+ * @public exported from `@promptbook/wizard`
18352
18352
  * @public exported from `@promptbook/cli`
18353
18353
  */
18354
18354
  const _MarkdownScraperMetadataRegistration = $scrapersMetadataRegister.register(markdownScraperMetadata);
@@ -18447,7 +18447,7 @@
18447
18447
  }
18448
18448
  // ---
18449
18449
  if (!llmTools.callEmbeddingModel) {
18450
- // TODO: [🟥] Detect browser / node and make it colorfull
18450
+ // TODO: [🟥] Detect browser / node and make it colorful
18451
18451
  console.error('No callEmbeddingModel function provided');
18452
18452
  }
18453
18453
  else {
@@ -18473,7 +18473,7 @@
18473
18473
  if (!(error instanceof PipelineExecutionError)) {
18474
18474
  throw error;
18475
18475
  }
18476
- // TODO: [🟥] Detect browser / node and make it colorfull
18476
+ // TODO: [🟥] Detect browser / node and make it colorful
18477
18477
  console.error(error, "<- Note: This error is not critical to prepare the pipeline, just knowledge pieces won't have embeddings");
18478
18478
  }
18479
18479
  return {
@@ -18507,7 +18507,7 @@
18507
18507
  // <- TODO: @@ Add compatible mime types with Boilerplate scraper
18508
18508
  ],
18509
18509
  documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/@@',
18510
- isAvilableInBrowser: false,
18510
+ isAvailableInBrowser: false,
18511
18511
  // <- Note: [🌏] Only `MarkdownScraper` makes sense to be available in the browser, for scraping non-markdown sources in the browser use a remote server
18512
18512
  requiredExecutables: [
18513
18513
  /* @@ 'Pandoc' */
@@ -18519,7 +18519,7 @@
18519
18519
  * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available known scrapers
18520
18520
  *
18521
18521
  * @public exported from `@promptbook/core`
18522
- * @public exported from `@promptbook/wizzard`
18522
+ * @public exported from `@promptbook/wizard`
18523
18523
  * @public exported from `@promptbook/cli`
18524
18524
  */
18525
18525
  const _BoilerplateScraperMetadataRegistration = $scrapersMetadataRegister.register(boilerplateScraperMetadata);
@@ -18657,7 +18657,7 @@
18657
18657
  * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available known scrapers
18658
18658
  *
18659
18659
  * @public exported from `@promptbook/boilerplate`
18660
- * @public exported from `@promptbook/wizzard`
18660
+ * @public exported from `@promptbook/wizard`
18661
18661
  * @public exported from `@promptbook/cli`
18662
18662
  */
18663
18663
  const _BoilerplateScraperRegistration = $scrapersRegister.register(createBoilerplateScraper);
@@ -18677,7 +18677,7 @@
18677
18677
  className: 'DocumentScraper',
18678
18678
  mimeTypes: ['application/vnd.openxmlformats-officedocument.wordprocessingml.document'],
18679
18679
  documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/@@',
18680
- isAvilableInBrowser: false,
18680
+ isAvailableInBrowser: false,
18681
18681
  // <- Note: [🌏] Only `MarkdownScraper` makes sense to be available in the browser, for scraping non-markdown sources in the browser use a remote server
18682
18682
  requiredExecutables: ['Pandoc'],
18683
18683
  }); /* <- Note: [🤛] */
@@ -18687,7 +18687,7 @@
18687
18687
  * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available known scrapers
18688
18688
  *
18689
18689
  * @public exported from `@promptbook/core`
18690
- * @public exported from `@promptbook/wizzard`
18690
+ * @public exported from `@promptbook/wizard`
18691
18691
  * @public exported from `@promptbook/cli`
18692
18692
  */
18693
18693
  const _DocumentScraperMetadataRegistration = $scrapersMetadataRegister.register(documentScraperMetadata);
@@ -18813,7 +18813,7 @@
18813
18813
  className: 'LegacyDocumentScraper',
18814
18814
  mimeTypes: ['application/msword', 'text/rtf'],
18815
18815
  documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/@@',
18816
- isAvilableInBrowser: false,
18816
+ isAvailableInBrowser: false,
18817
18817
  // <- Note: [🌏] Only `MarkdownScraper` makes sense to be available in the browser, for scraping non-markdown sources in the browser use a remote server
18818
18818
  requiredExecutables: [
18819
18819
  'Pandoc',
@@ -18827,7 +18827,7 @@
18827
18827
  * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available known scrapers
18828
18828
  *
18829
18829
  * @public exported from `@promptbook/core`
18830
- * @public exported from `@promptbook/wizzard`
18830
+ * @public exported from `@promptbook/wizard`
18831
18831
  * @public exported from `@promptbook/cli`
18832
18832
  */
18833
18833
  const _LegacyDocumentScraperMetadataRegistration = $scrapersMetadataRegister.register(legacyDocumentScraperMetadata);
@@ -18988,7 +18988,7 @@
18988
18988
  * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available known scrapers
18989
18989
  *
18990
18990
  * @public exported from `@promptbook/legacy-documents`
18991
- * @public exported from `@promptbook/wizzard`
18991
+ * @public exported from `@promptbook/wizard`
18992
18992
  * @public exported from `@promptbook/cli`
18993
18993
  */
18994
18994
  const _LegacyDocumentScraperRegistration = $scrapersRegister.register(createLegacyDocumentScraper);
@@ -19015,7 +19015,7 @@
19015
19015
  * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available known scrapers
19016
19016
  *
19017
19017
  * @public exported from `@promptbook/documents`
19018
- * @public exported from `@promptbook/wizzard`
19018
+ * @public exported from `@promptbook/wizard`
19019
19019
  * @public exported from `@promptbook/cli`
19020
19020
  */
19021
19021
  const _DocumentScraperRegistration = $scrapersRegister.register(createDocumentScraper);
@@ -19042,7 +19042,7 @@
19042
19042
  * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available known scrapers
19043
19043
  *
19044
19044
  * @public exported from `@promptbook/markdown-utils`
19045
- * @public exported from `@promptbook/wizzard`
19045
+ * @public exported from `@promptbook/wizard`
19046
19046
  * @public exported from `@promptbook/cli`
19047
19047
  */
19048
19048
  const _MarkdownScraperRegistration = $scrapersRegister.register(createMarkdownScraper);
@@ -19067,7 +19067,7 @@
19067
19067
  // 'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
19068
19068
  ],
19069
19069
  documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/@@',
19070
- isAvilableInBrowser: false,
19070
+ isAvailableInBrowser: false,
19071
19071
  // <- Note: [🌏] Only `MarkdownScraper` makes sense to be available in the browser, for scraping non-markdown sources in the browser use a remote server
19072
19072
  requiredExecutables: [],
19073
19073
  }); /* <- Note: [🤛] */
@@ -19077,7 +19077,7 @@
19077
19077
  * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available known scrapers
19078
19078
  *
19079
19079
  * @public exported from `@promptbook/core`
19080
- * @public exported from `@promptbook/wizzard`
19080
+ * @public exported from `@promptbook/wizard`
19081
19081
  * @public exported from `@promptbook/cli`
19082
19082
  */
19083
19083
  const _MarkitdownScraperMetadataRegistration = $scrapersMetadataRegister.register(markitdownScraperMetadata);
@@ -19214,7 +19214,7 @@
19214
19214
  *
19215
19215
  * @public exported from `@promptbook/markitdown`
19216
19216
  * @public exported from `@promptbook/pdf`
19217
- * @public exported from `@promptbook/wizzard`
19217
+ * @public exported from `@promptbook/wizard`
19218
19218
  * @public exported from `@promptbook/cli`
19219
19219
  */
19220
19220
  const _MarkitdownScraperRegistration = $scrapersRegister.register(createMarkitdownScraper);
@@ -19235,7 +19235,7 @@
19235
19235
  className: 'PdfScraper',
19236
19236
  mimeTypes: ['application/pdf-DISABLED'],
19237
19237
  documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/@@',
19238
- isAvilableInBrowser: false,
19238
+ isAvailableInBrowser: false,
19239
19239
  // <- Note: [🌏] Only `MarkdownScraper` makes sense to be available in the browser, for scraping non-markdown sources in the browser use a remote server
19240
19240
  requiredExecutables: [],
19241
19241
  }); /* <- Note: [🤛] */
@@ -19245,7 +19245,7 @@
19245
19245
  * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available known scrapers
19246
19246
  *
19247
19247
  * @public exported from `@promptbook/core`
19248
- * @public exported from `@promptbook/wizzard`
19248
+ * @public exported from `@promptbook/wizard`
19249
19249
  * @public exported from `@promptbook/cli`
19250
19250
  */
19251
19251
  const _PdfScraperMetadataRegistration = $scrapersMetadataRegister.register(pdfScraperMetadata);
@@ -19320,7 +19320,7 @@
19320
19320
  * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available known scrapers
19321
19321
  *
19322
19322
  * @public exported from `@promptbook/pdf`
19323
- * @public exported from `@promptbook/wizzard`
19323
+ * @public exported from `@promptbook/wizard`
19324
19324
  * @public exported from `@promptbook/cli`
19325
19325
  */
19326
19326
  const _PdfScraperRegistration = $scrapersRegister.register(createPdfScraper);
@@ -19340,7 +19340,7 @@
19340
19340
  className: 'WebsiteScraper',
19341
19341
  mimeTypes: ['text/html'],
19342
19342
  documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/@@',
19343
- isAvilableInBrowser: false,
19343
+ isAvailableInBrowser: false,
19344
19344
  // <- Note: [🌏] Only `MarkdownScraper` makes sense to be available in the browser, for scraping non-markdown sources in the browser use a remote server
19345
19345
  requiredExecutables: [],
19346
19346
  }); /* <- Note: [🤛] */
@@ -19350,7 +19350,7 @@
19350
19350
  * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available known scrapers
19351
19351
  *
19352
19352
  * @public exported from `@promptbook/core`
19353
- * @public exported from `@promptbook/wizzard`
19353
+ * @public exported from `@promptbook/wizard`
19354
19354
  * @public exported from `@promptbook/cli`
19355
19355
  */
19356
19356
  const _WebsiteScraperMetadataRegistration = $scrapersMetadataRegister.register(websiteScraperMetadata);
@@ -19497,7 +19497,7 @@
19497
19497
  * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available known scrapers
19498
19498
  *
19499
19499
  * @public exported from `@promptbook/website-crawler`
19500
- * @public exported from `@promptbook/wizzard`
19500
+ * @public exported from `@promptbook/wizard`
19501
19501
  * @public exported from `@promptbook/cli`
19502
19502
  */
19503
19503
  const _WebsiteScraperRegistration = $scrapersRegister.register(createWebsiteScraper);