@promptbook/openai 0.72.0-7 → 0.72.0-9

This diff shows the contents of publicly released package versions as they appear in their public registry. It is provided for informational purposes only.
Files changed (118)
  1. package/README.md +72 -64
  2. package/esm/index.es.js +99 -7
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/src/_packages/browser.index.d.ts +2 -0
  5. package/esm/typings/src/_packages/cli.index.d.ts +20 -0
  6. package/esm/typings/src/_packages/core.index.d.ts +20 -16
  7. package/esm/typings/src/_packages/documents.index.d.ts +8 -0
  8. package/esm/typings/src/_packages/legacy-documents.index.d.ts +8 -0
  9. package/esm/typings/src/_packages/markdown-utils.index.d.ts +6 -0
  10. package/esm/typings/src/_packages/node.index.d.ts +10 -4
  11. package/esm/typings/src/_packages/pdf.index.d.ts +8 -0
  12. package/esm/typings/src/_packages/types.index.d.ts +15 -5
  13. package/esm/typings/src/_packages/website-crawler.index.d.ts +8 -0
  14. package/esm/typings/src/collection/constructors/createCollectionFromDirectory.d.ts +5 -3
  15. package/esm/typings/src/config.d.ts +6 -0
  16. package/esm/typings/src/conversion/pipelineStringToJson.d.ts +3 -1
  17. package/esm/typings/src/dialogs/callback/CallbackInterfaceToolsOptions.d.ts +2 -2
  18. package/esm/typings/src/dialogs/simple-prompt/SimplePromptInterfaceTools.d.ts +3 -3
  19. package/esm/typings/src/execution/{CommonExecutionToolsOptions.d.ts → CommonToolsOptions.d.ts} +1 -1
  20. package/esm/typings/src/execution/ExecutionTools.d.ts +26 -6
  21. package/esm/typings/src/execution/FilesystemTools.d.ts +9 -0
  22. package/esm/typings/src/execution/createPipelineExecutor/10-executePipeline.d.ts +1 -1
  23. package/esm/typings/src/execution/createPipelineExecutor/20-executeTemplate.d.ts +1 -6
  24. package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +1 -6
  25. package/esm/typings/src/execution/translation/automatic-translate/translateMessages.d.ts +1 -0
  26. package/esm/typings/src/execution/utils/$provideExecutionToolsForNode.d.ts +13 -0
  27. package/esm/typings/src/llm-providers/_common/{$llmToolsMetadataRegister.d.ts → register/$llmToolsMetadataRegister.d.ts} +4 -1
  28. package/esm/typings/src/llm-providers/_common/{$llmToolsRegister.d.ts → register/$llmToolsRegister.d.ts} +5 -2
  29. package/esm/typings/src/llm-providers/_common/{createLlmToolsFromConfigurationFromEnv.d.ts → register/$provideLlmToolsConfigurationFromEnv.d.ts} +3 -3
  30. package/esm/typings/src/llm-providers/_common/{getLlmToolsForCli.d.ts → register/$provideLlmToolsForCli.d.ts} +4 -11
  31. package/esm/typings/src/llm-providers/_common/{getLlmToolsForTestingAndScriptsAndPlayground.d.ts → register/$provideLlmToolsForTestingAndScriptsAndPlayground.d.ts} +4 -3
  32. package/esm/typings/src/llm-providers/_common/{createLlmToolsFromEnv.d.ts → register/$provideLlmToolsFromEnv.d.ts} +6 -5
  33. package/esm/typings/src/llm-providers/_common/{$registeredLlmToolsMessage.d.ts → register/$registeredLlmToolsMessage.d.ts} +5 -2
  34. package/esm/typings/src/llm-providers/_common/{LlmToolsConfiguration.d.ts → register/LlmToolsConfiguration.d.ts} +5 -4
  35. package/esm/typings/src/llm-providers/_common/{LlmToolsMetadata.d.ts → register/LlmToolsMetadata.d.ts} +5 -4
  36. package/esm/typings/src/llm-providers/_common/{LlmToolsOptions.d.ts → register/LlmToolsOptions.d.ts} +4 -1
  37. package/esm/typings/src/llm-providers/_common/{createLlmToolsFromConfiguration.d.ts → register/createLlmToolsFromConfiguration.d.ts} +5 -4
  38. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions.d.ts +3 -3
  39. package/esm/typings/src/llm-providers/anthropic-claude/register-configuration.d.ts +4 -3
  40. package/esm/typings/src/llm-providers/anthropic-claude/register-constructor.d.ts +4 -3
  41. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionToolsOptions.d.ts +2 -2
  42. package/esm/typings/src/llm-providers/azure-openai/register-configuration.d.ts +4 -3
  43. package/esm/typings/src/llm-providers/azure-openai/register-constructor.d.ts +4 -3
  44. package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +3 -3
  45. package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +3 -3
  46. package/esm/typings/src/llm-providers/multiple/MultipleLlmExecutionTools.d.ts +1 -0
  47. package/esm/typings/src/llm-providers/openai/OpenAiExecutionToolsOptions.d.ts +2 -2
  48. package/esm/typings/src/llm-providers/openai/register-configuration.d.ts +5 -4
  49. package/esm/typings/src/llm-providers/openai/register-constructor.d.ts +5 -4
  50. package/esm/typings/src/llm-providers/remote/interfaces/PromptbookServer_ListModels_Request.d.ts +1 -1
  51. package/esm/typings/src/llm-providers/remote/interfaces/PromptbookServer_Prompt_Request.d.ts +1 -1
  52. package/esm/typings/src/llm-providers/remote/interfaces/RemoteLlmExecutionToolsOptions.d.ts +3 -3
  53. package/esm/typings/src/llm-providers/remote/interfaces/RemoteServerOptions.d.ts +2 -2
  54. package/esm/typings/src/personas/preparePersona.d.ts +2 -1
  55. package/esm/typings/src/prepare/PrepareAndScrapeOptions.d.ts +8 -7
  56. package/esm/typings/src/prepare/preparePipeline.d.ts +2 -1
  57. package/esm/typings/src/prepare/prepareTemplates.d.ts +2 -1
  58. package/esm/typings/src/scrapers/_common/Converter.d.ts +4 -10
  59. package/esm/typings/src/scrapers/_common/Scraper.d.ts +4 -9
  60. package/esm/typings/src/scrapers/_common/prepareKnowledgePieces.d.ts +2 -1
  61. package/esm/typings/src/scrapers/_common/register/$provideFilesystemForNode.d.ts +11 -0
  62. package/esm/typings/src/scrapers/_common/register/$provideScrapersForBrowser.d.ts +12 -0
  63. package/esm/typings/src/scrapers/_common/register/$provideScrapersForNode.d.ts +15 -0
  64. package/esm/typings/src/scrapers/_common/register/$registeredScrapersMessage.d.ts +12 -0
  65. package/esm/typings/src/scrapers/_common/register/$scrapersMetadataRegister.d.ts +13 -0
  66. package/esm/typings/src/scrapers/_common/register/$scrapersRegister.d.ts +13 -0
  67. package/esm/typings/src/scrapers/_common/register/ScraperAndConverterMetadata.d.ts +41 -0
  68. package/esm/typings/src/scrapers/_common/register/ScraperConstructor.d.ts +12 -0
  69. package/esm/typings/src/scrapers/_common/utils/getScraperIntermediateSource.d.ts +1 -0
  70. package/esm/typings/src/scrapers/_common/utils/makeKnowledgeSourceHandler.d.ts +2 -1
  71. package/esm/typings/src/scrapers/document/{documentScraper.d.ts → DocumentScraper.d.ts} +18 -12
  72. package/esm/typings/src/scrapers/document/createDocumentScraper.d.ts +20 -0
  73. package/esm/typings/src/scrapers/document/register-constructor.d.ts +13 -0
  74. package/esm/typings/src/scrapers/document/register-metadata.d.ts +24 -0
  75. package/esm/typings/src/scrapers/document-legacy/{legacyDocumentScraper.d.ts → LegacyDocumentScraper.d.ts} +18 -12
  76. package/esm/typings/src/scrapers/document-legacy/createLegacyDocumentScraper.d.ts +20 -0
  77. package/esm/typings/src/scrapers/document-legacy/register-constructor.d.ts +13 -0
  78. package/esm/typings/src/scrapers/document-legacy/register-metadata.d.ts +24 -0
  79. package/esm/typings/src/scrapers/markdown/MarkdownScraper.d.ts +29 -0
  80. package/esm/typings/src/scrapers/markdown/createMarkdownScraper.d.ts +20 -0
  81. package/esm/typings/src/scrapers/markdown/register-constructor.d.ts +13 -0
  82. package/esm/typings/src/scrapers/markdown/register-metadata.d.ts +24 -0
  83. package/esm/typings/src/scrapers/pdf/PdfScraper.d.ts +40 -0
  84. package/esm/typings/src/scrapers/pdf/createPdfScraper.d.ts +20 -0
  85. package/esm/typings/src/scrapers/pdf/register-constructor.d.ts +13 -0
  86. package/esm/typings/src/scrapers/pdf/register-metadata.d.ts +24 -0
  87. package/esm/typings/src/scrapers/website/{websiteScraper.d.ts → WebsiteScraper.d.ts} +18 -14
  88. package/esm/typings/src/scrapers/website/createWebsiteScraper.d.ts +20 -0
  89. package/esm/typings/src/scrapers/website/register-constructor.d.ts +13 -0
  90. package/esm/typings/src/scrapers/website/register-metadata.d.ts +24 -0
  91. package/esm/typings/src/scripting/javascript/JavascriptExecutionToolsOptions.d.ts +2 -2
  92. package/esm/typings/src/scripting/python/PythonExecutionTools.d.ts +3 -3
  93. package/esm/typings/src/scripting/typescript/TypescriptExecutionTools.d.ts +3 -3
  94. package/esm/typings/src/storage/file-cache-storage/FileCacheStorage.d.ts +5 -3
  95. package/esm/typings/src/storage/{utils → memory/utils}/PrefixStorage.d.ts +1 -1
  96. package/esm/typings/src/storage/{utils → memory/utils}/makePromptbookStorageFromWebStorage.d.ts +1 -1
  97. package/esm/typings/src/types/typeAliases.d.ts +7 -0
  98. package/esm/typings/src/utils/$Register.d.ts +19 -6
  99. package/esm/typings/src/utils/execCommand/$execCommand.d.ts +1 -1
  100. package/esm/typings/src/utils/execCommand/$execCommands.d.ts +1 -1
  101. package/esm/typings/src/utils/files/isDirectoryExisting.d.ts +14 -0
  102. package/esm/typings/src/utils/files/isFileExisting.d.ts +13 -0
  103. package/esm/typings/src/utils/files/{$listAllFiles.d.ts → listAllFiles.d.ts} +3 -4
  104. package/package.json +2 -2
  105. package/umd/index.umd.js +99 -7
  106. package/umd/index.umd.js.map +1 -1
  107. package/esm/typings/src/scrapers/index.d.ts +0 -7
  108. package/esm/typings/src/scrapers/markdown/markdownScraper.d.ts +0 -29
  109. package/esm/typings/src/scrapers/pdf/pdfScraper.d.ts +0 -35
  110. package/esm/typings/src/utils/files/$isDirectoryExisting.d.ts +0 -15
  111. package/esm/typings/src/utils/files/$isFileExisting.d.ts +0 -14
  112. /package/esm/typings/src/scrapers/document/{documentScraper.test.d.ts → DocumentScraper.test.d.ts} +0 -0
  113. /package/esm/typings/src/scrapers/document-legacy/{legacyDocumentScraper.test.d.ts → LegacyDocumentScraper.test.d.ts} +0 -0
  114. /package/esm/typings/src/scrapers/markdown/{markdownScraper.test.d.ts → MarkdownScraper.test.d.ts} +0 -0
  115. /package/esm/typings/src/scrapers/website/{websiteScraper.test.d.ts → WebsiteScraper.test.d.ts} +0 -0
  116. /package/esm/typings/src/utils/files/{$isDirectoryExisting.test.d.ts → isDirectoryExisting.test.d.ts} +0 -0
  117. /package/esm/typings/src/utils/files/{$isFileExisting.test.d.ts → isFileExisting.test.d.ts} +0 -0
  118. /package/esm/typings/src/utils/files/{$listAllFiles.test.d.ts → listAllFiles.test.d.ts} +0 -0
package/esm/typings/src/scrapers/_common/register/$scrapersMetadataRegister.d.ts
@@ -0,0 +1,13 @@
+ import { $Register } from '../../../utils/$Register';
+ import type { ScraperAndConverterMetadata } from './ScraperAndConverterMetadata';
+ /**
+ * @@@
+ *
+ * Note: `$` is used to indicate that this interacts with the global scope
+ * @singleton Only one instance of each register is created per build, but there can be more @@@
+ * @public exported from `@promptbook/core`
+ */
+ export declare const $scrapersMetadataRegister: $Register<ScraperAndConverterMetadata>;
+ /**
+ * TODO: [®] DRY Register logic
+ */
package/esm/typings/src/scrapers/_common/register/$scrapersRegister.d.ts
@@ -0,0 +1,13 @@
+ import { $Register } from '../../../utils/$Register';
+ import type { ScraperConstructor } from './ScraperConstructor';
+ /**
+ * @@@
+ *
+ * Note: `$` is used to indicate that this interacts with the global scope
+ * @singleton Only one instance of each register is created per build, but there can be more @@@
+ * @public exported from `@promptbook/core`
+ */
+ export declare const $scrapersRegister: $Register<ScraperConstructor>;
+ /**
+ * TODO: [®] DRY Register logic
+ */
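Both registers follow the same `$`-prefixed global-singleton pattern: importing a package's `register-*` module has the side effect of pushing that package's scrapers into a shared, process-wide register. A minimal sketch of the idea (illustrative only; promptbook's actual `$Register` API may differ):

```typescript
// Illustrative sketch of the register pattern - NOT promptbook's real
// `$Register` implementation, whose exact API may differ.
interface Registered {
    packageName: string;
    className: string;
}

class Register<TRegistered extends Registered> {
    private readonly items: Array<TRegistered> = [];

    // Each `register-*` module in this diff corresponds to a side-effect
    // module that pushes one item into a global register like this:
    public register(item: TRegistered): void {
        this.items.push(item);
    }

    public list(): ReadonlyArray<TRegistered> {
        return this.items;
    }
}

// One process-wide instance per kind of registered thing:
const scrapersMetadataRegister = new Register<Registered>();
```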
package/esm/typings/src/scrapers/_common/register/ScraperAndConverterMetadata.d.ts
@@ -0,0 +1,41 @@
+ import type { string_mime_type } from '../../../types/typeAliases';
+ import type { string_promptbook_documentation_url } from '../../../types/typeAliases';
+ import type { string_title } from '../../../types/typeAliases';
+ import type { Registered } from '../../../utils/$Register';
+ import type { TODO_any } from '../../../utils/organization/TODO_any';
+ /**
+ * @@@
+ *
+ * @@@
+ * x) `Scraper`
+ * x) `Converter`
+ * x) `ScraperConstructor`
+ * x) `Registered`
+ * x) `ExecutionTools`
+ * x) `ScraperAndConverterMetadata`
+ * x) `PrepareAndScrapeOptions`
+ * x) `ScraperConfiguration`
+ * x) `ScraperOptions`
+ */
+ export type ScraperAndConverterMetadata = Registered & {
+ /**
+ * @@@
+ */
+ readonly title: string_title;
+ /**
+ * Mime types that this scraper can handle
+ */
+ readonly mimeTypes: ReadonlyArray<string_mime_type>;
+ /**
+ * @@@
+ */
+ readonly isAvilableInBrowser: boolean;
+ /**
+ * @@@
+ */
+ readonly requiredExecutables: TODO_any;
+ /**
+ * Link to documentation
+ */
+ readonly documentationUrl: string_promptbook_documentation_url;
+ };
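Judging by the `ReadonlyObjectDeep<{...}>` literals attached to the factories later in this diff, the `Registered` base contributes `packageName` and `className`. A record conforming to this type would then look roughly as follows; the concrete values are illustrative, not taken from the package:

```typescript
const exampleMetadata = {
    packageName: '@promptbook/documents', // from the `Registered` base
    className: 'DocumentScraper',         // from the `Registered` base
    title: 'Document scraper',            // illustrative
    mimeTypes: [
        // .docx - illustrative; the real list lives in register-metadata.d.ts
        'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
    ],
    isAvilableInBrowser: false, // the misspelling is in the actual upstream identifier
    requiredExecutables: ['pandoc'], // illustrative; upstream types this as TODO_any
    documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/@@',
};
```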
package/esm/typings/src/scrapers/_common/register/ScraperConstructor.d.ts
@@ -0,0 +1,12 @@
+ import type { ExecutionTools } from '../../../execution/ExecutionTools';
+ import type { PrepareAndScrapeOptions } from '../../../prepare/PrepareAndScrapeOptions';
+ import type { Registered } from '../../../utils/$Register';
+ import type { Scraper } from '../Scraper';
+ import type { ScraperAndConverterMetadata } from './ScraperAndConverterMetadata';
+ /**
+ * @@@
+ */
+ export type ScraperConstructor = Registered & ScraperAndConverterMetadata & ((tools: Pick<ExecutionTools, 'llm'>, options: PrepareAndScrapeOptions) => Scraper);
+ /**
+ * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
+ */
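`ScraperConstructor` is the intersection of a call signature with the metadata shape, i.e. a factory function that carries its own registration data. One common way to produce such a value is `Object.assign`; a sketch under that assumption (not necessarily how promptbook builds it internally):

```typescript
// Sketch: fuse a factory function with its metadata so a single value
// satisfies both halves of the ScraperConstructor intersection type.
function asScraperConstructor<
    TFactory extends (...args: Array<never>) => unknown,
    TMetadata extends object,
>(factory: TFactory, metadata: TMetadata): TFactory & Readonly<TMetadata> {
    return Object.assign(factory, metadata);
}
```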
package/esm/typings/src/scrapers/_common/utils/getScraperIntermediateSource.d.ts
@@ -30,4 +30,5 @@ export {};
  * 1) Need to store more than serialized JSONs
  * 2) Need to switch between a `rootDirname` and `cacheDirname` <- TODO: !!!!
  * TODO: [🐱‍🐉][🧠] Make some smart crop
+ * Note: [🟢] Code in this file should never be released in packages that could be imported into browser environment
  */
package/esm/typings/src/scrapers/_common/utils/makeKnowledgeSourceHandler.d.ts
@@ -1,4 +1,5 @@
  import type { SetOptional } from 'type-fest';
+ import type { ExecutionTools } from '../../../execution/ExecutionTools';
  import type { PrepareAndScrapeOptions } from '../../../prepare/PrepareAndScrapeOptions';
  import type { KnowledgeSourceJson } from '../../../types/PipelineJson/KnowledgeSourceJson';
  import type { ScraperSourceHandler } from '../Scraper';
@@ -7,4 +8,4 @@ import type { ScraperSourceHandler } from '../Scraper';
  *
  * @private for scraper utilities
  */
- export declare function makeKnowledgeSourceHandler(knowledgeSource: SetOptional<KnowledgeSourceJson, 'name'>, options?: Pick<PrepareAndScrapeOptions, 'rootDirname' | 'isVerbose'>): Promise<ScraperSourceHandler>;
+ export declare function makeKnowledgeSourceHandler(knowledgeSource: SetOptional<KnowledgeSourceJson, 'name'>, tools: Pick<ExecutionTools, 'fs'>, options?: Pick<PrepareAndScrapeOptions, 'rootDirname' | 'isVerbose'>): Promise<ScraperSourceHandler>;
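The breaking change here is the new mandatory `tools` parameter: filesystem access is now injected explicitly rather than reached for implicitly. A hypothetical call site (the function is `@private`, and the knowledge-source fields shown are assumptions, not taken from `KnowledgeSourceJson`):

```typescript
// Hypothetical usage sketch; `fs` would plausibly come from the
// $provideFilesystemForNode() helper that this release also adds.
const handler = await makeKnowledgeSourceHandler(
    { sourceContent: './docs/manual.docx' }, // `name` may be omitted (SetOptional)
    { fs },                                  // new: Pick<ExecutionTools, 'fs'>
    { rootDirname: process.cwd(), isVerbose: true },
);
```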
package/esm/typings/src/scrapers/document/{documentScraper.d.ts → DocumentScraper.d.ts}
@@ -1,37 +1,43 @@
- import type { PrepareAndScrapeOptions } from '../../prepare/PrepareAndScrapeOptions';
  import type { KnowledgePiecePreparedJson } from '../../types/PipelineJson/KnowledgePieceJson';
+ import type { ExecutionTools } from '../../execution/ExecutionTools';
+ import type { PrepareAndScrapeOptions } from '../../prepare/PrepareAndScrapeOptions';
+ import type { Converter } from '../_common/Converter';
+ import type { ScraperAndConverterMetadata } from '../_common/register/ScraperAndConverterMetadata';
+ import type { Scraper } from '../_common/Scraper';
  import type { ScraperSourceHandler } from '../_common/Scraper';
  import type { ScraperIntermediateSource } from '../_common/ScraperIntermediateSource';
  /**
  * Scraper of .docx and .odt files
  *
  * @see `documentationUrl` for more details
- * @public exported from `@promptbook/core`
+ * @public exported from `@promptbook/documents`
  */
- export declare const documentScraper: {
+ export declare class DocumentScraper implements Converter, Scraper {
+ private readonly tools;
+ private readonly options;
  /**
- * Mime types that this scraper can handle
+ * Metadata of the scraper which includes title, mime types, etc.
  */
- mimeTypes: string[];
+ get metadata(): ScraperAndConverterMetadata;
  /**
- * Link to documentation
+ * Markdown scraper is used internally
  */
- documentationUrl: "https://github.com/webgptorg/promptbook/discussions/@@";
+ private readonly markdownScraper;
+ constructor(tools: Pick<ExecutionTools, 'fs' | 'llm'>, options: PrepareAndScrapeOptions);
  /**
  * Convert the `.docx` or `.odt` to `.md` file and returns intermediate source
  *
  * Note: `$` is used to indicate that this function is not a pure function - it leaves files on the disk and you are responsible for cleaning them by calling `destroy` method of returned object
  */
- $convert(source: ScraperSourceHandler, options: PrepareAndScrapeOptions): Promise<ScraperIntermediateSource>;
+ $convert(source: ScraperSourceHandler): Promise<ScraperIntermediateSource>;
  /**
  * Scrapes the docx file and returns the knowledge pieces or `null` if it can't scrape it
  */
- scrape(source: ScraperSourceHandler, options: PrepareAndScrapeOptions): Promise<Array<Omit<KnowledgePiecePreparedJson, 'sources' | 'preparationIds'>> | null>;
- };
+ scrape(source: ScraperSourceHandler): Promise<Array<Omit<KnowledgePiecePreparedJson, 'sources' | 'preparationIds'>> | null>;
+ }
  /**
  * TODO: [👣] Converted documents can act as cached items - there is no need to run conversion each time
- * TODO: [🦖] Make some system for putting scrapers to separete packages
  * TODO: [🪂] Do it in parallel 11:11
- * TODO: [🦷] Ideally use `as const satisfies Converter & Scraper` BUT this combination throws errors
  * Note: No need to aggregate usage here, it is done by intercepting the llmTools
+ * Note: [🟢] Code in this file should never be released in packages that could be imported into browser environment
  */
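The practical upshot of this hunk: `documentScraper` stops being a plain object whose methods take `options` on every call and becomes a class whose dependencies (`tools`) and `options` are injected once through the constructor. A before/after sketch derived from the signatures above:

```typescript
// Before (0.72.0-7): a singleton object, options threaded through each call
// const pieces = await documentScraper.scrape(source, options);

// After (0.72.0-9): dependencies injected once, per-call surface shrinks
const scraper = new DocumentScraper({ fs, llm }, options);
const pieces = await scraper.scrape(source); // null if it cannot scrape
const intermediate = await scraper.$convert(source);
// The `$` prefix warns about the side effect: the caller must clean up
await intermediate.destroy();
```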
package/esm/typings/src/scrapers/document/createDocumentScraper.d.ts
@@ -0,0 +1,20 @@
+ import type { ExecutionTools } from '../../execution/ExecutionTools';
+ import type { PrepareAndScrapeOptions } from '../../prepare/PrepareAndScrapeOptions';
+ import { DocumentScraper } from './DocumentScraper';
+ /**
+ * @@@
+ *
+ * @public exported from `@promptbook/documents`
+ */
+ export declare const createDocumentScraper: ((tools: Pick<ExecutionTools, 'llm'>, options: PrepareAndScrapeOptions) => DocumentScraper) & import("type-fest/source/readonly-deep").ReadonlyObjectDeep<{
+ title: string;
+ packageName: string;
+ className: string;
+ mimeTypes: string[];
+ documentationUrl: "https://github.com/webgptorg/promptbook/discussions/@@";
+ isAvilableInBrowser: false;
+ requiredExecutables: string[];
+ }>;
+ /**
+ * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
+ */
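Because the metadata object is merged onto the factory itself, a caller (or the register) can inspect what a scraper handles without constructing it. A short sketch; the printed values are what the `@public` tags and literal types above suggest, not verified output:

```typescript
// Metadata rides along on the factory value itself:
console.info(createDocumentScraper.packageName);         // presumably '@promptbook/documents'
console.info(createDocumentScraper.isAvilableInBrowser); // false - node-only
console.info(createDocumentScraper.mimeTypes);           // e.g. the .docx/.odt mime types

// ...and invoking it yields a ready DocumentScraper:
const documentScraper = createDocumentScraper({ llm }, { rootDirname: process.cwd() });
```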
package/esm/typings/src/scrapers/document/register-constructor.d.ts
@@ -0,0 +1,13 @@
+ import type { Registration } from '../../utils/$Register';
+ /**
+ * Registration of known scraper
+ *
+ * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available known scrapers
+ *
+ * @public exported from `@promptbook/documents`
+ * @public exported from `@promptbook/cli`
+ */
+ export declare const _DocumentScraperRegistration: Registration;
+ /**
+ * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
+ */
package/esm/typings/src/scrapers/document/register-metadata.d.ts
@@ -0,0 +1,24 @@
+ import type { Registration } from '../../utils/$Register';
+ /**
+ * Metadata of the scraper
+ *
+ * @private within the scraper directory
+ */
+ export declare const documentScraperMetadata: import("type-fest/source/readonly-deep").ReadonlyObjectDeep<{
+ title: string;
+ packageName: string;
+ className: string;
+ mimeTypes: string[];
+ documentationUrl: "https://github.com/webgptorg/promptbook/discussions/@@";
+ isAvilableInBrowser: false;
+ requiredExecutables: string[];
+ }>;
+ /**
+ * Registration of known scraper metadata
+ *
+ * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available known scrapers
+ *
+ * @public exported from `@promptbook/core`
+ * @public exported from `@promptbook/cli`
+ */
+ export declare const _DocumentScraperMetadataRegistration: Registration;
package/esm/typings/src/scrapers/document-legacy/{legacyDocumentScraper.d.ts → LegacyDocumentScraper.d.ts}
@@ -1,37 +1,43 @@
- import type { PrepareAndScrapeOptions } from '../../prepare/PrepareAndScrapeOptions';
  import type { KnowledgePiecePreparedJson } from '../../types/PipelineJson/KnowledgePieceJson';
+ import type { ExecutionTools } from '../../execution/ExecutionTools';
+ import type { PrepareAndScrapeOptions } from '../../prepare/PrepareAndScrapeOptions';
+ import type { Converter } from '../_common/Converter';
+ import type { ScraperAndConverterMetadata } from '../_common/register/ScraperAndConverterMetadata';
+ import type { Scraper } from '../_common/Scraper';
  import type { ScraperSourceHandler } from '../_common/Scraper';
  import type { ScraperIntermediateSource } from '../_common/ScraperIntermediateSource';
  /**
  * Scraper for .docx files
  *
  * @see `documentationUrl` for more details
- * @public exported from `@promptbook/core`
+ * @public exported from `@promptbook/legacy-documents`
  */
- export declare const legacyDocumentScraper: {
+ export declare class LegacyDocumentScraper implements Converter, Scraper {
+ private readonly tools;
+ private readonly options;
  /**
- * Mime types that this scraper can handle
+ * Metadata of the scraper which includes title, mime types, etc.
  */
- mimeTypes: string[];
+ get metadata(): ScraperAndConverterMetadata;
  /**
- * Link to documentation
+ * Document scraper is used internally
  */
- documentationUrl: "https://github.com/webgptorg/promptbook/discussions/@@";
+ private readonly documentScraper;
+ constructor(tools: Pick<ExecutionTools, 'fs' | 'llm'>, options: PrepareAndScrapeOptions);
  /**
  * Convert the `.doc` or `.rtf` to `.doc` file and returns intermediate source
  *
  * Note: `$` is used to indicate that this function is not a pure function - it leaves files on the disk and you are responsible for cleaning them by calling `destroy` method of returned object
  */
- $convert(source: ScraperSourceHandler, options: PrepareAndScrapeOptions): Promise<ScraperIntermediateSource>;
+ $convert(source: ScraperSourceHandler): Promise<ScraperIntermediateSource>;
  /**
  * Scrapes the `.doc` or `.rtf` file and returns the knowledge pieces or `null` if it can't scrape it
  */
- scrape(source: ScraperSourceHandler, options: PrepareAndScrapeOptions): Promise<Array<Omit<KnowledgePiecePreparedJson, 'sources' | 'preparationIds'>> | null>;
- };
+ scrape(source: ScraperSourceHandler): Promise<Array<Omit<KnowledgePiecePreparedJson, 'sources' | 'preparationIds'>> | null>;
+ }
  /**
  * TODO: [👣] Converted documents can act as cached items - there is no need to run conversion each time
- * TODO: [🦖] Make some system for putting scrapers to separete packages
  * TODO: [🪂] Do it in parallel 11:11
- * TODO: [🦷] Ideally use `as const satisfies Converter & Scraper` BUT this combination throws errors
  * Note: No need to aggregate usage here, it is done by intercepting the llmTools
+ * Note: [🟢] Code in this file should never be released in packages that could be imported into browser environment
  */
package/esm/typings/src/scrapers/document-legacy/createLegacyDocumentScraper.d.ts
@@ -0,0 +1,20 @@
+ import type { ExecutionTools } from '../../execution/ExecutionTools';
+ import type { PrepareAndScrapeOptions } from '../../prepare/PrepareAndScrapeOptions';
+ import { LegacyDocumentScraper } from './LegacyDocumentScraper';
+ /**
+ * @@@
+ *
+ * @public exported from `@promptbook/legacy-documents`
+ */
+ export declare const createLegacyDocumentScraper: ((tools: Pick<ExecutionTools, 'llm'>, options: PrepareAndScrapeOptions) => LegacyDocumentScraper) & import("type-fest/source/readonly-deep").ReadonlyObjectDeep<{
+ title: string;
+ packageName: string;
+ className: string;
+ mimeTypes: string[];
+ documentationUrl: "https://github.com/webgptorg/promptbook/discussions/@@";
+ isAvilableInBrowser: false;
+ requiredExecutables: string[];
+ }>;
+ /**
+ * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
+ */
package/esm/typings/src/scrapers/document-legacy/register-constructor.d.ts
@@ -0,0 +1,13 @@
+ import type { Registration } from '../../utils/$Register';
+ /**
+ * Registration of known scraper
+ *
+ * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available known scrapers
+ *
+ * @public exported from `@promptbook/legacy-documents`
+ * @public exported from `@promptbook/cli`
+ */
+ export declare const _LegacyDocumentScraperRegistration: Registration;
+ /**
+ * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
+ */
package/esm/typings/src/scrapers/document-legacy/register-metadata.d.ts
@@ -0,0 +1,24 @@
+ import type { Registration } from '../../utils/$Register';
+ /**
+ * Metadata of the scraper
+ *
+ * @private within the scraper directory
+ */
+ export declare const legacyDocumentScraperMetadata: import("type-fest/source/readonly-deep").ReadonlyObjectDeep<{
+ title: string;
+ packageName: string;
+ className: string;
+ mimeTypes: string[];
+ documentationUrl: "https://github.com/webgptorg/promptbook/discussions/@@";
+ isAvilableInBrowser: false;
+ requiredExecutables: string[];
+ }>;
+ /**
+ * Registration of known scraper metadata
+ *
+ * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available known scrapers
+ *
+ * @public exported from `@promptbook/core`
+ * @public exported from `@promptbook/cli`
+ */
+ export declare const _LegacyDocumentScraperMetadataRegistration: Registration;
package/esm/typings/src/scrapers/markdown/MarkdownScraper.d.ts
@@ -0,0 +1,29 @@
+ import type { KnowledgePiecePreparedJson } from '../../types/PipelineJson/KnowledgePieceJson';
+ import type { Scraper } from '../_common/Scraper';
+ import type { ScraperSourceHandler } from '../_common/Scraper';
+ import type { ExecutionTools } from '../../execution/ExecutionTools';
+ import type { PrepareAndScrapeOptions } from '../../prepare/PrepareAndScrapeOptions';
+ import type { ScraperAndConverterMetadata } from '../_common/register/ScraperAndConverterMetadata';
+ /**
+ * Scraper for markdown files
+ *
+ * @see `documentationUrl` for more details
+ * @public exported from `@promptbook/markdown-utils`
+ */
+ export declare class MarkdownScraper implements Scraper {
+ private readonly tools;
+ private readonly options;
+ /**
+ * Metadata of the scraper which includes title, mime types, etc.
+ */
+ get metadata(): ScraperAndConverterMetadata;
+ constructor(tools: Pick<ExecutionTools, 'llm'>, options: PrepareAndScrapeOptions);
+ /**
+ * Scrapes the markdown file and returns the knowledge pieces or `null` if it can't scrape it
+ */
+ scrape(source: ScraperSourceHandler): Promise<Array<Omit<KnowledgePiecePreparedJson, 'sources' | 'preparationIds'>> | null>;
+ }
+ /**
+ * TODO: [🪂] Do it in parallel 11:11
+ * Note: No need to aggregate usage here, it is done by intercepting the llmTools
+ */
package/esm/typings/src/scrapers/markdown/createMarkdownScraper.d.ts
@@ -0,0 +1,20 @@
+ import type { ExecutionTools } from '../../execution/ExecutionTools';
+ import type { PrepareAndScrapeOptions } from '../../prepare/PrepareAndScrapeOptions';
+ import { MarkdownScraper } from './MarkdownScraper';
+ /**
+ * @@@
+ *
+ * @public exported from `@promptbook/markdown-utils`
+ */
+ export declare const createMarkdownScraper: ((tools: Pick<ExecutionTools, 'llm'>, options: PrepareAndScrapeOptions) => MarkdownScraper) & import("type-fest/source/readonly-deep").ReadonlyObjectDeep<{
+ title: string;
+ packageName: string;
+ className: string;
+ mimeTypes: string[];
+ documentationUrl: "https://github.com/webgptorg/promptbook/discussions/@@";
+ isAvilableInBrowser: true;
+ requiredExecutables: string[];
+ }>;
+ /**
+ * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
+ */
package/esm/typings/src/scrapers/markdown/register-constructor.d.ts
@@ -0,0 +1,13 @@
+ import type { Registration } from '../../utils/$Register';
+ /**
+ * Registration of known scraper
+ *
+ * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available known scrapers
+ *
+ * @public exported from `@promptbook/markdown-utils`
+ * @public exported from `@promptbook/cli`
+ */
+ export declare const _MarkdownScraperRegistration: Registration;
+ /**
+ * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
+ */
package/esm/typings/src/scrapers/markdown/register-metadata.d.ts
@@ -0,0 +1,24 @@
+ import type { Registration } from '../../utils/$Register';
+ /**
+ * Metadata of the scraper
+ *
+ * @private within the scraper directory
+ */
+ export declare const markdownScraperMetadata: import("type-fest/source/readonly-deep").ReadonlyObjectDeep<{
+ title: string;
+ packageName: string;
+ className: string;
+ mimeTypes: string[];
+ documentationUrl: "https://github.com/webgptorg/promptbook/discussions/@@";
+ isAvilableInBrowser: true;
+ requiredExecutables: string[];
+ }>;
+ /**
+ * Registration of known scraper metadata
+ *
+ * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available known scrapers
+ *
+ * @public exported from `@promptbook/core`
+ * @public exported from `@promptbook/cli`
+ */
+ export declare const _MarkdownScraperMetadataRegistration: Registration;
package/esm/typings/src/scrapers/pdf/PdfScraper.d.ts
@@ -0,0 +1,40 @@
+ import type { KnowledgePiecePreparedJson } from '../../types/PipelineJson/KnowledgePieceJson';
+ import type { Scraper } from '../_common/Scraper';
+ import type { ScraperSourceHandler } from '../_common/Scraper';
+ import type { ExecutionTools } from '../../execution/ExecutionTools';
+ import type { PrepareAndScrapeOptions } from '../../prepare/PrepareAndScrapeOptions';
+ import type { Converter } from '../_common/Converter';
+ import type { ScraperAndConverterMetadata } from '../_common/register/ScraperAndConverterMetadata';
+ import type { ScraperIntermediateSource } from '../_common/ScraperIntermediateSource';
+ /**
+ * Scraper for .docx files
+ *
+ * @see `documentationUrl` for more details
+ * @public exported from `@promptbook/pdf`
+ */
+ export declare class PdfScraper implements Converter, Scraper {
+ private readonly tools;
+ private readonly options;
+ /**
+ * Metadata of the scraper which includes title, mime types, etc.
+ */
+ get metadata(): ScraperAndConverterMetadata;
+ /**
+ * Markdown scraper is used internally
+ */
+ private readonly markdownScraper;
+ constructor(tools: Pick<ExecutionTools, 'llm'>, options: PrepareAndScrapeOptions);
+ /**
+ * Converts the `.pdf` file to `.md` file and returns intermediate source
+ */
+ $convert(source: ScraperSourceHandler): Promise<ScraperIntermediateSource>;
+ /**
+ * Scrapes the `.pdf` file and returns the knowledge pieces or `null` if it can't scrape it
+ */
+ scrape(source: ScraperSourceHandler): Promise<Array<Omit<KnowledgePiecePreparedJson, 'sources' | 'preparationIds'>> | null>;
+ }
+ /**
+ * TODO: [👣] Converted pdf documents can act as cached items - there is no need to run conversion each time
+ * TODO: [🪂] Do it in parallel 11:11
+ * Note: No need to aggregate usage here, it is done by intercepting the llmTools
+ */
package/esm/typings/src/scrapers/pdf/createPdfScraper.d.ts
@@ -0,0 +1,20 @@
+ import type { ExecutionTools } from '../../execution/ExecutionTools';
+ import type { PrepareAndScrapeOptions } from '../../prepare/PrepareAndScrapeOptions';
+ import { PdfScraper } from './PdfScraper';
+ /**
+ * @@@
+ *
+ * @public exported from `@promptbook/pdf`
+ */
+ export declare const createPdfScraper: ((tools: Pick<ExecutionTools, 'llm'>, options: PrepareAndScrapeOptions) => PdfScraper) & import("type-fest/source/readonly-deep").ReadonlyObjectDeep<{
+ title: string;
+ packageName: string;
+ className: string;
+ mimeTypes: string[];
+ documentationUrl: "https://github.com/webgptorg/promptbook/discussions/@@";
+ isAvilableInBrowser: true;
+ requiredExecutables: string[];
+ }>;
+ /**
+ * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
+ */
package/esm/typings/src/scrapers/pdf/register-constructor.d.ts
@@ -0,0 +1,13 @@
+ import type { Registration } from '../../utils/$Register';
+ /**
+ * Registration of known scraper
+ *
+ * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available known scrapers
+ *
+ * @public exported from `@promptbook/pdf`
+ * @public exported from `@promptbook/cli`
+ */
+ export declare const _PdfScraperRegistration: Registration;
+ /**
+ * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
+ */
package/esm/typings/src/scrapers/pdf/register-metadata.d.ts
@@ -0,0 +1,24 @@
+ import type { Registration } from '../../utils/$Register';
+ /**
+ * Metadata of the scraper
+ *
+ * @private within the scraper directory
+ */
+ export declare const pdfScraperMetadata: import("type-fest/source/readonly-deep").ReadonlyObjectDeep<{
+ title: string;
+ packageName: string;
+ className: string;
+ mimeTypes: string[];
+ documentationUrl: "https://github.com/webgptorg/promptbook/discussions/@@";
+ isAvilableInBrowser: true;
+ requiredExecutables: string[];
+ }>;
+ /**
+ * Registration of known scraper metadata
+ *
+ * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available known scrapers
+ *
+ * @public exported from `@promptbook/core`
+ * @public exported from `@promptbook/cli`
+ */
+ export declare const _PdfScraperMetadataRegistration: Registration;
package/esm/typings/src/scrapers/website/{websiteScraper.d.ts → WebsiteScraper.d.ts}
@@ -1,43 +1,47 @@
- import type { PrepareAndScrapeOptions } from '../../prepare/PrepareAndScrapeOptions';
  import type { KnowledgePiecePreparedJson } from '../../types/PipelineJson/KnowledgePieceJson';
  import type { string_markdown } from '../../types/typeAliases';
+ import type { Converter } from '../_common/Converter';
+ import type { Scraper } from '../_common/Scraper';
  import type { ScraperSourceHandler } from '../_common/Scraper';
+ import type { ExecutionTools } from '../../execution/ExecutionTools';
+ import type { PrepareAndScrapeOptions } from '../../prepare/PrepareAndScrapeOptions';
+ import type { ScraperAndConverterMetadata } from '../_common/register/ScraperAndConverterMetadata';
  import type { ScraperIntermediateSource } from '../_common/ScraperIntermediateSource';
  /**
  * Scraper for .docx files
  *
  * @see `documentationUrl` for more details
- * @public exported from `@promptbook/core`
+ * @public exported from `@promptbook/website-crawler`
  */
- export declare const websiteScraper: {
+ export declare class WebsiteScraper implements Converter, Scraper {
+ private readonly tools;
+ private readonly options;
  /**
- * Mime types that this scraper can handle
+ * Metadata of the scraper which includes title, mime types, etc.
  */
- mimeTypes: string[];
+ get metadata(): ScraperAndConverterMetadata;
  /**
- * Link to documentation
+ * Markdown scraper is used internally
  */
- documentationUrl: "https://github.com/webgptorg/promptbook/discussions/@@";
+ private readonly markdownScraper;
+ constructor(tools: Pick<ExecutionTools, 'fs' | 'llm'>, options: PrepareAndScrapeOptions);
  /**
  * Convert the website to `.md` file and returns intermediate source
  *
  * Note: `$` is used to indicate that this function is not a pure function - it leaves files on the disk and you are responsible for cleaning them by calling `destroy` method of returned object
  */
- $convert(source: ScraperSourceHandler, options: PrepareAndScrapeOptions): Promise<import("destroyable").IDestroyable & {
- readonly filename: string;
- } & {
+ $convert(source: ScraperSourceHandler): Promise<ScraperIntermediateSource & {
  markdown: string_markdown;
  }>;
  /**
  * Scrapes the website and returns the knowledge pieces or `null` if it can't scrape it
  */
- scrape(source: ScraperSourceHandler, options: PrepareAndScrapeOptions): Promise<Array<Omit<KnowledgePiecePreparedJson, 'sources' | 'preparationIds'>> | null>;
- };
+ scrape(source: ScraperSourceHandler): Promise<Array<Omit<KnowledgePiecePreparedJson, 'sources' | 'preparationIds'>> | null>;
+ }
  /**
  * TODO: !!!!!! Put into separate package
  * TODO: [👣] Scraped website in .md can act as cache item - there is no need to run conversion each time
- * TODO: [🦖] Make some system for putting scrapers to separete packages
  * TODO: [🪂] Do it in parallel 11:11
- * TODO: [🦷] Ideally use `as const satisfies Converter & Scraper` BUT this combination throws errors
  * Note: No need to aggregate usage here, it is done by intercepting the llmTools
+ * Note: [🟢] Code in this file should never be released in packages that could be imported into browser environment
  */
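`$convert` now returns the shared `ScraperIntermediateSource` shape (extended with the scraped `markdown`) instead of an inline `IDestroyable` intersection. Either way, the result holds a file on disk until destroyed, as the `$` prefix and the note above warn; a usage sketch:

```typescript
// Sketch: consume the intermediate markdown, then release the on-disk file.
const intermediate = await websiteScraper.$convert(source);
try {
    console.info(intermediate.markdown); // the crawled page as markdown
} finally {
    await intermediate.destroy(); // from the `destroyable` contract noted above
}
```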
package/esm/typings/src/scrapers/website/createWebsiteScraper.d.ts
@@ -0,0 +1,20 @@
+ import type { ExecutionTools } from '../../execution/ExecutionTools';
+ import type { PrepareAndScrapeOptions } from '../../prepare/PrepareAndScrapeOptions';
+ import { WebsiteScraper } from './WebsiteScraper';
+ /**
+ * @@@
+ *
+ * @public exported from `@promptbook/website-crawler`
+ */
+ export declare const createWebsiteScraper: ((tools: Pick<ExecutionTools, 'llm'>, options: PrepareAndScrapeOptions) => WebsiteScraper) & import("type-fest/source/readonly-deep").ReadonlyObjectDeep<{
+ title: string;
+ packageName: string;
+ className: string;
+ mimeTypes: string[];
+ documentationUrl: "https://github.com/webgptorg/promptbook/discussions/@@";
+ isAvilableInBrowser: false;
+ requiredExecutables: string[];
+ }>;
+ /**
+ * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
+ */
package/esm/typings/src/scrapers/website/register-constructor.d.ts
@@ -0,0 +1,13 @@
+ import type { Registration } from '../../utils/$Register';
+ /**
+ * Registration of known scraper
+ *
+ * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available known scrapers
+ *
+ * @public exported from `@promptbook/website-crawler`
+ * @public exported from `@promptbook/cli`
+ */
+ export declare const _WebsiteScraperRegistration: Registration;
+ /**
+ * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
+ */