@promptbook/markitdown 0.104.0-3 → 0.104.0-4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,8 +1,8 @@
1
1
  import type { ChatParticipant } from '../../book-components/Chat/types/ChatParticipant';
2
2
  import type { AvailableModel } from '../../execution/AvailableModel';
3
3
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
4
- import type { ChatPromptResult, CompletionPromptResult, EmbeddingPromptResult, PromptResult } from '../../execution/PromptResult';
5
- import type { ChatPrompt, CompletionPrompt, EmbeddingPrompt, Prompt } from '../../types/Prompt';
4
+ import type { ChatPromptResult, CompletionPromptResult, EmbeddingPromptResult, ImagePromptResult, PromptResult } from '../../execution/PromptResult';
5
+ import type { ChatPrompt, CompletionPrompt, EmbeddingPrompt, ImagePrompt, Prompt } from '../../types/Prompt';
6
6
  import type { string_markdown, string_markdown_text, string_title } from '../../types/typeAliases';
7
7
  /**
8
8
  * Multiple LLM Execution Tools is a proxy server that uses multiple execution tools internally and exposes the executor interface externally.
@@ -43,6 +43,10 @@ export declare class MultipleLlmExecutionTools implements LlmExecutionTools {
43
43
  * Calls the best available embedding model
44
44
  */
45
45
  callEmbeddingModel(prompt: EmbeddingPrompt): Promise<EmbeddingPromptResult>;
46
+ /**
47
+ * Calls the best available image generation model
48
+ */
49
+ callImageGenerationModel(prompt: ImagePrompt): Promise<ImagePromptResult>;
46
50
  /**
47
51
  * Calls the best available model
48
52
  *
@@ -46,6 +46,7 @@ export declare class RemoteLlmExecutionTools<TCustomOptions = undefined> impleme
46
46
  private callCommonModel;
47
47
  }
48
48
  /**
49
+ * TODO: !!!! Deprecate pipeline server and all of its components
49
50
  * TODO: Maybe use `$exportJson`
50
51
  * TODO: [🧠][🛍] Maybe not `isAnonymous: boolean` BUT `mode: 'ANONYMOUS'|'COLLECTION'`
51
52
  * TODO: [🍓] Allow to list compatible models with each variant
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
15
15
  export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
16
16
  /**
17
17
  * Represents the version string of the Promptbook engine.
18
- * It follows semantic versioning (e.g., `0.104.0-2`).
18
+ * It follows semantic versioning (e.g., `0.104.0-3`).
19
19
  *
20
20
  * @generated
21
21
  */
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@promptbook/markitdown",
3
- "version": "0.104.0-3",
3
+ "version": "0.104.0-4",
4
4
  "description": "Promptbook: Turn your company's scattered knowledge into AI ready books",
5
5
  "private": false,
6
6
  "sideEffects": false,
@@ -91,7 +91,7 @@
91
91
  "module": "./esm/index.es.js",
92
92
  "typings": "./esm/typings/src/_packages/markitdown.index.d.ts",
93
93
  "peerDependencies": {
94
- "@promptbook/core": "0.104.0-3"
94
+ "@promptbook/core": "0.104.0-4"
95
95
  },
96
96
  "dependencies": {
97
97
  "crypto": "1.0.1",
package/umd/index.umd.js CHANGED
@@ -24,7 +24,7 @@
24
24
  * @generated
25
25
  * @see https://github.com/webgptorg/promptbook
26
26
  */
27
- const PROMPTBOOK_ENGINE_VERSION = '0.104.0-3';
27
+ const PROMPTBOOK_ENGINE_VERSION = '0.104.0-4';
28
28
  /**
29
29
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
30
30
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -3872,6 +3872,15 @@
3872
3872
  return promptResult;
3873
3873
  };
3874
3874
  }
3875
+ if (llmTools.callImageGenerationModel !== undefined) {
3876
+ proxyTools.callImageGenerationModel = async (prompt) => {
3877
+ // console.info('[🚕] callImageGenerationModel through countTotalUsage');
3878
+ const promptResult = await llmTools.callImageGenerationModel(prompt);
3879
+ totalUsage = addUsage(totalUsage, promptResult.usage);
3880
+ spending.next(promptResult.usage);
3881
+ return promptResult;
3882
+ };
3883
+ }
3875
3884
  // <- Note: [🤖]
3876
3885
  return proxyTools;
3877
3886
  }
@@ -3981,6 +3990,12 @@
3981
3990
  callEmbeddingModel(prompt) {
3982
3991
  return this.callCommonModel(prompt);
3983
3992
  }
3993
+ /**
3994
+ * Calls the best available image generation model
3995
+ */
3996
+ callImageGenerationModel(prompt) {
3997
+ return this.callCommonModel(prompt);
3998
+ }
3984
3999
  // <- Note: [🤖]
3985
4000
  /**
3986
4001
  * Calls the best available model
@@ -4007,6 +4022,11 @@
4007
4022
  continue llm;
4008
4023
  }
4009
4024
  return await llmExecutionTools.callEmbeddingModel(prompt);
4025
+ case 'IMAGE_GENERATION':
4026
+ if (llmExecutionTools.callImageGenerationModel === undefined) {
4027
+ continue llm;
4028
+ }
4029
+ return await llmExecutionTools.callImageGenerationModel(prompt);
4010
4030
  // <- case [🤖]:
4011
4031
  default:
4012
4032
  throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}" in ${llmExecutionTools.title}`);
@@ -6141,8 +6161,9 @@
6141
6161
  $ongoingTaskResult.$resultString = $ongoingTaskResult.$completionResult.content;
6142
6162
  break variant;
6143
6163
  case 'EMBEDDING':
6164
+ case 'IMAGE_GENERATION':
6144
6165
  throw new PipelineExecutionError(spaceTrim$1.spaceTrim((block) => `
6145
- Embedding model can not be used in pipeline
6166
+ ${modelRequirements.modelVariant} model can not be used in pipeline
6146
6167
 
6147
6168
  This should be caught during parsing
6148
6169