@promptbook/node 0.104.0-3 → 0.104.0-4

@@ -1,8 +1,8 @@
  import type { ChatParticipant } from '../../book-components/Chat/types/ChatParticipant';
  import type { AvailableModel } from '../../execution/AvailableModel';
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
- import type { ChatPromptResult, CompletionPromptResult, EmbeddingPromptResult, PromptResult } from '../../execution/PromptResult';
- import type { ChatPrompt, CompletionPrompt, EmbeddingPrompt, Prompt } from '../../types/Prompt';
+ import type { ChatPromptResult, CompletionPromptResult, EmbeddingPromptResult, ImagePromptResult, PromptResult } from '../../execution/PromptResult';
+ import type { ChatPrompt, CompletionPrompt, EmbeddingPrompt, ImagePrompt, Prompt } from '../../types/Prompt';
  import type { string_markdown, string_markdown_text, string_title } from '../../types/typeAliases';
  /**
  * Multiple LLM Execution Tools is a proxy server that uses multiple execution tools internally and exposes the executor interface externally.
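The doc comment above describes a fan-out proxy: the class implements the same `LlmExecutionTools` interface as the tools it wraps and forwards each call to the first inner tool that supports it. A minimal sketch of that shape, using simplified stand-in types rather than the package's real ones:

```ts
// Simplified sketch of the proxy idea; `MinimalTools` is a stand-in, not the real interface.
interface MinimalTools {
    title: string;
    callChatModel?(prompt: string): Promise<string>;
}

class MultiToolsProxy implements MinimalTools {
    public readonly title = 'Multiple tools proxy';

    public constructor(private readonly inner: ReadonlyArray<MinimalTools>) {}

    public async callChatModel(prompt: string): Promise<string> {
        for (const tools of this.inner) {
            // Skip wrapped tools that do not implement this optional capability
            if (tools.callChatModel === undefined) {
                continue;
            }
            return await tools.callChatModel(prompt);
        }
        throw new Error('No inner tools support chat models');
    }
}
```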
@@ -43,6 +43,10 @@ export declare class MultipleLlmExecutionTools implements LlmExecutionTools {
  * Calls the best available embedding model
  */
  callEmbeddingModel(prompt: EmbeddingPrompt): Promise<EmbeddingPromptResult>;
+ /**
+ * Calls the best available image generation model
+ */
+ callImageGenerationModel(prompt: ImagePrompt): Promise<ImagePromptResult>;
  /**
  * Calls the best available model
  *
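The new `callImageGenerationModel` follows the pattern of the other variant methods. Assuming an `ImagePrompt` mirrors them (the UMD hunks below confirm it carries `modelRequirements.modelVariant === 'IMAGE_GENERATION'` and that its result exposes `usage`), a call site might look like this sketch; every prompt field other than `modelRequirements` is an assumption:

```ts
// Hypothetical call site; the cast papers over ImagePrompt fields not shown in this diff.
declare const tools: MultipleLlmExecutionTools;

const prompt = {
    modelRequirements: { modelVariant: 'IMAGE_GENERATION' },
} as ImagePrompt;

const result: ImagePromptResult = await tools.callImageGenerationModel(prompt);
console.info(result.usage); // usage is folded into totals by countTotalUsage (last hunk below)
```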
@@ -46,6 +46,7 @@ export declare class RemoteLlmExecutionTools<TCustomOptions = undefined> impleme
  private callCommonModel;
  }
  /**
+ * TODO: !!!! Deprecate pipeline server and all of its components
  * TODO: Maybe use `$exportJson`
  * TODO: [🧠][🛍] Maybe not `isAnonymous: boolean` BUT `mode: 'ANONYMOUS'|'COLLECTION'`
  * TODO: [🍓] Allow to list compatible models with each variant
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
  export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
  /**
  * Represents the version string of the Promptbook engine.
- * It follows semantic versioning (e.g., `0.104.0-2`).
+ * It follows semantic versioning (e.g., `0.104.0-3`).
  *
  * @generated
  */
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@promptbook/node",
- "version": "0.104.0-3",
+ "version": "0.104.0-4",
  "description": "Promptbook: Turn your company's scattered knowledge into AI ready books",
  "private": false,
  "sideEffects": false,
@@ -93,7 +93,7 @@
  "module": "./esm/index.es.js",
  "typings": "./esm/typings/src/_packages/node.index.d.ts",
  "peerDependencies": {
- "@promptbook/core": "0.104.0-3"
+ "@promptbook/core": "0.104.0-4"
  },
  "dependencies": {
  "colors": "1.4.0",
package/umd/index.umd.js CHANGED
@@ -45,7 +45,7 @@
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
- const PROMPTBOOK_ENGINE_VERSION = '0.104.0-3';
+ const PROMPTBOOK_ENGINE_VERSION = '0.104.0-4';
  /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -4056,6 +4056,12 @@
  callEmbeddingModel(prompt) {
  return this.callCommonModel(prompt);
  }
+ /**
+ * Calls the best available image generation model
+ */
+ callImageGenerationModel(prompt) {
+ return this.callCommonModel(prompt);
+ }
  // <- Note: [🤖]
  /**
  * Calls the best available model
@@ -4082,6 +4088,11 @@
  continue llm;
  }
  return await llmExecutionTools.callEmbeddingModel(prompt);
+ case 'IMAGE_GENERATION':
+ if (llmExecutionTools.callImageGenerationModel === undefined) {
+ continue llm;
+ }
+ return await llmExecutionTools.callImageGenerationModel(prompt);
  // <- case [🤖]:
  default:
  throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}" in ${llmExecutionTools.title}`);
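The `continue llm` statements rely on a JavaScript labeled loop: when a provider lacks the requested capability, control jumps straight to the next iteration of the outer provider loop instead of falling out of the `switch`. A standalone miniature of that control flow, with provider shapes simplified:

```ts
// Miniature of the labeled-continue fallback; provider shape is simplified.
type Provider = {
    title: string;
    callImageGenerationModel?: (prompt: string) => Promise<string>;
};

async function firstCapable(providers: Provider[], variant: string, prompt: string): Promise<string> {
    llm: for (const provider of providers) {
        switch (variant) {
            case 'IMAGE_GENERATION':
                if (provider.callImageGenerationModel === undefined) {
                    continue llm; // this provider lacks the capability; try the next one
                }
                return await provider.callImageGenerationModel(prompt);
            default:
                throw new Error(`Unknown model variant "${variant}" in ${provider.title}`);
        }
    }
    throw new Error('No provider supports the requested variant');
}
```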
@@ -4985,8 +4996,9 @@
  $ongoingTaskResult.$resultString = $ongoingTaskResult.$completionResult.content;
  break variant;
  case 'EMBEDDING':
+ case 'IMAGE_GENERATION':
  throw new PipelineExecutionError(spaceTrim$1.spaceTrim((block) => `
- Embedding model can not be used in pipeline
+ ${modelRequirements.modelVariant} model can not be used in pipeline

  This should be catched during parsing

@@ -6211,6 +6223,15 @@
  return promptResult;
  };
  }
+ if (llmTools.callImageGenerationModel !== undefined) {
+ proxyTools.callImageGenerationModel = async (prompt) => {
+ // console.info('[🚕] callImageGenerationModel through countTotalUsage');
+ const promptResult = await llmTools.callImageGenerationModel(prompt);
+ totalUsage = addUsage(totalUsage, promptResult.usage);
+ spending.next(promptResult.usage);
+ return promptResult;
+ };
+ }
  // <- Note: [🤖]
  return proxyTools;
  }
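The final hunk extends a usage-counting decorator: for each optional call method the wrapped tools implement, the wrapper installs a forwarding proxy that folds `promptResult.usage` into a running total and notifies a spending observer. A minimal sketch of that decorator shape; `Usage`, `addUsage`, and `getTotal` are stand-ins, not the package's internals:

```ts
// Minimal sketch of the usage-counting decorator; `Usage` and `addUsage` are stand-ins.
type Usage = { totalCost: number };
type PromptResult = { usage: Usage };
type Tools = { callImageGenerationModel?: (prompt: unknown) => Promise<PromptResult> };

const addUsage = (a: Usage, b: Usage): Usage => ({ totalCost: a.totalCost + b.totalCost });

function countUsage(llmTools: Tools): Tools & { getTotal(): Usage } {
    let totalUsage: Usage = { totalCost: 0 };
    const proxyTools: Tools & { getTotal(): Usage } = { getTotal: () => totalUsage };

    // Only wrap the capabilities the underlying tools actually implement
    if (llmTools.callImageGenerationModel !== undefined) {
        proxyTools.callImageGenerationModel = async (prompt) => {
            const promptResult = await llmTools.callImageGenerationModel!(prompt);
            totalUsage = addUsage(totalUsage, promptResult.usage); // accumulate spend
            return promptResult;
        };
    }
    return proxyTools;
}
```

A caller wraps its tools once up front and reads the accumulated total after the run, which is why the real hunk only adds the image branch when `callImageGenerationModel` is actually present on the wrapped tools.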