@promptbook/core 0.104.0-3 → 0.104.0-4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,8 +1,8 @@
1
1
  import type { ChatParticipant } from '../../book-components/Chat/types/ChatParticipant';
2
2
  import type { AvailableModel } from '../../execution/AvailableModel';
3
3
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
4
- import type { ChatPromptResult, CompletionPromptResult, EmbeddingPromptResult, PromptResult } from '../../execution/PromptResult';
5
- import type { ChatPrompt, CompletionPrompt, EmbeddingPrompt, Prompt } from '../../types/Prompt';
4
+ import type { ChatPromptResult, CompletionPromptResult, EmbeddingPromptResult, ImagePromptResult, PromptResult } from '../../execution/PromptResult';
5
+ import type { ChatPrompt, CompletionPrompt, EmbeddingPrompt, ImagePrompt, Prompt } from '../../types/Prompt';
6
6
  import type { string_markdown, string_markdown_text, string_title } from '../../types/typeAliases';
7
7
  /**
8
8
  * Multiple LLM Execution Tools is a proxy server that uses multiple execution tools internally and exposes the executor interface externally.
@@ -43,6 +43,10 @@ export declare class MultipleLlmExecutionTools implements LlmExecutionTools {
43
43
  * Calls the best available embedding model
44
44
  */
45
45
  callEmbeddingModel(prompt: EmbeddingPrompt): Promise<EmbeddingPromptResult>;
46
+ /**
47
+ * Calls the best available image generation model
48
+ */
49
+ callImageGenerationModel(prompt: ImagePrompt): Promise<ImagePromptResult>;
46
50
  /**
47
51
  * Calls the best available model
48
52
  *
@@ -46,6 +46,7 @@ export declare class RemoteLlmExecutionTools<TCustomOptions = undefined> impleme
46
46
  private callCommonModel;
47
47
  }
48
48
  /**
49
+ * TODO: !!!! Deprecate pipeline server and all of its components
49
50
  * TODO: Maybe use `$exportJson`
50
51
  * TODO: [🧠][🛍] Maybe not `isAnonymous: boolean` BUT `mode: 'ANONYMOUS'|'COLLECTION'`
51
52
  * TODO: [🍓] Allow to list compatible models with each variant
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
15
15
  export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
16
16
  /**
17
17
  * Represents the version string of the Promptbook engine.
18
- * It follows semantic versioning (e.g., `0.104.0-2`).
18
+ * It follows semantic versioning (e.g., `0.104.0-3`).
19
19
  *
20
20
  * @generated
21
21
  */
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@promptbook/core",
3
- "version": "0.104.0-3",
3
+ "version": "0.104.0-4",
4
4
  "description": "Promptbook: Turn your company's scattered knowledge into AI ready books",
5
5
  "private": false,
6
6
  "sideEffects": false,
package/umd/index.umd.js CHANGED
@@ -28,7 +28,7 @@
28
28
  * @generated
29
29
  * @see https://github.com/webgptorg/promptbook
30
30
  */
31
- const PROMPTBOOK_ENGINE_VERSION = '0.104.0-3';
31
+ const PROMPTBOOK_ENGINE_VERSION = '0.104.0-4';
32
32
  /**
33
33
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
34
34
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -3745,6 +3745,15 @@
3745
3745
  return promptResult;
3746
3746
  };
3747
3747
  }
3748
+ if (llmTools.callImageGenerationModel !== undefined) {
3749
+ proxyTools.callImageGenerationModel = async (prompt) => {
3750
+ // console.info('[🚕] callImageGenerationModel through countTotalUsage');
3751
+ const promptResult = await llmTools.callImageGenerationModel(prompt);
3752
+ totalUsage = addUsage(totalUsage, promptResult.usage);
3753
+ spending.next(promptResult.usage);
3754
+ return promptResult;
3755
+ };
3756
+ }
3748
3757
  // <- Note: [🤖]
3749
3758
  return proxyTools;
3750
3759
  }
@@ -3854,6 +3863,12 @@
3854
3863
  callEmbeddingModel(prompt) {
3855
3864
  return this.callCommonModel(prompt);
3856
3865
  }
3866
+ /**
3867
+ * Calls the best available image generation model
3868
+ */
3869
+ callImageGenerationModel(prompt) {
3870
+ return this.callCommonModel(prompt);
3871
+ }
3857
3872
  // <- Note: [🤖]
3858
3873
  /**
3859
3874
  * Calls the best available model
@@ -3880,6 +3895,11 @@
3880
3895
  continue llm;
3881
3896
  }
3882
3897
  return await llmExecutionTools.callEmbeddingModel(prompt);
3898
+ case 'IMAGE_GENERATION':
3899
+ if (llmExecutionTools.callImageGenerationModel === undefined) {
3900
+ continue llm;
3901
+ }
3902
+ return await llmExecutionTools.callImageGenerationModel(prompt);
3883
3903
  // <- case [🤖]:
3884
3904
  default:
3885
3905
  throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}" in ${llmExecutionTools.title}`);
@@ -6305,8 +6325,9 @@
6305
6325
  $ongoingTaskResult.$resultString = $ongoingTaskResult.$completionResult.content;
6306
6326
  break variant;
6307
6327
  case 'EMBEDDING':
6328
+ case 'IMAGE_GENERATION':
6308
6329
  throw new PipelineExecutionError(spaceTrim$1.spaceTrim((block) => `
6309
- Embedding model can not be used in pipeline
6330
+ ${modelRequirements.modelVariant} model can not be used in pipeline
6310
6331
 
6311
6332
  This should be catched during parsing
6312
6333
 
@@ -16919,6 +16940,9 @@
16919
16940
  case 'EMBEDDING':
16920
16941
  promptResult = await llmTools.callEmbeddingModel(prompt);
16921
16942
  break variant;
16943
+ case 'IMAGE_GENERATION':
16944
+ promptResult = await llmTools.callImageGenerationModel(prompt);
16945
+ break variant;
16922
16946
  // <- case [🤖]:
16923
16947
  default:
16924
16948
  throw new PipelineExecutionError(`Unknown model variant "${prompt.modelRequirements.modelVariant}"`);
@@ -17006,6 +17030,11 @@
17006
17030
  return /* not await */ callCommonModel(prompt);
17007
17031
  };
17008
17032
  }
17033
+ if (llmTools.callImageGenerationModel !== undefined) {
17034
+ proxyTools.callImageGenerationModel = async (prompt) => {
17035
+ return /* not await */ callCommonModel(prompt);
17036
+ };
17037
+ }
17009
17038
  // <- Note: [🤖]
17010
17039
  return proxyTools;
17011
17040
  }
@@ -17044,6 +17073,11 @@
17044
17073
  throw new LimitReachedError('Cannot call `callEmbeddingModel` because you are not allowed to spend any cost');
17045
17074
  };
17046
17075
  }
17076
+ if (proxyTools.callImageGenerationModel !== undefined) {
17077
+ proxyTools.callImageGenerationModel = async (prompt) => {
17078
+ throw new LimitReachedError('Cannot call `callImageGenerationModel` because you are not allowed to spend any cost');
17079
+ };
17080
+ }
17047
17081
  // <- Note: [🤖]
17048
17082
  return proxyTools;
17049
17083
  }