@promptbook/core 0.104.0-2 → 0.104.0-4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -185,6 +185,7 @@ import type { BookTranspiler } from '../transpilers/_common/BookTranspiler';
 import type { BookTranspilerOptions } from '../transpilers/_common/BookTranspilerOptions';
 import type { IntermediateFilesStrategy } from '../types/IntermediateFilesStrategy';
 import type { LlmCall } from '../types/LlmCall';
+import type { Message } from '../types/Message';
 import type { ModelRequirements } from '../types/ModelRequirements';
 import type { CompletionModelRequirements } from '../types/ModelRequirements';
 import type { ChatModelRequirements } from '../types/ModelRequirements';
@@ -551,6 +552,7 @@ export type { BookTranspiler };
 export type { BookTranspilerOptions };
 export type { IntermediateFilesStrategy };
 export type { LlmCall };
+export type { Message };
 export type { ModelRequirements };
 export type { CompletionModelRequirements };
 export type { ChatModelRequirements };
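These two hunks are in the package's public type index: the first imports the new `Message` type, the second re-exports it, so consumers can reach it straight from `@promptbook/core`. A minimal sketch of what that enables (the export itself is confirmed above; the generic parameter and fields follow the `Message` definition that appears later in this diff):

```typescript
import type { Message } from '@promptbook/core';

// Hypothetical helper typed against the new export;
// `Message` is generic over the participant type.
function summarizeMessage(message: Message<string>): string {
    return `${message.sender}: ${message.content}`;
}
```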
@@ -1,22 +1,17 @@
+import { Message } from '../../../types/Message';
 import type { id, string_markdown } from '../../../types/typeAliases';
 /**
  * A message in the chat
  *
  * @public exported from `@promptbook/components`
  */
-export type ChatMessage = {
+export type ChatMessage = Omit<Message<id>, 'direction' | 'recipients' | 'threadId' | 'metadata'> & {
     /**
-     * Unique identifier of the message
-     */
-    id?: id;
-    /**
-     * Date when the message was created
-     */
-    date?: Date;
-    /**
-     * The name of the participant who sent the message
+     * Force the channel to be 'PROMPTBOOK_CHAT'
+     *
+     * @default 'PROMPTBOOK_CHAT'
      */
-    from: id;
+    channel?: 'PROMPTBOOK_CHAT';
     /**
      * The content of the message with optional markdown formatting
      */
@@ -37,6 +32,7 @@ export type ChatMessage = {
     isVoiceCall?: boolean;
 };
 /**
+ * TODO: Make all fields readonly
  * TODO: Delete `expectedAnswer` from ChatMessage
  * TODO: Rename `date` into `created`+`modified`
  */
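With this change `ChatMessage` is no longer a standalone shape: it is `Message<id>` minus `direction`, `recipients`, `threadId`, and `metadata`, with `channel` pinned to `'PROMPTBOOK_CHAT'`; the old `from: id` field gives way to the inherited `sender`. A minimal sketch of a value that should satisfy the new type (only fields visible in these hunks are used; the elided middle of the file may add others):

```typescript
import type { ChatMessage } from '@promptbook/components';

// `sender` replaces the old `from`; `channel` can be omitted
// because it defaults to 'PROMPTBOOK_CHAT'.
const message: ChatMessage = {
    id: 'msg-1',
    sender: 'user-1',
    content: 'Hi there, *how can I help?*',
    isVoiceCall: false,
};
```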
@@ -1,8 +1,8 @@
 import type { ChatParticipant } from '../../book-components/Chat/types/ChatParticipant';
 import type { AvailableModel } from '../../execution/AvailableModel';
 import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
-import type { ChatPromptResult, CompletionPromptResult, EmbeddingPromptResult, PromptResult } from '../../execution/PromptResult';
-import type { ChatPrompt, CompletionPrompt, EmbeddingPrompt, Prompt } from '../../types/Prompt';
+import type { ChatPromptResult, CompletionPromptResult, EmbeddingPromptResult, ImagePromptResult, PromptResult } from '../../execution/PromptResult';
+import type { ChatPrompt, CompletionPrompt, EmbeddingPrompt, ImagePrompt, Prompt } from '../../types/Prompt';
 import type { string_markdown, string_markdown_text, string_title } from '../../types/typeAliases';
 /**
  * Multiple LLM Execution Tools is a proxy server that uses multiple execution tools internally and exposes the executor interface externally.
@@ -43,6 +43,10 @@ export declare class MultipleLlmExecutionTools implements LlmExecutionTools {
     * Calls the best available embedding model
     */
    callEmbeddingModel(prompt: EmbeddingPrompt): Promise<EmbeddingPromptResult>;
+   /**
+    * Calls the best available image generation model
+    */
+   callImageGenerationModel(prompt: ImagePrompt): Promise<ImagePromptResult>;
    /**
     * Calls the best available model
     *
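The second hunk adds image generation to the facade's public surface, next to chat, completion, and embedding. A usage sketch under stated assumptions: the method name and the `ImagePrompt`/`ImagePromptResult` type names are confirmed above, but the prompt's fields are not visible in this diff, so nothing is invented beyond a `declare`:

```typescript
declare const tools: MultipleLlmExecutionTools; // instance obtained elsewhere
declare const prompt: ImagePrompt; // exact shape not shown in this diff

const result = await tools.callImageGenerationModel(prompt);
// `result.usage` exists on prompt results; the countTotalUsage hunk below reads it.
console.info(result.usage);
```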
@@ -46,6 +46,7 @@ export declare class RemoteLlmExecutionTools<TCustomOptions = undefined> impleme
     private callCommonModel;
 }
 /**
+ * TODO: !!!! Deprecate pipeline server and all of its components
  * TODO: Maybe use `$exportJson`
  * TODO: [🧠][🛍] Maybe not `isAnonymous: boolean` BUT `mode: 'ANONYMOUS'|'COLLECTION'`
  * TODO: [🍓] Allow to list compatible models with each variant
@@ -0,0 +1,49 @@
+import { Arrayable } from 'type-fest';
+import { really_any } from '../_packages/types.index';
+import { id, string_date_iso8601, string_markdown } from './typeAliases';
+/**
+ * A generic message structure for various communication channels
+ */
+export type Message<TParticipant> = {
+    /**
+     * Unique identifier of the message
+     */
+    readonly id?: id;
+    /**
+     * Date when the message was created
+     */
+    readonly createdAt?: Date | string_date_iso8601;
+    /**
+     * The communication channel of the message
+     */
+    readonly channel?: 'PROMPTBOOK_CHAT' | 'EMAIL' | 'SMS' | 'WHATSAPP' | 'TELEGRAM' | 'SIGNAL' | string | 'UNKNOWN';
+    /**
+     * Is the message sent from the Promptbook or to the Promptbook
+     */
+    readonly direction?: 'INBOUND' | 'OUTBOUND' | 'INTERNAL' | 'INITIAL';
+    /**
+     * Who sent the message
+     */
+    readonly sender: TParticipant;
+    /**
+     * Who are the recipients of the message
+     */
+    readonly recipients?: Readonly<Arrayable<TParticipant>>;
+    /**
+     * The content of the message as markdown
+     *
+     * Note: We are converting all message content to markdown for consistency
+     */
+    readonly content: string_markdown;
+    /**
+     * The thread identifier the message belongs to
+     *
+     * - `null` means the message is not part of any thread
+     * - `undefined` means that we don't know if the message is part of a thread or not
+     */
+    readonly threadId?: id | null;
+    /**
+     * Arbitrary metadata associated with the message
+     */
+    readonly metadata?: Readonly<Record<string, really_any>>;
+};
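In the new `Message` type only `sender` and `content` are required; everything else is optional, and the `channel` union deliberately includes bare `string` so unfamiliar channels still type-check. A construction sketch (the participant type and all concrete values here are invented for illustration):

```typescript
import type { Message } from '@promptbook/core';

const reply: Message<string> = {
    channel: 'EMAIL',
    direction: 'OUTBOUND',
    sender: 'agent@example.com',
    recipients: 'alice@example.com', // Arrayable: one value or an array
    content: 'Thanks, **received**.',
    threadId: null, // explicitly not part of any thread
    metadata: { campaign: 'onboarding' },
};
```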
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
 export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
 /**
  * Represents the version string of the Promptbook engine.
- * It follows semantic versioning (e.g., `0.104.0-1`).
+ * It follows semantic versioning (e.g., `0.104.0-3`).
  *
  * @generated
  */
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
     "name": "@promptbook/core",
-    "version": "0.104.0-2",
+    "version": "0.104.0-4",
     "description": "Promptbook: Turn your company's scattered knowledge into AI ready books",
     "private": false,
     "sideEffects": false,
package/umd/index.umd.js CHANGED
@@ -28,7 +28,7 @@
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.104.0-2';
+const PROMPTBOOK_ENGINE_VERSION = '0.104.0-4';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -3745,6 +3745,15 @@
             return promptResult;
         };
     }
+    if (llmTools.callImageGenerationModel !== undefined) {
+        proxyTools.callImageGenerationModel = async (prompt) => {
+            // console.info('[🚕] callImageGenerationModel through countTotalUsage');
+            const promptResult = await llmTools.callImageGenerationModel(prompt);
+            totalUsage = addUsage(totalUsage, promptResult.usage);
+            spending.next(promptResult.usage);
+            return promptResult;
+        };
+    }
     // <- Note: [🤖]
     return proxyTools;
 }
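The second hunk above teaches the `countTotalUsage` proxy to intercept image generation exactly like the other variants: delegate to the wrapped tools, fold the call's `usage` into the running total via `addUsage`, and emit it on the `spending` observable. A minimal sketch of that wrapping pattern, with the bookkeeping reduced to a single callback (`wrapWithUsageCounting` and `onUsage` are hypothetical names; `addUsage`, `totalUsage`, and `spending` are the real ones in the hunk):

```typescript
// Minimal sketch of the proxy pattern above; not the library's actual API.
type UsageCarrier = { usage: unknown }; // the usage shape is not shown in this diff

function wrapWithUsageCounting<TPrompt, TResult extends UsageCarrier>(
    call: (prompt: TPrompt) => Promise<TResult>,
    onUsage: (usage: unknown) => void, // stands in for addUsage + spending.next
): (prompt: TPrompt) => Promise<TResult> {
    return async (prompt) => {
        const promptResult = await call(prompt);
        onUsage(promptResult.usage);
        return promptResult;
    };
}
```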
@@ -3854,6 +3863,12 @@
     callEmbeddingModel(prompt) {
         return this.callCommonModel(prompt);
     }
+    /**
+     * Calls the best available image generation model
+     */
+    callImageGenerationModel(prompt) {
+        return this.callCommonModel(prompt);
+    }
     // <- Note: [🤖]
     /**
      * Calls the best available model
@@ -3880,6 +3895,11 @@
                     continue llm;
                 }
                 return await llmExecutionTools.callEmbeddingModel(prompt);
+            case 'IMAGE_GENERATION':
+                if (llmExecutionTools.callImageGenerationModel === undefined) {
+                    continue llm;
+                }
+                return await llmExecutionTools.callImageGenerationModel(prompt);
             // <- case [🤖]:
             default:
                 throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}" in ${llmExecutionTools.title}`);
@@ -6305,8 +6325,9 @@
             $ongoingTaskResult.$resultString = $ongoingTaskResult.$completionResult.content;
             break variant;
         case 'EMBEDDING':
+        case 'IMAGE_GENERATION':
             throw new PipelineExecutionError(spaceTrim$1.spaceTrim((block) => `
-                Embedding model can not be used in pipeline
+                ${modelRequirements.modelVariant} model can not be used in pipeline

                 This should be catched during parsing

@@ -11045,17 +11066,64 @@
     };
 }
 const lines = agentSource.split('\n');
-const agentName = (((_a = lines[0]) === null || _a === void 0 ? void 0 : _a.trim()) || null);
+let agentName = null;
+let agentNameLineIndex = -1;
+// Find the agent name: first non-empty line that is not a commitment and not a horizontal line
+for (let i = 0; i < lines.length; i++) {
+    const line = lines[i];
+    if (line === undefined) {
+        continue;
+    }
+    const trimmed = line.trim();
+    if (!trimmed) {
+        continue;
+    }
+    const isHorizontal = HORIZONTAL_LINE_PATTERN.test(line);
+    if (isHorizontal) {
+        continue;
+    }
+    let isCommitment = false;
+    for (const definition of COMMITMENT_REGISTRY) {
+        const typeRegex = definition.createTypeRegex();
+        const match = typeRegex.exec(trimmed);
+        if (match && ((_a = match.groups) === null || _a === void 0 ? void 0 : _a.type)) {
+            isCommitment = true;
+            break;
+        }
+    }
+    if (!isCommitment) {
+        agentName = trimmed;
+        agentNameLineIndex = i;
+        break;
+    }
+}
 const commitments = [];
 const nonCommitmentLines = [];
-// Always add the first line (agent name) to non-commitment lines
-if (lines[0] !== undefined) {
-    nonCommitmentLines.push(lines[0]);
+// Add lines before agentName that are horizontal lines (they are non-commitment)
+for (let i = 0; i < agentNameLineIndex; i++) {
+    const line = lines[i];
+    if (line === undefined) {
+        continue;
+    }
+    const trimmed = line.trim();
+    if (!trimmed) {
+        continue;
+    }
+    const isHorizontal = HORIZONTAL_LINE_PATTERN.test(line);
+    if (isHorizontal) {
+        nonCommitmentLines.push(line);
+    }
+    // Note: Commitments before agentName are not added to nonCommitmentLines
+}
+// Add the agent name line to non-commitment lines
+if (agentNameLineIndex >= 0) {
+    nonCommitmentLines.push(lines[agentNameLineIndex]);
 }
 // Parse commitments with multiline support
 let currentCommitment = null;
-// Process lines starting from the second line (skip agent name)
-for (let i = 1; i < lines.length; i++) {
+// Process lines starting from after the agent name line
+const startIndex = agentNameLineIndex >= 0 ? agentNameLineIndex + 1 : 0;
+for (let i = startIndex; i < lines.length; i++) {
     const line = lines[i];
     if (line === undefined) {
         continue;
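This is the most substantive behavioral change in the release. Previously `agentName` was simply `lines[0].trim()`, so an agent source that opened with a horizontal rule or a commitment line got that line as its name. The new scan takes the first non-empty line that matches neither `HORIZONTAL_LINE_PATTERN` nor any commitment regex from `COMMITMENT_REGISTRY`, and commitment parsing resumes after that line. A before/after sketch (the `PERSONA` keyword is illustrative; the actual registry contents are not in this diff):

```typescript
const agentSource = [
    '---',                           // horizontal rule: skipped, kept as a non-commitment line
    'PERSONA A helpful assistant',   // commitment-shaped line: no longer mistaken for the name
    'Alice the Librarian',           // first "plain" line
].join('\n');

// Old behavior: agentName === '---'
// New behavior: agentName === 'Alice the Librarian'
```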
@@ -16872,6 +16940,9 @@
         case 'EMBEDDING':
             promptResult = await llmTools.callEmbeddingModel(prompt);
             break variant;
+        case 'IMAGE_GENERATION':
+            promptResult = await llmTools.callImageGenerationModel(prompt);
+            break variant;
         // <- case [🤖]:
         default:
             throw new PipelineExecutionError(`Unknown model variant "${prompt.modelRequirements.modelVariant}"`);
@@ -16959,6 +17030,11 @@
             return /* not await */ callCommonModel(prompt);
         };
     }
+    if (llmTools.callImageGenerationModel !== undefined) {
+        proxyTools.callImageGenerationModel = async (prompt) => {
+            return /* not await */ callCommonModel(prompt);
+        };
+    }
     // <- Note: [🤖]
     return proxyTools;
 }
@@ -16997,6 +17073,11 @@
             throw new LimitReachedError('Cannot call `callEmbeddingModel` because you are not allowed to spend any cost');
         };
     }
+    if (proxyTools.callImageGenerationModel !== undefined) {
+        proxyTools.callImageGenerationModel = async (prompt) => {
+            throw new LimitReachedError('Cannot call `callImageGenerationModel` because you are not allowed to spend any cost');
+        };
+    }
     // <- Note: [🤖]
     return proxyTools;
 }
@@ -18068,7 +18149,7 @@
     let threadMessages = [];
     if ('thread' in prompt && Array.isArray(prompt.thread)) {
         threadMessages = prompt.thread.map((msg) => ({
-            role: msg.role === 'assistant' ? 'assistant' : 'user',
+            role: msg.sender === 'assistant' ? 'assistant' : 'user',
             content: msg.content,
         }));
     }
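Finally, thread replay is aligned with the new `Message` shape: the participant is read from `msg.sender` rather than the old `msg.role`. A sender of exactly `'assistant'` maps to the OpenAI-style `assistant` role and everything else to `user`, as in this self-contained sketch of the same mapping:

```typescript
const thread = [
    { sender: 'assistant', content: 'How can I help?' },
    { sender: 'user-1', content: 'Draw me a fox.' }, // any non-'assistant' sender becomes 'user'
];

const threadMessages = thread.map((msg) => ({
    role: msg.sender === 'assistant' ? ('assistant' as const) : ('user' as const),
    content: msg.content,
}));
```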