@promptbook/cli 0.104.0-3 → 0.104.0-4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/apps/agents-server/src/database/migrate.ts +34 -1
- package/apps/agents-server/src/database/migrations/2025-12-0402-message-table.sql +42 -0
- package/apps/agents-server/src/message-providers/email/_common/Email.ts +73 -0
- package/apps/agents-server/src/message-providers/email/_common/utils/TODO.txt +1 -0
- package/apps/agents-server/src/message-providers/email/_common/utils/parseEmailAddress.test.ts.todo +108 -0
- package/apps/agents-server/src/message-providers/email/_common/utils/parseEmailAddress.ts +62 -0
- package/apps/agents-server/src/message-providers/email/_common/utils/parseEmailAddresses.test.ts.todo +117 -0
- package/apps/agents-server/src/message-providers/email/_common/utils/parseEmailAddresses.ts +19 -0
- package/apps/agents-server/src/message-providers/email/_common/utils/stringifyEmailAddress.test.ts.todo +119 -0
- package/apps/agents-server/src/message-providers/email/_common/utils/stringifyEmailAddress.ts +19 -0
- package/apps/agents-server/src/message-providers/email/_common/utils/stringifyEmailAddresses.test.ts.todo +74 -0
- package/apps/agents-server/src/message-providers/email/_common/utils/stringifyEmailAddresses.ts +14 -0
- package/apps/agents-server/src/message-providers/email/sendgrid/SendgridMessageProvider.ts +44 -0
- package/apps/agents-server/src/message-providers/email/zeptomail/ZeptomailMessageProvider.ts +43 -0
- package/apps/agents-server/src/message-providers/index.ts +13 -0
- package/apps/agents-server/src/message-providers/interfaces/MessageProvider.ts +11 -0
- package/apps/agents-server/src/utils/messages/sendMessage.ts +91 -0
- package/apps/agents-server/src/utils/normalization/filenameToPrompt.test.ts +36 -0
- package/apps/agents-server/src/utils/normalization/filenameToPrompt.ts +6 -2
- package/esm/index.es.js +32 -2
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/llm-providers/_multiple/MultipleLlmExecutionTools.d.ts +6 -2
- package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +1 -0
- package/esm/typings/src/version.d.ts +1 -1
- package/package.json +1 -1
- package/umd/index.umd.js +32 -2
- package/umd/index.umd.js.map +1 -1
package/esm/typings/src/llm-providers/_multiple/MultipleLlmExecutionTools.d.ts CHANGED

@@ -1,8 +1,8 @@
 import type { ChatParticipant } from '../../book-components/Chat/types/ChatParticipant';
 import type { AvailableModel } from '../../execution/AvailableModel';
 import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
-import type { ChatPromptResult, CompletionPromptResult, EmbeddingPromptResult, PromptResult } from '../../execution/PromptResult';
-import type { ChatPrompt, CompletionPrompt, EmbeddingPrompt, Prompt } from '../../types/Prompt';
+import type { ChatPromptResult, CompletionPromptResult, EmbeddingPromptResult, ImagePromptResult, PromptResult } from '../../execution/PromptResult';
+import type { ChatPrompt, CompletionPrompt, EmbeddingPrompt, ImagePrompt, Prompt } from '../../types/Prompt';
 import type { string_markdown, string_markdown_text, string_title } from '../../types/typeAliases';
 /**
  * Multiple LLM Execution Tools is a proxy server that uses multiple execution tools internally and exposes the executor interface externally.

@@ -43,6 +43,10 @@ export declare class MultipleLlmExecutionTools implements LlmExecutionTools {
      * Calls the best available embedding model
      */
     callEmbeddingModel(prompt: EmbeddingPrompt): Promise<EmbeddingPromptResult>;
+    /**
+     * Calls the best available embedding model
+     */
+    callImageGenerationModel(prompt: ImagePrompt): Promise<ImagePromptResult>;
     /**
      * Calls the best available model
      *
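
The typings above only declare the new image-generation capability. As a rough illustration of how a consumer might call it, here is a minimal TypeScript sketch, not taken from the package; ImagePromptLike and ImageCapableTools are assumed, simplified stand-ins for the real Promptbook types.

// Minimal sketch, not from the package: calling the newly declared
// callImageGenerationModel. ImagePromptLike is an assumed, simplified
// stand-in for the real ImagePrompt type.
type ImagePromptLike = {
    title: string;
    modelRequirements: { modelVariant: 'IMAGE_GENERATION' };
    content: string;
};

type ImageCapableTools = {
    callImageGenerationModel?(prompt: ImagePromptLike): Promise<unknown>;
};

async function generateImage(llmTools: ImageCapableTools, description: string) {
    // The method is optional, so check for it first, mirroring the
    // `!== undefined` guards added elsewhere in this diff.
    if (llmTools.callImageGenerationModel === undefined) {
        throw new Error('These LlmExecutionTools do not support IMAGE_GENERATION');
    }
    return await llmTools.callImageGenerationModel({
        title: 'Generated image',
        modelRequirements: { modelVariant: 'IMAGE_GENERATION' },
        content: description,
    });
}
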
package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts CHANGED

@@ -46,6 +46,7 @@ export declare class RemoteLlmExecutionTools<TCustomOptions = undefined> impleme
     private callCommonModel;
 }
 /**
+ * TODO: !!!! Deprecate pipeline server and all of its components
  * TODO: Maybe use `$exportJson`
  * TODO: [🧠][🛍] Maybe not `isAnonymous: boolean` BUT `mode: 'ANONYMOUS'|'COLLECTION'`
  * TODO: [🍓] Allow to list compatible models with each variant
package/esm/typings/src/version.d.ts CHANGED

@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
 export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
 /**
  * Represents the version string of the Promptbook engine.
- * It follows semantic versioning (e.g., `0.104.0-
+ * It follows semantic versioning (e.g., `0.104.0-3`).
  *
  * @generated
  */
package/package.json CHANGED

package/umd/index.umd.js CHANGED
@@ -56,7 +56,7 @@
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.104.0-
+const PROMPTBOOK_ENGINE_VERSION = '0.104.0-4';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name

@@ -3730,6 +3730,7 @@
 }
 }
 /**
+ * TODO: !!!! Deprecate pipeline server and all of its components
  * TODO: Maybe use `$exportJson`
  * TODO: [🧠][🛍] Maybe not `isAnonymous: boolean` BUT `mode: 'ANONYMOUS'|'COLLECTION'`
  * TODO: [🍓] Allow to list compatible models with each variant
@@ -4258,6 +4259,9 @@
             case 'EMBEDDING':
                 promptResult = await llmTools.callEmbeddingModel(prompt);
                 break variant;
+            case 'IMAGE_GENERATION':
+                promptResult = await llmTools.callImageGenerationModel(prompt);
+                break variant;
             // <- case [🤖]:
             default:
                 throw new PipelineExecutionError(`Unknown model variant "${prompt.modelRequirements.modelVariant}"`);
@@ -4345,6 +4349,11 @@
             return /* not await */ callCommonModel(prompt);
         };
     }
+    if (llmTools.callImageGenerationModel !== undefined) {
+        proxyTools.callImageGenerationModel = async (prompt) => {
+            return /* not await */ callCommonModel(prompt);
+        };
+    }
     // <- Note: [🤖]
     return proxyTools;
 }
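
In the hunk above, the proxy only gains callImageGenerationModel when the wrapped tools provide it, and routes the call through the shared common-call path. A minimal sketch of that pattern, not the bundle's actual code; the names WrappedTools and callCommon are illustrative, and the prompt type is simplified.

// Minimal sketch of the optional-capability proxy pattern used in this hunk.
type PromptLike = { modelRequirements: { modelVariant: string } };
type WrappedTools = {
    callImageGenerationModel?(prompt: PromptLike): Promise<unknown>;
};

function makeProxy(llmTools: WrappedTools, callCommon: (prompt: PromptLike) => Promise<unknown>) {
    const proxyTools: WrappedTools = {};
    if (llmTools.callImageGenerationModel !== undefined) {
        // Preserve the "optional capability" contract: if the wrapped tools
        // lack the method, the proxy omits it as well.
        proxyTools.callImageGenerationModel = (prompt) => callCommon(prompt);
    }
    return proxyTools;
}
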
@@ -4533,6 +4542,15 @@
             return promptResult;
         };
     }
+    if (llmTools.callImageGenerationModel !== undefined) {
+        proxyTools.callImageGenerationModel = async (prompt) => {
+            // console.info('[🚕] callImageGenerationModel through countTotalUsage');
+            const promptResult = await llmTools.callImageGenerationModel(prompt);
+            totalUsage = addUsage(totalUsage, promptResult.usage);
+            spending.next(promptResult.usage);
+            return promptResult;
+        };
+    }
     // <- Note: [🤖]
     return proxyTools;
 }
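
The hunk above wires image generation into the usage-counting wrapper. A minimal sketch of that accounting pattern follows; it is not the bundle's actual code, the Usage shape and addUsage helper are simplified stand-ins, and the spending observable is omitted.

// Minimal sketch of wrapping an optional call to accumulate usage.
type Usage = { price: number };
type ImageResult = { usage: Usage };
type Tools = { callImageGenerationModel?(prompt: unknown): Promise<ImageResult> };

function withUsageCounting(llmTools: Tools) {
    let totalUsage: Usage = { price: 0 };
    // addUsage is a simplified stand-in for the bundle's usage-merging helper.
    const addUsage = (a: Usage, b: Usage): Usage => ({ price: a.price + b.price });

    const proxyTools: Tools = {};
    if (llmTools.callImageGenerationModel !== undefined) {
        proxyTools.callImageGenerationModel = async (prompt) => {
            const promptResult = await llmTools.callImageGenerationModel!(prompt);
            totalUsage = addUsage(totalUsage, promptResult.usage); // accumulate spend per call
            return promptResult;
        };
    }
    return { proxyTools, getTotalUsage: () => totalUsage };
}
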
@@ -4655,6 +4673,12 @@
     callEmbeddingModel(prompt) {
         return this.callCommonModel(prompt);
     }
+    /**
+     * Calls the best available embedding model
+     */
+    callImageGenerationModel(prompt) {
+        return this.callCommonModel(prompt);
+    }
     // <- Note: [🤖]
     /**
      * Calls the best available model
@@ -4681,6 +4705,11 @@
                         continue llm;
                     }
                     return await llmExecutionTools.callEmbeddingModel(prompt);
+                case 'IMAGE_GENERATION':
+                    if (llmExecutionTools.callImageGenerationModel === undefined) {
+                        continue llm;
+                    }
+                    return await llmExecutionTools.callImageGenerationModel(prompt);
                 // <- case [🤖]:
                 default:
                     throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}" in ${llmExecutionTools.title}`);
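
The hunk above extends the fallback loop in MultipleLlmExecutionTools: each underlying tool is tried in order, and tools that do not implement callImageGenerationModel are skipped. A minimal sketch of that capability-based fallback, with simplified types and an illustrative function name:

// Minimal sketch of trying tools in order and skipping ones without image support.
type ImageTools = {
    title: string;
    callImageGenerationModel?(prompt: unknown): Promise<unknown>;
};

async function callFirstCapableTool(tools: ImageTools[], prompt: unknown) {
    for (const llmExecutionTools of tools) {
        if (llmExecutionTools.callImageGenerationModel === undefined) {
            continue; // this tool has no IMAGE_GENERATION support, try the next one
        }
        return await llmExecutionTools.callImageGenerationModel(prompt);
    }
    throw new Error('No LlmExecutionTools capable of IMAGE_GENERATION were available');
}

The `!== undefined` guards throughout this diff suggest the method is optional on the LlmExecutionTools interface: a provider without image support simply omits it and the loop moves on to the next tool.
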
@@ -7603,8 +7632,9 @@
                     $ongoingTaskResult.$resultString = $ongoingTaskResult.$completionResult.content;
                     break variant;
                 case 'EMBEDDING':
+                case 'IMAGE_GENERATION':
                     throw new PipelineExecutionError(spaceTrim$1.spaceTrim((block) => `
-
+                        ${modelRequirements.modelVariant} model can not be used in pipeline

                         This should be catched during parsing
