@promptbook/wizard 0.102.0-3 → 0.102.0-5
This diff shows the published contents of the two package versions as they appear in their public registries and is provided for informational purposes only.
- package/esm/index.es.js +89 -39
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/components.index.d.ts +2 -0
- package/esm/typings/src/book-components/BookEditor/utils.d.ts +8 -0
- package/esm/typings/src/book-components/Chat/save/index.d.ts +6 -0
- package/esm/typings/src/book-components/Chat/save/pdf/pdfSaveFormatDefinition.d.ts +12 -0
- package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionTools.d.ts +5 -1
- package/esm/typings/src/types/Prompt.d.ts +5 -0
- package/esm/typings/src/version.d.ts +1 -1
- package/package.json +2 -2
- package/umd/index.umd.js +89 -39
- package/umd/index.umd.js.map +1 -1
package/esm/typings/src/_packages/components.index.d.ts CHANGED

@@ -33,6 +33,7 @@ import { htmlSaveFormatDefinition } from '../book-components/Chat/save/html/htmlSaveFormatDefinition';
 import { CHAT_SAVE_FORMATS } from '../book-components/Chat/save/index';
 import { jsonSaveFormatDefinition } from '../book-components/Chat/save/json/jsonSaveFormatDefinition';
 import { mdSaveFormatDefinition } from '../book-components/Chat/save/markdown/mdSaveFormatDefinition';
+import { pdfSaveFormatDefinition } from '../book-components/Chat/save/pdf/pdfSaveFormatDefinition';
 import { txtSaveFormatDefinition } from '../book-components/Chat/save/text/txtSaveFormatDefinition';
 import type { ChatMessage } from '../book-components/Chat/types/ChatMessage';
 import type { ChatParticipant } from '../book-components/Chat/types/ChatParticipant';
@@ -83,6 +84,7 @@ export { htmlSaveFormatDefinition };
 export { CHAT_SAVE_FORMATS };
 export { jsonSaveFormatDefinition };
 export { mdSaveFormatDefinition };
+export { pdfSaveFormatDefinition };
 export { txtSaveFormatDefinition };
 export type { ChatMessage };
 export type { ChatParticipant };
package/esm/typings/src/book-components/BookEditor/utils.d.ts ADDED

@@ -0,0 +1,8 @@
+import { TODO_any } from '../../_packages/types.index';
+/**
+ * @private
+ */
+export declare function debounce<T extends (...args: TODO_any[]) => void>(fn: T, delay: number): (...args: Parameters<T>) => void;
+/**
+ * Note: [💞] Ignore a discrepancy between file name and entity name
+ * TODO: !!! remove this file */
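A minimal sketch of how a function with this declared signature is typically used, assuming the usual trailing-edge debounce behavior. The import path is illustrative only: the symbol is marked `@private`, so it is not a documented entry point.

    // Illustrative import; `debounce` is @private in this package
    import { debounce } from './book-components/BookEditor/utils';

    // Re-render a preview at most once per 300 ms of typing silence
    const renderPreview = (source: string): void => {
        console.log(`rendering preview for ${source.length} characters`);
    };
    const debouncedRender = debounce(renderPreview, 300);

    debouncedRender('a');
    debouncedRender('ab');
    debouncedRender('abc'); // <- only this trailing call reaches renderPreview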
package/esm/typings/src/book-components/Chat/save/index.d.ts CHANGED

@@ -27,6 +27,12 @@ export declare const CHAT_SAVE_FORMATS: readonly [{
     readonly getContent: (messages: import("../types/ChatMessage").ChatMessage[]) => string;
     readonly mimeType: "text/html";
     readonly fileExtension: "html";
+}, {
+    readonly formatName: "pdf";
+    readonly label: "PDF";
+    readonly getContent: (messages: import("../types/ChatMessage").ChatMessage[]) => string;
+    readonly mimeType: "application/pdf";
+    readonly fileExtension: "pdf";
 }];
 /**
  * Note: [💞] Ignore a discrepancy between file name and entity name
package/esm/typings/src/book-components/Chat/save/pdf/pdfSaveFormatDefinition.d.ts ADDED

@@ -0,0 +1,12 @@
+/**
+ * PDF export plugin
+ *
+ * @public exported from `@promptbook/components`
+ */
+export declare const pdfSaveFormatDefinition: {
+    readonly formatName: "pdf";
+    readonly label: "PDF";
+    readonly getContent: (messages: import("../../types/ChatMessage").ChatMessage[]) => string;
+    readonly mimeType: "application/pdf";
+    readonly fileExtension: "pdf";
+};
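Both `CHAT_SAVE_FORMATS` and `pdfSaveFormatDefinition` are public exports, so a consumer can resolve a format by its `formatName` and hand the result to the browser. A hedged sketch under those assumptions — the `saveChat` helper is hypothetical, and note that per these typings `getContent` returns a string even for the PDF format:

    import { CHAT_SAVE_FORMATS } from '@promptbook/components';
    import type { ChatMessage } from '@promptbook/components';

    // Hypothetical browser-side helper built on the exported definitions
    function saveChat(messages: ChatMessage[], formatName: string): void {
        const format = CHAT_SAVE_FORMATS.find((candidate) => candidate.formatName === formatName);
        if (!format) {
            throw new Error(`Unknown chat save format "${formatName}"`);
        }
        const blob = new Blob([format.getContent(messages)], { type: format.mimeType });
        const link = document.createElement('a');
        link.href = URL.createObjectURL(blob);
        link.download = `chat.${format.fileExtension}`;
        link.click();
        URL.revokeObjectURL(link.href);
    }

    // saveChat(messages, 'pdf'); // <- now resolves to pdfSaveFormatDefinition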
package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionTools.d.ts CHANGED

@@ -46,7 +46,7 @@ export declare abstract class OpenAiCompatibleExecutionTools implements LlmExecutionTools {
     /**
      * Calls OpenAI compatible API to use a chat model.
      */
-    callChatModel(prompt:
+    callChatModel(prompt: Prompt): Promise<ChatPromptResult>;
     /**
      * Internal method that handles parameter retry for chat model calls
      */
@@ -63,6 +63,10 @@ export declare abstract class OpenAiCompatibleExecutionTools implements LlmExecutionTools {
      * Calls OpenAI compatible API to use a embedding model
      */
     callEmbeddingModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements'>): Promise<EmbeddingPromptResult>;
+    /**
+     * Internal method that handles parameter retry for embedding model calls
+     */
+    private callEmbeddingModelWithRetry;
     /**
      * Get the model that should be used as default
      */
package/esm/typings/src/types/Prompt.d.ts CHANGED

@@ -1,4 +1,5 @@
 import type { FormatCommand } from '../commands/FORMAT/FormatCommand';
+import type { ChatMessage } from '../book-components/Chat/types/ChatMessage';
 import type { Expectations } from '../pipeline/PipelineJson/Expectations';
 import type { ChatModelRequirements } from './ModelRequirements';
 import type { CompletionModelRequirements } from './ModelRequirements';
@@ -38,6 +39,10 @@ export type ChatPrompt = CommonPrompt & {
      * Requirements for chat model
      */
     modelRequirements: ChatModelRequirements;
+    /**
+     * Optional chat thread (history of previous messages)
+     */
+    thread?: ChatMessage[];
 };
 /**
  * Embedding prompt
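With the new optional `thread` field, a `ChatPrompt` can carry prior conversation turns alongside the current `content`. A sketch with illustrative values; the `ChatMessage` objects show only the `role`/`content` fields that the OpenAI-compatible adapter below actually reads, and the real type may require more:

    const chatPrompt = {
        content: 'And how do I export that chat as PDF?',
        parameters: {},
        modelRequirements: {
            modelVariant: 'CHAT',
        },
        // History of previous turns, oldest first
        thread: [
            { role: 'user', content: 'How do I export a chat?' },
            { role: 'assistant', content: 'Pick one of the CHAT_SAVE_FORMATS definitions.' },
        ],
    };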
package/esm/typings/src/version.d.ts CHANGED

@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
 export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
 /**
  * Represents the version string of the Promptbook engine.
- * It follows semantic versioning (e.g., `0.102.0-
+ * It follows semantic versioning (e.g., `0.102.0-4`).
  *
  * @generated
  */
package/package.json CHANGED

@@ -1,6 +1,6 @@
 {
     "name": "@promptbook/wizard",
-    "version": "0.102.0-3",
+    "version": "0.102.0-5",
     "description": "Promptbook: Run AI apps in plain human language across multiple models and platforms",
     "private": false,
     "sideEffects": false,
@@ -95,7 +95,7 @@
     "module": "./esm/index.es.js",
     "typings": "./esm/typings/src/_packages/wizard.index.d.ts",
     "peerDependencies": {
-        "@promptbook/core": "0.102.0-3"
+        "@promptbook/core": "0.102.0-5"
     },
     "dependencies": {
         "@ai-sdk/deepseek": "0.1.6",
package/umd/index.umd.js CHANGED

@@ -48,7 +48,7 @@
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.102.0-3';
+const PROMPTBOOK_ENGINE_VERSION = '0.102.0-5';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -4794,6 +4794,14 @@
         // <- TODO: [🚸] Not all models are compatible with JSON mode
         // > 'response_format' of type 'json_object' is not supported with this model.
         const rawPromptContent = templateParameters(content, { ...parameters, modelName });
+        // Convert thread to OpenAI format if present
+        let threadMessages = [];
+        if ('thread' in prompt && Array.isArray(prompt.thread)) {
+            threadMessages = prompt.thread.map((msg) => ({
+                role: msg.role === 'assistant' ? 'assistant' : 'user',
+                content: msg.content,
+            }));
+        }
         const rawRequest = {
             ...modelSettings,
             messages: [
@@ -4805,6 +4813,7 @@
                     content: currentModelRequirements.systemMessage,
                 },
             ]),
+            ...threadMessages,
             {
                 role: 'user',
                 content: rawPromptContent,
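Taken together, the two hunks above splice the converted thread between the optional system message and the current user turn. For the sketch prompt shown earlier, the request body would contain (values illustrative):

    messages: [
        { role: 'system', content: currentModelRequirements.systemMessage }, // if present
        { role: 'user', content: 'How do I export a chat?' },                // ...threadMessages
        { role: 'assistant', content: 'Pick one of the CHAT_SAVE_FORMATS definitions.' },
        { role: 'user', content: rawPromptContent },                         // the current turn
    ],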
@@ -5013,16 +5022,22 @@
      * Calls OpenAI compatible API to use a embedding model
      */
     async callEmbeddingModel(prompt) {
+        return this.callEmbeddingModelWithRetry(prompt, prompt.modelRequirements);
+    }
+    /**
+     * Internal method that handles parameter retry for embedding model calls
+     */
+    async callEmbeddingModelWithRetry(prompt, currentModelRequirements) {
         if (this.options.isVerbose) {
-            console.info(`🖋 ${this.title} embedding call`, { prompt });
+            console.info(`🖋 ${this.title} embedding call`, { prompt, currentModelRequirements });
         }
-        const { content, parameters
+        const { content, parameters } = prompt;
         const client = await this.getClient();
         // TODO: [☂] Use here more modelRequirements
-        if (
+        if (currentModelRequirements.modelVariant !== 'EMBEDDING') {
             throw new PipelineExecutionError('Use embed only for EMBEDDING variant');
         }
-        const modelName =
+        const modelName = currentModelRequirements.modelName || this.getDefaultEmbeddingModel().modelName;
         const rawPromptContent = templateParameters(content, { ...parameters, modelName });
         const rawRequest = {
             input: rawPromptContent,
@@ -5032,44 +5047,79 @@
         if (this.options.isVerbose) {
             console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
         }
-
-
-
-
+        try {
+            const rawResponse = await this.limiter
+                .schedule(() => this.makeRequestWithNetworkRetry(() => client.embeddings.create(rawRequest)))
+                .catch((error) => {
+                assertsError(error);
+                if (this.options.isVerbose) {
+                    console.info(colors__default["default"].bgRed('error'), error);
+                }
+                throw error;
+            });
             if (this.options.isVerbose) {
-                console.info(colors__default["default"].
+                console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
             }
-
-
-
-
+            const complete = $getCurrentDate();
+            if (rawResponse.data.length !== 1) {
+                throw new PipelineExecutionError(`Expected exactly 1 data item in response, got ${rawResponse.data.length}`);
+            }
+            const resultContent = rawResponse.data[0].embedding;
+            const usage = this.computeUsage(content || '', '',
+            // <- Note: Embedding does not have result content
+            rawResponse);
+            return exportJson({
+                name: 'promptResult',
+                message: `Result of \`OpenAiCompatibleExecutionTools.callEmbeddingModel\``,
+                order: [],
+                value: {
+                    content: resultContent,
+                    modelName: rawResponse.model || modelName,
+                    timing: {
+                        start,
+                        complete,
+                    },
+                    usage,
+                    rawPromptContent,
+                    rawRequest,
+                    rawResponse,
+                    // <- [🗯]
+                },
+            });
         }
-
-
-
+        catch (error) {
+            assertsError(error);
+            // Check if this is an unsupported parameter error
+            if (!isUnsupportedParameterError(error)) {
+                throw error;
+            }
+            // Parse which parameter is unsupported
+            const unsupportedParameter = parseUnsupportedParameterError(error.message);
+            if (!unsupportedParameter) {
+                if (this.options.isVerbose) {
+                    console.warn(colors__default["default"].bgYellow('Warning'), 'Could not parse unsupported parameter from error:', error.message);
+                }
+                throw error;
+            }
+            // Create a unique key for this model + parameter combination to prevent infinite loops
+            const retryKey = `${modelName}-${unsupportedParameter}`;
+            if (this.retriedUnsupportedParameters.has(retryKey)) {
+                // Already retried this parameter, throw the error
+                if (this.options.isVerbose) {
+                    console.warn(colors__default["default"].bgRed('Error'), `Parameter '${unsupportedParameter}' for model '${modelName}' already retried once, throwing error:`, error.message);
+                }
+                throw error;
+            }
+            // Mark this parameter as retried
+            this.retriedUnsupportedParameters.add(retryKey);
+            // Log warning in verbose mode
+            if (this.options.isVerbose) {
+                console.warn(colors__default["default"].bgYellow('Warning'), `Removing unsupported parameter '${unsupportedParameter}' for model '${modelName}' and retrying request`);
+            }
+            // Remove the unsupported parameter and retry
+            const modifiedModelRequirements = removeUnsupportedModelRequirement(currentModelRequirements, unsupportedParameter);
+            return this.callEmbeddingModelWithRetry(prompt, modifiedModelRequirements);
+        }
     }
-        const resultContent = rawResponse.data[0].embedding;
-        const usage = this.computeUsage(content || '', '',
-        // <- Note: Embedding does not have result content
-        rawResponse);
-        return exportJson({
-            name: 'promptResult',
-            message: `Result of \`OpenAiCompatibleExecutionTools.callEmbeddingModel\``,
-            order: [],
-            value: {
-                content: resultContent,
-                modelName: rawResponse.model || modelName,
-                timing: {
-                    start,
-                    complete,
-                },
-                usage,
-                rawPromptContent,
-                rawRequest,
-                rawResponse,
-                // <- [🗯]
-            },
-        });
     }
     // <- Note: [🤖] callXxxModel
     /**