@promptbook/wizard 0.102.0-4 → 0.102.0-5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/esm/index.es.js +10 -1
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/book-components/BookEditor/utils.d.ts +8 -0
- package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionTools.d.ts +1 -1
- package/esm/typings/src/types/Prompt.d.ts +5 -0
- package/esm/typings/src/version.d.ts +1 -1
- package/package.json +2 -2
- package/umd/index.umd.js +10 -1
- package/umd/index.umd.js.map +1 -1
package/esm/typings/src/book-components/BookEditor/utils.d.ts
ADDED
@@ -0,0 +1,8 @@
+import { TODO_any } from '../../_packages/types.index';
+/**
+ * @private
+ */
+export declare function debounce<T extends (...args: TODO_any[]) => void>(fn: T, delay: number): (...args: Parameters<T>) => void;
+/**
+ * Note: [💞] Ignore a discrepancy between file name and entity name
+ * TODO: !!! remove this file */
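For orientation, the declared signature corresponds to a classic trailing-edge debounce. A minimal sketch of a matching implementation follows — illustrative only, assuming `TODO_any` is a loose alias for `any`; the package's actual implementation ships in the compiled bundles:

import { TODO_any } from '../../_packages/types.index';

export function debounce<T extends (...args: TODO_any[]) => void>(fn: T, delay: number): (...args: Parameters<T>) => void {
    let timeoutId: ReturnType<typeof setTimeout> | undefined;
    return (...args: Parameters<T>) => {
        // Each call resets the timer, so `fn` fires only after `delay` ms with no further calls
        if (timeoutId !== undefined) {
            clearTimeout(timeoutId);
        }
        timeoutId = setTimeout(() => fn(...args), delay);
    };
}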
package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionTools.d.ts
CHANGED
@@ -46,7 +46,7 @@ export declare abstract class OpenAiCompatibleExecutionTools implements LlmExecu
     /**
     * Calls OpenAI compatible API to use a chat model.
     */
-    callChatModel(prompt:
+    callChatModel(prompt: Prompt): Promise<ChatPromptResult>;
    /**
     * Internal method that handles parameter retry for chat model calls
     */
package/esm/typings/src/types/Prompt.d.ts
CHANGED
@@ -1,4 +1,5 @@
 import type { FormatCommand } from '../commands/FORMAT/FormatCommand';
+import type { ChatMessage } from '../book-components/Chat/types/ChatMessage';
 import type { Expectations } from '../pipeline/PipelineJson/Expectations';
 import type { ChatModelRequirements } from './ModelRequirements';
 import type { CompletionModelRequirements } from './ModelRequirements';
@@ -38,6 +39,10 @@ export type ChatPrompt = CommonPrompt & {
     * Requirements for chat model
     */
    modelRequirements: ChatModelRequirements;
+    /**
+     * Optional chat thread (history of previous messages)
+     */
+    thread?: ChatMessage[];
 };
 /**
  * Embedding prompt
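The new optional `thread` field lets a caller replay earlier conversation turns alongside the prompt. A hypothetical `ChatPrompt` using it might look like this — field values are illustrative, `ChatMessage` is assumed to carry at least `role` and `content` (matching how the UMD bundle below consumes it), and `ChatModelRequirements` is assumed to use the `modelVariant: 'CHAT'` discriminator:

const promptWithHistory: ChatPrompt = {
    // ...other CommonPrompt fields (content, parameters, …) go here
    modelRequirements: {
        modelVariant: 'CHAT',
    },
    // Earlier turns, replayed to the model before the current user message:
    thread: [
        { role: 'user', content: 'What is Promptbook?' },
        { role: 'assistant', content: 'Promptbook runs AI apps written in plain human language.' },
    ],
};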
package/esm/typings/src/version.d.ts
CHANGED
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
 export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
 /**
  * Represents the version string of the Promptbook engine.
- * It follows semantic versioning (e.g., `0.102.0-
+ * It follows semantic versioning (e.g., `0.102.0-4`).
  *
  * @generated
  */
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
     "name": "@promptbook/wizard",
-    "version": "0.102.0-4",
+    "version": "0.102.0-5",
     "description": "Promptbook: Run AI apps in plain human language across multiple models and platforms",
     "private": false,
     "sideEffects": false,
@@ -95,7 +95,7 @@
     "module": "./esm/index.es.js",
     "typings": "./esm/typings/src/_packages/wizard.index.d.ts",
     "peerDependencies": {
-        "@promptbook/core": "0.102.0-4"
+        "@promptbook/core": "0.102.0-5"
     },
     "dependencies": {
         "@ai-sdk/deepseek": "0.1.6",
package/umd/index.umd.js
CHANGED
@@ -48,7 +48,7 @@
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.102.0-4';
+const PROMPTBOOK_ENGINE_VERSION = '0.102.0-5';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -4794,6 +4794,14 @@
         // <- TODO: [🚸] Not all models are compatible with JSON mode
         // > 'response_format' of type 'json_object' is not supported with this model.
         const rawPromptContent = templateParameters(content, { ...parameters, modelName });
+        // Convert thread to OpenAI format if present
+        let threadMessages = [];
+        if ('thread' in prompt && Array.isArray(prompt.thread)) {
+            threadMessages = prompt.thread.map((msg) => ({
+                role: msg.role === 'assistant' ? 'assistant' : 'user',
+                content: msg.content,
+            }));
+        }
         const rawRequest = {
             ...modelSettings,
             messages: [
@@ -4805,6 +4813,7 @@
                     content: currentModelRequirements.systemMessage,
                 },
             ]),
+            ...threadMessages,
             {
                 role: 'user',
                 content: rawPromptContent,
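Taken together, the two UMD hunks assemble the request's `messages` array as: the optional system message, then the replayed `thread` (note that any role other than 'assistant' is coerced to 'user'), then the current user turn. Roughly, for the hypothetical prompt above — an illustrative shape, not the package's literal output:

// Illustrative: how the final messages array is ordered after both hunks apply
const rawPromptContent = 'Current user question…'; // produced by templateParameters(...) in the real code
const rawRequest = {
    messages: [
        { role: 'system', content: 'You are a helpful assistant.' }, // present only when systemMessage is set
        { role: 'user', content: 'What is Promptbook?' },            // replayed from prompt.thread
        { role: 'assistant', content: 'Promptbook runs AI apps written in plain human language.' }, // replayed from prompt.thread
        { role: 'user', content: rawPromptContent },                 // the current prompt, always last
    ],
};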