@promptbook/remote-server 0.50.0-7 → 0.50.0-9
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/esm/index.es.js +1 -1
- package/esm/typings/execution/plugins/llm-execution-tools/openai/OpenAiExecutionTools.d.ts +8 -0
- package/esm/typings/types/ModelRequirements.d.ts +2 -1
- package/package.json +2 -2
- package/umd/index.umd.js +1 -1
- package/umd/typings/execution/plugins/llm-execution-tools/openai/OpenAiExecutionTools.d.ts +8 -0
- package/umd/typings/types/ModelRequirements.d.ts +2 -1
package/esm/index.es.js
CHANGED
|
@@ -89,7 +89,7 @@ var PromptbookExecutionError = /** @class */ (function (_super) {
|
|
|
89
89
|
/**
|
|
90
90
|
* The version of the Promptbook library
|
|
91
91
|
*/
|
|
92
|
-
var PROMPTBOOK_VERSION = '0.50.0-7';
|
|
92
|
+
var PROMPTBOOK_VERSION = '0.50.0-8';
|
|
93
93
|
|
|
94
94
|
/**
|
|
95
95
|
* Remote server is a proxy server that uses its execution tools internally and exposes the executor interface externally.
|
|
@@ -25,6 +25,14 @@ export declare class OpenAiExecutionTools implements LlmExecutionTools {
|
|
|
25
25
|
* Calls OpenAI API to use a complete model.
|
|
26
26
|
*/
|
|
27
27
|
gptComplete(prompt: Prompt): Promise<PromptCompletionResult>;
|
|
28
|
+
/**
|
|
29
|
+
* Default model for chat variant.
|
|
30
|
+
*/
|
|
31
|
+
private getDefaultChatModel;
|
|
32
|
+
/**
|
|
33
|
+
* Default model for completion variant.
|
|
34
|
+
*/
|
|
35
|
+
private getDefaultCompletionModel;
|
|
28
36
|
/**
|
|
29
37
|
* List all available OpenAI models that can be used
|
|
30
38
|
*/
|
|
@@ -26,10 +26,11 @@ export type ModelRequirements = {
|
|
|
26
26
|
* The model for text prompt
|
|
27
27
|
*
|
|
28
28
|
* Note: Model must be compatible with the model variant
|
|
29
|
+
* Note: If not specified, the best model for the variant will be used
|
|
29
30
|
*
|
|
30
31
|
* @example 'gpt-4', 'gpt-4-32k-0314', 'gpt-3.5-turbo-instruct',...
|
|
31
32
|
*/
|
|
32
|
-
readonly modelName: string_model_name;
|
|
33
|
+
readonly modelName?: string_model_name;
|
|
33
34
|
/**
|
|
34
35
|
* Maximum number of tokens that can be generated by the model
|
|
35
36
|
*/
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@promptbook/remote-server",
|
|
3
|
-
"version": "0.50.0-
|
|
3
|
+
"version": "0.50.0-9",
|
|
4
4
|
"description": "Library to supercharge your use of large language models",
|
|
5
5
|
"private": false,
|
|
6
6
|
"sideEffects": false,
|
|
@@ -49,7 +49,7 @@
|
|
|
49
49
|
}
|
|
50
50
|
],
|
|
51
51
|
"peerDependencies": {
|
|
52
|
-
"@promptbook/core": "0.50.0-
|
|
52
|
+
"@promptbook/core": "0.50.0-9"
|
|
53
53
|
},
|
|
54
54
|
"main": "./umd/index.umd.js",
|
|
55
55
|
"module": "./esm/index.es.js",
|
package/umd/index.umd.js
CHANGED
|
@@ -95,7 +95,7 @@
|
|
|
95
95
|
/**
|
|
96
96
|
* The version of the Promptbook library
|
|
97
97
|
*/
|
|
98
|
-
var PROMPTBOOK_VERSION = '0.50.0-7';
|
|
98
|
+
var PROMPTBOOK_VERSION = '0.50.0-8';
|
|
99
99
|
|
|
100
100
|
/**
|
|
101
101
|
* Remote server is a proxy server that uses its execution tools internally and exposes the executor interface externally.
|
|
@@ -25,6 +25,14 @@ export declare class OpenAiExecutionTools implements LlmExecutionTools {
|
|
|
25
25
|
* Calls OpenAI API to use a complete model.
|
|
26
26
|
*/
|
|
27
27
|
gptComplete(prompt: Prompt): Promise<PromptCompletionResult>;
|
|
28
|
+
/**
|
|
29
|
+
* Default model for chat variant.
|
|
30
|
+
*/
|
|
31
|
+
private getDefaultChatModel;
|
|
32
|
+
/**
|
|
33
|
+
* Default model for completion variant.
|
|
34
|
+
*/
|
|
35
|
+
private getDefaultCompletionModel;
|
|
28
36
|
/**
|
|
29
37
|
* List all available OpenAI models that can be used
|
|
30
38
|
*/
|
|
@@ -26,10 +26,11 @@ export type ModelRequirements = {
|
|
|
26
26
|
* The model for text prompt
|
|
27
27
|
*
|
|
28
28
|
* Note: Model must be compatible with the model variant
|
|
29
|
+
* Note: If not specified, the best model for the variant will be used
|
|
29
30
|
*
|
|
30
31
|
* @example 'gpt-4', 'gpt-4-32k-0314', 'gpt-3.5-turbo-instruct',...
|
|
31
32
|
*/
|
|
32
|
-
readonly modelName: string_model_name;
|
|
33
|
+
readonly modelName?: string_model_name;
|
|
33
34
|
/**
|
|
34
35
|
* Maximum number of tokens that can be generated by the model
|
|
35
36
|
*/
|