@promptbook/node 0.101.0-14 → 0.101.0-15
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/esm/index.es.js +1 -1
- package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirements.d.ts +3 -5
- package/esm/typings/src/llm-providers/_common/utils/removeUnsupportedModelRequirements.d.ts +25 -0
- package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionTools.d.ts +15 -8
- package/esm/typings/src/version.d.ts +1 -1
- package/package.json +2 -2
- package/umd/index.umd.js +1 -1
package/esm/index.es.js
CHANGED
@@ -28,7 +28,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.101.0-14';
+const PROMPTBOOK_ENGINE_VERSION = '0.101.0-15';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirements.d.ts
CHANGED
@@ -1,4 +1,5 @@
 import type { AvailableModel } from '../../execution/AvailableModel';
+import { LlmExecutionTools } from '../../execution/LlmExecutionTools';
 import type { string_model_name } from '../../types/typeAliases';
 import type { AgentModelRequirements } from './AgentModelRequirements';
 import type { string_book } from './string_book';
@@ -8,11 +9,11 @@ import type { string_book } from './string_book';
  *
  * There are 2 similar functions:
  * - `parseAgentSource` which is a lightweight parser for agent source, it parses basic information and its purpose is to be quick and synchronous. The commitments there are hardcoded.
- * - `createAgentModelRequirements` which is an asynchronous function that creates model requirements it applies each commitment one by one and works
+ * - `createAgentModelRequirements` which is an asynchronous function that creates model requirements it applies each commitment one by one and works asynchronous.
  *
  * @public exported from `@promptbook/core`
  */
-export declare function createAgentModelRequirements(agentSource: string_book, modelName?: string_model_name, availableModels?: readonly AvailableModel[]): Promise<AgentModelRequirements>;
+export declare function createAgentModelRequirements(agentSource: string_book, modelName?: string_model_name, availableModels?: readonly AvailableModel[], llmTools?: LlmExecutionTools): Promise<AgentModelRequirements>;
 /**
  * Clears the cache for createAgentModelRequirements
  * Useful when agent sources are updated and cached results should be invalidated
@@ -61,6 +62,3 @@ export declare function extractAgentName(agentSource: string_book): string;
  * @private
  */
 export declare function extractAgentProfileImage(agentSource: string_book): string;
-/**
- * TODO: [😩] DRY `preparePersona` and `selectBestModelFromAvailable`
- */
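For orientation, here is a minimal usage sketch of the widened signature. It assumes only what the declaration itself states (the function is `@public exported from @promptbook/core`); the agent source, model name, model list, and `llmTools` values are placeholders, not values taken from this release:

import { createAgentModelRequirements } from '@promptbook/core';

// Placeholder inputs – in a real application these come from your agent book,
// your model catalogue and your configured LLM provider.
declare const agentSource: Parameters<typeof createAgentModelRequirements>[0]; // string_book
declare const modelName: Parameters<typeof createAgentModelRequirements>[1]; // string_model_name | undefined
declare const availableModels: Parameters<typeof createAgentModelRequirements>[2]; // readonly AvailableModel[] | undefined
declare const llmTools: Parameters<typeof createAgentModelRequirements>[3]; // LlmExecutionTools | undefined (new in 0.101.0-15)

export async function prepareAgentRequirements() {
    // The fourth argument is optional, so existing three-argument call sites keep compiling.
    return await createAgentModelRequirements(agentSource, modelName, availableModels, llmTools);
}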
package/esm/typings/src/llm-providers/_common/utils/removeUnsupportedModelRequirements.d.ts
ADDED
@@ -0,0 +1,25 @@
+import type { ModelRequirements } from '../../../types/ModelRequirements';
+/**
+ * Parses an OpenAI error message to identify which parameter is unsupported
+ *
+ * @param errorMessage The error message from OpenAI API
+ * @returns The parameter name that is unsupported, or null if not an unsupported parameter error
+ * @private utility of LLM Tools
+ */
+export declare function parseUnsupportedParameterError(errorMessage: string): string | null;
+/**
+ * Creates a copy of model requirements with the specified parameter removed
+ *
+ * @param modelRequirements Original model requirements
+ * @param unsupportedParameter The parameter to remove
+ * @returns New model requirements without the unsupported parameter
+ * @private utility of LLM Tools
+ */
+export declare function removeUnsupportedModelRequirement(modelRequirements: ModelRequirements, unsupportedParameter: string): ModelRequirements;
+/**
+ * Checks if an error is an "Unsupported value" error from OpenAI
+ * @param error The error to check
+ * @returns true if this is an unsupported parameter error
+ * @private utility of LLM Tools
+ */
+export declare function isUnsupportedParameterError(error: Error): boolean;
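Read together, the three declarations describe a parse-and-strip recovery flow: detect an "Unsupported value" error, find the offending parameter, drop it, and retry. A minimal sketch of how they could be wired up, written as if it lived next to the declaration file inside the package (the wrapper name and call shape are illustrative only and not part of the published API; the utilities themselves are marked `@private`):

import type { ModelRequirements } from '../../../types/ModelRequirements';
import {
    isUnsupportedParameterError,
    parseUnsupportedParameterError,
    removeUnsupportedModelRequirement,
} from './removeUnsupportedModelRequirements';

/**
 * Illustrative wrapper: runs `callApi` once and, if the provider rejects a
 * single parameter (e.g. `temperature`), strips it and retries exactly once.
 */
async function callWithParameterFallback<TResult>(
    modelRequirements: ModelRequirements,
    callApi: (modelRequirements: ModelRequirements) => Promise<TResult>,
): Promise<TResult> {
    try {
        return await callApi(modelRequirements);
    } catch (error) {
        if (!(error instanceof Error) || !isUnsupportedParameterError(error)) {
            throw error; // Not an "Unsupported value" error – do not mask it
        }
        const unsupportedParameter = parseUnsupportedParameterError(error.message);
        if (unsupportedParameter === null) {
            throw error; // Could not tell which parameter is at fault
        }
        const reducedRequirements = removeUnsupportedModelRequirement(modelRequirements, unsupportedParameter);
        return await callApi(reducedRequirements);
    }
}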
package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionTools.d.ts
CHANGED
@@ -1,15 +1,10 @@
 import OpenAI from 'openai';
 import type { AvailableModel } from '../../execution/AvailableModel';
 import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
-import type { ChatPromptResult } from '../../execution/PromptResult';
-import type { CompletionPromptResult } from '../../execution/PromptResult';
-import type { EmbeddingPromptResult } from '../../execution/PromptResult';
+import type { ChatPromptResult, CompletionPromptResult, EmbeddingPromptResult } from '../../execution/PromptResult';
 import type { Usage } from '../../execution/Usage';
 import type { Prompt } from '../../types/Prompt';
-import type { string_markdown } from '../../types/typeAliases';
-import type { string_markdown_text } from '../../types/typeAliases';
-import type { string_model_name } from '../../types/typeAliases';
-import type { string_title } from '../../types/typeAliases';
+import type { string_markdown, string_markdown_text, string_model_name, string_title } from '../../types/typeAliases';
 import { computeOpenAiUsage } from './computeOpenAiUsage';
 import type { OpenAiCompatibleExecutionToolsNonProxiedOptions } from './OpenAiCompatibleExecutionToolsOptions';
 /**
@@ -27,6 +22,10 @@ export declare abstract class OpenAiCompatibleExecutionTools implements LlmExecu
      * Rate limiter instance
      */
     private limiter;
+    /**
+     * Tracks models and parameters that have already been retried to prevent infinite loops
+     */
+    private retriedUnsupportedParameters;
     /**
      * Creates OpenAI compatible Execution Tools.
      *
@@ -48,10 +47,18 @@ export declare abstract class OpenAiCompatibleExecutionTools implements LlmExecu
      * Calls OpenAI compatible API to use a chat model.
      */
     callChatModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements' | 'format'>): Promise<ChatPromptResult>;
+    /**
+     * Internal method that handles parameter retry for chat model calls
+     */
+    private callChatModelWithRetry;
     /**
      * Calls OpenAI API to use a complete model.
      */
     callCompletionModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements'>): Promise<CompletionPromptResult>;
+    /**
+     * Internal method that handles parameter retry for completion model calls
+     */
+    private callCompletionModelWithRetry;
     /**
      * Calls OpenAI compatible API to use a embedding model
      */
@@ -85,7 +92,7 @@ export declare abstract class OpenAiCompatibleExecutionTools implements LlmExecu
     /**
      * Makes a request with retry logic for network errors like ECONNRESET
      */
-    private
+    private makeRequestWithNetworkRetry;
     /**
      * Determines if an error is retryable (network-related errors)
      */
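The new private members surface in the typings only as names, but together they outline the recovery strategy: `callChatModelWithRetry` and `callCompletionModelWithRetry` re-issue a call after an unsupported parameter has been stripped, and `retriedUnsupportedParameters` remembers which combinations have already been retried so the fallback cannot loop forever. A standalone sketch of such a guard, under the assumption that it is keyed by model name and parameter (the class internals are not published, so everything below except the declared member names is invented):

/**
 * Standalone sketch of a retry guard similar in spirit to the private
 * `retriedUnsupportedParameters` member; the real implementation is not
 * part of the published typings.
 */
class UnsupportedParameterRetryGuard {
    private readonly alreadyRetried = new Set<string>();

    /**
     * Returns `true` the first time a model/parameter pair is reported,
     * `false` afterwards – the caller should then surface the error
     * instead of retrying again.
     */
    public shouldRetry(modelName: string, parameterName: string): boolean {
        const key = `${modelName}\u0000${parameterName}`;
        if (this.alreadyRetried.has(key)) {
            return false;
        }
        this.alreadyRetried.add(key);
        return true;
    }
}

// Usage sketch: retry at most once per model/parameter combination.
const guard = new UnsupportedParameterRetryGuard();
console.log(guard.shouldRetry('some-model', 'temperature')); // true  – strip the parameter and retry
console.log(guard.shouldRetry('some-model', 'temperature')); // false – give up and rethrow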
package/esm/typings/src/version.d.ts
CHANGED
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
 export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
 /**
  * Represents the version string of the Promptbook engine.
- * It follows semantic versioning (e.g., `0.101.0-
+ * It follows semantic versioning (e.g., `0.101.0-14`).
  *
  * @generated
  */
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
     "name": "@promptbook/node",
-    "version": "0.101.0-14",
+    "version": "0.101.0-15",
     "description": "Promptbook: Run AI apps in plain human language across multiple models and platforms",
     "private": false,
     "sideEffects": false,
@@ -93,7 +93,7 @@
     "module": "./esm/index.es.js",
     "typings": "./esm/typings/src/_packages/node.index.d.ts",
     "peerDependencies": {
-        "@promptbook/core": "0.101.0-14"
+        "@promptbook/core": "0.101.0-15"
     },
     "dependencies": {
         "colors": "1.4.0",
package/umd/index.umd.js
CHANGED
@@ -45,7 +45,7 @@
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.101.0-14';
+const PROMPTBOOK_ENGINE_VERSION = '0.101.0-15';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
|