@promptbook/node 0.101.0-13 → 0.101.0-15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,3 +1,5 @@
+ import type { AvailableModel } from '../../execution/AvailableModel';
+ import { LlmExecutionTools } from '../../execution/LlmExecutionTools';
  import type { string_model_name } from '../../types/typeAliases';
  import type { AgentModelRequirements } from './AgentModelRequirements';
  import type { string_book } from './string_book';
@@ -7,11 +9,11 @@ import type { string_book } from './string_book';
  *
  * There are 2 similar functions:
  * - `parseAgentSource` which is a lightweight parser for agent source, it parses basic information and its purpose is to be quick and synchronous. The commitments there are hardcoded.
- * - `createAgentModelRequirements` which is an asynchronous function that creates model requirements it applies each commitment one by one and works asynchronously.
+ * - `createAgentModelRequirements` which is an asynchronous function that creates model requirements it applies each commitment one by one and works asynchronous.
  *
  * @public exported from `@promptbook/core`
  */
- export declare function createAgentModelRequirements(agentSource: string_book, modelName?: string_model_name): Promise<AgentModelRequirements>;
+ export declare function createAgentModelRequirements(agentSource: string_book, modelName?: string_model_name, availableModels?: readonly AvailableModel[], llmTools?: LlmExecutionTools): Promise<AgentModelRequirements>;
  /**
  * Clears the cache for createAgentModelRequirements
  * Useful when agent sources are updated and cached results should be invalidated
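The widened signature lets callers resolve requirements against the models and LLM tools they actually have. A minimal usage sketch, assuming the exact `AvailableModel` object shape and the source of `llmTools` (neither is specified in this diff):

```ts
import { createAgentModelRequirements } from '@promptbook/core';

// Hypothetical model catalog; the `AvailableModel` fields shown are assumed:
const availableModels = [
    { modelName: 'gpt-4o', modelVariant: 'CHAT' },
    { modelName: 'gpt-4o-mini', modelVariant: 'CHAT' },
] as const;

// `agentSource` is a `string_book` agent definition and `llmTools` is an
// `LlmExecutionTools` instance, e.g. from a provider package (both assumed in scope):
const requirements = await createAgentModelRequirements(
    agentSource,
    undefined, // no explicit model name; let the function choose
    availableModels,
    llmTools,
);
```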
@@ -0,0 +1,25 @@
+ import type { ModelRequirements } from '../../../types/ModelRequirements';
+ /**
+ * Parses an OpenAI error message to identify which parameter is unsupported
+ *
+ * @param errorMessage The error message from OpenAI API
+ * @returns The parameter name that is unsupported, or null if not an unsupported parameter error
+ * @private utility of LLM Tools
+ */
+ export declare function parseUnsupportedParameterError(errorMessage: string): string | null;
+ /**
+ * Creates a copy of model requirements with the specified parameter removed
+ *
+ * @param modelRequirements Original model requirements
+ * @param unsupportedParameter The parameter to remove
+ * @returns New model requirements without the unsupported parameter
+ * @private utility of LLM Tools
+ */
+ export declare function removeUnsupportedModelRequirement(modelRequirements: ModelRequirements, unsupportedParameter: string): ModelRequirements;
+ /**
+ * Checks if an error is an "Unsupported value" error from OpenAI
+ * @param error The error to check
+ * @returns true if this is an unsupported parameter error
+ * @private utility of LLM Tools
+ */
+ export declare function isUnsupportedParameterError(error: Error): boolean;
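Together, these three helpers suggest a detect-strip-retry pattern: recognize the error, identify the offending parameter, remove it, and call again. A minimal sketch of how they could compose; the wrapper name `callWithParameterFallback`, the retry cap, and the callback shape are assumptions (the new module's file path is not shown in this diff, so the import is left implicit):

```ts
import type { ModelRequirements } from '../../../types/ModelRequirements';
// `isUnsupportedParameterError`, `parseUnsupportedParameterError`, and
// `removeUnsupportedModelRequirement` are assumed imported from the new module above.

async function callWithParameterFallback<TResult>(
    modelRequirements: ModelRequirements,
    call: (requirements: ModelRequirements) => Promise<TResult>,
): Promise<TResult> {
    let requirements = modelRequirements;
    for (let attempt = 0; attempt < 5; attempt++) {
        try {
            return await call(requirements);
        } catch (error) {
            // Rethrow anything that is not an "Unsupported value" error:
            if (!(error instanceof Error) || !isUnsupportedParameterError(error)) {
                throw error;
            }
            const parameter = parseUnsupportedParameterError(error.message);
            if (parameter === null) {
                throw error; // the offending parameter could not be identified
            }
            // Drop the unsupported parameter and try again:
            requirements = removeUnsupportedModelRequirement(requirements, parameter);
        }
    }
    throw new Error('Exhausted retries while removing unsupported parameters');
}
```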
@@ -50,6 +50,5 @@ export declare class AgentLlmExecutionTools implements LlmExecutionTools {
  }
  /**
  * TODO: [🍚] Implement Destroyable pattern to free resources
- * TODO: !!!! Pick the best model from available models
  * TODO: !!!! adding parameter substitution support
  */
@@ -1,15 +1,10 @@
  import OpenAI from 'openai';
  import type { AvailableModel } from '../../execution/AvailableModel';
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
- import type { ChatPromptResult } from '../../execution/PromptResult';
- import type { CompletionPromptResult } from '../../execution/PromptResult';
- import type { EmbeddingPromptResult } from '../../execution/PromptResult';
+ import type { ChatPromptResult, CompletionPromptResult, EmbeddingPromptResult } from '../../execution/PromptResult';
  import type { Usage } from '../../execution/Usage';
  import type { Prompt } from '../../types/Prompt';
- import type { string_markdown } from '../../types/typeAliases';
- import type { string_markdown_text } from '../../types/typeAliases';
- import type { string_model_name } from '../../types/typeAliases';
- import type { string_title } from '../../types/typeAliases';
+ import type { string_markdown, string_markdown_text, string_model_name, string_title } from '../../types/typeAliases';
  import { computeOpenAiUsage } from './computeOpenAiUsage';
  import type { OpenAiCompatibleExecutionToolsNonProxiedOptions } from './OpenAiCompatibleExecutionToolsOptions';
  /**
@@ -27,6 +22,10 @@ export declare abstract class OpenAiCompatibleExecutionTools implements LlmExecu
  * Rate limiter instance
  */
  private limiter;
+ /**
+ * Tracks models and parameters that have already been retried to prevent infinite loops
+ */
+ private retriedUnsupportedParameters;
  /**
  * Creates OpenAI compatible Execution Tools.
  *
@@ -48,10 +47,18 @@ export declare abstract class OpenAiCompatibleExecutionTools implements LlmExecu
  * Calls OpenAI compatible API to use a chat model.
  */
  callChatModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements' | 'format'>): Promise<ChatPromptResult>;
+ /**
+ * Internal method that handles parameter retry for chat model calls
+ */
+ private callChatModelWithRetry;
  /**
  * Calls OpenAI API to use a complete model.
  */
  callCompletionModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements'>): Promise<CompletionPromptResult>;
+ /**
+ * Internal method that handles parameter retry for completion model calls
+ */
+ private callCompletionModelWithRetry;
  /**
  * Calls OpenAI compatible API to use a embedding model
  */
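The typings only declare `retriedUnsupportedParameters` and the two `...WithRetry` methods; their bodies stay private. One plausible reading, sketched under the assumption that retries are deduplicated by a `${modelName}:${parameter}` key (both the key format and the class name are hypothetical, not taken from the package):

```ts
class RetryGuardSketch {
    /** Tracks model/parameter pairs that were already retried once. */
    private retriedUnsupportedParameters = new Set<string>();

    /**
     * Returns true if this pair may still be retried; records it so the same
     * unsupported parameter never triggers a second retry (no infinite loops).
     */
    private shouldRetry(modelName: string, parameter: string): boolean {
        const key = `${modelName}:${parameter}`;
        if (this.retriedUnsupportedParameters.has(key)) {
            return false;
        }
        this.retriedUnsupportedParameters.add(key);
        return true;
    }
}
```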
@@ -85,7 +92,7 @@ export declare abstract class OpenAiCompatibleExecutionTools implements LlmExecu
  /**
  * Makes a request with retry logic for network errors like ECONNRESET
  */
- private makeRequestWithRetry;
+ private makeRequestWithNetworkRetry;
  /**
  * Determines if an error is retryable (network-related errors)
  */
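The rename to `makeRequestWithNetworkRetry` separates transport-level retries from the new unsupported-parameter retries. A hedged sketch of what the network side could look like; the predicate name, the error-code list, and the backoff policy are all assumptions:

```ts
// Node.js error codes commonly treated as transient network failures:
const RETRYABLE_CODES = new Set(['ECONNRESET', 'ECONNREFUSED', 'ETIMEDOUT', 'EAI_AGAIN', 'EPIPE']);

function isRetryableNetworkError(error: unknown): boolean {
    const code = (error as NodeJS.ErrnoException | null)?.code;
    return code !== undefined && RETRYABLE_CODES.has(code);
}

async function makeRequestWithNetworkRetry<T>(request: () => Promise<T>, maxAttempts = 3): Promise<T> {
    for (let attempt = 1; ; attempt++) {
        try {
            return await request();
        } catch (error) {
            if (attempt >= maxAttempts || !isRetryableNetworkError(error)) {
                throw error;
            }
            // Exponential backoff: 200 ms, 400 ms, 800 ms, ...
            await new Promise((resolve) => setTimeout(resolve, 2 ** attempt * 100));
        }
    }
}
```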
@@ -10,6 +10,7 @@ import type { string_persona_description } from '../types/typeAliases';
  */
  export declare function preparePersona(personaDescription: string_persona_description, tools: Pick<ExecutionTools, 'llm'>, options: PrepareAndScrapeOptions): Promise<Pick<PersonaPreparedJson, 'modelsRequirements'>>;
  /**
+ * TODO: [😩] DRY `preparePersona` and `selectBestModelFromAvailable`
  * TODO: [🔃][main] If the persona was prepared with different version or different set of models, prepare it once again
  * TODO: [🏢] Check validity of `modelName` in pipeline
  * TODO: [🏢] Check validity of `systemMessage` in pipeline
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
  export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
  /**
  * Represents the version string of the Promptbook engine.
- * It follows semantic versioning (e.g., `0.101.0-12`).
+ * It follows semantic versioning (e.g., `0.101.0-14`).
  *
  * @generated
  */
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@promptbook/node",
- "version": "0.101.0-13",
+ "version": "0.101.0-15",
  "description": "Promptbook: Run AI apps in plain human language across multiple models and platforms",
  "private": false,
  "sideEffects": false,
@@ -93,7 +93,7 @@
  "module": "./esm/index.es.js",
  "typings": "./esm/typings/src/_packages/node.index.d.ts",
  "peerDependencies": {
- "@promptbook/core": "0.101.0-13"
+ "@promptbook/core": "0.101.0-15"
  },
  "dependencies": {
  "colors": "1.4.0",
package/umd/index.umd.js CHANGED
@@ -45,7 +45,7 @@
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
- const PROMPTBOOK_ENGINE_VERSION = '0.101.0-13';
+ const PROMPTBOOK_ENGINE_VERSION = '0.101.0-15';
  /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -5369,6 +5369,7 @@
  };
  }
  /**
+ * TODO: [😩] DRY `preparePersona` and `selectBestModelFromAvailable`
  * TODO: [🔃][main] If the persona was prepared with different version or different set of models, prepare it once again
  * TODO: [🏢] Check validity of `modelName` in pipeline
  * TODO: [🏢] Check validity of `systemMessage` in pipeline