@promptbook/markdown-utils 0.98.0-6 → 0.98.0-9
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/esm/index.es.js +85 -45
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/anthropic-claude.index.d.ts +2 -2
- package/esm/typings/src/_packages/openai.index.d.ts +4 -0
- package/esm/typings/src/_packages/types.index.d.ts +10 -2
- package/esm/typings/src/config.d.ts +1 -1
- package/esm/typings/src/execution/createPipelineExecutor/$OngoingTaskResult.d.ts +1 -0
- package/esm/typings/src/execution/utils/validatePromptResult.d.ts +53 -0
- package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +3 -3
- package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions.d.ts +2 -2
- package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionTools.d.ts +4 -4
- package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionToolsOptions.d.ts +42 -1
- package/esm/typings/src/llm-providers/openai/createOpenAiCompatibleExecutionTools.d.ts +58 -1
- package/esm/typings/src/version.d.ts +1 -1
- package/package.json +1 -1
- package/umd/index.umd.js +85 -45
- package/umd/index.umd.js.map +1 -1
@@ -2,7 +2,7 @@ import { BOOK_LANGUAGE_VERSION, PROMPTBOOK_ENGINE_VERSION } from '../version';
 import { ANTHROPIC_CLAUDE_MODELS } from '../llm-providers/anthropic-claude/anthropic-claude-models';
 import { AnthropicClaudeExecutionTools } from '../llm-providers/anthropic-claude/AnthropicClaudeExecutionTools';
 import type { AnthropicClaudeExecutionToolsOptions } from '../llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions';
-import type {
+import type { AnthropicClaudeExecutionToolsNonProxiedOptions } from '../llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions';
 import type { AnthropicClaudeExecutionToolsProxiedOptions } from '../llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions';
 import { createAnthropicClaudeExecutionTools } from '../llm-providers/anthropic-claude/createAnthropicClaudeExecutionTools';
 import { _AnthropicClaudeRegistration } from '../llm-providers/anthropic-claude/register-constructor';
@@ -10,7 +10,7 @@ export { BOOK_LANGUAGE_VERSION, PROMPTBOOK_ENGINE_VERSION };
 export { ANTHROPIC_CLAUDE_MODELS };
 export { AnthropicClaudeExecutionTools };
 export type { AnthropicClaudeExecutionToolsOptions };
-export type {
+export type { AnthropicClaudeExecutionToolsNonProxiedOptions };
 export type { AnthropicClaudeExecutionToolsProxiedOptions };
 export { createAnthropicClaudeExecutionTools };
 export { _AnthropicClaudeRegistration };
@@ -7,6 +7,8 @@ import { OpenAiAssistantExecutionTools } from '../llm-providers/openai/OpenAiAss
 import type { OpenAiAssistantExecutionToolsOptions } from '../llm-providers/openai/OpenAiAssistantExecutionToolsOptions';
 import { OpenAiCompatibleExecutionTools } from '../llm-providers/openai/OpenAiCompatibleExecutionTools';
 import type { OpenAiCompatibleExecutionToolsOptions } from '../llm-providers/openai/OpenAiCompatibleExecutionToolsOptions';
+import type { OpenAiCompatibleExecutionToolsNonProxiedOptions } from '../llm-providers/openai/OpenAiCompatibleExecutionToolsOptions';
+import type { OpenAiCompatibleExecutionToolsProxiedOptions } from '../llm-providers/openai/OpenAiCompatibleExecutionToolsOptions';
 import { OpenAiExecutionTools } from '../llm-providers/openai/OpenAiExecutionTools';
 import type { OpenAiExecutionToolsOptions } from '../llm-providers/openai/OpenAiExecutionToolsOptions';
 import { _OpenAiRegistration } from '../llm-providers/openai/register-constructor';
@@ -21,6 +23,8 @@ export { OpenAiAssistantExecutionTools };
 export type { OpenAiAssistantExecutionToolsOptions };
 export { OpenAiCompatibleExecutionTools };
 export type { OpenAiCompatibleExecutionToolsOptions };
+export type { OpenAiCompatibleExecutionToolsNonProxiedOptions };
+export type { OpenAiCompatibleExecutionToolsProxiedOptions };
 export { OpenAiExecutionTools };
 export type { OpenAiExecutionToolsOptions };
 export { _OpenAiRegistration };
@@ -61,6 +61,8 @@ import type { Usage } from '../execution/Usage';
 import type { UsageCounts } from '../execution/Usage';
 import type { UserInterfaceTools } from '../execution/UserInterfaceTools';
 import type { UserInterfaceToolsPromptDialogOptions } from '../execution/UserInterfaceTools';
+import type { ValidatePromptResultOptions } from '../execution/utils/validatePromptResult';
+import type { ValidatePromptResultResult } from '../execution/utils/validatePromptResult';
 import type { FormatSubvalueParser } from '../formats/_common/FormatSubvalueParser';
 import type { FormatSubvalueParserMapValuesOptions } from '../formats/_common/FormatSubvalueParser';
 import type { CsvSettings } from '../formats/csv/CsvSettings';
@@ -74,7 +76,7 @@ import type { CacheItem } from '../llm-providers/_common/utils/cache/CacheItem';
 import type { CacheLlmToolsOptions } from '../llm-providers/_common/utils/cache/CacheLlmToolsOptions';
 import type { LlmExecutionToolsWithTotalUsage } from '../llm-providers/_common/utils/count-total-usage/LlmExecutionToolsWithTotalUsage';
 import type { AnthropicClaudeExecutionToolsOptions } from '../llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions';
-import type {
+import type { AnthropicClaudeExecutionToolsNonProxiedOptions } from '../llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions';
 import type { AnthropicClaudeExecutionToolsProxiedOptions } from '../llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions';
 import type { AzureOpenAiExecutionToolsOptions } from '../llm-providers/azure-openai/AzureOpenAiExecutionToolsOptions';
 import type { DeepseekExecutionToolsOptions } from '../llm-providers/deepseek/DeepseekExecutionToolsOptions';
@@ -82,6 +84,8 @@ import type { GoogleExecutionToolsOptions } from '../llm-providers/google/Google
 import type { OllamaExecutionToolsOptions } from '../llm-providers/ollama/OllamaExecutionToolsOptions';
 import type { OpenAiAssistantExecutionToolsOptions } from '../llm-providers/openai/OpenAiAssistantExecutionToolsOptions';
 import type { OpenAiCompatibleExecutionToolsOptions } from '../llm-providers/openai/OpenAiCompatibleExecutionToolsOptions';
+import type { OpenAiCompatibleExecutionToolsNonProxiedOptions } from '../llm-providers/openai/OpenAiCompatibleExecutionToolsOptions';
+import type { OpenAiCompatibleExecutionToolsProxiedOptions } from '../llm-providers/openai/OpenAiCompatibleExecutionToolsOptions';
 import type { OpenAiExecutionToolsOptions } from '../llm-providers/openai/OpenAiExecutionToolsOptions';
 import type { VercelExecutionToolsOptions } from '../llm-providers/vercel/VercelExecutionToolsOptions';
 import type { VercelProvider } from '../llm-providers/vercel/VercelProvider';
@@ -361,6 +365,8 @@ export type { Usage };
 export type { UsageCounts };
 export type { UserInterfaceTools };
 export type { UserInterfaceToolsPromptDialogOptions };
+export type { ValidatePromptResultOptions };
+export type { ValidatePromptResultResult };
 export type { FormatSubvalueParser };
 export type { FormatSubvalueParserMapValuesOptions };
 export type { CsvSettings };
@@ -374,7 +380,7 @@ export type { CacheItem };
 export type { CacheLlmToolsOptions };
 export type { LlmExecutionToolsWithTotalUsage };
 export type { AnthropicClaudeExecutionToolsOptions };
-export type {
+export type { AnthropicClaudeExecutionToolsNonProxiedOptions };
 export type { AnthropicClaudeExecutionToolsProxiedOptions };
 export type { AzureOpenAiExecutionToolsOptions };
 export type { DeepseekExecutionToolsOptions };
@@ -382,6 +388,8 @@ export type { GoogleExecutionToolsOptions };
 export type { OllamaExecutionToolsOptions };
 export type { OpenAiAssistantExecutionToolsOptions };
 export type { OpenAiCompatibleExecutionToolsOptions };
+export type { OpenAiCompatibleExecutionToolsNonProxiedOptions };
+export type { OpenAiCompatibleExecutionToolsProxiedOptions };
 export type { OpenAiExecutionToolsOptions };
 export type { VercelExecutionToolsOptions };
 export type { VercelProvider };
@@ -176,7 +176,7 @@ export declare const DEFAULT_MAX_PARALLEL_COUNT = 5;
  *
  * @public exported from `@promptbook/core`
  */
-export declare const DEFAULT_MAX_EXECUTION_ATTEMPTS =
+export declare const DEFAULT_MAX_EXECUTION_ATTEMPTS = 7;
 /**
  * The maximum depth to which knowledge sources will be scraped when building a knowledge base.
  * This prevents infinite recursion and limits resource usage.
@@ -0,0 +1,53 @@
+import type { FormatCommand } from '../../commands/FORMAT/FormatCommand';
+import { ExpectError } from '../../errors/ExpectError';
+import type { Expectations } from '../../pipeline/PipelineJson/Expectations';
+import type { string_postprocessing_function_name } from '../../types/typeAliases';
+/**
+ * Options for validating a prompt result
+ */
+export interface ValidatePromptResultOptions {
+    /**
+     * The result string to validate
+     */
+    resultString: string;
+    /**
+     * Expectations for the result (word count, sentence count, etc.)
+     */
+    expectations?: Expectations;
+    /**
+     * Expected format of the result (e.g., 'JSON')
+     */
+    format?: FormatCommand['format'];
+    /**
+     * List of postprocessing function names that should be applied
+     * Note: This is for validation purposes only - postprocessing should be done before calling this function
+     */
+    postprocessingFunctionNames?: ReadonlyArray<string_postprocessing_function_name>;
+}
+/**
+ * Result of prompt result validation
+ */
+export interface ValidatePromptResultResult {
+    /**
+     * Whether the result is valid (passes all expectations and format checks)
+     */
+    isValid: boolean;
+    /**
+     * The processed result string (may be modified if format extraction was needed)
+     */
+    processedResultString: string;
+    /**
+     * Error that occurred during validation, if any
+     */
+    error?: ExpectError;
+}
+/**
+ * Validates a prompt result against expectations and format requirements.
+ * This function provides a common abstraction for result validation that can be used
+ * by both execution logic and caching logic to ensure consistency.
+ *
+ * @param options - The validation options including result string, expectations, and format
+ * @returns Validation result with processed string and validity status
+ * @private internal function of `createPipelineExecutor` and `cacheLlmTools`
+ */
+export declare function validatePromptResult(options: ValidatePromptResultOptions): ValidatePromptResultResult;
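For orientation, here is a minimal sketch of the validation contract these new typings describe. Note that `validatePromptResult` is marked `@private` (it is used internally by `createPipelineExecutor` and `cacheLlmTools`), so the relative import path and the `Expectations` value below are illustrative assumptions, not a documented public API:

```ts
// Minimal sketch (assumed internal usage; not a documented public API).
import { validatePromptResult } from './execution/utils/validatePromptResult';

const validation = validatePromptResult({
    resultString: '{ "name": "Alice" }',
    expectations: { words: { min: 1 } }, // assumed `Expectations` shape
    format: 'JSON',
});

if (validation.isValid) {
    // If the model wrapped the JSON in a markdown code block, `processedResultString`
    // holds the extracted JSON; otherwise it equals the input string.
    console.info(validation.processedResultString);
} else {
    // `error` is an `ExpectError` describing the failed expectation or format check.
    console.warn(validation.error?.message);
}
```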
@@ -7,7 +7,7 @@ import type { Prompt } from '../../types/Prompt';
 import type { string_markdown } from '../../types/typeAliases';
 import type { string_markdown_text } from '../../types/typeAliases';
 import type { string_title } from '../../types/typeAliases';
-import type {
+import type { AnthropicClaudeExecutionToolsNonProxiedOptions } from './AnthropicClaudeExecutionToolsOptions';
 /**
  * Execution Tools for calling Anthropic Claude API.
  *
@@ -15,7 +15,7 @@ import type { AnthropicClaudeExecutionToolsDirectOptions } from './AnthropicClau
  * @deprecated use `createAnthropicClaudeExecutionTools` instead
  */
 export declare class AnthropicClaudeExecutionTools implements LlmExecutionTools {
-    protected readonly options:
+    protected readonly options: AnthropicClaudeExecutionToolsNonProxiedOptions;
     /**
      * Anthropic Claude API client.
      */
@@ -26,7 +26,7 @@ export declare class AnthropicClaudeExecutionTools implements LlmExecutionTools
      *
      * @param options which are relevant are directly passed to the Anthropic Claude client
      */
-    constructor(options?:
+    constructor(options?: AnthropicClaudeExecutionToolsNonProxiedOptions);
     get title(): string_title & string_markdown_text;
     get description(): string_markdown;
     getClient(): Promise<Anthropic>;
package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions.d.ts CHANGED
@@ -7,14 +7,14 @@ import type { RemoteClientOptions } from '../../remote-server/types/RemoteClient
  * This extends Anthropic's `ClientOptions` with are directly passed to the Anthropic client.
  * @public exported from `@promptbook/anthropic-claude`
  */
-export type AnthropicClaudeExecutionToolsOptions =
+export type AnthropicClaudeExecutionToolsOptions = AnthropicClaudeExecutionToolsNonProxiedOptions | AnthropicClaudeExecutionToolsProxiedOptions;
 /**
  * Options for directly used `AnthropicClaudeExecutionTools`
  *
  * This extends Anthropic's `ClientOptions` with are directly passed to the Anthropic client.
  * @public exported from `@promptbook/anthropic-claude`
  */
-export type
+export type AnthropicClaudeExecutionToolsNonProxiedOptions = CommonToolsOptions & ClientOptions & {
     isProxied?: false;
 };
 /**
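A short sketch of what the renamed non-proxied variant looks like in use. The `apiKey` field comes from Anthropic's `ClientOptions`, and the assumption that `createAnthropicClaudeExecutionTools` accepts the same options union is illustrative (its signature is not shown in this diff):

```ts
// Sketch: the union is now discriminated by `isProxied` (values are placeholders).
const options: AnthropicClaudeExecutionToolsNonProxiedOptions = {
    apiKey: process.env.ANTHROPIC_CLAUDE_API_KEY, // from Anthropic's `ClientOptions`
    isProxied: false, // optional; omitting it also selects the non-proxied variant
};

// Assumed: the recommended factory takes the same `AnthropicClaudeExecutionToolsOptions` union.
const tools = createAnthropicClaudeExecutionTools(options);
```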
@@ -11,14 +11,14 @@ import type { string_markdown_text } from '../../types/typeAliases';
 import type { string_model_name } from '../../types/typeAliases';
 import type { string_title } from '../../types/typeAliases';
 import { computeOpenAiUsage } from './computeOpenAiUsage';
-import type {
+import type { OpenAiCompatibleExecutionToolsNonProxiedOptions } from './OpenAiCompatibleExecutionToolsOptions';
 /**
- * Execution Tools for calling OpenAI API or other
+ * Execution Tools for calling OpenAI API or other OpenAI compatible provider
  *
  * @public exported from `@promptbook/openai`
  */
 export declare abstract class OpenAiCompatibleExecutionTools implements LlmExecutionTools {
-    protected readonly options:
+    protected readonly options: OpenAiCompatibleExecutionToolsNonProxiedOptions;
     /**
      * OpenAI API client.
      */
@@ -32,7 +32,7 @@ export declare abstract class OpenAiCompatibleExecutionTools implements LlmExecu
      *
      * @param options which are relevant are directly passed to the OpenAI compatible client
      */
-    constructor(options:
+    constructor(options: OpenAiCompatibleExecutionToolsNonProxiedOptions);
     abstract get title(): string_title & string_markdown_text;
     abstract get description(): string_markdown;
     getClient(): Promise<OpenAI>;
@@ -1,5 +1,6 @@
 import type { ClientOptions } from 'openai';
 import type { CommonToolsOptions } from '../../execution/CommonToolsOptions';
+import type { RemoteClientOptions } from '../../remote-server/types/RemoteClientOptions';
 /**
  * Options for `createOpenAiCompatibleExecutionTools` and `OpenAiCompatibleExecutionTools`
  *
@@ -8,4 +9,44 @@ import type { CommonToolsOptions } from '../../execution/CommonToolsOptions';
  *
  * @public exported from `@promptbook/openai`
  */
-export type OpenAiCompatibleExecutionToolsOptions =
+export type OpenAiCompatibleExecutionToolsOptions = OpenAiCompatibleExecutionToolsNonProxiedOptions | OpenAiCompatibleExecutionToolsProxiedOptions;
+/**
+ * Options for directly used `OpenAiCompatibleExecutionTools`
+ *
+ * This extends OpenAI's `ClientOptions` with are directly passed to the OpenAI client.
+ * @public exported from `@promptbook/openai`
+ */
+export type OpenAiCompatibleExecutionToolsNonProxiedOptions = CommonToolsOptions & ClientOptions & {
+    /**
+     * Base URL for the OpenAI-compatible API endpoint
+     *
+     * This allows connecting to any OpenAI-compatible LLM service by specifying their API endpoint.
+     *
+     * @example 'https://https://promptbook.s5.ptbk.io/' (Promptbook)
+     * @example 'https://api.openai.com/v1' (OpenAI)
+     * @example 'http://localhost:11434/v1' (Ollama)
+     * @example 'https://api.deepseek.com/v1' (DeepSeek)
+     */
+    baseURL?: string;
+    isProxied?: false;
+};
+/**
+ * Options for proxied `OpenAiCompatibleExecutionTools`
+ *
+ * This extends OpenAI's `ClientOptions` with are directly passed to the OpenAI client.
+ * @public exported from `@promptbook/openai`
+ */
+export type OpenAiCompatibleExecutionToolsProxiedOptions = CommonToolsOptions & ClientOptions & {
+    /**
+     * Base URL for the OpenAI-compatible API endpoint
+     *
+     * This allows connecting to any OpenAI-compatible LLM service by specifying their API endpoint.
+     *
+     * @example 'https://https://promptbook.s5.ptbk.io/' (Promptbook)
+     * @example 'https://api.openai.com/v1' (OpenAI)
+     * @example 'http://localhost:11434/v1' (Ollama)
+     * @example 'https://api.deepseek.com/v1' (DeepSeek)
+     */
+    baseURL?: string;
+    isProxied: true;
+} & Pick<RemoteClientOptions<undefined>, 'remoteServerUrl'>;
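A sketch contrasting the two new variants of the options union. Endpoint URLs and the `remoteServerUrl` value are placeholders; `apiKey` comes from OpenAI's `ClientOptions`:

```ts
// Sketch: non-proxied options talk to the OpenAI-compatible endpoint directly.
const direct: OpenAiCompatibleExecutionToolsNonProxiedOptions = {
    baseURL: 'http://localhost:11434/v1', // e.g. a local Ollama endpoint
    apiKey: process.env.OPENAI_API_KEY, // from OpenAI's `ClientOptions`
    isProxied: false,
};

// Sketch: proxied options route calls through a Promptbook remote server
// (`remoteServerUrl` is picked from `RemoteClientOptions`).
const proxied: OpenAiCompatibleExecutionToolsProxiedOptions = {
    baseURL: 'https://api.openai.com/v1',
    isProxied: true,
    remoteServerUrl: 'https://example.com/promptbook-proxy', // placeholder URL
};
```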
@@ -1,4 +1,14 @@
+import type { AvailableModel } from '../../execution/AvailableModel';
+import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
+import type { Usage } from '../../execution/Usage';
+import type { string_markdown } from '../../types/typeAliases';
+import type { string_markdown_text } from '../../types/typeAliases';
+import type { string_model_name } from '../../types/typeAliases';
+import type { string_title } from '../../types/typeAliases';
+import { RemoteLlmExecutionTools } from '../remote/RemoteLlmExecutionTools';
+import { computeOpenAiUsage } from './computeOpenAiUsage';
 import { OpenAiCompatibleExecutionTools } from './OpenAiCompatibleExecutionTools';
+import type { OpenAiCompatibleExecutionToolsNonProxiedOptions } from './OpenAiCompatibleExecutionToolsOptions';
 import type { OpenAiCompatibleExecutionToolsOptions } from './OpenAiCompatibleExecutionToolsOptions';
 /**
  * Execution Tools for calling OpenAI compatible API
@@ -7,10 +17,57 @@ import type { OpenAiCompatibleExecutionToolsOptions } from './OpenAiCompatibleEx
  *
  * @public exported from `@promptbook/openai`
  */
-export declare const createOpenAiCompatibleExecutionTools: ((options: OpenAiCompatibleExecutionToolsOptions
+export declare const createOpenAiCompatibleExecutionTools: ((options: OpenAiCompatibleExecutionToolsOptions & {
+    /**
+     * The model name to use for all operations
+     *
+     * This will be the only model available through this LLM provider and it will be a chat model.
+     * Other variants won't be available for now.
+     */
+    defaultModelName: string_model_name;
+}) => OpenAiCompatibleExecutionTools | RemoteLlmExecutionTools) & {
     packageName: string;
     className: string;
 };
+/**
+ * Execution Tools for calling ONE SPECIFIC PRECONFIGURED OpenAI compatible provider
+ *
+ * @private for `createOpenAiCompatibleExecutionTools`
+ */
+export declare class HardcodedOpenAiCompatibleExecutionTools extends OpenAiCompatibleExecutionTools implements LlmExecutionTools {
+    private readonly defaultModelName;
+    protected readonly options: OpenAiCompatibleExecutionToolsNonProxiedOptions;
+    /**
+     * Creates OpenAI compatible Execution Tools.
+     *
+     * @param options which are relevant are directly passed to the OpenAI compatible client
+     */
+    constructor(defaultModelName: string_model_name, options: OpenAiCompatibleExecutionToolsNonProxiedOptions);
+    get title(): string_title & string_markdown_text;
+    get description(): string_markdown;
+    /**
+     * List all available models (non dynamically)
+     *
+     * Note: Purpose of this is to provide more information about models than standard listing from API
+     */
+    protected get HARDCODED_MODELS(): ReadonlyArray<AvailableModel>;
+    /**
+     * Computes the usage
+     */
+    protected computeUsage(...args: Parameters<typeof computeOpenAiUsage>): Usage;
+    /**
+     * Default model for chat variant.
+     */
+    protected getDefaultChatModel(): AvailableModel;
+    /**
+     * Default model for completion variant.
+     */
+    protected getDefaultCompletionModel(): AvailableModel;
+    /**
+     * Default model for completion variant.
+     */
+    protected getDefaultEmbeddingModel(): AvailableModel;
+}
 /**
  * TODO: [🦺] Is there some way how to put `packageName` and `className` on top and function definition on bottom?
  * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
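A sketch of calling the factory with the newly required `defaultModelName`. The endpoint, key, and model name are placeholders; the return type follows the declaration above:

```ts
// Sketch: values are placeholders, chosen to match the `@example` endpoints above.
const tools = createOpenAiCompatibleExecutionTools({
    baseURL: 'https://api.deepseek.com/v1',
    apiKey: process.env.DEEPSEEK_API_KEY,
    defaultModelName: 'deepseek-chat', // the single chat model exposed by this provider
    isProxied: false,
});
// With `isProxied: true` (plus `remoteServerUrl`), the same factory is declared to return
// a `RemoteLlmExecutionTools` instance instead of a direct client wrapper.
```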
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
 export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
 /**
  * Represents the version string of the Promptbook engine.
- * It follows semantic versioning (e.g., `0.98.0-
+ * It follows semantic versioning (e.g., `0.98.0-8`).
  *
  * @generated
  */
package/package.json CHANGED
package/umd/index.umd.js CHANGED
@@ -25,7 +25,7 @@
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.98.0-
+const PROMPTBOOK_ENGINE_VERSION = '0.98.0-9';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -304,7 +304,7 @@
  *
  * @public exported from `@promptbook/core`
  */
-const DEFAULT_MAX_EXECUTION_ATTEMPTS =
+const DEFAULT_MAX_EXECUTION_ATTEMPTS = 7; // <- TODO: [🤹♂️]
 // <- TODO: [🕝] Make also `BOOKS_DIRNAME_ALTERNATIVES`
 // TODO: Just `.promptbook` in config, hardcode subfolders like `download-cache` or `execution-cache`
 /**
@@ -2087,7 +2087,7 @@
 throw new Error(spaceTrim__default["default"]((block) => `
 ${block(error.message)}

-The JSON text:
+The expected JSON text:
 ${block(value)}
 `));
 }
@@ -4792,6 +4792,68 @@
  * Note: [💝] and [🤠] are interconnected together
  */

+/**
+ * Validates a prompt result against expectations and format requirements.
+ * This function provides a common abstraction for result validation that can be used
+ * by both execution logic and caching logic to ensure consistency.
+ *
+ * @param options - The validation options including result string, expectations, and format
+ * @returns Validation result with processed string and validity status
+ * @private internal function of `createPipelineExecutor` and `cacheLlmTools`
+ */
+function validatePromptResult(options) {
+    const { resultString, expectations, format } = options;
+    let processedResultString = resultString;
+    let validationError;
+    try {
+        // TODO: [💝] Unite object for expecting amount and format
+        if (format) {
+            if (format === 'JSON') {
+                if (!isValidJsonString(processedResultString)) {
+                    // TODO: [🏢] Do more universally via `FormatParser`
+                    try {
+                        processedResultString = extractJsonBlock(processedResultString);
+                    }
+                    catch (error) {
+                        keepUnused(error);
+                        throw new ExpectError(spaceTrim.spaceTrim((block) => `
+                            Expected valid JSON string
+
+                            The expected JSON text:
+                            ${block(processedResultString)}
+                        `));
+                    }
+                }
+            }
+            else {
+                throw new UnexpectedError(`Unknown format "${format}"`);
+            }
+        }
+        // TODO: [💝] Unite object for expecting amount and format
+        if (expectations) {
+            checkExpectations(expectations, processedResultString);
+        }
+        return {
+            isValid: true,
+            processedResultString,
+        };
+    }
+    catch (error) {
+        if (error instanceof ExpectError) {
+            validationError = error;
+        }
+        else {
+            // Re-throw non-ExpectError errors (like UnexpectedError)
+            throw error;
+        }
+        return {
+            isValid: false,
+            processedResultString,
+            error: validationError,
+        };
+    }
+}
+
 /**
  * Executes a pipeline task with multiple attempts, including joker and retry logic. Handles different task types
  * (prompt, script, dialog, etc.), applies postprocessing, checks expectations, and updates the execution report.
@@ -4814,13 +4876,13 @@
 // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
 const _llms = arrayableToArray(tools.llm);
 const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
-attempts: for (let
-const isJokerAttempt =
-const jokerParameterName = jokerParameterNames[jokerParameterNames.length +
+attempts: for (let attemptIndex = -jokerParameterNames.length; attemptIndex < maxAttempts; attemptIndex++) {
+const isJokerAttempt = attemptIndex < 0;
+const jokerParameterName = jokerParameterNames[jokerParameterNames.length + attemptIndex];
 // TODO: [🧠][🍭] JOKERS, EXPECTATIONS, POSTPROCESSING and FOREACH
 if (isJokerAttempt && !jokerParameterName) {
 throw new UnexpectedError(spaceTrim.spaceTrim((block) => `
-Joker not found in attempt ${
+Joker not found in attempt ${attemptIndex}

 ${block(pipelineIdentification)}
 `));
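The rewritten loop header makes the joker bookkeeping explicit: joker attempts run at negative indices before the regular generation attempts. A standalone illustration of the indexing (not library code; parameter names and counts are placeholders):

```ts
// Standalone sketch of the attempt indexing used in the loop above.
const jokerParameterNames = ['draft', 'fallbackDraft'];
const maxAttempts = 3;

for (let attemptIndex = -jokerParameterNames.length; attemptIndex < maxAttempts; attemptIndex++) {
    const isJokerAttempt = attemptIndex < 0;
    // Maps attemptIndex -2 -> jokerParameterNames[0], -1 -> jokerParameterNames[1]
    const jokerParameterName = jokerParameterNames[jokerParameterNames.length + attemptIndex];
    console.info({
        attemptIndex,
        isJokerAttempt,
        jokerParameterName: isJokerAttempt ? jokerParameterName : undefined,
    });
}
// Attempts -2 and -1 reuse the existing joker parameters first;
// attempts 0..2 are the regular LLM generation attempts (maxAttempts = 3).
```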
@@ -5018,35 +5080,18 @@
 }
 }
 // TODO: [💝] Unite object for expecting amount and format
-
-
-
-
-
-
-
-
-
-throw new ExpectError(spaceTrim.spaceTrim((block) => `
-Expected valid JSON string
-
-${block(
-/*<- Note: No need for `pipelineIdentification`, it will be catched and added later */ '')}
-`));
-}
-}
-}
-else {
-throw new UnexpectedError(spaceTrim.spaceTrim((block) => `
-Unknown format "${task.format}"
-
-${block(pipelineIdentification)}
-`));
+// Use the common validation function for both format and expectations
+if (task.format || task.expectations) {
+const validationResult = validatePromptResult({
+resultString: $ongoingTaskResult.$resultString || '',
+expectations: task.expectations,
+format: task.format,
+});
+if (!validationResult.isValid) {
+throw validationResult.error;
 }
-
-
-if (task.expectations) {
-checkExpectations(task.expectations, $ongoingTaskResult.$resultString || '');
+// Update the result string in case format processing modified it (e.g., JSON extraction)
+$ongoingTaskResult.$resultString = validationResult.processedResultString;
 }
 break attempts;
 }
@@ -5060,6 +5105,7 @@
 $ongoingTaskResult.$failedResults = [];
 }
 $ongoingTaskResult.$failedResults.push({
+attemptIndex,
 result: $ongoingTaskResult.$resultString,
 error: error,
 });
@@ -5084,19 +5130,13 @@
|
|
|
5084
5130
|
});
|
|
5085
5131
|
}
|
|
5086
5132
|
}
|
|
5087
|
-
if ($ongoingTaskResult.$expectError !== null &&
|
|
5088
|
-
//
|
|
5089
|
-
$ongoingTaskResult.$failedResults = $ongoingTaskResult.$failedResults || [];
|
|
5090
|
-
$ongoingTaskResult.$failedResults.push({
|
|
5091
|
-
result: $ongoingTaskResult.$resultString,
|
|
5092
|
-
error: $ongoingTaskResult.$expectError,
|
|
5093
|
-
});
|
|
5094
|
-
// Create a summary of all failures
|
|
5133
|
+
if ($ongoingTaskResult.$expectError !== null && attemptIndex === maxAttempts - 1) {
|
|
5134
|
+
// Note: Create a summary of all failures
|
|
5095
5135
|
const failuresSummary = $ongoingTaskResult.$failedResults
|
|
5096
|
-
.map((failure
|
|
5136
|
+
.map((failure) => spaceTrim.spaceTrim((block) => {
|
|
5097
5137
|
var _a, _b;
|
|
5098
5138
|
return `
|
|
5099
|
-
Attempt ${
|
|
5139
|
+
Attempt ${failure.attemptIndex + 1}:
|
|
5100
5140
|
Error ${((_a = failure.error) === null || _a === void 0 ? void 0 : _a.name) || ''}:
|
|
5101
5141
|
${block((_b = failure.error) === null || _b === void 0 ? void 0 : _b.message.split('\n').map((line) => `> ${line}`).join('\n'))}
|
|
5102
5142
|
|