@promptbook/legacy-documents 0.92.0-30 → 0.92.0-32
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/esm/index.es.js +6 -12
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/executables/locateApp.d.ts +2 -2
- package/esm/typings/src/execution/createPipelineExecutor/10-executePipeline.d.ts +1 -1
- package/esm/typings/src/execution/createPipelineExecutor/20-executeTask.d.ts +1 -1
- package/esm/typings/src/execution/createPipelineExecutor/30-executeFormatSubvalues.d.ts +1 -1
- package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +6 -0
- package/esm/typings/src/version.d.ts +1 -1
- package/package.json +2 -2
- package/umd/index.umd.js +6 -12
- package/umd/index.umd.js.map +1 -1

package/esm/typings/src/executables/locateApp.d.ts
CHANGED
@@ -3,7 +3,7 @@ import type { string_executable_path } from '../types/typeAliases';
 /**
  * Options for locating any application
  */
-export interface LocateAppOptions {
+export type LocateAppOptions = {
     /**
      * Name of the application
      */
@@ -20,7 +20,7 @@ export interface LocateAppOptions {
      * Name of the application on macOS
      */
     macOsName?: string;
-}
+};
 /**
  * Locates an application on the system
  *
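
For context, here is a minimal usage sketch of the reshaped LocateAppOptions type. Only macOsName appears in the hunks above; the appName property name, the locateApp export, and its import path are assumptions for illustration, not confirmed by this diff.

// Hypothetical usage — `appName`, the `locateApp` export, and the import path are assumed.
import { locateApp } from '@promptbook/legacy-documents';

const chromePath = await locateApp({
    appName: 'Chrome', // assumed identifier for the "Name of the application" field
    macOsName: 'Google Chrome', // documented in the typings above
});
console.info(chromePath);
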

package/esm/typings/src/execution/createPipelineExecutor/10-executePipeline.d.ts
CHANGED
@@ -28,7 +28,7 @@ type ExecutePipelineOptions = Required<CreatePipelineExecutorOptions> & {
     /**
      * Callback to update the prepared pipeline reference after preparation.
      */
-
+    setPreparedPipeline(preparedPipeline: ReadonlyDeep<PipelineJson>): void;
     /**
      * String identifier for the pipeline, used in error messages and reporting.
      */
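
A sketch of supplying the newly documented setPreparedPipeline callback. ExecutePipelineOptions is an internal helper type; everything here other than the callback signature shown above (including the import locations) is an assumption.

// Illustrative only — import locations are assumptions.
import type { ReadonlyDeep } from 'type-fest';
import type { PipelineJson } from '@promptbook/types';

let preparedPipeline: ReadonlyDeep<PipelineJson> | undefined;

const executePipelineOptions = {
    // Matches the signature documented in the hunk above
    setPreparedPipeline(newPreparedPipeline: ReadonlyDeep<PipelineJson>): void {
        preparedPipeline = newPreparedPipeline;
    },
    // ...remaining Required<CreatePipelineExecutorOptions> fields omitted
};
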

package/esm/typings/src/execution/createPipelineExecutor/20-executeTask.d.ts
CHANGED
@@ -26,7 +26,7 @@ type executeSingleTaskOptions = Required<CreatePipelineExecutorOptions> & {
     /**
      * Callback invoked with partial results as the execution progresses.
      */
-
+    onProgress(newOngoingResult: PartialDeep<PipelineExecutorResult>): Promisable<void>;
     /**
      * Mutable execution report object for tracking execution details.
      */

package/esm/typings/src/execution/createPipelineExecutor/30-executeFormatSubvalues.d.ts
CHANGED
@@ -12,7 +12,7 @@ type ExecuteFormatCellsOptions = ExecuteAttemptsOptions & {
     /**
      * Callback invoked with partial results as the execution progresses.
      */
-
+    onProgress(newOngoingResult: PartialDeep<PipelineExecutorResult>): Promisable<void>;
 };
 /**
  * Executes a pipeline task that requires mapping or iterating over subvalues of a parameter (such as rows in a CSV).
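
Both executeSingleTaskOptions and ExecuteFormatCellsOptions now document the same onProgress callback. Below is a minimal sketch of a compatible callback, assuming PartialDeep and Promisable come from type-fest, that PipelineExecutorResult is exported from the package typings, and that the result exposes an outputParameters record — none of which is shown in this diff.

import type { PartialDeep, Promisable } from 'type-fest';
import type { PipelineExecutorResult } from '@promptbook/types'; // assumed export location

// Logs which output parameters have (partially) resolved as execution progresses
function onProgress(newOngoingResult: PartialDeep<PipelineExecutorResult>): Promisable<void> {
    console.info('Resolved so far:', Object.keys(newOngoingResult.outputParameters ?? {}));
}
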

package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts
CHANGED
@@ -2,6 +2,7 @@ import Anthropic from '@anthropic-ai/sdk';
 import type { AvailableModel } from '../../execution/AvailableModel';
 import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
 import type { ChatPromptResult } from '../../execution/PromptResult';
+import type { CompletionPromptResult } from '../../execution/PromptResult';
 import type { Prompt } from '../../types/Prompt';
 import type { string_markdown } from '../../types/typeAliases';
 import type { string_markdown_text } from '../../types/typeAliases';
@@ -19,6 +20,7 @@ export declare class AnthropicClaudeExecutionTools implements LlmExecutionTools
      * Anthropic Claude API client.
      */
     private client;
+    private limiter;
     /**
      * Creates Anthropic Claude Execution Tools.
      *
@@ -40,6 +42,10 @@ export declare class AnthropicClaudeExecutionTools implements LlmExecutionTools
      * Calls Anthropic Claude API to use a chat model.
      */
     callChatModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements'>): Promise<ChatPromptResult>;
+    /**
+     * Calls Anthropic Claude API to use a completion model.
+     */
+    callCompletionModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements'>): Promise<CompletionPromptResult>;
     /**
      * Get the model that should be used as default
      */
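
The class now declares a private limiter (which appears to be internal request throttling) and a callCompletionModel method mirroring callChatModel. A hedged usage sketch follows; the constructor option name, the modelRequirements shape, the result's content field, and the import path follow conventions used elsewhere in Promptbook and are assumptions not shown in this diff.

import { AnthropicClaudeExecutionTools } from '@promptbook/anthropic-claude'; // assumed entry point

const anthropicTools = new AnthropicClaudeExecutionTools({
    apiKey: process.env.ANTHROPIC_CLAUDE_API_KEY!, // assumed option name
});

const completionResult = await anthropicTools.callCompletionModel({
    content: 'Once upon a time',
    parameters: {},
    modelRequirements: { modelVariant: 'COMPLETION' }, // assumed requirement shape
});
console.info(completionResult.content);
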

package/esm/typings/src/version.d.ts
CHANGED
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
 export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
 /**
  * Represents the version string of the Promptbook engine.
- * It follows semantic versioning (e.g., `0.92.0-
+ * It follows semantic versioning (e.g., `0.92.0-31`).
  *
  * @generated
  */
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
     "name": "@promptbook/legacy-documents",
-    "version": "0.92.0-30",
+    "version": "0.92.0-32",
     "description": "It's time for a paradigm shift. The future of software in plain English, French or Latin",
     "private": false,
     "sideEffects": false,
@@ -51,7 +51,7 @@
     "module": "./esm/index.es.js",
     "typings": "./esm/typings/src/_packages/legacy-documents.index.d.ts",
     "peerDependencies": {
-        "@promptbook/core": "0.92.0-
+        "@promptbook/core": "0.92.0-32"
     },
     "dependencies": {
         "colors": "1.4.0",

package/umd/index.umd.js
CHANGED
@@ -26,7 +26,7 @@
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.92.0-
+const PROMPTBOOK_ENGINE_VERSION = '0.92.0-32';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -3002,23 +3002,17 @@
      * Check the configuration of all execution tools
      */
     async checkConfiguration() {
-        //
-
-        await llmExecutionTools.checkConfiguration();
-        }
+        // Note: Run checks in parallel
+        await Promise.all(this.llmExecutionTools.map((tools) => tools.checkConfiguration()));
     }
     /**
      * List all available models that can be used
      * This lists is a combination of all available models from all execution tools
      */
     async listModels() {
-
-
-
-        const models = await llmExecutionTools.listModels();
-        availableModels.push(...models);
-        }
-        return availableModels;
+        // Obtain all models in parallel and flatten
+        const modelArrays = await Promise.all(this.llmExecutionTools.map((tools) => tools.listModels()));
+        return modelArrays.flat();
     }
     /**
      * Calls the best available chat model