@promptbook/node 0.92.0-30 → 0.92.0-32

@@ -3,7 +3,7 @@ import type { string_executable_path } from '../types/typeAliases';
  /**
   * Options for locating any application
   */
- export interface LocateAppOptions {
+ export type LocateAppOptions = {
  /**
   * Name of the application
   */
@@ -20,7 +20,7 @@ export interface LocateAppOptions {
   * Name of the application on macOS
   */
  macOsName?: string;
- }
+ };
  /**
   * Locates an application on the system
   *
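
The two hunks above convert `LocateAppOptions` from an `interface` to a `type` alias. The shape is identical; the one behavioral difference is that a type alias cannot be re-opened by declaration merging. A minimal standalone sketch of that difference (illustrative names, not Promptbook source):

```ts
// Interfaces with the same name merge into a single declaration:
interface MergeableOptions { appName: string }
interface MergeableOptions { macOsName?: string } // OK: merged into { appName; macOsName? }

// A type alias is sealed; redeclaring it is a compile error:
type SealedOptions = { appName: string };
// type SealedOptions = { macOsName?: string }; // Error: Duplicate identifier 'SealedOptions'
```
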
@@ -28,7 +28,7 @@ type ExecutePipelineOptions = Required<CreatePipelineExecutorOptions> & {
  /**
   * Callback to update the prepared pipeline reference after preparation.
   */
- readonly setPreparedPipeline: (preparedPipeline: ReadonlyDeep<PipelineJson>) => void;
+ setPreparedPipeline(preparedPipeline: ReadonlyDeep<PipelineJson>): void;
  /**
   * String identifier for the pipeline, used in error messages and reporting.
   */
@@ -26,7 +26,7 @@ type executeSingleTaskOptions = Required<CreatePipelineExecutorOptions> & {
  /**
   * Callback invoked with partial results as the execution progresses.
   */
- readonly onProgress: (newOngoingResult: PartialDeep<PipelineExecutorResult>) => Promisable<void>;
+ onProgress(newOngoingResult: PartialDeep<PipelineExecutorResult>): Promisable<void>;
  /**
   * Mutable execution report object for tracking execution details.
   */
@@ -12,7 +12,7 @@ type ExecuteFormatCellsOptions = ExecuteAttemptsOptions & {
  /**
   * Callback invoked with partial results as the execution progresses.
   */
- readonly onProgress: (newOngoingResult: PartialDeep<PipelineExecutorResult>) => Promisable<void>;
+ onProgress(newOngoingResult: PartialDeep<PipelineExecutorResult>): Promisable<void>;
  };
  /**
   * Executes a pipeline task that requires mapping or iterating over subvalues of a parameter (such as rows in a CSV).
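
The three hunks above replace `readonly` property-style callback signatures with method shorthand. This is not purely cosmetic: method shorthand cannot carry a `readonly` modifier, and under `strictFunctionTypes` its parameters are checked bivariantly rather than contravariantly, which is more permissive for implementers. A minimal sketch of the difference (illustrative names, not Promptbook source):

```ts
type AsProperty = {
    // Property style: reassignment is blocked by `readonly`, and the parameter
    // type is checked contravariantly under `strictFunctionTypes`.
    readonly onProgress: (result: unknown) => void;
};

type AsMethod = {
    // Method shorthand: no `readonly` modifier is possible here, and parameter
    // types are checked bivariantly (more permissive).
    onProgress(result: unknown): void;
};
```
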
@@ -2,6 +2,7 @@ import Anthropic from '@anthropic-ai/sdk';
  import type { AvailableModel } from '../../execution/AvailableModel';
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
  import type { ChatPromptResult } from '../../execution/PromptResult';
+ import type { CompletionPromptResult } from '../../execution/PromptResult';
  import type { Prompt } from '../../types/Prompt';
  import type { string_markdown } from '../../types/typeAliases';
  import type { string_markdown_text } from '../../types/typeAliases';
@@ -19,6 +20,7 @@ export declare class AnthropicClaudeExecutionTools implements LlmExecutionTools
   * Anthropic Claude API client.
   */
  private client;
+ private limiter;
  /**
   * Creates Anthropic Claude Execution Tools.
   *
@@ -40,6 +42,10 @@ export declare class AnthropicClaudeExecutionTools implements LlmExecutionTools
   * Calls Anthropic Claude API to use a chat model.
   */
  callChatModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements'>): Promise<ChatPromptResult>;
+ /**
+  * Calls Anthropic Claude API to use a completion model.
+  */
+ callCompletionModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements'>): Promise<CompletionPromptResult>;
  /**
   * Get the model that should be used as default
   */
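
With the new private `limiter` field and `callCompletionModel` method, `AnthropicClaudeExecutionTools` now exposes a completion-model entry point alongside the existing chat one. A hedged usage sketch; only the class name and the method signature are confirmed by the diff, while the constructor options and the exact field types inside `Prompt` are assumptions:

```ts
// Assumed constructor options; the diff only confirms the class exists.
const tools = new AnthropicClaudeExecutionTools({ apiKey: process.env.ANTHROPIC_API_KEY! });

const result = await tools.callCompletionModel({
    content: 'Write a haiku about autumn.', // assumed: plain prompt text
    parameters: {},                         // assumed: already-resolved template parameters
    modelRequirements: { modelVariant: 'COMPLETION' }, // assumed discriminator shape
});
```
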
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
  export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
  /**
   * Represents the version string of the Promptbook engine.
- * It follows semantic versioning (e.g., `0.92.0-29`).
+ * It follows semantic versioning (e.g., `0.92.0-31`).
   *
   * @generated
   */
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
      "name": "@promptbook/node",
-     "version": "0.92.0-30",
+     "version": "0.92.0-32",
      "description": "It's time for a paradigm shift. The future of software in plain English, French or Latin",
      "private": false,
      "sideEffects": false,
@@ -51,7 +51,7 @@
      "module": "./esm/index.es.js",
      "typings": "./esm/typings/src/_packages/node.index.d.ts",
      "peerDependencies": {
-         "@promptbook/core": "0.92.0-30"
+         "@promptbook/core": "0.92.0-32"
      },
      "dependencies": {
          "colors": "1.4.0",
package/umd/index.umd.js CHANGED
@@ -46,7 +46,7 @@
   * @generated
   * @see https://github.com/webgptorg/promptbook
   */
- const PROMPTBOOK_ENGINE_VERSION = '0.92.0-30';
+ const PROMPTBOOK_ENGINE_VERSION = '0.92.0-32';
  /**
   * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
   * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -2932,23 +2932,17 @@
   * Check the configuration of all execution tools
   */
  async checkConfiguration() {
-     // TODO: Maybe do it in parallel
-     for (const llmExecutionTools of this.llmExecutionTools) {
-         await llmExecutionTools.checkConfiguration();
-     }
+     // Note: Run checks in parallel
+     await Promise.all(this.llmExecutionTools.map((tools) => tools.checkConfiguration()));
  }
  /**
   * List all available models that can be used
   * This lists is a combination of all available models from all execution tools
   */
  async listModels() {
-     const availableModels = [];
-     for (const llmExecutionTools of this.llmExecutionTools) {
-         // TODO: [🪂] Obtain models in parallel
-         const models = await llmExecutionTools.listModels();
-         availableModels.push(...models);
-     }
-     return availableModels;
+     // Obtain all models in parallel and flatten
+     const modelArrays = await Promise.all(this.llmExecutionTools.map((tools) => tools.listModels()));
+     return modelArrays.flat();
  }
  /**
   * Calls the best available chat model
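
The UMD hunk above resolves two long-standing TODOs by running `checkConfiguration` and `listModels` across all wrapped execution tools in parallel. Two properties of `Promise.all` that the new code relies on are worth noting: the result array preserves input order (so `flat()` keeps models grouped per provider), and it rejects fail-fast as soon as any entry rejects. A standalone sketch of the same pattern (illustrative names, not Promptbook source):

```ts
interface ModelProvider {
    listModels(): Promise<string[]>;
}

async function listAllModels(providers: ModelProvider[]): Promise<string[]> {
    // All requests start immediately; results come back in `providers` order,
    // not completion order, so flattening keeps models grouped per provider.
    const perProvider = await Promise.all(providers.map((provider) => provider.listModels()));
    // Note: Promise.all is fail-fast: one rejecting provider rejects the whole call.
    return perProvider.flat();
}
```

If partial availability were desired, `Promise.allSettled` would be the fail-tolerant alternative; the diff keeps `Promise.all`'s fail-fast behavior, matching the sequential loop it replaces, which also stopped at the first thrown error.
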