@promptbook/node 0.92.0-25 → 0.92.0-27

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -3,7 +3,7 @@ import type { BoilerplateCommand } from './BoilerplateCommand';
3
3
  /**
4
4
  * Parses the boilerplate command
5
5
  *
6
- * Note: @@@ This command is used as boilerplate for new commands - it should NOT be used in any `.book` file
6
+ * Note: @@ This command is used as boilerplate for new commands - it should NOT be used in any `.book` file
7
7
  *
8
8
  * @see `documentationUrl` for more details
9
9
  * @private within the commands folder
@@ -12,6 +12,12 @@ export declare const ORDER_OF_PIPELINE_JSON: ExportJsonOptions<PipelineJson>['or
12
12
  * @private within the repository
13
13
  */
14
14
  export declare const REPLACING_NONCE = "ptbkauk42kV2dzao34faw7FudQUHYPtW";
15
+ /**
16
+ * Nonce which is used as string which is not occurring in normal text
17
+ *
18
+ * @private within the repository
19
+ */
20
+ export declare const SALT_NONCE = "ptbkghhewbvruets21t54et5";
15
21
  /**
16
22
  * Placeholder value indicating a parameter is missing its value.
17
23
  *
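The new `SALT_NONCE` joins `REPLACING_NONCE` as a string that should never occur in normal text. A minimal sketch of why such nonces are useful as collision-free placeholders follows; the `maskValue`/`unmaskValue` helpers are purely illustrative and are not part of `@promptbook/node`.

```ts
// Illustrative only: these helpers are NOT part of @promptbook/node.
// A nonce that never occurs in normal text can stand in for a value during
// processing and be swapped back later without colliding with user content.
const REPLACING_NONCE = 'ptbkauk42kV2dzao34faw7FudQUHYPtW';

function maskValue(text: string, secret: string): string {
    // Temporarily replace a sensitive value with the nonce
    return text.split(secret).join(REPLACING_NONCE);
}

function unmaskValue(text: string, secret: string): string {
    // Restore the original value after processing
    return text.split(REPLACING_NONCE).join(secret);
}

const masked = maskValue('key=sk-123', 'sk-123');
console.log(masked); // 'key=ptbkauk42kV2dzao34faw7FudQUHYPtW' is embedded instead of the secret
console.log(unmaskValue(masked, 'sk-123')); // 'key=sk-123'
```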
@@ -2,8 +2,9 @@ import type { UncertainNumber } from '../UncertainNumber';
2
2
  /**
3
3
  * Make UncertainNumber
4
4
  *
5
- * @param value
5
+ * @param value value of the uncertain number, if `NaN` or `undefined`, it will be set to 0 and `isUncertain=true`
6
+ * @param isUncertain if `true`, the value is uncertain, otherwise depends on the value
6
7
  *
7
8
  * @private utility for initializating UncertainNumber
8
9
  */
9
- export declare function uncertainNumber(value?: number | typeof NaN | undefined | null): UncertainNumber;
10
+ export declare function uncertainNumber(value?: number | typeof NaN | undefined | null, isUncertain?: boolean): UncertainNumber;
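A minimal sketch of the documented contract for the new `isUncertain` parameter, assuming `UncertainNumber` is roughly `{ value: number; isUncertain?: boolean }` (its exact shape is not shown in this diff):

```ts
// Sketch of the documented behaviour, NOT the library source.
// Assumption: UncertainNumber is roughly { value: number; isUncertain?: boolean }.
type UncertainNumber = { value: number; isUncertain?: boolean };

function uncertainNumber(
    value?: number | typeof NaN | undefined | null,
    isUncertain?: boolean,
): UncertainNumber {
    // `NaN`, `undefined` and `null` collapse to 0 and are flagged as uncertain
    if (value === undefined || value === null || Number.isNaN(value)) {
        return { value: 0, isUncertain: true };
    }
    // Otherwise the caller can still force uncertainty via the new flag
    return { value, isUncertain: isUncertain === true };
}

console.log(uncertainNumber(42));       // { value: 42, isUncertain: false }
console.log(uncertainNumber(NaN));      // { value: 0, isUncertain: true }
console.log(uncertainNumber(42, true)); // { value: 42, isUncertain: true }
```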
@@ -4,34 +4,43 @@ import type { string_name } from '../../types/typeAliases';
4
4
  import type { string_promptbook_documentation_url } from '../../types/typeAliases';
5
5
  import type { string_SCREAMING_CASE } from '../../utils/normalization/normalizeTo_SCREAMING_CASE';
6
6
  /**
7
- * @@@
7
+ * AbstractFormfactorDefinition provides the base structure for all form factor implementations
8
+ * in the Promptbook system. It defines common properties and interfaces that must be
9
+ * implemented by specific form factors.
8
10
  *
9
11
  * Note: [🚉] This is fully serializable as JSON
10
12
  * @see https://github.com/webgptorg/promptbook/discussions/172
11
13
  */
12
14
  export type AbstractFormfactorDefinition = {
13
15
  /**
14
- * @@@
16
+ * Unique identifier for the form factor in SCREAMING_CASE format
17
+ * Used for programmatic identification and reference
15
18
  */
16
19
  readonly name: string_name & string_SCREAMING_CASE;
17
20
  /**
18
- * @@@
21
+ * Alternative names that can be used to reference this form factor
22
+ * Also in SCREAMING_CASE format for consistency
19
23
  */
20
24
  readonly aliasNames?: ReadonlyArray<string_name & string_SCREAMING_CASE>;
21
25
  /**
22
- * @@@
26
+ * Previous names that were used for this form factor but are now deprecated
27
+ * These are maintained for backward compatibility purposes
23
28
  */
24
29
  readonly deprecatedNames?: ReadonlyArray<string_name & string_SCREAMING_CASE>;
25
30
  /**
26
- * @@@
31
+ * Human-readable description of the form factor in markdown format
32
+ * Explains the purpose, functionality, and use cases of this form factor
27
33
  */
28
34
  readonly description: string_markdown_text;
29
35
  /**
30
- * @@@
36
+ * URL pointing to detailed documentation for this form factor
37
+ * Provides additional resources and guidance for implementation and usage
31
38
  */
32
39
  readonly documentationUrl: string_promptbook_documentation_url;
33
40
  /**
34
- * @@@
41
+ * Defines the interface structure for this form factor's pipeline
42
+ * Specifies how inputs and outputs are handled, processed, and formatted
43
+ * Required for properly configuring and executing the form factor's functionality
35
44
  */
36
45
  readonly pipelineInterface: PipelineInterface;
37
46
  };
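For orientation, a hedged sketch of an object satisfying `AbstractFormfactorDefinition`, modelled on the TRANSLATOR definition that appears further down in this diff; the `aliasNames` value and the exact parameter shape inside `pipelineInterface` are assumptions for illustration:

```ts
// Hedged sketch of a form factor definition; not taken verbatim from the package.
const ExampleFormfactorDefinition = {
    name: 'TRANSLATOR',
    aliasNames: ['TRANSLATION'], // optional, SCREAMING_CASE (assumed alias for illustration)
    description: 'A text transformation system that converts input content into different forms.',
    documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/175',
    pipelineInterface: {
        // Assumed parameter shape: named inputs and outputs with descriptions
        inputParameters: [{ name: 'inputText', description: 'Text to be transformed' }],
        outputParameters: [{ name: 'outputText', description: 'Transformed text' }],
    },
} as const;
```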
@@ -1,6 +1,8 @@
1
1
  import { FORMFACTOR_DEFINITIONS } from '../index';
2
2
  /**
3
- * @@@
3
+ * FormfactorDefinition is a type that defines the structure and capabilities of a specific
4
+ * application form factor in the Promptbook system. It encapsulates all properties needed
5
+ * to represent how a particular interface handles inputs, outputs, and behaviors.
4
6
  *
5
7
  * Note: [🚉] This is fully serializable as JSON
6
8
  * @see https://github.com/webgptorg/promptbook/discussions/172
@@ -52,7 +52,7 @@ export declare const FORMFACTOR_DEFINITIONS: readonly [{
52
52
  };
53
53
  }, {
54
54
  readonly name: "TRANSLATOR";
55
- readonly description: "@@@";
55
+ readonly description: "A text transformation system that converts input content into different forms,\n including language translations, paraphrasing, style conversions, and tone adjustments.\n This form factor takes one input and produces one transformed output.";
56
56
  readonly documentationUrl: "https://github.com/webgptorg/promptbook/discussions/175";
57
57
  readonly pipelineInterface: {
58
58
  readonly inputParameters: readonly [{
@@ -89,7 +89,7 @@ export declare const FORMFACTOR_DEFINITIONS: readonly [{
89
89
  };
90
90
  }, {
91
91
  readonly name: "EXPERIMENTAL_MATCHER";
92
- readonly description: "@@@";
92
+ readonly description: "An evaluation system that determines whether content meets specific criteria or patterns.\n Used for content validation, quality assessment, and intelligent filtering tasks. Currently in experimental phase.";
93
93
  readonly documentationUrl: "https://github.com/webgptorg/promptbook/discussions/177";
94
94
  readonly pipelineInterface: {
95
95
  readonly inputParameters: readonly [{
@@ -1,11 +1,13 @@
1
1
  /**
2
- * Matcher is form of app that @@@
2
+ * Matcher is a form of app that evaluates (spreadsheet) content against defined criteria or patterns,
3
+ * determining if it matches or meets specific requirements. Used for classification,
4
+ * validation, filtering, and quality assessment of inputs.
3
5
  *
4
6
  * @public exported from `@promptbook/core`
5
7
  */
6
8
  export declare const MatcherFormfactorDefinition: {
7
9
  readonly name: "EXPERIMENTAL_MATCHER";
8
- readonly description: "@@@";
10
+ readonly description: "An evaluation system that determines whether content meets specific criteria or patterns.\n Used for content validation, quality assessment, and intelligent filtering tasks. Currently in experimental phase.";
9
11
  readonly documentationUrl: "https://github.com/webgptorg/promptbook/discussions/177";
10
12
  readonly pipelineInterface: {
11
13
  readonly inputParameters: readonly [{
@@ -1,11 +1,12 @@
1
1
  /**
2
- * Translator is form of app that @@@
2
+ * Translator is a form of app that transforms input text from one form to another,
3
+ * such as language translation, style conversion, tone modification, or other text transformations.
3
4
  *
4
5
  * @public exported from `@promptbook/core`
5
6
  */
6
7
  export declare const TranslatorFormfactorDefinition: {
7
8
  readonly name: "TRANSLATOR";
8
- readonly description: "@@@";
9
+ readonly description: "A text transformation system that converts input content into different forms,\n including language translations, paraphrasing, style conversions, and tone adjustments.\n This form factor takes one input and produces one transformed output.";
9
10
  readonly documentationUrl: "https://github.com/webgptorg/promptbook/discussions/175";
10
11
  readonly pipelineInterface: {
11
12
  readonly inputParameters: readonly [{
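Since both definitions above are annotated `@public exported from '@promptbook/core'`, a short usage sketch under that assumption:

```ts
// Usage sketch, assuming the `@public exported from '@promptbook/core'`
// annotations above reflect the actual package exports.
import { MatcherFormfactorDefinition, TranslatorFormfactorDefinition } from '@promptbook/core';

// Each definition is a plain, JSON-serializable object describing one form factor
console.log(TranslatorFormfactorDefinition.name); // 'TRANSLATOR'
console.log(TranslatorFormfactorDefinition.documentationUrl);
// -> 'https://github.com/webgptorg/promptbook/discussions/175'
console.log(MatcherFormfactorDefinition.name); // 'EXPERIMENTAL_MATCHER'
```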
@@ -3,7 +3,8 @@ import type { LlmExecutionToolsWithTotalUsage } from '../utils/count-total-usage
3
3
  import type { CreateLlmToolsFromConfigurationOptions } from './createLlmToolsFromConfiguration';
4
4
  type GetLlmToolsForTestingAndScriptsAndPlaygroundOptions = CreateLlmToolsFromConfigurationOptions & {
5
5
  /**
6
- * @@@
6
+ * Flag indicating whether the cache should be reloaded or reused
7
+ * When set to true, the existing cache will not be used, but things will still be saved to the cache
7
8
  *
8
9
  * @default false
9
10
  */
@@ -22,5 +23,5 @@ export {};
22
23
  * Note: [⚪] This should never be in any released package
23
24
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
24
25
  * TODO: This should be maybe not under `_common` but under `utils-internal` / `utils/internal`
25
- * TODO: [®] DRY Register logic
26
- */
26
+ * TODO: [®] DRY Register logi
27
+ */
@@ -1,11 +1,14 @@
1
1
  import { MultipleLlmExecutionTools } from '../../multiple/MultipleLlmExecutionTools';
2
2
  import type { CreateLlmToolsFromConfigurationOptions } from './createLlmToolsFromConfiguration';
3
3
  /**
4
- * @@@
4
+ * Automatically configures LLM tools from environment variables in Node.js
5
+ *
6
+ * This utility function detects available LLM providers based on environment variables
7
+ * and creates properly configured LLM execution tools for each detected provider.
5
8
  *
6
9
  * Note: This function is not cached, every call creates new instance of `MultipleLlmExecutionTools`
7
10
  *
8
- * @@@ .env
11
+ * Supports environment variables from .env files when dotenv is configured
9
12
  * Note: `$` is used to indicate that this function is not a pure function - it uses filesystem to access `.env` file
10
13
  *
11
14
  * It looks for environment variables:
@@ -13,12 +16,22 @@ import type { CreateLlmToolsFromConfigurationOptions } from './createLlmToolsFro
13
16
  * - `process.env.ANTHROPIC_CLAUDE_API_KEY`
14
17
  * - ...
15
18
  *
16
- * @returns @@@
19
+ * @param options Configuration options for the LLM tools
20
+ * @returns A unified interface containing all detected and configured LLM tools
17
21
  * @public exported from `@promptbook/node`
18
22
  */
19
23
  export declare function $provideLlmToolsFromEnv(options?: CreateLlmToolsFromConfigurationOptions): Promise<MultipleLlmExecutionTools>;
20
24
  /**
21
- * TODO: @@@ write `$provideLlmToolsFromEnv` vs `$provideLlmToolsConfigurationFromEnv` vs `createLlmToolsFromConfiguration`
25
+ * TODO: The architecture for LLM tools configuration consists of three key functions:
26
+ * 1. `$provideLlmToolsFromEnv` - High-level function that detects available providers from env vars and returns ready-to-use LLM tools
27
+ * 2. `$provideLlmToolsConfigurationFromEnv` - Middle layer that extracts configuration objects from environment variables
28
+ * 3. `createLlmToolsFromConfiguration` - Low-level function that instantiates LLM tools from explicit configuration
29
+ *
30
+ * This layered approach allows flexibility in how tools are configured:
31
+ * - Use $provideLlmToolsFromEnv for automatic detection and setup in Node.js environments
32
+ * - Use $provideLlmToolsConfigurationFromEnv to extract config objects for modification before instantiation
33
+ * - Use createLlmToolsFromConfiguration for explicit control over tool configurations
34
+ *
22
35
  * TODO: [🧠][🍛] Which name is better `$provideLlmToolsFromEnv` or `$provideLlmToolsFromEnvironment`?
23
36
  * TODO: [🧠] Is there some meaningfull way how to test this util
24
37
  * Note: [🟢] Code in this file should never be never released in packages that could be imported into browser environment
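A usage sketch based on the signature documented above, assuming an API key such as `OPENAI_API_KEY` or `ANTHROPIC_CLAUDE_API_KEY` is available via `process.env` or a `.env` file loaded by dotenv:

```ts
import { $provideLlmToolsFromEnv } from '@promptbook/node';

async function main() {
    // Detects configured providers from environment variables and wraps them
    // in a single MultipleLlmExecutionTools instance
    const llmTools = await $provideLlmToolsFromEnv();
    console.info(llmTools); // inspect which providers were detected
}

main().catch(console.error);
```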
@@ -2,17 +2,24 @@ import type { string_title } from '../../../types/typeAliases';
2
2
  import type { Registered } from '../../../utils/$Register';
3
3
  import type { LlmToolsOptions } from './LlmToolsOptions';
4
4
  /**
5
- * @@@
5
+ * Configuration definition for LLM execution tools, containing provider-specific settings
6
+ * that can be passed during runtime to instantiate and configure LLM tools properly.
6
7
  *
7
- * @@@ `LlmToolsMetadata` vs `LlmToolsConfiguration` vs `LlmToolsOptions` (vs `Registered`)
8
+ * The Promptbook LLM tools architecture involves several related types:
9
+ * - `LlmToolsMetadata`: Contains static metadata about the tool, such as name, version, and capabilities
10
+ * - `LlmToolsConfiguration`: Runtime configuration from environment variables or settings
11
+ * - `LlmToolsOptions`: Provider-specific options for instantiating tools
12
+ * - `Registered`: The record of a registered tool in the global registry
8
13
  */
9
14
  export type LlmToolsConfiguration = ReadonlyArray<Registered & {
10
15
  /**
11
- * @@@
16
+ * Human-readable name for this specific provider configuration
17
+ * Used in UI components and logs for identifying this particular configuration
12
18
  */
13
19
  readonly title: string_title;
14
20
  /**
15
- * @@@
21
+ * Provider-specific configuration options used for instantiating and configuring LLM tools
22
+ * Contains values like API keys, model preferences, endpoint URLs, and other settings
16
23
  */
17
24
  readonly options: LlmToolsOptions;
18
25
  }>;
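A hedged sketch of a single `LlmToolsConfiguration` entry: `title` and `options` are documented above, while the remaining fields come from `Registered`, whose shape is not shown in this diff, so `packageName` and `className` below are assumptions for illustration:

```ts
const exampleConfiguration = [
    {
        title: 'OpenAI (work account)',         // human-readable label for UIs and logs
        packageName: '@promptbook/openai',      // assumed `Registered` field
        className: 'OpenAiExecutionTools',      // assumed `Registered` field
        options: {
            apiKey: process.env.OPENAI_API_KEY, // provider-specific option
        },
    },
];
```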
@@ -39,13 +39,19 @@ export declare const MODEL_ORDER: {
39
39
  readonly LOW_TIER: 0;
40
40
  };
41
41
  /**
42
- * @@@
42
+ * Metadata definition for LLM execution tools that provides information about a provider's capabilities,
43
+ * configuration options, and relationships within the registry system.
43
44
  *
44
- * @@@ `LlmToolsMetadata` vs `LlmToolsConfiguration` vs `LlmToolsOptions` (vs `Registered`)
45
+ * The Promptbook LLM tools architecture involves several related types:
46
+ * - `LlmToolsMetadata`: Contains static metadata about the tool, such as name, version, and capabilities
47
+ * - `LlmToolsConfiguration`: Runtime configuration from environment variables or settings
48
+ * - `LlmToolsOptions`: Provider-specific options for instantiating tools
49
+ * - `Registered`: The record of a registered tool in the global registry
45
50
  */
46
51
  export type LlmToolsMetadata = Registered & {
47
52
  /**
48
- * @@@
53
+ * Human-readable display name for the LLM provider
54
+ * Used in UI components and documentation references
49
55
  */
50
56
  readonly title: string_title;
51
57
  /**
@@ -64,11 +70,17 @@ export type LlmToolsMetadata = Registered & {
64
70
  */
65
71
  readonly envVariables: ReadonlyArray<string_name & string_SCREAMING_CASE> | null;
66
72
  /**
67
- * @@@
73
+ * Provides a default configuration template for this LLM provider
74
+ * Used to generate example configurations or as fallback when no specific configuration is provided
75
+ * @returns A standardized configuration object for this LLM provider
68
76
  */
69
77
  getBoilerplateConfiguration(): LlmToolsConfiguration[number];
70
78
  /**
71
- * @@@
79
+ * Creates a provider-specific configuration object from environment variables
80
+ * Used to automatically configure LLM tools based on available environment settings
81
+ *
82
+ * @param env Dictionary of environment variables (key-value pairs)
83
+ * @returns Configuration object for this LLM provider if required variables are present, or null if configuration is not possible
72
84
  */
73
85
  createConfigurationFromEnv(env: Record<string_name, string>): LlmToolsConfiguration[number] | null;
74
86
  };
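A hedged sketch of how a provider's `createConfigurationFromEnv` might behave under the contract documented above; the provider name, environment variable, and the `Registered` fields below are hypothetical and not taken from the package:

```ts
// Returns a configuration entry when the required env variable is present, else null.
function createConfigurationFromEnv(env: Record<string, string>) {
    if (!env.EXAMPLE_PROVIDER_API_KEY) {
        // Required variable missing -> this provider cannot be configured
        return null;
    }
    return {
        title: 'Example provider (from env)',
        packageName: '@promptbook/example-provider', // assumed `Registered` field
        className: 'ExampleProviderExecutionTools',  // assumed `Registered` field
        options: { apiKey: env.EXAMPLE_PROVIDER_API_KEY },
    };
}
```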
@@ -5,7 +5,11 @@ import type { TODO_object } from '../../../utils/organization/TODO_object';
5
5
  * This type is used to pass provider-specific options to LLM execution tools.
6
6
  *
7
7
  *
8
- * @@@ `LlmToolsMetadata` vs `LlmToolsConfiguration` vs `LlmToolsOptions` (vs `Registered`)
8
+ * The Promptbook LLM tools architecture involves several related types:
9
+ * - `LlmToolsMetadata`: Contains static metadata about the tool, such as name, version, and capabilities
10
+ * - `LlmToolsConfiguration`: Runtime configuration from environment variables or settings
11
+ * - `LlmToolsOptions`: Provider-specific options for instantiating tools
12
+ * - `Registered`: The record of a registered tool in the global registry
9
13
  */
10
14
  export type LlmToolsOptions = TODO_object;
11
15
  /**
@@ -21,18 +21,27 @@ export type CreateLlmToolsFromConfigurationOptions = {
21
21
  readonly userId?: string_user_id;
22
22
  };
23
23
  /**
24
- * @@@
24
+ * Creates LLM execution tools from provided configuration objects
25
+ *
26
+ * Instantiates and configures LLM tool instances for each configuration entry,
27
+ * combining them into a unified interface via MultipleLlmExecutionTools.
25
28
  *
26
29
  * Note: This function is not cached, every call creates new instance of `MultipleLlmExecutionTools`
27
30
  *
28
- * @returns @@@
31
+ * @param configuration Array of LLM tool configurations to instantiate
32
+ * @param options Additional options for configuring the LLM tools
33
+ * @returns A unified interface combining all successfully instantiated LLM tools
29
34
  * @public exported from `@promptbook/core`
30
35
  */
31
36
  export declare function createLlmToolsFromConfiguration(configuration: LlmToolsConfiguration, options?: CreateLlmToolsFromConfigurationOptions): MultipleLlmExecutionTools;
32
37
  /**
33
38
  * TODO: [🎌] Together with `createLlmToolsFromConfiguration` + 'EXECUTION_TOOLS_CLASSES' gets to `@promptbook/core` ALL model providers, make this more efficient
34
39
  * TODO: [🧠][🎌] Dynamically install required providers
35
- * TODO: @@@ write discussion about this - wizzard
40
+ * TODO: We should implement an interactive configuration wizard that would:
41
+ * 1. Detect which LLM providers are available in the environment
42
+ * 2. Guide users through required configuration settings for each provider
43
+ * 3. Allow testing connections before completing setup
44
+ * 4. Generate appropriate configuration code for application integration
36
45
  * TODO: [🧠][🍛] Which name is better `createLlmToolsFromConfig` or `createLlmToolsFromConfiguration`?
37
46
  * TODO: [🧠] Is there some meaningfull way how to test this util
38
47
  * TODO: This should be maybe not under `_common` but under `utils`
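A sketch of this lowest configuration layer: explicit configuration in, unified tools out. The entry reuses the illustrative (partly assumed) shape from the `LlmToolsConfiguration` sketch earlier in this diff, and `userId` comes from `CreateLlmToolsFromConfigurationOptions` as shown above:

```ts
import { createLlmToolsFromConfiguration } from '@promptbook/core';

const llmTools = createLlmToolsFromConfiguration(
    [
        {
            title: 'OpenAI',
            packageName: '@promptbook/openai',  // assumed `Registered` field
            className: 'OpenAiExecutionTools',  // assumed `Registered` field
            options: { apiKey: process.env.OPENAI_API_KEY },
        },
    ],
    { userId: 'user-123' }, // optional `CreateLlmToolsFromConfigurationOptions`
);
```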
@@ -1,6 +1,7 @@
1
1
  import type { PromptResult } from '../../../../execution/PromptResult';
2
2
  import type { Prompt } from '../../../../types/Prompt';
3
3
  import type { string_date_iso8601 } from '../../../../types/typeAliases';
4
+ import type { string_semantic_version } from '../../../../types/typeAliases';
4
5
  import type { string_promptbook_version } from '../../../../version';
5
6
  /**
6
7
  * Represents a single item stored in the LLM cache.
@@ -14,6 +15,10 @@ export type CacheItem = {
14
15
  * The version of the Promptbook library used when this cache item was created.
15
16
  */
16
17
  promptbookVersion?: string_promptbook_version;
18
+ /**
19
+ * The version of the Book language used when this cache item was created.
20
+ */
21
+ bookVersion?: string_semantic_version;
17
22
  /**
18
23
  * The prompt that was sent to the LLM.
19
24
  */
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
15
15
  export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
16
16
  /**
17
17
  * Represents the version string of the Promptbook engine.
18
- * It follows semantic versioning (e.g., `0.92.0-24`).
18
+ * It follows semantic versioning (e.g., `0.92.0-26`).
19
19
  *
20
20
  * @generated
21
21
  */
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@promptbook/node",
3
- "version": "0.92.0-25",
3
+ "version": "0.92.0-27",
4
4
  "description": "It's time for a paradigm shift. The future of software in plain English, French or Latin",
5
5
  "private": false,
6
6
  "sideEffects": false,
@@ -51,7 +51,7 @@
51
51
  "module": "./esm/index.es.js",
52
52
  "typings": "./esm/typings/src/_packages/node.index.d.ts",
53
53
  "peerDependencies": {
54
- "@promptbook/core": "0.92.0-25"
54
+ "@promptbook/core": "0.92.0-27"
55
55
  },
56
56
  "dependencies": {
57
57
  "colors": "1.4.0",
package/umd/index.umd.js CHANGED
@@ -46,7 +46,7 @@
46
46
  * @generated
47
47
  * @see https://github.com/webgptorg/promptbook
48
48
  */
49
- const PROMPTBOOK_ENGINE_VERSION = '0.92.0-25';
49
+ const PROMPTBOOK_ENGINE_VERSION = '0.92.0-27';
50
50
  /**
51
51
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
52
52
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -6239,7 +6239,7 @@
6239
6239
  /**
6240
6240
  * Parses the boilerplate command
6241
6241
  *
6242
- * Note: @@@ This command is used as boilerplate for new commands - it should NOT be used in any `.book` file
6242
+ * Note: @@ This command is used as boilerplate for new commands - it should NOT be used in any `.book` file
6243
6243
  *
6244
6244
  * @see `documentationUrl` for more details
6245
6245
  * @private within the commands folder
@@ -7175,17 +7175,20 @@
7175
7175
  };
7176
7176
 
7177
7177
  /**
7178
- * Matcher is form of app that @@@
7178
+ * Matcher is a form of app that evaluates (spreadsheet) content against defined criteria or patterns,
7179
+ * determining if it matches or meets specific requirements. Used for classification,
7180
+ * validation, filtering, and quality assessment of inputs.
7179
7181
  *
7180
7182
  * @public exported from `@promptbook/core`
7181
7183
  */
7182
7184
  const MatcherFormfactorDefinition = {
7183
7185
  name: 'EXPERIMENTAL_MATCHER',
7184
- description: `@@@`,
7186
+ description: `An evaluation system that determines whether content meets specific criteria or patterns.
7187
+ Used for content validation, quality assessment, and intelligent filtering tasks. Currently in experimental phase.`,
7185
7188
  documentationUrl: `https://github.com/webgptorg/promptbook/discussions/177`,
7186
7189
  pipelineInterface: {
7187
7190
  inputParameters: [
7188
- /* @@@ */
7191
+ /* Input parameters for content to be matched and criteria to match against */
7189
7192
  {
7190
7193
  name: 'nonce',
7191
7194
  description: 'Just to prevent EXPERIMENTAL_MATCHER to be set as implicit formfactor',
@@ -7194,7 +7197,7 @@
7194
7197
  },
7195
7198
  ],
7196
7199
  outputParameters: [
7197
- /* @@@ */
7200
+ /* Output parameters containing match results, confidence scores, and relevant metadata */
7198
7201
  ],
7199
7202
  },
7200
7203
  };
@@ -7231,13 +7234,16 @@
7231
7234
  };
7232
7235
 
7233
7236
  /**
7234
- * Translator is form of app that @@@
7237
+ * Translator is a form of app that transforms input text from one form to another,
7238
+ * such as language translation, style conversion, tone modification, or other text transformations.
7235
7239
  *
7236
7240
  * @public exported from `@promptbook/core`
7237
7241
  */
7238
7242
  const TranslatorFormfactorDefinition = {
7239
7243
  name: 'TRANSLATOR',
7240
- description: `@@@`,
7244
+ description: `A text transformation system that converts input content into different forms,
7245
+ including language translations, paraphrasing, style conversions, and tone adjustments.
7246
+ This form factor takes one input and produces one transformed output.`,
7241
7247
  documentationUrl: `https://github.com/webgptorg/promptbook/discussions/175`,
7242
7248
  pipelineInterface: {
7243
7249
  inputParameters: [
@@ -8364,7 +8370,10 @@
8364
8370
  `));
8365
8371
  }
8366
8372
  /**
8367
- * @@@
8373
+ * Generates a markdown-formatted message listing all supported commands
8374
+ * with their descriptions and documentation links
8375
+ *
8376
+ * @returns A formatted markdown string containing all available commands and their details
8368
8377
  */
8369
8378
  function getSupportedCommandsMessage() {
8370
8379
  return COMMANDS.flatMap(({ name, aliasNames, description, documentationUrl }) =>
@@ -8375,7 +8384,10 @@
8375
8384
  ]).join('\n');
8376
8385
  }
8377
8386
  /**
8378
- * @@@
8387
+ * Attempts to parse a command variant using the provided input parameters
8388
+ *
8389
+ * @param input Object containing command parsing information including raw command text and normalized values
8390
+ * @returns A parsed Command object if successful, or null if the command cannot be parsed
8379
8391
  */
8380
8392
  function parseCommandVariant(input) {
8381
8393
  const { commandNameRaw, usagePlace, normalized, args, raw, rawArgs } = input;
@@ -10079,11 +10091,16 @@
10079
10091
  */
10080
10092
 
10081
10093
  /**
10082
- * @@@
10094
+ * Creates LLM execution tools from provided configuration objects
10095
+ *
10096
+ * Instantiates and configures LLM tool instances for each configuration entry,
10097
+ * combining them into a unified interface via MultipleLlmExecutionTools.
10083
10098
  *
10084
10099
  * Note: This function is not cached, every call creates new instance of `MultipleLlmExecutionTools`
10085
10100
  *
10086
- * @returns @@@
10101
+ * @param configuration Array of LLM tool configurations to instantiate
10102
+ * @param options Additional options for configuring the LLM tools
10103
+ * @returns A unified interface combining all successfully instantiated LLM tools
10087
10104
  * @public exported from `@promptbook/core`
10088
10105
  */
10089
10106
  function createLlmToolsFromConfiguration(configuration, options = {}) {
@@ -10122,7 +10139,11 @@
10122
10139
  /**
10123
10140
  * TODO: [🎌] Together with `createLlmToolsFromConfiguration` + 'EXECUTION_TOOLS_CLASSES' gets to `@promptbook/core` ALL model providers, make this more efficient
10124
10141
  * TODO: [🧠][🎌] Dynamically install required providers
10125
- * TODO: @@@ write discussion about this - wizzard
10142
+ * TODO: We should implement an interactive configuration wizard that would:
10143
+ * 1. Detect which LLM providers are available in the environment
10144
+ * 2. Guide users through required configuration settings for each provider
10145
+ * 3. Allow testing connections before completing setup
10146
+ * 4. Generate appropriate configuration code for application integration
10126
10147
  * TODO: [🧠][🍛] Which name is better `createLlmToolsFromConfig` or `createLlmToolsFromConfiguration`?
10127
10148
  * TODO: [🧠] Is there some meaningfull way how to test this util
10128
10149
  * TODO: This should be maybe not under `_common` but under `utils`
@@ -10130,11 +10151,14 @@
10130
10151
  */
10131
10152
 
10132
10153
  /**
10133
- * @@@
10154
+ * Automatically configures LLM tools from environment variables in Node.js
10155
+ *
10156
+ * This utility function detects available LLM providers based on environment variables
10157
+ * and creates properly configured LLM execution tools for each detected provider.
10134
10158
  *
10135
10159
  * Note: This function is not cached, every call creates new instance of `MultipleLlmExecutionTools`
10136
10160
  *
10137
- * @@@ .env
10161
+ * Supports environment variables from .env files when dotenv is configured
10138
10162
  * Note: `$` is used to indicate that this function is not a pure function - it uses filesystem to access `.env` file
10139
10163
  *
10140
10164
  * It looks for environment variables:
@@ -10142,7 +10166,8 @@
10142
10166
  * - `process.env.ANTHROPIC_CLAUDE_API_KEY`
10143
10167
  * - ...
10144
10168
  *
10145
- * @returns @@@
10169
+ * @param options Configuration options for the LLM tools
10170
+ * @returns A unified interface containing all detected and configured LLM tools
10146
10171
  * @public exported from `@promptbook/node`
10147
10172
  */
10148
10173
  async function $provideLlmToolsFromEnv(options = {}) {
@@ -10168,7 +10193,16 @@
10168
10193
  return createLlmToolsFromConfiguration(configuration, options);
10169
10194
  }
10170
10195
  /**
10171
- * TODO: @@@ write `$provideLlmToolsFromEnv` vs `$provideLlmToolsConfigurationFromEnv` vs `createLlmToolsFromConfiguration`
10196
+ * TODO: The architecture for LLM tools configuration consists of three key functions:
10197
+ * 1. `$provideLlmToolsFromEnv` - High-level function that detects available providers from env vars and returns ready-to-use LLM tools
10198
+ * 2. `$provideLlmToolsConfigurationFromEnv` - Middle layer that extracts configuration objects from environment variables
10199
+ * 3. `createLlmToolsFromConfiguration` - Low-level function that instantiates LLM tools from explicit configuration
10200
+ *
10201
+ * This layered approach allows flexibility in how tools are configured:
10202
+ * - Use $provideLlmToolsFromEnv for automatic detection and setup in Node.js environments
10203
+ * - Use $provideLlmToolsConfigurationFromEnv to extract config objects for modification before instantiation
10204
+ * - Use createLlmToolsFromConfiguration for explicit control over tool configurations
10205
+ *
10172
10206
  * TODO: [🧠][🍛] Which name is better `$provideLlmToolsFromEnv` or `$provideLlmToolsFromEnvironment`?
10173
10207
  * TODO: [🧠] Is there some meaningfull way how to test this util
10174
10208
  * Note: [🟢] Code in this file should never be never released in packages that could be imported into browser environment