@promptbook/remote-server 0.92.0-26 → 0.92.0-28

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. package/esm/index.es.js +40 -18
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/core.index.d.ts +4 -4
  4. package/esm/typings/src/commands/FOREACH/foreachCommandParser.d.ts +0 -2
  5. package/esm/typings/src/commands/_BOILERPLATE/boilerplateCommandParser.d.ts +1 -1
  6. package/esm/typings/src/constants.d.ts +35 -0
  7. package/esm/typings/src/executables/$provideExecutablesForNode.d.ts +1 -1
  8. package/esm/typings/src/executables/apps/locateLibreoffice.d.ts +2 -1
  9. package/esm/typings/src/executables/apps/locatePandoc.d.ts +2 -1
  10. package/esm/typings/src/executables/platforms/locateAppOnLinux.d.ts +2 -1
  11. package/esm/typings/src/executables/platforms/locateAppOnMacOs.d.ts +2 -1
  12. package/esm/typings/src/executables/platforms/locateAppOnWindows.d.ts +2 -1
  13. package/esm/typings/src/execution/AbstractTaskResult.d.ts +1 -1
  14. package/esm/typings/src/execution/LlmExecutionToolsConstructor.d.ts +2 -1
  15. package/esm/typings/src/execution/PipelineExecutorResult.d.ts +1 -1
  16. package/esm/typings/src/execution/createPipelineExecutor/$OngoingTaskResult.d.ts +12 -9
  17. package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +20 -14
  18. package/esm/typings/src/execution/createPipelineExecutor/filterJustOutputParameters.d.ts +7 -6
  19. package/esm/typings/src/execution/createPipelineExecutor/getContextForTask.d.ts +5 -1
  20. package/esm/typings/src/execution/createPipelineExecutor/getExamplesForTask.d.ts +1 -1
  21. package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +8 -11
  22. package/esm/typings/src/execution/translation/automatic-translate/automatic-translators/LindatAutomaticTranslator.d.ts +4 -4
  23. package/esm/typings/src/execution/utils/uncertainNumber.d.ts +3 -2
  24. package/esm/typings/src/formats/csv/CsvSettings.d.ts +2 -2
  25. package/esm/typings/src/formfactors/_common/AbstractFormfactorDefinition.d.ts +16 -7
  26. package/esm/typings/src/formfactors/_common/FormfactorDefinition.d.ts +3 -1
  27. package/esm/typings/src/formfactors/chatbot/ChatbotFormfactorDefinition.d.ts +2 -2
  28. package/esm/typings/src/formfactors/completion/CompletionFormfactorDefinition.d.ts +1 -1
  29. package/esm/typings/src/formfactors/generator/GeneratorFormfactorDefinition.d.ts +2 -1
  30. package/esm/typings/src/formfactors/generic/GenericFormfactorDefinition.d.ts +2 -2
  31. package/esm/typings/src/formfactors/index.d.ts +5 -5
  32. package/esm/typings/src/formfactors/matcher/MatcherFormfactorDefinition.d.ts +4 -2
  33. package/esm/typings/src/formfactors/translator/TranslatorFormfactorDefinition.d.ts +3 -2
  34. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForTestingAndScriptsAndPlayground.d.ts +4 -3
  35. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsFromEnv.d.ts +17 -4
  36. package/esm/typings/src/llm-providers/_common/register/LlmToolsConfiguration.d.ts +11 -4
  37. package/esm/typings/src/llm-providers/_common/register/LlmToolsMetadata.d.ts +21 -42
  38. package/esm/typings/src/llm-providers/_common/register/LlmToolsOptions.d.ts +5 -1
  39. package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts +12 -3
  40. package/esm/typings/src/llm-providers/_common/utils/cache/CacheItem.d.ts +5 -0
  41. package/esm/typings/src/llm-providers/anthropic-claude/anthropic-claude-models.d.ts +1 -1
  42. package/esm/typings/src/llm-providers/deepseek/deepseek-models.d.ts +1 -1
  43. package/esm/typings/src/llm-providers/google/google-models.d.ts +1 -1
  44. package/esm/typings/src/llm-providers/openai/openai-models.d.ts +1 -1
  45. package/esm/typings/src/llm-providers/openai/register-configuration.d.ts +2 -2
  46. package/esm/typings/src/llm-providers/openai/register-constructor.d.ts +2 -2
  47. package/esm/typings/src/version.d.ts +1 -1
  48. package/package.json +2 -2
  49. package/umd/index.umd.js +40 -18
  50. package/umd/index.umd.js.map +1 -1
package/esm/typings/src/formfactors/matcher/MatcherFormfactorDefinition.d.ts CHANGED
@@ -1,11 +1,13 @@
  /**
- * Matcher is form of app that @@@
+ * Matcher is form of app that evaluates (spreadsheet) content against defined criteria or patterns,
+ * determining if it matches or meets specific requirements. Used for classification,
+ * validation, filtering, and quality assessment of inputs.
  *
  * @public exported from `@promptbook/core`
  */
  export declare const MatcherFormfactorDefinition: {
  readonly name: "EXPERIMENTAL_MATCHER";
- readonly description: "@@@";
+ readonly description: "An evaluation system that determines whether content meets specific criteria or patterns.\n Used for content validation, quality assessment, and intelligent filtering tasks. Currently in experimental phase.";
  readonly documentationUrl: "https://github.com/webgptorg/promptbook/discussions/177";
  readonly pipelineInterface: {
  readonly inputParameters: readonly [{
package/esm/typings/src/formfactors/translator/TranslatorFormfactorDefinition.d.ts CHANGED
@@ -1,11 +1,12 @@
  /**
- * Translator is form of app that @@@
+ * Translator is form of app that transforms input text from one form to another,
+ * such as language translation, style conversion, tone modification, or other text transformations.
  *
  * @public exported from `@promptbook/core`
  */
  export declare const TranslatorFormfactorDefinition: {
  readonly name: "TRANSLATOR";
- readonly description: "@@@";
+ readonly description: "A text transformation system that converts input content into different forms,\n including language translations, paraphrasing, style conversions, and tone adjustments.\n This form factor takes one input and produces one transformed output.";
  readonly documentationUrl: "https://github.com/webgptorg/promptbook/discussions/175";
  readonly pipelineInterface: {
  readonly inputParameters: readonly [{
package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForTestingAndScriptsAndPlayground.d.ts CHANGED
@@ -3,7 +3,8 @@ import type { LlmExecutionToolsWithTotalUsage } from '../utils/count-total-usage
  import type { CreateLlmToolsFromConfigurationOptions } from './createLlmToolsFromConfiguration';
  type GetLlmToolsForTestingAndScriptsAndPlaygroundOptions = CreateLlmToolsFromConfigurationOptions & {
  /**
- * @@@
+ * Flag indicating whether the cache should be reloaded or reused
+ * When set to true, the existing cache will not be used but thinks will be still saved to the cache
  *
  * @default false
  */
@@ -22,5 +23,5 @@ export {};
  * Note: [⚪] This should never be in any released package
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
  * TODO: This should be maybe not under `_common` but under `utils-internal` / `utils/internal`
- * TODO: [®] DRY Register logic
- */
+ * TODO: [®] DRY Register logi
+ */
package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsFromEnv.d.ts CHANGED
@@ -1,11 +1,14 @@
  import { MultipleLlmExecutionTools } from '../../multiple/MultipleLlmExecutionTools';
  import type { CreateLlmToolsFromConfigurationOptions } from './createLlmToolsFromConfiguration';
  /**
- * @@@
+ * Automatically configures LLM tools from environment variables in Node.js
+ *
+ * This utility function detects available LLM providers based on environment variables
+ * and creates properly configured LLM execution tools for each detected provider.
  *
  * Note: This function is not cached, every call creates new instance of `MultipleLlmExecutionTools`
  *
- * @@@ .env
+ * Supports environment variables from .env files when dotenv is configured
  * Note: `$` is used to indicate that this function is not a pure function - it uses filesystem to access `.env` file
  *
  * It looks for environment variables:
@@ -13,12 +16,22 @@ import type { CreateLlmToolsFromConfigurationOptions } from './createLlmToolsFro
  * - `process.env.ANTHROPIC_CLAUDE_API_KEY`
  * - ...
  *
- * @returns @@@
+ * @param options Configuration options for the LLM tools
+ * @returns A unified interface containing all detected and configured LLM tools
  * @public exported from `@promptbook/node`
  */
  export declare function $provideLlmToolsFromEnv(options?: CreateLlmToolsFromConfigurationOptions): Promise<MultipleLlmExecutionTools>;
  /**
- * TODO: @@@ write `$provideLlmToolsFromEnv` vs `$provideLlmToolsConfigurationFromEnv` vs `createLlmToolsFromConfiguration`
+ * TODO: The architecture for LLM tools configuration consists of three key functions:
+ * 1. `$provideLlmToolsFromEnv` - High-level function that detects available providers from env vars and returns ready-to-use LLM tools
+ * 2. `$provideLlmToolsConfigurationFromEnv` - Middle layer that extracts configuration objects from environment variables
+ * 3. `createLlmToolsFromConfiguration` - Low-level function that instantiates LLM tools from explicit configuration
+ *
+ * This layered approach allows flexibility in how tools are configured:
+ * - Use $provideLlmToolsFromEnv for automatic detection and setup in Node.js environments
+ * - Use $provideLlmToolsConfigurationFromEnv to extract config objects for modification before instantiation
+ * - Use createLlmToolsFromConfiguration for explicit control over tool configurations
+ *
  * TODO: [🧠][🍛] Which name is better `$provideLlmToolsFromEnv` or `$provideLlmToolsFromEnvironment`?
  * TODO: [🧠] Is there some meaningfull way how to test this util
  * Note: [🟢] Code in this file should never be never released in packages that could be imported into browser environment
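To make the high-level entry point concrete, here is a minimal usage sketch. It relies only on what the hunk above declares (`$provideLlmToolsFromEnv` exported from `@promptbook/node`, optional options, a `Promise<MultipleLlmExecutionTools>` result); the surrounding script is illustrative, not from the package.

```ts
// Minimal usage sketch (not taken from the package docs): obtain LLM tools from
// environment variables in Node.js, as the typings above declare.
import { $provideLlmToolsFromEnv } from '@promptbook/node';

async function main(): Promise<void> {
    // Reads provider keys such as process.env.OPENAI_API_KEY or ANTHROPIC_CLAUDE_API_KEY,
    // optionally loaded from a `.env` file beforehand (e.g. via dotenv).
    const llmTools = await $provideLlmToolsFromEnv();

    // `llmTools` is a `MultipleLlmExecutionTools` instance wrapping every detected provider;
    // pass it wherever Promptbook expects LLM execution tools.
    console.info('LLM tools ready', llmTools);
}

main().catch((error) => {
    console.error(error);
    process.exit(1);
});
```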
package/esm/typings/src/llm-providers/_common/register/LlmToolsConfiguration.d.ts CHANGED
@@ -2,17 +2,24 @@ import type { string_title } from '../../../types/typeAliases';
  import type { Registered } from '../../../utils/$Register';
  import type { LlmToolsOptions } from './LlmToolsOptions';
  /**
- * @@@
+ * Configuration definition for LLM execution tools, containing provider-specific settings
+ * that can be passed during runtime to instantiate and configure LLM tools properly.
  *
- * @@@ `LlmToolsMetadata` vs `LlmToolsConfiguration` vs `LlmToolsOptions` (vs `Registered`)
+ * The Promptbook LLM tools architecture involves several related types:
+ * - `LlmToolsMetadata`: Contains static metadata about the tool, such as name, version, and capabilities
+ * - `LlmToolsConfiguration`: Runtime configuration from environment variables or settings
+ * - `LlmToolsOptions`: Provider-specific options for instantiating tools
+ * - `Registered`: The record of a registered tool in the global registry
  */
  export type LlmToolsConfiguration = ReadonlyArray<Registered & {
  /**
- * @@@
+ * Human-readable name for this specific provider configuration
+ * Used in UI components and logs for identifying this particular configuration
  */
  readonly title: string_title;
  /**
- * @@@
+ * Provider-specific configuration options used for instantiating and configuring LLM tools
+ * Contains values like API keys, model preferences, endpoint URLs, and other settings
  */
  readonly options: LlmToolsOptions;
  }>;
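A hedged sketch of a value matching this shape: `title` and `options` come from the hunk above, while `packageName`/`className` are an assumption about what `Registered` contributes (that type is not shown in this diff), and all concrete values are illustrative.

```ts
// Illustrative only: a configuration array shaped like `LlmToolsConfiguration` above.
// `packageName` and `className` are assumed `Registered` fields (not shown in this diff);
// `title` and `options` are the fields declared in the hunk.
const configuration = [
    {
        packageName: '@promptbook/openai', // assumed Registered field
        className: 'OpenAiExecutionTools', // assumed Registered field
        title: 'OpenAI (primary key)',
        options: {
            apiKey: process.env.OPENAI_API_KEY ?? '', // provider-specific option, illustrative
        },
    },
];
```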
package/esm/typings/src/llm-providers/_common/register/LlmToolsMetadata.d.ts CHANGED
@@ -1,61 +1,34 @@
+ import { MODEL_ORDERS } from '../../../constants';
+ import { MODEL_TRUST_LEVELS } from '../../../constants';
  import type { string_name } from '../../../types/typeAliases';
  import type { string_title } from '../../../types/typeAliases';
  import type { Registered } from '../../../utils/$Register';
  import type { string_SCREAMING_CASE } from '../../../utils/normalization/normalizeTo_SCREAMING_CASE';
  import type { LlmToolsConfiguration } from './LlmToolsConfiguration';
  /**
- * How is the model provider trusted?
+ * Metadata definition for LLM execution tools that provides information about a provider's capabilities,
+ * configuration options, and relationships within the registry system.
  *
- * @public exported from `@promptbook/core`
- */
- export declare const MODEL_TRUST_LEVEL: {
- readonly FULL: "Model is running on the local machine, training data and model weights are known, data are ethically sourced";
- readonly OPEN: "Model is open source, training data and model weights are known";
- readonly PARTIALLY_OPEN: "Model is open source, but training data and model weights are not (fully) known";
- readonly CLOSED_LOCAL: "Model can be run locally, but it is not open source";
- readonly CLOSED_FREE: "Model is behind API gateway but free to use";
- readonly CLOSED_BUSINESS: "Model is behind API gateway and paid but has good SLA, TOS, privacy policy and in general is a good to use in business applications";
- readonly CLOSED: "Model is behind API gateway and paid";
- readonly UNTRUSTED: "Model has questions about the training data and ethics, but it is not known if it is a problem or not";
- readonly VURNABLE: "Model has some known serious vulnerabilities, leaks, ethical problems, etc.";
- };
- /**
- * How is the model provider important?
- *
- * @public exported from `@promptbook/core`
- */
- export declare const MODEL_ORDER: {
- /**
- * Top-tier models, e.g. OpenAI, Anthropic,...
- */
- readonly TOP_TIER: 333;
- /**
- * Mid-tier models, e.g. Llama, Mistral, etc.
- */
- readonly NORMAL: 100;
- /**
- * Low-tier models, e.g. Phi, Tiny, etc.
- */
- readonly LOW_TIER: 0;
- };
- /**
- * @@@
- *
- * @@@ `LlmToolsMetadata` vs `LlmToolsConfiguration` vs `LlmToolsOptions` (vs `Registered`)
+ * The Promptbook LLM tools architecture involves several related types:
+ * - `LlmToolsMetadata`: Contains static metadata about the tool, such as name, version, and capabilities
+ * - `LlmToolsConfiguration`: Runtime configuration from environment variables or settings
+ * - `LlmToolsOptions`: Provider-specific options for instantiating tools
+ * - `Registered`: The record of a registered tool in the global registry
  */
  export type LlmToolsMetadata = Registered & {
  /**
- * @@@
+ * Human-readable display name for the LLM provider
+ * Used in UI components and documentation references
  */
  readonly title: string_title;
  /**
  * How is the model is trusted?
  */
- readonly trustLevel: keyof typeof MODEL_TRUST_LEVEL;
+ readonly trustLevel: keyof typeof MODEL_TRUST_LEVELS;
  /**
  * How is the model provider important and should be sorted in the list of available providers?
  */
- readonly order: typeof MODEL_ORDER[keyof typeof MODEL_ORDER] | number;
+ readonly order: typeof MODEL_ORDERS[keyof typeof MODEL_ORDERS] | number;
  /**
  * List of environment variables that can be used to configure the provider
  *
@@ -64,11 +37,17 @@ export type LlmToolsMetadata = Registered & {
  */
  readonly envVariables: ReadonlyArray<string_name & string_SCREAMING_CASE> | null;
  /**
- * @@@
+ * Provides a default configuration template for this LLM provider
+ * Used to generate example configurations or as fallback when no specific configuration is provided
+ * @returns A standardized configuration object for this LLM provider
  */
  getBoilerplateConfiguration(): LlmToolsConfiguration[number];
  /**
- * @@@
+ * Creates a provider-specific configuration object from environment variables
+ * Used to automatically configure LLM tools based on available environment settings
+ *
+ * @param env Dictionary of environment variables (key-value pairs)
+ * @returns Configuration object for this LLM provider if required variables are present, or null if configuration is not possible
  */
  createConfigurationFromEnv(env: Record<string_name, string>): LlmToolsConfiguration[number] | null;
  };
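The two methods documented above are easiest to read with a concrete shape in mind. The following is a hedged sketch of an object with the documented members, not copied from any real provider registration in the package; the `Registered` fields, constant values, and configuration contents are assumptions for illustration only.

```ts
// Illustrative sketch of an object with the members documented in `LlmToolsMetadata` above.
// Field values and the Registered fields are assumptions, not the package's actual registrations.
const exampleProviderMetadata = {
    packageName: '@promptbook/openai', // assumed Registered field
    className: 'OpenAiExecutionTools', // assumed Registered field
    title: 'OpenAI',
    trustLevel: 'CLOSED_BUSINESS', // a key of MODEL_TRUST_LEVELS
    order: 333, // e.g. a value from MODEL_ORDERS
    envVariables: ['OPENAI_API_KEY'],

    getBoilerplateConfiguration() {
        // Default configuration template, per the doc comment above.
        return {
            packageName: '@promptbook/openai',
            className: 'OpenAiExecutionTools',
            title: 'OpenAI (boilerplate)',
            options: { apiKey: 'sk-...' },
        };
    },

    createConfigurationFromEnv(env: Record<string, string>) {
        // Only produce a configuration when the required variable is present, otherwise null.
        if (!env.OPENAI_API_KEY) {
            return null;
        }
        return {
            packageName: '@promptbook/openai',
            className: 'OpenAiExecutionTools',
            title: 'OpenAI (from env)',
            options: { apiKey: env.OPENAI_API_KEY },
        };
    },
};
```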
package/esm/typings/src/llm-providers/_common/register/LlmToolsOptions.d.ts CHANGED
@@ -5,7 +5,11 @@ import type { TODO_object } from '../../../utils/organization/TODO_object';
  * This type is used to pass provider-specific options to LLM execution tools.
  *
  *
- * @@@ `LlmToolsMetadata` vs `LlmToolsConfiguration` vs `LlmToolsOptions` (vs `Registered`)
+ * The Promptbook LLM tools architecture involves several related types:
+ * - `LlmToolsMetadata`: Contains static metadata about the tool, such as name, version, and capabilities
+ * - `LlmToolsConfiguration`: Runtime configuration from environment variables or settings
+ * - `LlmToolsOptions`: Provider-specific options for instantiating tools
+ * - `Registered`: The record of a registered tool in the global registry
  */
  export type LlmToolsOptions = TODO_object;
  /**
package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts CHANGED
@@ -21,18 +21,27 @@ export type CreateLlmToolsFromConfigurationOptions = {
  readonly userId?: string_user_id;
  };
  /**
- * @@@
+ * Creates LLM execution tools from provided configuration objects
+ *
+ * Instantiates and configures LLM tool instances for each configuration entry,
+ * combining them into a unified interface via MultipleLlmExecutionTools.
  *
  * Note: This function is not cached, every call creates new instance of `MultipleLlmExecutionTools`
  *
- * @returns @@@
+ * @param configuration Array of LLM tool configurations to instantiate
+ * @param options Additional options for configuring the LLM tools
+ * @returns A unified interface combining all successfully instantiated LLM tools
  * @public exported from `@promptbook/core`
  */
  export declare function createLlmToolsFromConfiguration(configuration: LlmToolsConfiguration, options?: CreateLlmToolsFromConfigurationOptions): MultipleLlmExecutionTools;
  /**
  * TODO: [🎌] Together with `createLlmToolsFromConfiguration` + 'EXECUTION_TOOLS_CLASSES' gets to `@promptbook/core` ALL model providers, make this more efficient
  * TODO: [🧠][🎌] Dynamically install required providers
- * TODO: @@@ write discussion about this - wizzard
+ * TODO: We should implement an interactive configuration wizard that would:
+ * 1. Detect which LLM providers are available in the environment
+ * 2. Guide users through required configuration settings for each provider
+ * 3. Allow testing connections before completing setup
+ * 4. Generate appropriate configuration code for application integration
  * TODO: [🧠][🍛] Which name is better `createLlmToolsFromConfig` or `createLlmToolsFromConfiguration`?
  * TODO: [🧠] Is there some meaningfull way how to test this util
  * TODO: This should be maybe not under `_common` but under `utils`
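Building on the configuration shape sketched earlier, a minimal call of the function declared above. Only the signature and the `userId` option shown in this hunk are relied on; the configuration values and the assumed `Registered` fields are illustrative.

```ts
// Minimal sketch: instantiate LLM tools from an explicit configuration.
// Relies only on the signature declared above; concrete values are illustrative.
import { createLlmToolsFromConfiguration } from '@promptbook/core';

const llmTools = createLlmToolsFromConfiguration(
    [
        {
            packageName: '@promptbook/openai', // assumed Registered field
            className: 'OpenAiExecutionTools', // assumed Registered field
            title: 'OpenAI (from explicit config)',
            options: { apiKey: process.env.OPENAI_API_KEY ?? '' },
        },
    ],
    { userId: 'local-dev' }, // `userId` is declared in CreateLlmToolsFromConfigurationOptions above
);

// `llmTools` is a MultipleLlmExecutionTools combining every configured provider.
```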
package/esm/typings/src/llm-providers/_common/utils/cache/CacheItem.d.ts CHANGED
@@ -1,6 +1,7 @@
  import type { PromptResult } from '../../../../execution/PromptResult';
  import type { Prompt } from '../../../../types/Prompt';
  import type { string_date_iso8601 } from '../../../../types/typeAliases';
+ import type { string_semantic_version } from '../../../../types/typeAliases';
  import type { string_promptbook_version } from '../../../../version';
  /**
  * Represents a single item stored in the LLM cache.
@@ -14,6 +15,10 @@ export type CacheItem = {
  * The version of the Promptbook library used when this cache item was created.
  */
  promptbookVersion?: string_promptbook_version;
+ /**
+ * The version of the Book language used when this cache item was created.
+ */
+ bookVersion?: string_semantic_version;
  /**
  * The prompt that was sent to the LLM.
  */
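To illustrate the new `bookVersion` field, here is a small hedged sketch of a version check one might perform when reading cached items. It uses only the two fields visible in this hunk, and the invalidation policy (discard on any mismatch) is hypothetical, not something the package prescribes.

```ts
// Hypothetical cache-compatibility helper using only the fields visible in this hunk.
// The policy (treat any version mismatch as stale) is an illustration, not the package's behaviour.
type VersionedCacheItem = {
    promptbookVersion?: string;
    bookVersion?: string; // newly added in this version
};

function isCacheItemCompatible(
    item: VersionedCacheItem,
    currentPromptbookVersion: string,
    currentBookVersion: string,
): boolean {
    // Items written before the field existed have `bookVersion` undefined; treat them as stale here.
    return (
        item.promptbookVersion === currentPromptbookVersion &&
        item.bookVersion === currentBookVersion
    );
}
```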
package/esm/typings/src/llm-providers/anthropic-claude/anthropic-claude-models.d.ts CHANGED
@@ -3,7 +3,7 @@ import type { number_usd } from '../../types/typeAliases';
  /**
  * List of available Anthropic Claude models with pricing
  *
- * Note: Done at 2024-08-16
+ * Note: Done at 2025-05-06
  *
  * @see https://docs.anthropic.com/en/docs/models-overview
  * @public exported from `@promptbook/anthropic-claude`
package/esm/typings/src/llm-providers/deepseek/deepseek-models.d.ts CHANGED
@@ -3,7 +3,7 @@ import type { number_usd } from '../../types/typeAliases';
  /**
  * List of available Deepseek models with descriptions
  *
- * Note: Done at 2025-04-22
+ * Note: Done at 2025-05-06
  *
  * @see https://www.deepseek.com/models
  * @public exported from `@promptbook/deepseek`
package/esm/typings/src/llm-providers/google/google-models.d.ts CHANGED
@@ -3,7 +3,7 @@ import type { number_usd } from '../../types/typeAliases';
  /**
  * List of available Google models with descriptions
  *
- * Note: Done at 2025-04-22
+ * Note: Done at 2025-05-06
  *
  * @see https://ai.google.dev/models/gemini
  * @public exported from `@promptbook/google`
package/esm/typings/src/llm-providers/openai/openai-models.d.ts CHANGED
@@ -3,7 +3,7 @@ import type { number_usd } from '../../types/typeAliases';
  /**
  * List of available OpenAI models with pricing
  *
- * Note: Done at 2024-05-20
+ * Note: Done at 2025-05-06
  *
  * @see https://platform.openai.com/docs/models/
  * @see https://openai.com/api/pricing/
package/esm/typings/src/llm-providers/openai/register-configuration.d.ts CHANGED
@@ -10,9 +10,9 @@ import type { Registration } from '../../utils/$Register';
  */
  export declare const _OpenAiMetadataRegistration: Registration;
  /**
- * @@@ registration1 of default configuration for Open AI
+ * Registration of the OpenAI Assistant metadata
  *
- * Note: [🏐] Configurations registrations are done in @@@ BUT constructor @@@
+ * Note: [🏐] Configurations registrations are done in the metadata registration section, but the constructor registration is handled separately.
  *
  * @public exported from `@promptbook/core`
  * @public exported from `@promptbook/wizzard`
package/esm/typings/src/llm-providers/openai/register-constructor.d.ts CHANGED
@@ -10,9 +10,9 @@ import type { Registration } from '../../utils/$Register';
  */
  export declare const _OpenAiRegistration: Registration;
  /**
- * @@@ registration2
+ * Registration of the OpenAI Assistant provider
  *
- * Note: [🏐] Configurations registrations are done in @@@ BUT constructor @@@
+ * Note: [🏐] Configurations registrations are done in register-constructor.ts BUT constructor register-constructor.ts
  *
  * @public exported from `@promptbook/openai`
  * @public exported from `@promptbook/wizzard`
package/esm/typings/src/version.d.ts CHANGED
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
  export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
  /**
  * Represents the version string of the Promptbook engine.
- * It follows semantic versioning (e.g., `0.92.0-25`).
+ * It follows semantic versioning (e.g., `0.92.0-27`).
  *
  * @generated
  */
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@promptbook/remote-server",
- "version": "0.92.0-26",
+ "version": "0.92.0-28",
  "description": "It's time for a paradigm shift. The future of software in plain English, French or Latin",
  "private": false,
  "sideEffects": false,
@@ -51,7 +51,7 @@
  "module": "./esm/index.es.js",
  "typings": "./esm/typings/src/_packages/remote-server.index.d.ts",
  "peerDependencies": {
- "@promptbook/core": "0.92.0-26"
+ "@promptbook/core": "0.92.0-28"
  },
  "dependencies": {
  "colors": "1.4.0",
package/umd/index.umd.js CHANGED
@@ -48,7 +48,7 @@
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
- const PROMPTBOOK_ENGINE_VERSION = '0.92.0-26';
+ const PROMPTBOOK_ENGINE_VERSION = '0.92.0-28';
  /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -887,7 +887,8 @@
  */

  /**
- * @@@
+ * Attempts to locate the specified application on a Linux system using the 'which' command.
+ * Returns the path to the executable if found, or null otherwise.
  *
  * @private within the repository
  */
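The comment above describes a `which`-based lookup. Below is a generic, hedged sketch of that technique in Node.js; it is not the package's actual implementation, only an illustration of locating an executable and resolving to its path or null.

```ts
// Generic illustration of the `which`-based lookup described above (not the package's code).
import { execFile } from 'node:child_process';

function locateOnLinux(appName: string): Promise<string | null> {
    return new Promise((resolve) => {
        execFile('which', [appName], (error, stdout) => {
            // `which` exits non-zero when the executable is not on PATH.
            if (error) {
                resolve(null);
                return;
            }
            const path = stdout.trim();
            resolve(path === '' ? null : path);
        });
    });
}

// Example: locateOnLinux('pandoc').then((path) => console.info(path ?? 'pandoc not found'));
```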
@@ -954,7 +955,8 @@
  // eslint-disable-next-line @typescript-eslint/no-var-requires
  const userhome = require('userhome');
  /**
- * @@@
+ * Attempts to locate the specified application on a macOS system by checking standard application paths and using mdfind.
+ * Returns the path to the executable if found, or null otherwise.
  *
  * @private within the repository
  */
@@ -986,7 +988,8 @@
  */

  /**
- * @@@
+ * Attempts to locate the specified application on a Windows system by searching common installation directories.
+ * Returns the path to the executable if found, or null otherwise.
  *
  * @private within the repository
  */
@@ -1057,7 +1060,8 @@
  */

  /**
- * @@@
+ * Locates the LibreOffice executable on the current system by searching platform-specific paths.
+ * Returns the path to the executable if found, or null otherwise.
  *
  * @private within the repository
  */
@@ -1075,7 +1079,8 @@
  */

  /**
- * @@@
+ * Locates the Pandoc executable on the current system by searching platform-specific paths.
+ * Returns the path to the executable if found, or null otherwise.
  *
  * @private within the repository
  */
@@ -1093,7 +1098,7 @@
  */

  /**
- * @@@
+ * Provides paths to required executables (i.e. as Pandoc and LibreOffice) for Node.js environments.
  *
  * @public exported from `@promptbook/node`
  */
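A hedged sketch of how the exported helper above might be consumed. The diff only documents that it provides executable paths for Node.js, so the `await` (the call may or may not be asynchronous) and the absence of arguments are assumptions for illustration; no specific result properties are relied on.

```ts
// Sketch only: consuming the executables provider documented above.
// The call shape is an assumption; the diff states only that paths to Pandoc and
// LibreOffice are provided for Node.js environments.
import { $provideExecutablesForNode } from '@promptbook/node';

async function reportExecutables(): Promise<void> {
    const executables = await $provideExecutablesForNode();
    console.info('Located executables:', executables);
}

reportExecutables().catch(console.error);
```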
@@ -4442,7 +4447,7 @@
  }

  /**
- * @@@
+ * Contains configuration options for parsing and generating CSV files, such as delimiters and quoting rules.
  *
  * @public exported from `@promptbook/core`
  */
@@ -5157,8 +5162,12 @@
  */

  /**
- * @@@
+ * Executes a pipeline task with multiple attempts, including joker and retry logic. Handles different task types
+ * (prompt, script, dialog, etc.), applies postprocessing, checks expectations, and updates the execution report.
+ * Throws errors if execution fails after all attempts.
  *
+ * @param options - The options for execution, including task, parameters, pipeline, and configuration.
+ * @returns The result string of the executed task.
  * @private internal utility of `createPipelineExecutor`
  */
  async function executeAttempts(options) {
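The description above mentions retries until expectations are met. Below is a generic, self-contained retry helper illustrating that pattern; it is not `executeAttempts` from the package, and the expectation check is a stand-in.

```ts
// Generic retry-with-attempts pattern, illustrating the behaviour described above.
// This is NOT the package's `executeAttempts`; the expectation check is a stand-in.
async function executeWithAttempts<TResult>(
    attempt: () => Promise<TResult>,
    meetsExpectations: (result: TResult) => boolean,
    maxAttempts: number,
): Promise<TResult> {
    let lastError: unknown = new Error('No attempts were made');

    for (let attemptIndex = 0; attemptIndex < maxAttempts; attemptIndex++) {
        try {
            const result = await attempt();
            if (meetsExpectations(result)) {
                return result; // success: expectations satisfied
            }
            lastError = new Error(`Attempt ${attemptIndex + 1} did not meet expectations`);
        } catch (error) {
            lastError = error; // remember the failure and retry
        }
    }

    // All attempts exhausted; mirror the "throws after all attempts" behaviour described above.
    throw lastError;
}
```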
@@ -5616,8 +5625,12 @@
  }

  /**
- * @@@
+ * Returns the context for a given task, typically used to provide additional information or variables
+ * required for the execution of the task within a pipeline. The context is returned as a string value
+ * that may include markdown formatting.
  *
+ * @param task - The task for which the context is being generated. This should be a deeply immutable TaskJson object.
+ * @returns The context as a string, formatted as markdown and parameter value.
  * @private internal utility of `createPipelineExecutor`
  */
  async function getContextForTask(task) {
@@ -5625,7 +5638,7 @@
  }

  /**
- * @@@
+ * Retrieves example values or templates for a given task, used to guide or validate pipeline execution.
  *
  * @private internal utility of `createPipelineExecutor`
  */
@@ -5672,9 +5685,8 @@
  }

  /**
- * @@@
- *
- * Here is the place where RAG (retrieval-augmented generation) happens
+ * Retrieves the most relevant knowledge pieces for a given task using embedding-based similarity search.
+ * This is where retrieval-augmented generation (RAG) is performed to enhance the task with external knowledge.
  *
  * @private internal utility of `createPipelineExecutor`
  */
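Since the comment above describes embedding-based similarity search, here is a compact, generic sketch of that retrieval step (cosine similarity plus top-k selection). It illustrates the technique, not the package's `getKnowledgeForTask` implementation; the `KnowledgePiece` shape is assumed for the example.

```ts
// Generic embedding-similarity retrieval (RAG) sketch — not the package's implementation.
type KnowledgePiece = { content: string; embedding: ReadonlyArray<number> };

function cosineSimilarity(a: ReadonlyArray<number>, b: ReadonlyArray<number>): number {
    let dot = 0;
    let normA = 0;
    let normB = 0;
    for (let i = 0; i < a.length; i++) {
        dot += a[i] * b[i];
        normA += a[i] * a[i];
        normB += b[i] * b[i];
    }
    return dot / (Math.sqrt(normA) * Math.sqrt(normB) || 1);
}

function retrieveRelevantKnowledge(
    taskEmbedding: ReadonlyArray<number>,
    pieces: ReadonlyArray<KnowledgePiece>,
    topK: number,
): ReadonlyArray<KnowledgePiece> {
    return [...pieces]
        .map((piece) => ({ piece, score: cosineSimilarity(taskEmbedding, piece.embedding) }))
        .sort((a, b) => b.score - a.score)
        .slice(0, topK)
        .map(({ piece }) => piece);
}
```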
@@ -5893,7 +5905,8 @@
  */

  /**
- * @@@
+ * Filters and returns only the output parameters from the provided pipeline execution options.
+ * Adds warnings for any expected output parameters that are missing.
  *
  * @private internal utility of `createPipelineExecutor`
  */
@@ -6425,11 +6438,16 @@
  */

  /**
- * @@@
+ * Creates LLM execution tools from provided configuration objects
+ *
+ * Instantiates and configures LLM tool instances for each configuration entry,
+ * combining them into a unified interface via MultipleLlmExecutionTools.
  *
  * Note: This function is not cached, every call creates new instance of `MultipleLlmExecutionTools`
  *
- * @returns @@@
+ * @param configuration Array of LLM tool configurations to instantiate
+ * @param options Additional options for configuring the LLM tools
+ * @returns A unified interface combining all successfully instantiated LLM tools
  * @public exported from `@promptbook/core`
  */
  function createLlmToolsFromConfiguration(configuration, options = {}) {
@@ -6468,7 +6486,11 @@
  /**
  * TODO: [🎌] Together with `createLlmToolsFromConfiguration` + 'EXECUTION_TOOLS_CLASSES' gets to `@promptbook/core` ALL model providers, make this more efficient
  * TODO: [🧠][🎌] Dynamically install required providers
- * TODO: @@@ write discussion about this - wizzard
+ * TODO: We should implement an interactive configuration wizard that would:
+ * 1. Detect which LLM providers are available in the environment
+ * 2. Guide users through required configuration settings for each provider
+ * 3. Allow testing connections before completing setup
+ * 4. Generate appropriate configuration code for application integration
  * TODO: [🧠][🍛] Which name is better `createLlmToolsFromConfig` or `createLlmToolsFromConfiguration`?
  * TODO: [🧠] Is there some meaningfull way how to test this util
  * TODO: This should be maybe not under `_common` but under `utils`