@promptbook/markdown-utils 0.92.0-26 → 0.92.0-28

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50) hide show
  1. package/esm/index.es.js +17 -9
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/core.index.d.ts +4 -4
  4. package/esm/typings/src/commands/FOREACH/foreachCommandParser.d.ts +0 -2
  5. package/esm/typings/src/commands/_BOILERPLATE/boilerplateCommandParser.d.ts +1 -1
  6. package/esm/typings/src/constants.d.ts +35 -0
  7. package/esm/typings/src/executables/$provideExecutablesForNode.d.ts +1 -1
  8. package/esm/typings/src/executables/apps/locateLibreoffice.d.ts +2 -1
  9. package/esm/typings/src/executables/apps/locatePandoc.d.ts +2 -1
  10. package/esm/typings/src/executables/platforms/locateAppOnLinux.d.ts +2 -1
  11. package/esm/typings/src/executables/platforms/locateAppOnMacOs.d.ts +2 -1
  12. package/esm/typings/src/executables/platforms/locateAppOnWindows.d.ts +2 -1
  13. package/esm/typings/src/execution/AbstractTaskResult.d.ts +1 -1
  14. package/esm/typings/src/execution/LlmExecutionToolsConstructor.d.ts +2 -1
  15. package/esm/typings/src/execution/PipelineExecutorResult.d.ts +1 -1
  16. package/esm/typings/src/execution/createPipelineExecutor/$OngoingTaskResult.d.ts +12 -9
  17. package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +20 -14
  18. package/esm/typings/src/execution/createPipelineExecutor/filterJustOutputParameters.d.ts +7 -6
  19. package/esm/typings/src/execution/createPipelineExecutor/getContextForTask.d.ts +5 -1
  20. package/esm/typings/src/execution/createPipelineExecutor/getExamplesForTask.d.ts +1 -1
  21. package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +8 -11
  22. package/esm/typings/src/execution/translation/automatic-translate/automatic-translators/LindatAutomaticTranslator.d.ts +4 -4
  23. package/esm/typings/src/execution/utils/uncertainNumber.d.ts +3 -2
  24. package/esm/typings/src/formats/csv/CsvSettings.d.ts +2 -2
  25. package/esm/typings/src/formfactors/_common/AbstractFormfactorDefinition.d.ts +16 -7
  26. package/esm/typings/src/formfactors/_common/FormfactorDefinition.d.ts +3 -1
  27. package/esm/typings/src/formfactors/chatbot/ChatbotFormfactorDefinition.d.ts +2 -2
  28. package/esm/typings/src/formfactors/completion/CompletionFormfactorDefinition.d.ts +1 -1
  29. package/esm/typings/src/formfactors/generator/GeneratorFormfactorDefinition.d.ts +2 -1
  30. package/esm/typings/src/formfactors/generic/GenericFormfactorDefinition.d.ts +2 -2
  31. package/esm/typings/src/formfactors/index.d.ts +5 -5
  32. package/esm/typings/src/formfactors/matcher/MatcherFormfactorDefinition.d.ts +4 -2
  33. package/esm/typings/src/formfactors/translator/TranslatorFormfactorDefinition.d.ts +3 -2
  34. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForTestingAndScriptsAndPlayground.d.ts +4 -3
  35. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsFromEnv.d.ts +17 -4
  36. package/esm/typings/src/llm-providers/_common/register/LlmToolsConfiguration.d.ts +11 -4
  37. package/esm/typings/src/llm-providers/_common/register/LlmToolsMetadata.d.ts +21 -42
  38. package/esm/typings/src/llm-providers/_common/register/LlmToolsOptions.d.ts +5 -1
  39. package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts +12 -3
  40. package/esm/typings/src/llm-providers/_common/utils/cache/CacheItem.d.ts +5 -0
  41. package/esm/typings/src/llm-providers/anthropic-claude/anthropic-claude-models.d.ts +1 -1
  42. package/esm/typings/src/llm-providers/deepseek/deepseek-models.d.ts +1 -1
  43. package/esm/typings/src/llm-providers/google/google-models.d.ts +1 -1
  44. package/esm/typings/src/llm-providers/openai/openai-models.d.ts +1 -1
  45. package/esm/typings/src/llm-providers/openai/register-configuration.d.ts +2 -2
  46. package/esm/typings/src/llm-providers/openai/register-constructor.d.ts +2 -2
  47. package/esm/typings/src/version.d.ts +1 -1
  48. package/package.json +1 -1
  49. package/umd/index.umd.js +17 -9
  50. package/umd/index.umd.js.map +1 -1
@@ -1,11 +1,13 @@
1
1
  /**
2
- * Matcher is form of app that @@@
2
+ * Matcher is form of app that evaluates (spreadsheet) content against defined criteria or patterns,
3
+ * determining if it matches or meets specific requirements. Used for classification,
4
+ * validation, filtering, and quality assessment of inputs.
3
5
  *
4
6
  * @public exported from `@promptbook/core`
5
7
  */
6
8
  export declare const MatcherFormfactorDefinition: {
7
9
  readonly name: "EXPERIMENTAL_MATCHER";
8
- readonly description: "@@@";
10
+ readonly description: "An evaluation system that determines whether content meets specific criteria or patterns.\n Used for content validation, quality assessment, and intelligent filtering tasks. Currently in experimental phase.";
9
11
  readonly documentationUrl: "https://github.com/webgptorg/promptbook/discussions/177";
10
12
  readonly pipelineInterface: {
11
13
  readonly inputParameters: readonly [{
@@ -1,11 +1,12 @@
1
1
  /**
2
- * Translator is form of app that @@@
2
+ * Translator is form of app that transforms input text from one form to another,
3
+ * such as language translation, style conversion, tone modification, or other text transformations.
3
4
  *
4
5
  * @public exported from `@promptbook/core`
5
6
  */
6
7
  export declare const TranslatorFormfactorDefinition: {
7
8
  readonly name: "TRANSLATOR";
8
- readonly description: "@@@";
9
+ readonly description: "A text transformation system that converts input content into different forms,\n including language translations, paraphrasing, style conversions, and tone adjustments.\n This form factor takes one input and produces one transformed output.";
9
10
  readonly documentationUrl: "https://github.com/webgptorg/promptbook/discussions/175";
10
11
  readonly pipelineInterface: {
11
12
  readonly inputParameters: readonly [{
@@ -3,7 +3,8 @@ import type { LlmExecutionToolsWithTotalUsage } from '../utils/count-total-usage
3
3
  import type { CreateLlmToolsFromConfigurationOptions } from './createLlmToolsFromConfiguration';
4
4
  type GetLlmToolsForTestingAndScriptsAndPlaygroundOptions = CreateLlmToolsFromConfigurationOptions & {
5
5
  /**
6
- * @@@
6
+ * Flag indicating whether the cache should be reloaded or reused
7
+ * When set to true, the existing cache will not be used but things will still be saved to the cache
7
8
  *
8
9
  * @default false
9
10
  */
@@ -22,5 +23,5 @@ export {};
22
23
  * Note: [⚪] This should never be in any released package
23
24
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
24
25
  * TODO: This should be maybe not under `_common` but under `utils-internal` / `utils/internal`
25
- * TODO: [®] DRY Register logic
26
- */
26
+ * TODO: [®] DRY Register logic
27
+ */
@@ -1,11 +1,14 @@
1
1
  import { MultipleLlmExecutionTools } from '../../multiple/MultipleLlmExecutionTools';
2
2
  import type { CreateLlmToolsFromConfigurationOptions } from './createLlmToolsFromConfiguration';
3
3
  /**
4
- * @@@
4
+ * Automatically configures LLM tools from environment variables in Node.js
5
+ *
6
+ * This utility function detects available LLM providers based on environment variables
7
+ * and creates properly configured LLM execution tools for each detected provider.
5
8
  *
6
9
  * Note: This function is not cached, every call creates new instance of `MultipleLlmExecutionTools`
7
10
  *
8
- * @@@ .env
11
+ * Supports environment variables from .env files when dotenv is configured
9
12
  * Note: `$` is used to indicate that this function is not a pure function - it uses filesystem to access `.env` file
10
13
  *
11
14
  * It looks for environment variables:
@@ -13,12 +16,22 @@ import type { CreateLlmToolsFromConfigurationOptions } from './createLlmToolsFro
13
16
  * - `process.env.ANTHROPIC_CLAUDE_API_KEY`
14
17
  * - ...
15
18
  *
16
- * @returns @@@
19
+ * @param options Configuration options for the LLM tools
20
+ * @returns A unified interface containing all detected and configured LLM tools
17
21
  * @public exported from `@promptbook/node`
18
22
  */
19
23
  export declare function $provideLlmToolsFromEnv(options?: CreateLlmToolsFromConfigurationOptions): Promise<MultipleLlmExecutionTools>;
20
24
  /**
21
- * TODO: @@@ write `$provideLlmToolsFromEnv` vs `$provideLlmToolsConfigurationFromEnv` vs `createLlmToolsFromConfiguration`
25
+ * TODO: The architecture for LLM tools configuration consists of three key functions:
26
+ * 1. `$provideLlmToolsFromEnv` - High-level function that detects available providers from env vars and returns ready-to-use LLM tools
27
+ * 2. `$provideLlmToolsConfigurationFromEnv` - Middle layer that extracts configuration objects from environment variables
28
+ * 3. `createLlmToolsFromConfiguration` - Low-level function that instantiates LLM tools from explicit configuration
29
+ *
30
+ * This layered approach allows flexibility in how tools are configured:
31
+ * - Use $provideLlmToolsFromEnv for automatic detection and setup in Node.js environments
32
+ * - Use $provideLlmToolsConfigurationFromEnv to extract config objects for modification before instantiation
33
+ * - Use createLlmToolsFromConfiguration for explicit control over tool configurations
34
+ *
22
35
  * TODO: [🧠][🍛] Which name is better `$provideLlmToolsFromEnv` or `$provideLlmToolsFromEnvironment`?
23
36
  * TODO: [🧠] Is there some meaningful way to test this util
24
37
  * Note: [🟢] Code in this file should never be never released in packages that could be imported into browser environment
@@ -2,17 +2,24 @@ import type { string_title } from '../../../types/typeAliases';
2
2
  import type { Registered } from '../../../utils/$Register';
3
3
  import type { LlmToolsOptions } from './LlmToolsOptions';
4
4
  /**
5
- * @@@
5
+ * Configuration definition for LLM execution tools, containing provider-specific settings
6
+ * that can be passed during runtime to instantiate and configure LLM tools properly.
6
7
  *
7
- * @@@ `LlmToolsMetadata` vs `LlmToolsConfiguration` vs `LlmToolsOptions` (vs `Registered`)
8
+ * The Promptbook LLM tools architecture involves several related types:
9
+ * - `LlmToolsMetadata`: Contains static metadata about the tool, such as name, version, and capabilities
10
+ * - `LlmToolsConfiguration`: Runtime configuration from environment variables or settings
11
+ * - `LlmToolsOptions`: Provider-specific options for instantiating tools
12
+ * - `Registered`: The record of a registered tool in the global registry
8
13
  */
9
14
  export type LlmToolsConfiguration = ReadonlyArray<Registered & {
10
15
  /**
11
- * @@@
16
+ * Human-readable name for this specific provider configuration
17
+ * Used in UI components and logs for identifying this particular configuration
12
18
  */
13
19
  readonly title: string_title;
14
20
  /**
15
- * @@@
21
+ * Provider-specific configuration options used for instantiating and configuring LLM tools
22
+ * Contains values like API keys, model preferences, endpoint URLs, and other settings
16
23
  */
17
24
  readonly options: LlmToolsOptions;
18
25
  }>;
@@ -1,61 +1,34 @@
1
+ import { MODEL_ORDERS } from '../../../constants';
2
+ import { MODEL_TRUST_LEVELS } from '../../../constants';
1
3
  import type { string_name } from '../../../types/typeAliases';
2
4
  import type { string_title } from '../../../types/typeAliases';
3
5
  import type { Registered } from '../../../utils/$Register';
4
6
  import type { string_SCREAMING_CASE } from '../../../utils/normalization/normalizeTo_SCREAMING_CASE';
5
7
  import type { LlmToolsConfiguration } from './LlmToolsConfiguration';
6
8
  /**
7
- * How is the model provider trusted?
9
+ * Metadata definition for LLM execution tools that provides information about a provider's capabilities,
10
+ * configuration options, and relationships within the registry system.
8
11
  *
9
- * @public exported from `@promptbook/core`
10
- */
11
- export declare const MODEL_TRUST_LEVEL: {
12
- readonly FULL: "Model is running on the local machine, training data and model weights are known, data are ethically sourced";
13
- readonly OPEN: "Model is open source, training data and model weights are known";
14
- readonly PARTIALLY_OPEN: "Model is open source, but training data and model weights are not (fully) known";
15
- readonly CLOSED_LOCAL: "Model can be run locally, but it is not open source";
16
- readonly CLOSED_FREE: "Model is behind API gateway but free to use";
17
- readonly CLOSED_BUSINESS: "Model is behind API gateway and paid but has good SLA, TOS, privacy policy and in general is a good to use in business applications";
18
- readonly CLOSED: "Model is behind API gateway and paid";
19
- readonly UNTRUSTED: "Model has questions about the training data and ethics, but it is not known if it is a problem or not";
20
- readonly VURNABLE: "Model has some known serious vulnerabilities, leaks, ethical problems, etc.";
21
- };
22
- /**
23
- * How is the model provider important?
24
- *
25
- * @public exported from `@promptbook/core`
26
- */
27
- export declare const MODEL_ORDER: {
28
- /**
29
- * Top-tier models, e.g. OpenAI, Anthropic,...
30
- */
31
- readonly TOP_TIER: 333;
32
- /**
33
- * Mid-tier models, e.g. Llama, Mistral, etc.
34
- */
35
- readonly NORMAL: 100;
36
- /**
37
- * Low-tier models, e.g. Phi, Tiny, etc.
38
- */
39
- readonly LOW_TIER: 0;
40
- };
41
- /**
42
- * @@@
43
- *
44
- * @@@ `LlmToolsMetadata` vs `LlmToolsConfiguration` vs `LlmToolsOptions` (vs `Registered`)
12
+ * The Promptbook LLM tools architecture involves several related types:
13
+ * - `LlmToolsMetadata`: Contains static metadata about the tool, such as name, version, and capabilities
14
+ * - `LlmToolsConfiguration`: Runtime configuration from environment variables or settings
15
+ * - `LlmToolsOptions`: Provider-specific options for instantiating tools
16
+ * - `Registered`: The record of a registered tool in the global registry
45
17
  */
46
18
  export type LlmToolsMetadata = Registered & {
47
19
  /**
48
- * @@@
20
+ * Human-readable display name for the LLM provider
21
+ * Used in UI components and documentation references
49
22
  */
50
23
  readonly title: string_title;
51
24
  /**
52
25
  * How is the model trusted?
53
26
  */
54
- readonly trustLevel: keyof typeof MODEL_TRUST_LEVEL;
27
+ readonly trustLevel: keyof typeof MODEL_TRUST_LEVELS;
55
28
  /**
56
29
  * How is the model provider important and should be sorted in the list of available providers?
57
30
  */
58
- readonly order: typeof MODEL_ORDER[keyof typeof MODEL_ORDER] | number;
31
+ readonly order: typeof MODEL_ORDERS[keyof typeof MODEL_ORDERS] | number;
59
32
  /**
60
33
  * List of environment variables that can be used to configure the provider
61
34
  *
@@ -64,11 +37,17 @@ export type LlmToolsMetadata = Registered & {
64
37
  */
65
38
  readonly envVariables: ReadonlyArray<string_name & string_SCREAMING_CASE> | null;
66
39
  /**
67
- * @@@
40
+ * Provides a default configuration template for this LLM provider
41
+ * Used to generate example configurations or as fallback when no specific configuration is provided
42
+ * @returns A standardized configuration object for this LLM provider
68
43
  */
69
44
  getBoilerplateConfiguration(): LlmToolsConfiguration[number];
70
45
  /**
71
- * @@@
46
+ * Creates a provider-specific configuration object from environment variables
47
+ * Used to automatically configure LLM tools based on available environment settings
48
+ *
49
+ * @param env Dictionary of environment variables (key-value pairs)
50
+ * @returns Configuration object for this LLM provider if required variables are present, or null if configuration is not possible
72
51
  */
73
52
  createConfigurationFromEnv(env: Record<string_name, string>): LlmToolsConfiguration[number] | null;
74
53
  };
@@ -5,7 +5,11 @@ import type { TODO_object } from '../../../utils/organization/TODO_object';
5
5
  * This type is used to pass provider-specific options to LLM execution tools.
6
6
  *
7
7
  *
8
- * @@@ `LlmToolsMetadata` vs `LlmToolsConfiguration` vs `LlmToolsOptions` (vs `Registered`)
8
+ * The Promptbook LLM tools architecture involves several related types:
9
+ * - `LlmToolsMetadata`: Contains static metadata about the tool, such as name, version, and capabilities
10
+ * - `LlmToolsConfiguration`: Runtime configuration from environment variables or settings
11
+ * - `LlmToolsOptions`: Provider-specific options for instantiating tools
12
+ * - `Registered`: The record of a registered tool in the global registry
9
13
  */
10
14
  export type LlmToolsOptions = TODO_object;
11
15
  /**
@@ -21,18 +21,27 @@ export type CreateLlmToolsFromConfigurationOptions = {
21
21
  readonly userId?: string_user_id;
22
22
  };
23
23
  /**
24
- * @@@
24
+ * Creates LLM execution tools from provided configuration objects
25
+ *
26
+ * Instantiates and configures LLM tool instances for each configuration entry,
27
+ * combining them into a unified interface via MultipleLlmExecutionTools.
25
28
  *
26
29
  * Note: This function is not cached, every call creates new instance of `MultipleLlmExecutionTools`
27
30
  *
28
- * @returns @@@
31
+ * @param configuration Array of LLM tool configurations to instantiate
32
+ * @param options Additional options for configuring the LLM tools
33
+ * @returns A unified interface combining all successfully instantiated LLM tools
29
34
  * @public exported from `@promptbook/core`
30
35
  */
31
36
  export declare function createLlmToolsFromConfiguration(configuration: LlmToolsConfiguration, options?: CreateLlmToolsFromConfigurationOptions): MultipleLlmExecutionTools;
32
37
  /**
33
38
  * TODO: [🎌] Together with `createLlmToolsFromConfiguration` + 'EXECUTION_TOOLS_CLASSES' gets to `@promptbook/core` ALL model providers, make this more efficient
34
39
  * TODO: [🧠][🎌] Dynamically install required providers
35
- * TODO: @@@ write discussion about this - wizzard
40
+ * TODO: We should implement an interactive configuration wizard that would:
41
+ * 1. Detect which LLM providers are available in the environment
42
+ * 2. Guide users through required configuration settings for each provider
43
+ * 3. Allow testing connections before completing setup
44
+ * 4. Generate appropriate configuration code for application integration
36
45
  * TODO: [🧠][🍛] Which name is better `createLlmToolsFromConfig` or `createLlmToolsFromConfiguration`?
37
46
  * TODO: [🧠] Is there some meaningful way to test this util
38
47
  * TODO: This should be maybe not under `_common` but under `utils`
@@ -1,6 +1,7 @@
1
1
  import type { PromptResult } from '../../../../execution/PromptResult';
2
2
  import type { Prompt } from '../../../../types/Prompt';
3
3
  import type { string_date_iso8601 } from '../../../../types/typeAliases';
4
+ import type { string_semantic_version } from '../../../../types/typeAliases';
4
5
  import type { string_promptbook_version } from '../../../../version';
5
6
  /**
6
7
  * Represents a single item stored in the LLM cache.
@@ -14,6 +15,10 @@ export type CacheItem = {
14
15
  * The version of the Promptbook library used when this cache item was created.
15
16
  */
16
17
  promptbookVersion?: string_promptbook_version;
18
+ /**
19
+ * The version of the Book language used when this cache item was created.
20
+ */
21
+ bookVersion?: string_semantic_version;
17
22
  /**
18
23
  * The prompt that was sent to the LLM.
19
24
  */
@@ -3,7 +3,7 @@ import type { number_usd } from '../../types/typeAliases';
3
3
  /**
4
4
  * List of available Anthropic Claude models with pricing
5
5
  *
6
- * Note: Done at 2024-08-16
6
+ * Note: Done at 2025-05-06
7
7
  *
8
8
  * @see https://docs.anthropic.com/en/docs/models-overview
9
9
  * @public exported from `@promptbook/anthropic-claude`
@@ -3,7 +3,7 @@ import type { number_usd } from '../../types/typeAliases';
3
3
  /**
4
4
  * List of available Deepseek models with descriptions
5
5
  *
6
- * Note: Done at 2025-04-22
6
+ * Note: Done at 2025-05-06
7
7
  *
8
8
  * @see https://www.deepseek.com/models
9
9
  * @public exported from `@promptbook/deepseek`
@@ -3,7 +3,7 @@ import type { number_usd } from '../../types/typeAliases';
3
3
  /**
4
4
  * List of available Google models with descriptions
5
5
  *
6
- * Note: Done at 2025-04-22
6
+ * Note: Done at 2025-05-06
7
7
  *
8
8
  * @see https://ai.google.dev/models/gemini
9
9
  * @public exported from `@promptbook/google`
@@ -3,7 +3,7 @@ import type { number_usd } from '../../types/typeAliases';
3
3
  /**
4
4
  * List of available OpenAI models with pricing
5
5
  *
6
- * Note: Done at 2024-05-20
6
+ * Note: Done at 2025-05-06
7
7
  *
8
8
  * @see https://platform.openai.com/docs/models/
9
9
  * @see https://openai.com/api/pricing/
@@ -10,9 +10,9 @@ import type { Registration } from '../../utils/$Register';
10
10
  */
11
11
  export declare const _OpenAiMetadataRegistration: Registration;
12
12
  /**
13
- * @@@ registration1 of default configuration for Open AI
13
+ * Registration of the OpenAI Assistant metadata
14
14
  *
15
- * Note: [🏐] Configurations registrations are done in @@@ BUT constructor @@@
15
+ * Note: [🏐] Configurations registrations are done in the metadata registration section, but the constructor registration is handled separately.
16
16
  *
17
17
  * @public exported from `@promptbook/core`
18
18
  * @public exported from `@promptbook/wizzard`
@@ -10,9 +10,9 @@ import type { Registration } from '../../utils/$Register';
10
10
  */
11
11
  export declare const _OpenAiRegistration: Registration;
12
12
  /**
13
- * @@@ registration2
13
+ * Registration of the OpenAI Assistant provider
14
14
  *
15
- * Note: [🏐] Configurations registrations are done in @@@ BUT constructor @@@
15
+ * Note: [🏐] Configuration registrations are done in register-configuration.ts BUT constructor registrations are done in register-constructor.ts
16
16
  *
17
17
  * @public exported from `@promptbook/openai`
18
18
  * @public exported from `@promptbook/wizzard`
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
15
15
  export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
16
16
  /**
17
17
  * Represents the version string of the Promptbook engine.
18
- * It follows semantic versioning (e.g., `0.92.0-25`).
18
+ * It follows semantic versioning (e.g., `0.92.0-27`).
19
19
  *
20
20
  * @generated
21
21
  */
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@promptbook/markdown-utils",
3
- "version": "0.92.0-26",
3
+ "version": "0.92.0-28",
4
4
  "description": "It's time for a paradigm shift. The future of software in plain English, French or Latin",
5
5
  "private": false,
6
6
  "sideEffects": false,
package/umd/index.umd.js CHANGED
@@ -25,7 +25,7 @@
25
25
  * @generated
26
26
  * @see https://github.com/webgptorg/promptbook
27
27
  */
28
- const PROMPTBOOK_ENGINE_VERSION = '0.92.0-26';
28
+ const PROMPTBOOK_ENGINE_VERSION = '0.92.0-28';
29
29
  /**
30
30
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
31
31
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -4180,7 +4180,7 @@
4180
4180
  }
4181
4181
 
4182
4182
  /**
4183
- * @@@
4183
+ * Contains configuration options for parsing and generating CSV files, such as delimiters and quoting rules.
4184
4184
  *
4185
4185
  * @public exported from `@promptbook/core`
4186
4186
  */
@@ -4776,8 +4776,12 @@
4776
4776
  */
4777
4777
 
4778
4778
  /**
4779
- * @@@
4779
+ * Executes a pipeline task with multiple attempts, including joker and retry logic. Handles different task types
4780
+ * (prompt, script, dialog, etc.), applies postprocessing, checks expectations, and updates the execution report.
4781
+ * Throws errors if execution fails after all attempts.
4780
4782
  *
4783
+ * @param options - The options for execution, including task, parameters, pipeline, and configuration.
4784
+ * @returns The result string of the executed task.
4781
4785
  * @private internal utility of `createPipelineExecutor`
4782
4786
  */
4783
4787
  async function executeAttempts(options) {
@@ -5235,8 +5239,12 @@
5235
5239
  }
5236
5240
 
5237
5241
  /**
5238
- * @@@
5242
+ * Returns the context for a given task, typically used to provide additional information or variables
5243
+ * required for the execution of the task within a pipeline. The context is returned as a string value
5244
+ * that may include markdown formatting.
5239
5245
  *
5246
+ * @param task - The task for which the context is being generated. This should be a deeply immutable TaskJson object.
5247
+ * @returns The context as a string, formatted as markdown and parameter value.
5240
5248
  * @private internal utility of `createPipelineExecutor`
5241
5249
  */
5242
5250
  async function getContextForTask(task) {
@@ -5244,7 +5252,7 @@
5244
5252
  }
5245
5253
 
5246
5254
  /**
5247
- * @@@
5255
+ * Retrieves example values or templates for a given task, used to guide or validate pipeline execution.
5248
5256
  *
5249
5257
  * @private internal utility of `createPipelineExecutor`
5250
5258
  */
@@ -5291,9 +5299,8 @@
5291
5299
  }
5292
5300
 
5293
5301
  /**
5294
- * @@@
5295
- *
5296
- * Here is the place where RAG (retrieval-augmented generation) happens
5302
+ * Retrieves the most relevant knowledge pieces for a given task using embedding-based similarity search.
5303
+ * This is where retrieval-augmented generation (RAG) is performed to enhance the task with external knowledge.
5297
5304
  *
5298
5305
  * @private internal utility of `createPipelineExecutor`
5299
5306
  */
@@ -5512,7 +5519,8 @@
5512
5519
  */
5513
5520
 
5514
5521
  /**
5515
- * @@@
5522
+ * Filters and returns only the output parameters from the provided pipeline execution options.
5523
+ * Adds warnings for any expected output parameters that are missing.
5516
5524
  *
5517
5525
  * @private internal utility of `createPipelineExecutor`
5518
5526
  */