@promptbook/core 0.68.3 → 0.68.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31)
  1. package/README.md +4 -0
  2. package/esm/index.es.js +32 -31
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/promptbook-collection/index.d.ts +0 -3
  5. package/esm/typings/src/cli/cli-commands/make.d.ts +1 -1
  6. package/esm/typings/src/collection/constructors/createCollectionFromUrl.d.ts +1 -1
  7. package/esm/typings/src/config.d.ts +2 -2
  8. package/esm/typings/src/conversion/pipelineStringToJsonSync.d.ts +1 -1
  9. package/esm/typings/src/conversion/validation/validatePipeline.d.ts +5 -5
  10. package/esm/typings/src/execution/createPipelineExecutor.d.ts +1 -1
  11. package/esm/typings/src/knowledge/prepare-knowledge/markdown/prepareKnowledgeFromMarkdown.d.ts +1 -1
  12. package/esm/typings/src/knowledge/prepare-knowledge/pdf/prepareKnowledgeFromPdf.d.ts +1 -1
  13. package/esm/typings/src/llm-providers/_common/utils/cache/CacheItem.d.ts +1 -1
  14. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +2 -1
  15. package/esm/typings/src/llm-providers/anthropic-claude/anthropic-claude-models.d.ts +1 -1
  16. package/esm/typings/src/llm-providers/anthropic-claude/createAnthropicClaudeExecutionTools.d.ts +2 -2
  17. package/esm/typings/src/llm-providers/anthropic-claude/playground/playground.d.ts +2 -2
  18. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +2 -1
  19. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +2 -1
  20. package/esm/typings/src/llm-providers/openai/playground/playground.d.ts +1 -1
  21. package/esm/typings/src/personas/preparePersona.d.ts +1 -1
  22. package/esm/typings/src/prepare/isPipelinePrepared.d.ts +1 -1
  23. package/esm/typings/src/prepare/prepareTemplates.d.ts +1 -1
  24. package/esm/typings/src/types/PipelineJson/PipelineJson.d.ts +1 -1
  25. package/esm/typings/src/types/typeAliases.d.ts +1 -1
  26. package/esm/typings/src/utils/serialization/checkSerializableAsJson.d.ts +1 -1
  27. package/esm/typings/src/utils/serialization/isSerializableAsJson.d.ts +1 -1
  28. package/package.json +1 -1
  29. package/umd/index.umd.js +32 -31
  30. package/umd/index.umd.js.map +1 -1
  31. package/esm/typings/src/personas/preparePersona.test.d.ts +0 -1
@@ -1,7 +1,6 @@
1
1
  declare const _default: ({
2
2
  title: string;
3
3
  pipelineUrl: string;
4
- promptbookVersion: string;
5
4
  parameters: {
6
5
  name: string;
7
6
  description: string;
@@ -24,7 +23,6 @@ declare const _default: ({
24
23
  } | {
25
24
  title: string;
26
25
  pipelineUrl: string;
27
- promptbookVersion: string;
28
26
  parameters: {
29
27
  name: string;
30
28
  description: string;
@@ -53,7 +51,6 @@ declare const _default: ({
53
51
  } | {
54
52
  title: string;
55
53
  pipelineUrl: string;
56
- promptbookVersion: string;
57
54
  parameters: {
58
55
  name: string;
59
56
  description: string;
@@ -6,7 +6,7 @@ import type { Command as Program } from 'commander';
6
6
  */
7
7
  export declare function initializeMakeCommand(program: Program): void;
8
8
  /**
9
- * TODO: [🥃] !!! Allow `ptbk make` without configuring any llm tools
9
+ * TODO: [🥃][main] !!! Allow `ptbk make` without configuring any llm tools
10
10
  * TODO: Maybe remove this command - "about" command should be enough?
11
11
  * TODO: [0] DRY Javascript and typescript - Maybe make ONLY typescript and for javascript just remove types
12
12
  * Note: [🟡] This code should never be published outside of `@promptbook/cli`
@@ -25,5 +25,5 @@ type CreatePipelineCollectionFromUrlyOptions = {
25
25
  export declare function createCollectionFromUrl(url: string_url | URL, options: CreatePipelineCollectionFromUrlyOptions): Promise<PipelineCollection>;
26
26
  export {};
27
27
  /**
28
- * TODO: !!!! [🧠] Library precompilation and do not mix markdown and json promptbooks
28
+ * TODO:[main] !!!! [🧠] Library precompilation and do not mix markdown and json promptbooks
29
29
  */
@@ -68,14 +68,14 @@ export declare const MAX_EXECUTION_ATTEMPTS = 3;
68
68
  export declare const MAX_FILENAME_LENGTH = 30;
69
69
  /**
70
70
  * @@@
71
- * TODO: [🐝] !!! Use
71
+ * TODO: [🐝][main] !!! Use
72
72
  *
73
73
  * @public exported from `@promptbook/core`
74
74
  */
75
75
  export declare const MAX_KNOWLEDGE_SOURCES_SCRAPING_DEPTH = 3;
76
76
  /**
77
77
  * @@@
78
- * TODO: [🐝] !!! Use
78
+ * TODO: [🐝][main] !!! Use
79
79
  *
80
80
  * @public exported from `@promptbook/core`
81
81
  */
@@ -18,7 +18,7 @@ import type { PipelineString } from '../types/PipelineString';
18
18
  */
19
19
  export declare function pipelineStringToJsonSync(pipelineString: PipelineString): PipelineJson;
20
20
  /**
21
- * TODO: !!!! Warn if used only sync version
21
+ * TODO:[main] !!!! Warn if used only sync version
22
22
  * TODO: [🚞] Report here line/column of error
23
23
  * TODO: Use spaceTrim more effectively
24
24
  * TODO: [🧠] Parameter flags - isInput, isOutput, isInternal
@@ -27,11 +27,11 @@ export declare function validatePipeline(pipeline: PipelineJson): PipelineJson;
27
27
  * > ex port function validatePipeline(promptbook: really_unknown): asserts promptbook is PipelineJson {
28
28
  */
29
29
  /**
30
- * TODO: [🐣] !!!! Validate that all samples match expectations
31
- * TODO: [🐣][🐝] !!!! Validate that knowledge is valid (non-void)
32
- * TODO: [🐣] !!!! Validate that persona can be used only with CHAT variant
33
- * TODO: [🐣] !!!! Validate that parameter with reserved name not used RESERVED_PARAMETER_NAMES
34
- * TODO: [🐣] !!!! Validate that reserved parameter is not used as joker
30
+ * TODO: [🐣][main] !!!! Validate that all samples match expectations
31
+ * TODO: [🐣][🐝][main] !!!! Validate that knowledge is valid (non-void)
32
+ * TODO: [🐣][main] !!!! Validate that persona can be used only with CHAT variant
33
+ * TODO: [🐣][main] !!!! Validate that parameter with reserved name not used RESERVED_PARAMETER_NAMES
34
+ * TODO: [🐣][main] !!!! Validate that reserved parameter is not used as joker
35
35
  * TODO: [🧠] Validation not only logic itself but imports around - files and websites and rerefenced pipelines exists
36
36
  * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
37
37
  */
@@ -58,7 +58,7 @@ interface CreatePipelineExecutorOptions {
58
58
  export declare function createPipelineExecutor(options: CreatePipelineExecutorOptions): PipelineExecutor;
59
59
  export {};
60
60
  /**
61
- * TODO: !!! Identify not only pipeline BUT exact template ${block(pipelineIdentification)}
61
+ * TODO:[main] !!! Identify not only pipeline BUT exact template ${block(pipelineIdentification)}
62
62
  * TODO: Use isVerbose here (not only pass to `preparePipeline`)
63
63
  * TODO: [🧠][🌳] Use here `countTotalUsage` and put preparation and prepared pipiline to report
64
64
  * TODO: [🪂] Use maxParallelCount here (not only pass to `preparePipeline`)
@@ -8,7 +8,7 @@ import type { string_markdown } from '../../../types/typeAliases';
8
8
  */
9
9
  export declare function prepareKnowledgeFromMarkdown(knowledgeContent: string_markdown, options: PrepareOptions): Promise<Array<Omit<KnowledgePiecePreparedJson, 'sources' | 'preparationIds'>>>;
10
10
  /**
11
- * TODO: [🐝][🔼] !!! Export via `@promptbook/markdown`
11
+ * TODO: [🐝][🔼][main] !!! Export via `@promptbook/markdown`
12
12
  * TODO: [🪂] Do it in parallel 11:11
13
13
  * Note: No need to aggregate usage here, it is done by intercepting the llmTools
14
14
  */
@@ -8,7 +8,7 @@ import type { string_base64 } from '../../../types/typeAliases';
8
8
  */
9
9
  export declare function prepareKnowledgeFromPdf(content: string_base64, options: PrepareOptions): Promise<Array<Omit<KnowledgePiecePreparedJson, 'sources' | 'preparationIds'>>>;
10
10
  /**
11
- * TODO: [🐝][🔼] !!! Export via `@promptbook/pdf`
11
+ * TODO: [🐝][🔼][main] !!! Export via `@promptbook/pdf`
12
12
  * TODO: [🧺] In future, content can be alse File or Blob BUT for now for wider compatibility its only base64
13
13
  * @see https://stackoverflow.com/questions/14653349/node-js-cant-create-blobs
14
14
  * TODO: [🪂] Do it in parallel
@@ -13,7 +13,7 @@ export type CacheItem = {
13
13
  /**
14
14
  * @@@
15
15
  */
16
- promptbookVersion: string_promptbook_version;
16
+ promptbookVersion?: string_promptbook_version;
17
17
  /**
18
18
  * @@@
19
19
  */
@@ -1,3 +1,4 @@
1
+ import Anthropic from '@anthropic-ai/sdk';
1
2
  import type { AvailableModel } from '../../execution/AvailableModel';
2
3
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
3
4
  import type { ChatPromptResult } from '../../execution/PromptResult';
@@ -26,7 +27,7 @@ export declare class AnthropicClaudeExecutionTools implements LlmExecutionTools
26
27
  constructor(options?: AnthropicClaudeExecutionToolsDirectOptions);
27
28
  get title(): string_title & string_markdown_text;
28
29
  get description(): string_markdown;
29
- private getClient;
30
+ getClient(): Promise<Anthropic>;
30
31
  /**
31
32
  * Check the `options` passed to `constructor`
32
33
  */
@@ -16,7 +16,7 @@ export declare const ANTHROPIC_CLAUDE_MODELS: Array<AvailableModel & {
16
16
  }>;
17
17
  /**
18
18
  * Note: [🤖] Add models of new variant
19
- * TODO: [🧠] !!! Add embedding models OR Anthropic has only chat+completion models?
19
+ * TODO: [🧠][main] !!! Add embedding models OR Anthropic has only chat+completion models?
20
20
  * TODO: [🧠] Some mechanism to propagate unsureness
21
21
  * TODO: [🧠][👮‍♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,...
22
22
  * TODO: [🎰] Some mechanism to auto-update available models
@@ -11,8 +11,8 @@ export declare const createAnthropicClaudeExecutionTools: ((options: AnthropicCl
11
11
  className: string;
12
12
  };
13
13
  /**
14
- * TODO: [🧠] !!!! Make anonymous this with all LLM providers
15
- * TODO: [🧠][🧱] !!!! Maybe change all `new AnthropicClaudeExecutionTools` -> `createAnthropicClaudeExecutionTools` in manual
14
+ * TODO: [🧠][main] !!!! Make anonymous this with all LLM providers
15
+ * TODO: [🧠][🧱][main] !!!! Maybe change all `new AnthropicClaudeExecutionTools` -> `createAnthropicClaudeExecutionTools` in manual
16
16
  * TODO: [🧠] Maybe auto-detect usage in browser and determine default value of `isProxied`
17
17
  * TODO: [🦺] Is there some way how to put `packageName` and `className` on top and function definition on bottom?
18
18
  * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
@@ -1,6 +1,6 @@
1
1
  #!/usr/bin/env ts-node
2
2
  export {};
3
3
  /**
4
- * TODO: !!! Playground with WebGPT / Promptbook.studio anonymous server
5
- * TODO: !!! Test here that `systemMessage`, `temperature` and `seed` are working correctly
4
+ * TODO:[main] !!! Playground with WebGPT / Promptbook.studio anonymous server
5
+ * TODO:[main] !!! Test here that `systemMessage`, `temperature` and `seed` are working correctly
6
6
  */
@@ -1,3 +1,4 @@
1
+ import { OpenAIClient } from '@azure/openai';
1
2
  import type { AvailableModel } from '../../execution/AvailableModel';
2
3
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
3
4
  import type { ChatPromptResult } from '../../execution/PromptResult';
@@ -26,7 +27,7 @@ export declare class AzureOpenAiExecutionTools implements LlmExecutionTools {
26
27
  constructor(options: AzureOpenAiExecutionToolsOptions);
27
28
  get title(): string_title & string_markdown_text;
28
29
  get description(): string_markdown;
29
- private getClient;
30
+ getClient(): Promise<OpenAIClient>;
30
31
  /**
31
32
  * Check the `options` passed to `constructor`
32
33
  */
@@ -1,3 +1,4 @@
1
+ import OpenAI from 'openai';
1
2
  import type { AvailableModel } from '../../execution/AvailableModel';
2
3
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
3
4
  import type { ChatPromptResult } from '../../execution/PromptResult';
@@ -27,7 +28,7 @@ export declare class OpenAiExecutionTools implements LlmExecutionTools {
27
28
  constructor(options?: OpenAiExecutionToolsOptions);
28
29
  get title(): string_title & string_markdown_text;
29
30
  get description(): string_markdown;
30
- private getClient;
31
+ getClient(): Promise<OpenAI>;
31
32
  /**
32
33
  * Check the `options` passed to `constructor`
33
34
  */
@@ -1,5 +1,5 @@
1
1
  #!/usr/bin/env ts-node
2
2
  export {};
3
3
  /**
4
- * TODO: !!! Test here that `systemMessage`, `temperature` and `seed` are working correctly
4
+ * TODO:[main] !!! Test here that `systemMessage`, `temperature` and `seed` are working correctly
5
5
  */
@@ -9,7 +9,7 @@ import type { string_persona_description } from '../types/typeAliases';
9
9
  */
10
10
  export declare function preparePersona(personaDescription: string_persona_description, options: PrepareOptions): Promise<PersonaPreparedJson['modelRequirements']>;
11
11
  /**
12
- * TODO: [🔃] !!!!! If the persona was prepared with different version or different set of models, prepare it once again
12
+ * TODO: [🔃][main] !!!!! If the persona was prepared with different version or different set of models, prepare it once again
13
13
  * TODO: [🏢] !! Check validity of `modelName` in pipeline
14
14
  * TODO: [🏢] !! Check validity of `systemMessage` in pipeline
15
15
  * TODO: [🏢] !! Check validity of `temperature` in pipeline
@@ -6,7 +6,7 @@ import type { PipelineJson } from '../types/PipelineJson/PipelineJson';
6
6
  */
7
7
  export declare function isPipelinePrepared(pipeline: PipelineJson): boolean;
8
8
  /**
9
- * TODO: [🔃] !!!!! If the pipeline was prepared with different version or different set of models, prepare it once again
9
+ * TODO: [🔃][main] !!!!! If the pipeline was prepared with different version or different set of models, prepare it once again
10
10
  * TODO: [🐠] Maybe base this on `makeValidator`
11
11
  * TODO: [🧊] Pipeline can be partially prepared, this should return true ONLY if fully prepared
12
12
  * TODO: [🧿] Maybe do same process with same granularity and subfinctions as `preparePipeline`
@@ -23,7 +23,7 @@ export {};
23
23
  /**
24
24
  * TODO: [🧠] Add context to each template (if missing)
25
25
  * TODO: [🧠] What is better name `prepareTemplate` or `prepareTemplateAndParameters`
26
- * TODO: [♨] !!! Prepare index the samples and maybe templates
26
+ * TODO: [♨][main] !!! Prepare index the samples and maybe templates
27
27
  * TODO: Write tests for `preparePipeline`
28
28
  * TODO: [🏏] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
29
29
  * TODO: [🧊] In future one preparation can take data from previous preparation and save tokens and time
@@ -42,7 +42,7 @@ export type PipelineJson = {
42
42
  /**
43
43
  * Version of the .ptbk.json file
44
44
  */
45
- readonly promptbookVersion: string_semantic_version;
45
+ readonly promptbookVersion?: string_semantic_version;
46
46
  /**
47
47
  * Description of the promptbook
48
48
  * It can use multiple paragraphs of simple markdown formatting like **bold**, *italic*, [link](https://example.com), ... BUT not code blocks and structure
@@ -592,7 +592,7 @@ export type number_megabytes = number_positive;
592
592
  export type number_gigabytes = number_positive;
593
593
  export type number_terabytes = number_positive;
594
594
  /**.
595
- * TODO: !!! Change "For example" to @example
595
+ * TODO:[main] !!! Change "For example" to @example
596
596
  * TODO: !! Change to branded types
597
597
  * TODO: Delete type aliases that are not exported or used internally
598
598
  */
@@ -22,6 +22,6 @@ import type { string_name } from '../../types/typeAliases';
22
22
  export declare function checkSerializableAsJson(name: string_name, value: unknown): void;
23
23
  /**
24
24
  * TODO: [🧠][🛣] More elegant way to tracking than passing `name`
25
- * TODO: [🧠] !!! In-memory cache of same values to prevent multiple checks
25
+ * TODO: [🧠][main] !!! In-memory cache of same values to prevent multiple checks
26
26
  * Note: [🐠] This is how `checkSerializableAsJson` + `isSerializableAsJson` together can just retun true/false or rich error message
27
27
  */
@@ -19,6 +19,6 @@
19
19
  */
20
20
  export declare function isSerializableAsJson(value: unknown): boolean;
21
21
  /**
22
- * TODO: [🧠] !!! In-memory cache of same values to prevent multiple checks
22
+ * TODO: [🧠][main] !!! In-memory cache of same values to prevent multiple checks
23
23
  * TODO: [🧠][💺] Can be done this on type-level?
24
24
  */
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@promptbook/core",
3
- "version": "0.68.3",
3
+ "version": "0.68.5",
4
4
  "description": "Supercharge your use of large language models",
5
5
  "private": false,
6
6
  "sideEffects": false,
package/umd/index.umd.js CHANGED
@@ -16,8 +16,8 @@
16
16
  /**
17
17
  * The version of the Promptbook library
18
18
  */
19
- var PROMPTBOOK_VERSION = '0.68.2';
20
- // TODO: !!!! List here all the versions and annotate + put into script
19
+ var PROMPTBOOK_VERSION = '0.68.4';
20
+ // TODO:[main] !!!! List here all the versions and annotate + put into script
21
21
 
22
22
  /*! *****************************************************************************
23
23
  Copyright (c) Microsoft Corporation.
@@ -229,7 +229,7 @@
229
229
  commands.push("PIPELINE URL ".concat(pipelineUrl));
230
230
  }
231
231
  commands.push("PROMPTBOOK VERSION ".concat(promptbookVersion));
232
- // TODO: !!! This increase size of the bundle and is probbably not necessary
232
+ // TODO:[main] !!! This increase size of the bundle and is probbably not necessary
233
233
  pipelineString = prettifyMarkdown(pipelineString);
234
234
  try {
235
235
  for (var _g = __values(parameters.filter(function (_a) {
@@ -377,12 +377,12 @@
377
377
  pipelineString += '```' + contentLanguage;
378
378
  pipelineString += '\n';
379
379
  pipelineString += spaceTrim__default["default"](content);
380
- // <- TODO: !!! Escape
380
+ // <- TODO:[main] !!! Escape
381
381
  // <- TODO: [🧠] Some clear strategy how to spaceTrim the blocks
382
382
  pipelineString += '\n';
383
383
  pipelineString += '```';
384
384
  pipelineString += '\n\n';
385
- pipelineString += "`-> {".concat(resultingParameterName, "}`"); // <- TODO: !!! If the parameter here has description, add it and use templateParameterJsonToString
385
+ pipelineString += "`-> {".concat(resultingParameterName, "}`"); // <- TODO:[main] !!! If the parameter here has description, add it and use templateParameterJsonToString
386
386
  }
387
387
  }
388
388
  catch (e_3_1) { e_3 = { error: e_3_1 }; }
@@ -602,7 +602,7 @@
602
602
  }
603
603
  /**
604
604
  * TODO: [🧠][🛣] More elegant way to tracking than passing `name`
605
- * TODO: [🧠] !!! In-memory cache of same values to prevent multiple checks
605
+ * TODO: [🧠][main] !!! In-memory cache of same values to prevent multiple checks
606
606
  * Note: [🐠] This is how `checkSerializableAsJson` + `isSerializableAsJson` together can just retun true/false or rich error message
607
607
  */
608
608
 
@@ -673,14 +673,14 @@
673
673
  var MAX_FILENAME_LENGTH = 30;
674
674
  /**
675
675
  * @@@
676
- * TODO: [🐝] !!! Use
676
+ * TODO: [🐝][main] !!! Use
677
677
  *
678
678
  * @public exported from `@promptbook/core`
679
679
  */
680
680
  var MAX_KNOWLEDGE_SOURCES_SCRAPING_DEPTH = 3;
681
681
  /**
682
682
  * @@@
683
- * TODO: [🐝] !!! Use
683
+ * TODO: [🐝][main] !!! Use
684
684
  *
685
685
  * @public exported from `@promptbook/core`
686
686
  */
@@ -835,7 +835,7 @@
835
835
  if ( /* version === '1.0.0' || */version === '2.0.0' || version === '3.0.0') {
836
836
  return false;
837
837
  }
838
- // <- TODO: !!! Check isValidPromptbookVersion against PROMPTBOOK_VERSIONS
838
+ // <- TODO:[main] !!! Check isValidPromptbookVersion against PROMPTBOOK_VERSIONS
839
839
  return true;
840
840
  }
841
841
 
@@ -984,7 +984,7 @@
984
984
  // <- Note: [🚲]
985
985
  throw new PipelineLogicError(spaceTrim.spaceTrim(function (block) { return "\n Invalid promptbook URL \"".concat(pipeline.pipelineUrl, "\"\n\n ").concat(block(pipelineIdentification), "\n "); }));
986
986
  }
987
- if (!isValidPromptbookVersion(pipeline.promptbookVersion)) {
987
+ if (pipeline.promptbookVersion !== undefined && !isValidPromptbookVersion(pipeline.promptbookVersion)) {
988
988
  // <- Note: [🚲]
989
989
  throw new PipelineLogicError(spaceTrim.spaceTrim(function (block) { return "\n Invalid Promptbook Version \"".concat(pipeline.promptbookVersion, "\"\n\n ").concat(block(pipelineIdentification), "\n "); }));
990
990
  }
@@ -1179,11 +1179,11 @@
1179
1179
  * > ex port function validatePipeline(promptbook: really_unknown): asserts promptbook is PipelineJson {
1180
1180
  */
1181
1181
  /**
1182
- * TODO: [🐣] !!!! Validate that all samples match expectations
1183
- * TODO: [🐣][🐝] !!!! Validate that knowledge is valid (non-void)
1184
- * TODO: [🐣] !!!! Validate that persona can be used only with CHAT variant
1185
- * TODO: [🐣] !!!! Validate that parameter with reserved name not used RESERVED_PARAMETER_NAMES
1186
- * TODO: [🐣] !!!! Validate that reserved parameter is not used as joker
1182
+ * TODO: [🐣][main] !!!! Validate that all samples match expectations
1183
+ * TODO: [🐣][🐝][main] !!!! Validate that knowledge is valid (non-void)
1184
+ * TODO: [🐣][main] !!!! Validate that persona can be used only with CHAT variant
1185
+ * TODO: [🐣][main] !!!! Validate that parameter with reserved name not used RESERVED_PARAMETER_NAMES
1186
+ * TODO: [🐣][main] !!!! Validate that reserved parameter is not used as joker
1187
1187
  * TODO: [🧠] Validation not only logic itself but imports around - files and websites and rerefenced pipelines exists
1188
1188
  * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
1189
1189
  */
@@ -1497,7 +1497,7 @@
1497
1497
  });
1498
1498
  }
1499
1499
  /**
1500
- * TODO: !!!! [🧠] Library precompilation and do not mix markdown and json promptbooks
1500
+ * TODO:[main] !!!! [🧠] Library precompilation and do not mix markdown and json promptbooks
1501
1501
  */
1502
1502
 
1503
1503
  /**
@@ -1794,7 +1794,7 @@
1794
1794
  });
1795
1795
  }
1796
1796
 
1797
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.68.2",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.68.2",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> 
{knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.68.2",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.68.2",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- 
Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
1797
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> 
{knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the 
JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
1798
1798
 
1799
1799
  var defaultDiacriticsRemovalMap = [
1800
1800
  {
@@ -2902,7 +2902,7 @@
2902
2902
  return true;
2903
2903
  }
2904
2904
  /**
2905
- * TODO: [🔃] !!!!! If the pipeline was prepared with different version or different set of models, prepare it once again
2905
+ * TODO: [🔃][main] !!!!! If the pipeline was prepared with different version or different set of models, prepare it once again
2906
2906
  * TODO: [🐠] Maybe base this on `makeValidator`
2907
2907
  * TODO: [🧊] Pipeline can be partially prepared, this should return true ONLY if fully prepared
2908
2908
  * TODO: [🧿] Maybe do same process with same granularity and subfinctions as `preparePipeline`
@@ -3303,7 +3303,7 @@
3303
3303
  console.warn(spaceTrim.spaceTrim(function (block) { return "\n Pipeline is not prepared\n\n ".concat(block(pipelineIdentification), "\n\n It will be prepared ad-hoc before the first execution and **returned as `preparedPipeline` in `PipelineExecutorResult`**\n But it is recommended to prepare the pipeline during collection preparation\n\n @see more at https://ptbk.io/prepare-pipeline\n "); }));
3304
3304
  }
3305
3305
  var pipelineExecutor = function (inputParameters, onProgress) { return __awaiter(_this, void 0, void 0, function () {
3306
- // TODO: !!! Extract to separate functions and files - ALL FUNCTIONS BELOW
3306
+ // TODO:[main] !!! Extract to separate functions and files - ALL FUNCTIONS BELOW
3307
3307
  function getContextForTemplate(template) {
3308
3308
  return __awaiter(this, void 0, void 0, function () {
3309
3309
  return __generator(this, function (_a) {
@@ -4122,7 +4122,7 @@
4122
4122
  return pipelineExecutor;
4123
4123
  }
4124
4124
  /**
4125
- * TODO: !!! Identify not only pipeline BUT exact template ${block(pipelineIdentification)}
4125
+ * TODO:[main] !!! Identify not only pipeline BUT exact template ${block(pipelineIdentification)}
4126
4126
  * TODO: Use isVerbose here (not only pass to `preparePipeline`)
4127
4127
  * TODO: [🧠][🌳] Use here `countTotalUsage` and put preparation and prepared pipiline to report
4128
4128
  * TODO: [🪂] Use maxParallelCount here (not only pass to `preparePipeline`)
@@ -4185,7 +4185,7 @@
4185
4185
  outputParameters = result.outputParameters;
4186
4186
  knowledgePiecesRaw = outputParameters.knowledgePieces;
4187
4187
  knowledgeTextPieces = (knowledgePiecesRaw || '').split('\n---\n');
4188
- // <- TODO: !!!!! Smarter split and filter out empty pieces
4188
+ // <- TODO:[main] !!!!! Smarter split and filter out empty pieces
4189
4189
  if (isVerbose) {
4190
4190
  console.info('knowledgeTextPieces:', knowledgeTextPieces);
4191
4191
  }
@@ -4265,7 +4265,7 @@
4265
4265
  });
4266
4266
  }
4267
4267
  /**
4268
- * TODO: [🐝][🔼] !!! Export via `@promptbook/markdown`
4268
+ * TODO: [🐝][🔼][main] !!! Export via `@promptbook/markdown`
4269
4269
  * TODO: [🪂] Do it in parallel 11:11
4270
4270
  * Note: No need to aggregate usage here, it is done by intercepting the llmTools
4271
4271
  */
@@ -4289,7 +4289,7 @@
4289
4289
  var partialPieces, pieces;
4290
4290
  return __generator(this, function (_a) {
4291
4291
  switch (_a.label) {
4292
- case 0: return [4 /*yield*/, prepareKnowledgeFromMarkdown(knowledgeSource.sourceContent, // <- TODO: [🐝] !!! Unhardcode markdown, detect which type it is - BE AWARE of big package size
4292
+ case 0: return [4 /*yield*/, prepareKnowledgeFromMarkdown(knowledgeSource.sourceContent, // <- TODO: [🐝][main] !!! Unhardcode markdown, detect which type it is - BE AWARE of big package size
4293
4293
  options)];
4294
4294
  case 1:
4295
4295
  partialPieces = _a.sent();
@@ -4481,7 +4481,7 @@
4481
4481
  });
4482
4482
  }
4483
4483
  /**
4484
- * TODO: [🔃] !!!!! If the persona was prepared with different version or different set of models, prepare it once again
4484
+ * TODO: [🔃][main] !!!!! If the persona was prepared with different version or different set of models, prepare it once again
4485
4485
  * TODO: [🏢] !! Check validity of `modelName` in pipeline
4486
4486
  * TODO: [🏢] !! Check validity of `systemMessage` in pipeline
4487
4487
  * TODO: [🏢] !! Check validity of `temperature` in pipeline
@@ -4530,7 +4530,7 @@
4530
4530
  case 0:
4531
4531
  _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? MAX_PARALLEL_COUNT : _a;
4532
4532
  templates = pipeline.templates, parameters = pipeline.parameters, knowledgePiecesCount = pipeline.knowledgePiecesCount;
4533
- // TODO: !!!!! Apply samples to each template (if missing and is for the template defined)
4533
+ // TODO:[main] !!!!! Apply samples to each template (if missing and is for the template defined)
4534
4534
  TODO_USE(parameters);
4535
4535
  templatesPrepared = new Array(
4536
4536
  // <- TODO: [🧱] Implement in a functional (not new Class) way
@@ -4562,7 +4562,7 @@
4562
4562
  /**
4563
4563
  * TODO: [🧠] Add context to each template (if missing)
4564
4564
  * TODO: [🧠] What is better name `prepareTemplate` or `prepareTemplateAndParameters`
4565
- * TODO: [♨] !!! Prepare index the samples and maybe templates
4565
+ * TODO: [♨][main] !!! Prepare index the samples and maybe templates
4566
4566
  * TODO: Write tests for `preparePipeline`
4567
4567
  * TODO: [🏏] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
4568
4568
  * TODO: [🧊] In future one preparation can take data from previous preparation and save tokens and time
@@ -4734,7 +4734,7 @@
4734
4734
  if (sourceContent === '') {
4735
4735
  throw new ParseError("Source is not defined");
4736
4736
  }
4737
- // TODO: !!!! Following checks should be applied every link in the `sourceContent`
4737
+ // TODO:[main] !!!! Following checks should be applied every link in the `sourceContent`
4738
4738
  if (sourceContent.startsWith('http://')) {
4739
4739
  throw new ParseError("Source is not secure");
4740
4740
  }
@@ -4919,7 +4919,7 @@
4919
4919
  if (command.templateType === 'KNOWLEDGE') {
4920
4920
  knowledgeCommandParser.$applyToPipelineJson({
4921
4921
  type: 'KNOWLEDGE',
4922
- sourceContent: $templateJson.content, // <- TODO: [🐝] !!! Work with KNOWLEDGE which not referring to the source file or website, but its content itself
4922
+ sourceContent: $templateJson.content, // <- TODO: [🐝][main] !!! Work with KNOWLEDGE which not referring to the source file or website, but its content itself
4923
4923
  }, $pipelineJson);
4924
4924
  $templateJson.isTemplate = false;
4925
4925
  return;
@@ -5961,6 +5961,7 @@
5961
5961
  * Note: `$` is used to indicate that this function mutates given `pipelineJson`
5962
5962
  */
5963
5963
  $applyToPipelineJson: function (command, $pipelineJson) {
5964
+ // TODO: Warn if the version is overridden
5964
5965
  $pipelineJson.promptbookVersion = command.promptbookVersion;
5965
5966
  },
5966
5967
  /**
@@ -6850,7 +6851,7 @@
6850
6851
  var $pipelineJson = {
6851
6852
  title: undefined /* <- Note: [🍙] Putting here placeholder to keep `title` on top at final JSON */,
6852
6853
  pipelineUrl: undefined /* <- Note: Putting here placeholder to keep `pipelineUrl` on top at final JSON */,
6853
- promptbookVersion: PROMPTBOOK_VERSION,
6854
+ promptbookVersion: undefined /* <- Note: By default no explicit version */,
6854
6855
  description: undefined /* <- Note: [🍙] Putting here placeholder to keep `description` on top at final JSON */,
6855
6856
  parameters: [],
6856
6857
  templates: [],
@@ -7141,7 +7142,7 @@
7141
7142
  return $asDeeplyFrozenSerializableJson('pipelineJson', $pipelineJson);
7142
7143
  }
7143
7144
  /**
7144
- * TODO: !!!! Warn if used only sync version
7145
+ * TODO:[main] !!!! Warn if used only sync version
7145
7146
  * TODO: [🚞] Report here line/column of error
7146
7147
  * TODO: Use spaceTrim more effectively
7147
7148
  * TODO: [🧠] Parameter flags - isInput, isOutput, isInternal
@@ -7416,7 +7417,7 @@
7416
7417
  }
7417
7418
  }
7418
7419
  /**
7419
- * TODO: [🧠] !!! In-memory cache of same values to prevent multiple checks
7420
+ * TODO: [🧠][main] !!! In-memory cache of same values to prevent multiple checks
7420
7421
  * TODO: [🧠][💺] Can be done this on type-level?
7421
7422
  */
7422
7423