@promptbook/node 0.68.3 → 0.68.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31)
  1. package/README.md +4 -0
  2. package/esm/index.es.js +29 -28
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/promptbook-collection/index.d.ts +0 -3
  5. package/esm/typings/src/cli/cli-commands/make.d.ts +1 -1
  6. package/esm/typings/src/collection/constructors/createCollectionFromUrl.d.ts +1 -1
  7. package/esm/typings/src/config.d.ts +2 -2
  8. package/esm/typings/src/conversion/pipelineStringToJsonSync.d.ts +1 -1
  9. package/esm/typings/src/conversion/validation/validatePipeline.d.ts +5 -5
  10. package/esm/typings/src/execution/createPipelineExecutor.d.ts +1 -1
  11. package/esm/typings/src/knowledge/prepare-knowledge/markdown/prepareKnowledgeFromMarkdown.d.ts +1 -1
  12. package/esm/typings/src/knowledge/prepare-knowledge/pdf/prepareKnowledgeFromPdf.d.ts +1 -1
  13. package/esm/typings/src/llm-providers/_common/utils/cache/CacheItem.d.ts +1 -1
  14. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +2 -1
  15. package/esm/typings/src/llm-providers/anthropic-claude/anthropic-claude-models.d.ts +1 -1
  16. package/esm/typings/src/llm-providers/anthropic-claude/createAnthropicClaudeExecutionTools.d.ts +2 -2
  17. package/esm/typings/src/llm-providers/anthropic-claude/playground/playground.d.ts +2 -2
  18. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +2 -1
  19. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +2 -1
  20. package/esm/typings/src/llm-providers/openai/playground/playground.d.ts +1 -1
  21. package/esm/typings/src/personas/preparePersona.d.ts +1 -1
  22. package/esm/typings/src/prepare/isPipelinePrepared.d.ts +1 -1
  23. package/esm/typings/src/prepare/prepareTemplates.d.ts +1 -1
  24. package/esm/typings/src/types/PipelineJson/PipelineJson.d.ts +1 -1
  25. package/esm/typings/src/types/typeAliases.d.ts +1 -1
  26. package/esm/typings/src/utils/serialization/checkSerializableAsJson.d.ts +1 -1
  27. package/esm/typings/src/utils/serialization/isSerializableAsJson.d.ts +1 -1
  28. package/package.json +2 -2
  29. package/umd/index.umd.js +29 -28
  30. package/umd/index.umd.js.map +1 -1
  31. package/esm/typings/src/personas/preparePersona.test.d.ts +0 -1
@@ -1,7 +1,6 @@
1
1
  declare const _default: ({
2
2
  title: string;
3
3
  pipelineUrl: string;
4
- promptbookVersion: string;
5
4
  parameters: {
6
5
  name: string;
7
6
  description: string;
@@ -24,7 +23,6 @@ declare const _default: ({
24
23
  } | {
25
24
  title: string;
26
25
  pipelineUrl: string;
27
- promptbookVersion: string;
28
26
  parameters: {
29
27
  name: string;
30
28
  description: string;
@@ -53,7 +51,6 @@ declare const _default: ({
53
51
  } | {
54
52
  title: string;
55
53
  pipelineUrl: string;
56
- promptbookVersion: string;
57
54
  parameters: {
58
55
  name: string;
59
56
  description: string;
@@ -6,7 +6,7 @@ import type { Command as Program } from 'commander';
6
6
  */
7
7
  export declare function initializeMakeCommand(program: Program): void;
8
8
  /**
9
- * TODO: [🥃] !!! Allow `ptbk make` without configuring any llm tools
9
+ * TODO: [🥃][main] !!! Allow `ptbk make` without configuring any llm tools
10
10
  * TODO: Maybe remove this command - "about" command should be enough?
11
11
  * TODO: [0] DRY Javascript and typescript - Maybe make ONLY typescript and for javascript just remove types
12
12
  * Note: [🟡] This code should never be published outside of `@promptbook/cli`
@@ -25,5 +25,5 @@ type CreatePipelineCollectionFromUrlyOptions = {
25
25
  export declare function createCollectionFromUrl(url: string_url | URL, options: CreatePipelineCollectionFromUrlyOptions): Promise<PipelineCollection>;
26
26
  export {};
27
27
  /**
28
- * TODO: !!!! [🧠] Library precompilation and do not mix markdown and json promptbooks
28
+ * TODO:[main] !!!! [🧠] Library precompilation and do not mix markdown and json promptbooks
29
29
  */
@@ -68,14 +68,14 @@ export declare const MAX_EXECUTION_ATTEMPTS = 3;
68
68
  export declare const MAX_FILENAME_LENGTH = 30;
69
69
  /**
70
70
  * @@@
71
- * TODO: [🐝] !!! Use
71
+ * TODO: [🐝][main] !!! Use
72
72
  *
73
73
  * @public exported from `@promptbook/core`
74
74
  */
75
75
  export declare const MAX_KNOWLEDGE_SOURCES_SCRAPING_DEPTH = 3;
76
76
  /**
77
77
  * @@@
78
- * TODO: [🐝] !!! Use
78
+ * TODO: [🐝][main] !!! Use
79
79
  *
80
80
  * @public exported from `@promptbook/core`
81
81
  */
@@ -18,7 +18,7 @@ import type { PipelineString } from '../types/PipelineString';
18
18
  */
19
19
  export declare function pipelineStringToJsonSync(pipelineString: PipelineString): PipelineJson;
20
20
  /**
21
- * TODO: !!!! Warn if used only sync version
21
+ * TODO:[main] !!!! Warn if used only sync version
22
22
  * TODO: [🚞] Report here line/column of error
23
23
  * TODO: Use spaceTrim more effectively
24
24
  * TODO: [🧠] Parameter flags - isInput, isOutput, isInternal
@@ -27,11 +27,11 @@ export declare function validatePipeline(pipeline: PipelineJson): PipelineJson;
27
27
  * > ex port function validatePipeline(promptbook: really_unknown): asserts promptbook is PipelineJson {
28
28
  */
29
29
  /**
30
- * TODO: [🐣] !!!! Validate that all samples match expectations
31
- * TODO: [🐣][🐝] !!!! Validate that knowledge is valid (non-void)
32
- * TODO: [🐣] !!!! Validate that persona can be used only with CHAT variant
33
- * TODO: [🐣] !!!! Validate that parameter with reserved name not used RESERVED_PARAMETER_NAMES
34
- * TODO: [🐣] !!!! Validate that reserved parameter is not used as joker
30
+ * TODO: [🐣][main] !!!! Validate that all samples match expectations
31
+ * TODO: [🐣][🐝][main] !!!! Validate that knowledge is valid (non-void)
32
+ * TODO: [🐣][main] !!!! Validate that persona can be used only with CHAT variant
33
+ * TODO: [🐣][main] !!!! Validate that parameter with reserved name not used RESERVED_PARAMETER_NAMES
34
+ * TODO: [🐣][main] !!!! Validate that reserved parameter is not used as joker
35
35
  * TODO: [🧠] Validation not only logic itself but imports around - files and websites and rerefenced pipelines exists
36
36
  * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
37
37
  */
@@ -58,7 +58,7 @@ interface CreatePipelineExecutorOptions {
58
58
  export declare function createPipelineExecutor(options: CreatePipelineExecutorOptions): PipelineExecutor;
59
59
  export {};
60
60
  /**
61
- * TODO: !!! Identify not only pipeline BUT exact template ${block(pipelineIdentification)}
61
+ * TODO:[main] !!! Identify not only pipeline BUT exact template ${block(pipelineIdentification)}
62
62
  * TODO: Use isVerbose here (not only pass to `preparePipeline`)
63
63
  * TODO: [🧠][🌳] Use here `countTotalUsage` and put preparation and prepared pipiline to report
64
64
  * TODO: [🪂] Use maxParallelCount here (not only pass to `preparePipeline`)
@@ -8,7 +8,7 @@ import type { string_markdown } from '../../../types/typeAliases';
8
8
  */
9
9
  export declare function prepareKnowledgeFromMarkdown(knowledgeContent: string_markdown, options: PrepareOptions): Promise<Array<Omit<KnowledgePiecePreparedJson, 'sources' | 'preparationIds'>>>;
10
10
  /**
11
- * TODO: [🐝][🔼] !!! Export via `@promptbook/markdown`
11
+ * TODO: [🐝][🔼][main] !!! Export via `@promptbook/markdown`
12
12
  * TODO: [🪂] Do it in parallel 11:11
13
13
  * Note: No need to aggregate usage here, it is done by intercepting the llmTools
14
14
  */
@@ -8,7 +8,7 @@ import type { string_base64 } from '../../../types/typeAliases';
8
8
  */
9
9
  export declare function prepareKnowledgeFromPdf(content: string_base64, options: PrepareOptions): Promise<Array<Omit<KnowledgePiecePreparedJson, 'sources' | 'preparationIds'>>>;
10
10
  /**
11
- * TODO: [🐝][🔼] !!! Export via `@promptbook/pdf`
11
+ * TODO: [🐝][🔼][main] !!! Export via `@promptbook/pdf`
12
12
  * TODO: [🧺] In future, content can be alse File or Blob BUT for now for wider compatibility its only base64
13
13
  * @see https://stackoverflow.com/questions/14653349/node-js-cant-create-blobs
14
14
  * TODO: [🪂] Do it in parallel
@@ -13,7 +13,7 @@ export type CacheItem = {
13
13
  /**
14
14
  * @@@
15
15
  */
16
- promptbookVersion: string_promptbook_version;
16
+ promptbookVersion?: string_promptbook_version;
17
17
  /**
18
18
  * @@@
19
19
  */
@@ -1,3 +1,4 @@
1
+ import Anthropic from '@anthropic-ai/sdk';
1
2
  import type { AvailableModel } from '../../execution/AvailableModel';
2
3
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
3
4
  import type { ChatPromptResult } from '../../execution/PromptResult';
@@ -26,7 +27,7 @@ export declare class AnthropicClaudeExecutionTools implements LlmExecutionTools
26
27
  constructor(options?: AnthropicClaudeExecutionToolsDirectOptions);
27
28
  get title(): string_title & string_markdown_text;
28
29
  get description(): string_markdown;
29
- private getClient;
30
+ getClient(): Promise<Anthropic>;
30
31
  /**
31
32
  * Check the `options` passed to `constructor`
32
33
  */
@@ -16,7 +16,7 @@ export declare const ANTHROPIC_CLAUDE_MODELS: Array<AvailableModel & {
16
16
  }>;
17
17
  /**
18
18
  * Note: [🤖] Add models of new variant
19
- * TODO: [🧠] !!! Add embedding models OR Anthropic has only chat+completion models?
19
+ * TODO: [🧠][main] !!! Add embedding models OR Anthropic has only chat+completion models?
20
20
  * TODO: [🧠] Some mechanism to propagate unsureness
21
21
  * TODO: [🧠][👮‍♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,...
22
22
  * TODO: [🎰] Some mechanism to auto-update available models
@@ -11,8 +11,8 @@ export declare const createAnthropicClaudeExecutionTools: ((options: AnthropicCl
11
11
  className: string;
12
12
  };
13
13
  /**
14
- * TODO: [🧠] !!!! Make anonymous this with all LLM providers
15
- * TODO: [🧠][🧱] !!!! Maybe change all `new AnthropicClaudeExecutionTools` -> `createAnthropicClaudeExecutionTools` in manual
14
+ * TODO: [🧠][main] !!!! Make anonymous this with all LLM providers
15
+ * TODO: [🧠][🧱][main] !!!! Maybe change all `new AnthropicClaudeExecutionTools` -> `createAnthropicClaudeExecutionTools` in manual
16
16
  * TODO: [🧠] Maybe auto-detect usage in browser and determine default value of `isProxied`
17
17
  * TODO: [🦺] Is there some way how to put `packageName` and `className` on top and function definition on bottom?
18
18
  * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
@@ -1,6 +1,6 @@
1
1
  #!/usr/bin/env ts-node
2
2
  export {};
3
3
  /**
4
- * TODO: !!! Playground with WebGPT / Promptbook.studio anonymous server
5
- * TODO: !!! Test here that `systemMessage`, `temperature` and `seed` are working correctly
4
+ * TODO:[main] !!! Playground with WebGPT / Promptbook.studio anonymous server
5
+ * TODO:[main] !!! Test here that `systemMessage`, `temperature` and `seed` are working correctly
6
6
  */
@@ -1,3 +1,4 @@
1
+ import { OpenAIClient } from '@azure/openai';
1
2
  import type { AvailableModel } from '../../execution/AvailableModel';
2
3
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
3
4
  import type { ChatPromptResult } from '../../execution/PromptResult';
@@ -26,7 +27,7 @@ export declare class AzureOpenAiExecutionTools implements LlmExecutionTools {
26
27
  constructor(options: AzureOpenAiExecutionToolsOptions);
27
28
  get title(): string_title & string_markdown_text;
28
29
  get description(): string_markdown;
29
- private getClient;
30
+ getClient(): Promise<OpenAIClient>;
30
31
  /**
31
32
  * Check the `options` passed to `constructor`
32
33
  */
@@ -1,3 +1,4 @@
1
+ import OpenAI from 'openai';
1
2
  import type { AvailableModel } from '../../execution/AvailableModel';
2
3
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
3
4
  import type { ChatPromptResult } from '../../execution/PromptResult';
@@ -27,7 +28,7 @@ export declare class OpenAiExecutionTools implements LlmExecutionTools {
27
28
  constructor(options?: OpenAiExecutionToolsOptions);
28
29
  get title(): string_title & string_markdown_text;
29
30
  get description(): string_markdown;
30
- private getClient;
31
+ getClient(): Promise<OpenAI>;
31
32
  /**
32
33
  * Check the `options` passed to `constructor`
33
34
  */
@@ -1,5 +1,5 @@
1
1
  #!/usr/bin/env ts-node
2
2
  export {};
3
3
  /**
4
- * TODO: !!! Test here that `systemMessage`, `temperature` and `seed` are working correctly
4
+ * TODO:[main] !!! Test here that `systemMessage`, `temperature` and `seed` are working correctly
5
5
  */
@@ -9,7 +9,7 @@ import type { string_persona_description } from '../types/typeAliases';
9
9
  */
10
10
  export declare function preparePersona(personaDescription: string_persona_description, options: PrepareOptions): Promise<PersonaPreparedJson['modelRequirements']>;
11
11
  /**
12
- * TODO: [🔃] !!!!! If the persona was prepared with different version or different set of models, prepare it once again
12
+ * TODO: [🔃][main] !!!!! If the persona was prepared with different version or different set of models, prepare it once again
13
13
  * TODO: [🏢] !! Check validity of `modelName` in pipeline
14
14
  * TODO: [🏢] !! Check validity of `systemMessage` in pipeline
15
15
  * TODO: [🏢] !! Check validity of `temperature` in pipeline
@@ -6,7 +6,7 @@ import type { PipelineJson } from '../types/PipelineJson/PipelineJson';
6
6
  */
7
7
  export declare function isPipelinePrepared(pipeline: PipelineJson): boolean;
8
8
  /**
9
- * TODO: [🔃] !!!!! If the pipeline was prepared with different version or different set of models, prepare it once again
9
+ * TODO: [🔃][main] !!!!! If the pipeline was prepared with different version or different set of models, prepare it once again
10
10
  * TODO: [🐠] Maybe base this on `makeValidator`
11
11
  * TODO: [🧊] Pipeline can be partially prepared, this should return true ONLY if fully prepared
12
12
  * TODO: [🧿] Maybe do same process with same granularity and subfinctions as `preparePipeline`
@@ -23,7 +23,7 @@ export {};
23
23
  /**
24
24
  * TODO: [🧠] Add context to each template (if missing)
25
25
  * TODO: [🧠] What is better name `prepareTemplate` or `prepareTemplateAndParameters`
26
- * TODO: [♨] !!! Prepare index the samples and maybe templates
26
+ * TODO: [♨][main] !!! Prepare index the samples and maybe templates
27
27
  * TODO: Write tests for `preparePipeline`
28
28
  * TODO: [🏏] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
29
29
  * TODO: [🧊] In future one preparation can take data from previous preparation and save tokens and time
@@ -42,7 +42,7 @@ export type PipelineJson = {
42
42
  /**
43
43
  * Version of the .ptbk.json file
44
44
  */
45
- readonly promptbookVersion: string_semantic_version;
45
+ readonly promptbookVersion?: string_semantic_version;
46
46
  /**
47
47
  * Description of the promptbook
48
48
  * It can use multiple paragraphs of simple markdown formatting like **bold**, *italic*, [link](https://example.com), ... BUT not code blocks and structure
@@ -592,7 +592,7 @@ export type number_megabytes = number_positive;
592
592
  export type number_gigabytes = number_positive;
593
593
  export type number_terabytes = number_positive;
594
594
  /**.
595
- * TODO: !!! Change "For example" to @example
595
+ * TODO:[main] !!! Change "For example" to @example
596
596
  * TODO: !! Change to branded types
597
597
  * TODO: Delete type aliases that are not exported or used internally
598
598
  */
@@ -22,6 +22,6 @@ import type { string_name } from '../../types/typeAliases';
22
22
  export declare function checkSerializableAsJson(name: string_name, value: unknown): void;
23
23
  /**
24
24
  * TODO: [🧠][🛣] More elegant way to tracking than passing `name`
25
- * TODO: [🧠] !!! In-memory cache of same values to prevent multiple checks
25
+ * TODO: [🧠][main] !!! In-memory cache of same values to prevent multiple checks
26
26
  * Note: [🐠] This is how `checkSerializableAsJson` + `isSerializableAsJson` together can just retun true/false or rich error message
27
27
  */
@@ -19,6 +19,6 @@
19
19
  */
20
20
  export declare function isSerializableAsJson(value: unknown): boolean;
21
21
  /**
22
- * TODO: [🧠] !!! In-memory cache of same values to prevent multiple checks
22
+ * TODO: [🧠][main] !!! In-memory cache of same values to prevent multiple checks
23
23
  * TODO: [🧠][💺] Can be done this on type-level?
24
24
  */
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@promptbook/node",
3
- "version": "0.68.3",
3
+ "version": "0.68.5",
4
4
  "description": "Supercharge your use of large language models",
5
5
  "private": false,
6
6
  "sideEffects": false,
@@ -47,7 +47,7 @@
47
47
  "module": "./esm/index.es.js",
48
48
  "typings": "./esm/typings/src/_packages/node.index.d.ts",
49
49
  "peerDependencies": {
50
- "@promptbook/core": "0.68.3"
50
+ "@promptbook/core": "0.68.5"
51
51
  },
52
52
  "dependencies": {
53
53
  "colors": "1.4.0",
package/umd/index.umd.js CHANGED
@@ -35,8 +35,8 @@
35
35
  /**
36
36
  * The version of the Promptbook library
37
37
  */
38
- var PROMPTBOOK_VERSION = '0.68.2';
39
- // TODO: !!!! List here all the versions and annotate + put into script
38
+ var PROMPTBOOK_VERSION = '0.68.4';
39
+ // TODO:[main] !!!! List here all the versions and annotate + put into script
40
40
 
41
41
  /*! *****************************************************************************
42
42
  Copyright (c) Microsoft Corporation.
@@ -346,7 +346,7 @@
346
346
  }
347
347
  /**
348
348
  * TODO: [🧠][🛣] More elegant way to tracking than passing `name`
349
- * TODO: [🧠] !!! In-memory cache of same values to prevent multiple checks
349
+ * TODO: [🧠][main] !!! In-memory cache of same values to prevent multiple checks
350
350
  * Note: [🐠] This is how `checkSerializableAsJson` + `isSerializableAsJson` together can just retun true/false or rich error message
351
351
  */
352
352
 
@@ -515,7 +515,7 @@
515
515
  commands.push("PIPELINE URL ".concat(pipelineUrl));
516
516
  }
517
517
  commands.push("PROMPTBOOK VERSION ".concat(promptbookVersion));
518
- // TODO: !!! This increase size of the bundle and is probbably not necessary
518
+ // TODO:[main] !!! This increase size of the bundle and is probbably not necessary
519
519
  pipelineString = prettifyMarkdown(pipelineString);
520
520
  try {
521
521
  for (var _g = __values(parameters.filter(function (_a) {
@@ -663,12 +663,12 @@
663
663
  pipelineString += '```' + contentLanguage;
664
664
  pipelineString += '\n';
665
665
  pipelineString += spaceTrim__default["default"](content);
666
- // <- TODO: !!! Escape
666
+ // <- TODO:[main] !!! Escape
667
667
  // <- TODO: [🧠] Some clear strategy how to spaceTrim the blocks
668
668
  pipelineString += '\n';
669
669
  pipelineString += '```';
670
670
  pipelineString += '\n\n';
671
- pipelineString += "`-> {".concat(resultingParameterName, "}`"); // <- TODO: !!! If the parameter here has description, add it and use templateParameterJsonToString
671
+ pipelineString += "`-> {".concat(resultingParameterName, "}`"); // <- TODO:[main] !!! If the parameter here has description, add it and use templateParameterJsonToString
672
672
  }
673
673
  }
674
674
  catch (e_3_1) { e_3 = { error: e_3_1 }; }
@@ -895,7 +895,7 @@
895
895
  });
896
896
  }
897
897
 
898
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.68.2",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.68.2",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> 
{knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.68.2",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.68.2",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- 
Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
898
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> 
{knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],templates:[{templateType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the 
JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
899
899
 
900
900
  /**
901
901
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
@@ -970,7 +970,7 @@
970
970
  if ( /* version === '1.0.0' || */version === '2.0.0' || version === '3.0.0') {
971
971
  return false;
972
972
  }
973
- // <- TODO: !!! Check isValidPromptbookVersion against PROMPTBOOK_VERSIONS
973
+ // <- TODO:[main] !!! Check isValidPromptbookVersion against PROMPTBOOK_VERSIONS
974
974
  return true;
975
975
  }
976
976
 
@@ -1119,7 +1119,7 @@
1119
1119
  // <- Note: [🚲]
1120
1120
  throw new PipelineLogicError(spaceTrim.spaceTrim(function (block) { return "\n Invalid promptbook URL \"".concat(pipeline.pipelineUrl, "\"\n\n ").concat(block(pipelineIdentification), "\n "); }));
1121
1121
  }
1122
- if (!isValidPromptbookVersion(pipeline.promptbookVersion)) {
1122
+ if (pipeline.promptbookVersion !== undefined && !isValidPromptbookVersion(pipeline.promptbookVersion)) {
1123
1123
  // <- Note: [🚲]
1124
1124
  throw new PipelineLogicError(spaceTrim.spaceTrim(function (block) { return "\n Invalid Promptbook Version \"".concat(pipeline.promptbookVersion, "\"\n\n ").concat(block(pipelineIdentification), "\n "); }));
1125
1125
  }
@@ -1314,11 +1314,11 @@
1314
1314
  * > ex port function validatePipeline(promptbook: really_unknown): asserts promptbook is PipelineJson {
1315
1315
  */
1316
1316
  /**
1317
- * TODO: [🐣] !!!! Validate that all samples match expectations
1318
- * TODO: [🐣][🐝] !!!! Validate that knowledge is valid (non-void)
1319
- * TODO: [🐣] !!!! Validate that persona can be used only with CHAT variant
1320
- * TODO: [🐣] !!!! Validate that parameter with reserved name not used RESERVED_PARAMETER_NAMES
1321
- * TODO: [🐣] !!!! Validate that reserved parameter is not used as joker
1317
+ * TODO: [🐣][main] !!!! Validate that all samples match expectations
1318
+ * TODO: [🐣][🐝][main] !!!! Validate that knowledge is valid (non-void)
1319
+ * TODO: [🐣][main] !!!! Validate that persona can be used only with CHAT variant
1320
+ * TODO: [🐣][main] !!!! Validate that parameter with reserved name not used RESERVED_PARAMETER_NAMES
1321
+ * TODO: [🐣][main] !!!! Validate that reserved parameter is not used as joker
1322
1322
  * TODO: [🧠] Validation not only logic itself but imports around - files and websites and rerefenced pipelines exists
1323
1323
  * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
1324
1324
  */
@@ -2619,7 +2619,7 @@
2619
2619
  return true;
2620
2620
  }
2621
2621
  /**
2622
- * TODO: [🔃] !!!!! If the pipeline was prepared with different version or different set of models, prepare it once again
2622
+ * TODO: [🔃][main] !!!!! If the pipeline was prepared with different version or different set of models, prepare it once again
2623
2623
  * TODO: [🐠] Maybe base this on `makeValidator`
2624
2624
  * TODO: [🧊] Pipeline can be partially prepared, this should return true ONLY if fully prepared
2625
2625
  * TODO: [🧿] Maybe do same process with same granularity and subfinctions as `preparePipeline`
@@ -2998,7 +2998,7 @@
2998
2998
  console.warn(spaceTrim.spaceTrim(function (block) { return "\n Pipeline is not prepared\n\n ".concat(block(pipelineIdentification), "\n\n It will be prepared ad-hoc before the first execution and **returned as `preparedPipeline` in `PipelineExecutorResult`**\n But it is recommended to prepare the pipeline during collection preparation\n\n @see more at https://ptbk.io/prepare-pipeline\n "); }));
2999
2999
  }
3000
3000
  var pipelineExecutor = function (inputParameters, onProgress) { return __awaiter(_this, void 0, void 0, function () {
3001
- // TODO: !!! Extract to separate functions and files - ALL FUNCTIONS BELOW
3001
+ // TODO:[main] !!! Extract to separate functions and files - ALL FUNCTIONS BELOW
3002
3002
  function getContextForTemplate(template) {
3003
3003
  return __awaiter(this, void 0, void 0, function () {
3004
3004
  return __generator(this, function (_a) {
@@ -3817,7 +3817,7 @@
3817
3817
  return pipelineExecutor;
3818
3818
  }
3819
3819
  /**
3820
- * TODO: !!! Identify not only pipeline BUT exact template ${block(pipelineIdentification)}
3820
+ * TODO:[main] !!! Identify not only pipeline BUT exact template ${block(pipelineIdentification)}
3821
3821
  * TODO: Use isVerbose here (not only pass to `preparePipeline`)
3822
3822
  * TODO: [🧠][🌳] Use here `countTotalUsage` and put preparation and prepared pipiline to report
3823
3823
  * TODO: [🪂] Use maxParallelCount here (not only pass to `preparePipeline`)
@@ -3880,7 +3880,7 @@
3880
3880
  outputParameters = result.outputParameters;
3881
3881
  knowledgePiecesRaw = outputParameters.knowledgePieces;
3882
3882
  knowledgeTextPieces = (knowledgePiecesRaw || '').split('\n---\n');
3883
- // <- TODO: !!!!! Smarter split and filter out empty pieces
3883
+ // <- TODO:[main] !!!!! Smarter split and filter out empty pieces
3884
3884
  if (isVerbose) {
3885
3885
  console.info('knowledgeTextPieces:', knowledgeTextPieces);
3886
3886
  }
@@ -3960,7 +3960,7 @@
3960
3960
  });
3961
3961
  }
3962
3962
  /**
3963
- * TODO: [🐝][🔼] !!! Export via `@promptbook/markdown`
3963
+ * TODO: [🐝][🔼][main] !!! Export via `@promptbook/markdown`
3964
3964
  * TODO: [🪂] Do it in parallel 11:11
3965
3965
  * Note: No need to aggregate usage here, it is done by intercepting the llmTools
3966
3966
  */
@@ -3984,7 +3984,7 @@
3984
3984
  var partialPieces, pieces;
3985
3985
  return __generator(this, function (_a) {
3986
3986
  switch (_a.label) {
3987
- case 0: return [4 /*yield*/, prepareKnowledgeFromMarkdown(knowledgeSource.sourceContent, // <- TODO: [🐝] !!! Unhardcode markdown, detect which type it is - BE AWARE of big package size
3987
+ case 0: return [4 /*yield*/, prepareKnowledgeFromMarkdown(knowledgeSource.sourceContent, // <- TODO: [🐝][main] !!! Unhardcode markdown, detect which type it is - BE AWARE of big package size
3988
3988
  options)];
3989
3989
  case 1:
3990
3990
  partialPieces = _a.sent();
@@ -4176,7 +4176,7 @@
4176
4176
  });
4177
4177
  }
4178
4178
  /**
4179
- * TODO: [🔃] !!!!! If the persona was prepared with different version or different set of models, prepare it once again
4179
+ * TODO: [🔃][main] !!!!! If the persona was prepared with different version or different set of models, prepare it once again
4180
4180
  * TODO: [🏢] !! Check validity of `modelName` in pipeline
4181
4181
  * TODO: [🏢] !! Check validity of `systemMessage` in pipeline
4182
4182
  * TODO: [🏢] !! Check validity of `temperature` in pipeline
@@ -4225,7 +4225,7 @@
4225
4225
  case 0:
4226
4226
  _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? MAX_PARALLEL_COUNT : _a;
4227
4227
  templates = pipeline.templates, parameters = pipeline.parameters, knowledgePiecesCount = pipeline.knowledgePiecesCount;
4228
- // TODO: !!!!! Apply samples to each template (if missing and is for the template defined)
4228
+ // TODO:[main] !!!!! Apply samples to each template (if missing and is for the template defined)
4229
4229
  TODO_USE(parameters);
4230
4230
  templatesPrepared = new Array(
4231
4231
  // <- TODO: [🧱] Implement in a functional (not new Class) way
@@ -4257,7 +4257,7 @@
4257
4257
  /**
4258
4258
  * TODO: [🧠] Add context to each template (if missing)
4259
4259
  * TODO: [🧠] What is better name `prepareTemplate` or `prepareTemplateAndParameters`
4260
- * TODO: [♨] !!! Prepare index the samples and maybe templates
4260
+ * TODO: [♨][main] !!! Prepare index the samples and maybe templates
4261
4261
  * TODO: Write tests for `preparePipeline`
4262
4262
  * TODO: [🏏] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
4263
4263
  * TODO: [🧊] In future one preparation can take data from previous preparation and save tokens and time
@@ -4429,7 +4429,7 @@
4429
4429
  if (sourceContent === '') {
4430
4430
  throw new ParseError("Source is not defined");
4431
4431
  }
4432
- // TODO: !!!! Following checks should be applied every link in the `sourceContent`
4432
+ // TODO:[main] !!!! Following checks should be applied every link in the `sourceContent`
4433
4433
  if (sourceContent.startsWith('http://')) {
4434
4434
  throw new ParseError("Source is not secure");
4435
4435
  }
@@ -4632,7 +4632,7 @@
4632
4632
  if (command.templateType === 'KNOWLEDGE') {
4633
4633
  knowledgeCommandParser.$applyToPipelineJson({
4634
4634
  type: 'KNOWLEDGE',
4635
- sourceContent: $templateJson.content, // <- TODO: [🐝] !!! Work with KNOWLEDGE which not referring to the source file or website, but its content itself
4635
+ sourceContent: $templateJson.content, // <- TODO: [🐝][main] !!! Work with KNOWLEDGE which not referring to the source file or website, but its content itself
4636
4636
  }, $pipelineJson);
4637
4637
  $templateJson.isTemplate = false;
4638
4638
  return;
@@ -5674,6 +5674,7 @@
5674
5674
  * Note: `$` is used to indicate that this function mutates given `pipelineJson`
5675
5675
  */
5676
5676
  $applyToPipelineJson: function (command, $pipelineJson) {
5677
+ // TODO: Warn if the version is overridden
5677
5678
  $pipelineJson.promptbookVersion = command.promptbookVersion;
5678
5679
  },
5679
5680
  /**
@@ -6563,7 +6564,7 @@
6563
6564
  var $pipelineJson = {
6564
6565
  title: undefined /* <- Note: [🍙] Putting here placeholder to keep `title` on top at final JSON */,
6565
6566
  pipelineUrl: undefined /* <- Note: Putting here placeholder to keep `pipelineUrl` on top at final JSON */,
6566
- promptbookVersion: PROMPTBOOK_VERSION,
6567
+ promptbookVersion: undefined /* <- Note: By default no explicit version */,
6567
6568
  description: undefined /* <- Note: [🍙] Putting here placeholder to keep `description` on top at final JSON */,
6568
6569
  parameters: [],
6569
6570
  templates: [],
@@ -6854,7 +6855,7 @@
6854
6855
  return $asDeeplyFrozenSerializableJson('pipelineJson', $pipelineJson);
6855
6856
  }
6856
6857
  /**
6857
- * TODO: !!!! Warn if used only sync version
6858
+ * TODO:[main] !!!! Warn if used only sync version
6858
6859
  * TODO: [🚞] Report here line/column of error
6859
6860
  * TODO: Use spaceTrim more effectively
6860
6861
  * TODO: [🧠] Parameter flags - isInput, isOutput, isInternal
@@ -7637,7 +7638,7 @@
7637
7638
  }
7638
7639
  }
7639
7640
  /**
7640
- * TODO: [🧠] !!! In-memory cache of same values to prevent multiple checks
7641
+ * TODO: [🧠][main] !!! In-memory cache of same values to prevent multiple checks
7641
7642
  * TODO: [🧠][💺] Can be done this on type-level?
7642
7643
  */
7643
7644