@promptbook/cli 0.61.0-27 → 0.61.0-29

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,4 +1,5 @@
1
1
  import type { Promisable } from 'type-fest';
2
+ import type { PipelineJson } from '../types/PipelineJson/PipelineJson';
2
3
  import { PipelineExecutionError } from '../errors/PipelineExecutionError';
3
4
  import type { TaskProgress } from '../types/TaskProgress';
4
5
  import type { ExecutionReportJson } from '../types/execution-report/ExecutionReportJson';
@@ -10,6 +11,8 @@ import type { PromptResultUsage } from './PromptResultUsage';
10
11
  *
11
12
  * It can be created with `createPipelineExecutor` function.
12
13
  *
14
+ * @@@ almost-JSON (what about errors)
15
+ *
13
16
  * @see https://github.com/webgptorg/promptbook#executor
14
17
  */
15
18
  export type PipelineExecutor = {
@@ -17,8 +20,16 @@ export type PipelineExecutor = {
17
20
  };
18
21
  /**
19
22
  * @@@
23
+ *
24
+ * @@@ almost-JSON (what about errors)
20
25
  */
21
26
  export type PipelineExecutorResult = {
27
+ /**
28
+ * Result parameters of the execution
29
+ *
30
+ * Note: If the execution was not successful, there are only some of the result parameters
31
+ */
32
+ readonly outputParameters: Parameters;
22
33
  /**
23
34
  * Whether the execution was successful, details are available in `executionReport`
24
35
  */
@@ -40,13 +51,14 @@ export type PipelineExecutorResult = {
40
51
  */
41
52
  readonly executionReport: ExecutionReportJson;
42
53
  /**
43
- * Result parameters of the execution
54
+ * The prepared pipeline that was used for the execution
44
55
  *
45
- * Note: If the execution was not successful, there are only some of the result parameters
56
+ * Note: If you called `createPipelineExecutor` with fully prepared pipeline, this is the same object as this pipeline
57
+ * If you passed a pipeline that was not fully prepared, this is the same pipeline but fully prepared
46
58
  */
47
- readonly outputParameters: Parameters;
59
+ readonly preparedPipeline: PipelineJson;
48
60
  };
49
61
  /**
50
62
  * TODO: [๐Ÿง ] Should this file be in /execution or /types folder?
51
- * TODO: [๐Ÿ’ท] `assertsExecutionSuccessful` should be the method of `PipelineExecutor` result
63
+ * TODO: [๐Ÿ’ท] `assertsExecutionSuccessful` should be the method of `PipelineExecutor` result - BUT maybe NOT?
52
64
  */
@@ -1,5 +1,6 @@
1
1
  import type { string_date_iso8601 } from '../types/typeAliases';
2
2
  import type { string_model_name } from '../types/typeAliases';
3
+ import type { string_prompt } from '../types/typeAliases';
3
4
  import type { TODO_object } from '../utils/organization/TODO_object';
4
5
  import type { EmbeddingVector } from './EmbeddingVector';
5
6
  import type { PromptResultUsage } from './PromptResultUsage';
@@ -69,8 +70,22 @@ export type CommonPromptResult = {
69
70
  * Usage of the prompt execution
70
71
  */
71
72
  readonly usage: PromptResultUsage;
73
+ /**
74
+ * Exact text of the prompt (with all replacements)
75
+ *
76
+ * Note: This contains redundant information
77
+ */
78
+ readonly rawPromptContent: string_prompt;
79
+ /**
80
+ * Raw request to the model
81
+ *
82
+ * Note: This contains redundant information
83
+ */
84
+ readonly rawRequest: TODO_object | null;
72
85
  /**
73
86
  * Raw response from the model
87
+ *
88
+ * Note: This contains redundant information
74
89
  */
75
90
  readonly rawResponse: TODO_object;
76
91
  };
@@ -20,6 +20,16 @@ type CreatePipelineExecutorSettings = {
20
20
  * @default false
21
21
  */
22
22
  readonly isVerbose?: boolean;
23
+ /**
24
+ * If you pass fully prepared pipeline, this does not matter
25
+ *
26
+ * Otherwise:
27
+ * If false or not set, warning is shown when pipeline is not prepared
28
+ * If true, warning is suppressed
29
+ *
30
+ * @default false
31
+ */
32
+ readonly isNotPreparedWarningSupressed?: boolean;
23
33
  };
24
34
  /**
25
35
  * Options for `createPipelineExecutor`
@@ -47,8 +57,6 @@ interface CreatePipelineExecutorOptions {
47
57
  export declare function createPipelineExecutor(options: CreatePipelineExecutorOptions): PipelineExecutor;
48
58
  export {};
49
59
  /**
50
- * TODO: !!!!! return `preparedPipeline` from execution
51
- * TODO: !!!!! `isNotPreparedWarningSupressed`
52
60
  * TODO: Use isVerbose here (not only pass to `preparePipeline`)
53
61
  * TODO: [๐Ÿช‚] Use maxParallelCount here (not only pass to `preparePipeline`)
54
62
  * TODO: [โ™ˆ] Probably move expectations from templates to parameters
@@ -50,5 +50,5 @@ export declare class RemoteLlmExecutionTools implements LlmExecutionTools {
50
50
  }
51
51
  /**
52
52
  * TODO: [๐Ÿ“] Allow to list compatible models with each variant
53
- * TODO: [๐Ÿคนโ€โ™‚๏ธ] RemoteLlmExecutionTools should extend Destroyable and implement IDestroyable
53
+ * TODO: [๐Ÿ—ฏ] RemoteLlmExecutionTools should extend Destroyable and implement IDestroyable
54
54
  */
@@ -12,8 +12,8 @@ export declare function startRemoteServer(options: RemoteServerOptions): IDestro
12
12
  /**
13
13
  * TODO: [โš–] Expose the collection to be able to connect to same collection via createCollectionFromUrl
14
14
  * TODO: Handle progress - support streaming
15
- * TODO: [๐Ÿคนโ€โ™‚๏ธ] Do not hang up immediately but wait until client closes OR timeout
16
- * TODO: [๐Ÿคนโ€โ™‚๏ธ] Timeout on chat to free up resources
15
+ * TODO: [๐Ÿ—ฏ] Do not hang up immediately but wait until client closes OR timeout
16
+ * TODO: [๐Ÿ—ฏ] Timeout on chat to free up resources
17
17
  * TODO: [๐Ÿƒ] Pass here some security token to prevent malitious usage and/or DDoS
18
18
  * TODO: [0] Set unavailable models as undefined in `RemoteLlmExecutionTools` NOT throw error here
19
19
  */
@@ -12,6 +12,6 @@ export declare function preparePipeline(pipeline: PipelineJson, options: Prepare
12
12
  * TODO: Write tests for `preparePipeline`
13
13
  * TODO: [๐Ÿ] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
14
14
  * TODO: [๐ŸงŠ] In future one preparation can take data from previous preparation and save tokens and time
15
- * TODO: [๐ŸŽ] !!!!!! Use here countTotalUsage
15
+ * TODO: [๐ŸŽ] !!!!! Use here countTotalUsage
16
16
  * TODO: [๐Ÿ› ] Actions, instruments (and maybe knowledge) => Functions and tools
17
17
  */
@@ -41,7 +41,7 @@ export type ExecutionReportJson = {
41
41
  /**
42
42
  * The prompt which was executed
43
43
  */
44
- readonly prompt: Omit<Prompt, 'pipelineUrl' | 'parameters'>;
44
+ readonly prompt: Omit<Prompt, 'pipelineUrl'>;
45
45
  /**
46
46
  * Result of the prompt execution (if not failed during LLM execution)
47
47
  */
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@promptbook/cli",
3
- "version": "0.61.0-27",
3
+ "version": "0.61.0-29",
4
4
  "description": "Supercharge your use of large language models",
5
5
  "private": false,
6
6
  "sideEffects": false,
@@ -54,7 +54,7 @@
54
54
  }
55
55
  ],
56
56
  "peerDependencies": {
57
- "@promptbook/core": "0.61.0-27"
57
+ "@promptbook/core": "0.61.0-29"
58
58
  },
59
59
  "main": "./umd/index.umd.js",
60
60
  "module": "./esm/index.es.js",
package/umd/index.umd.js CHANGED
@@ -154,7 +154,7 @@
154
154
  /**
155
155
  * The version of the Promptbook library
156
156
  */
157
- var PROMPTBOOK_VERSION = '0.61.0-26';
157
+ var PROMPTBOOK_VERSION = '0.61.0-28';
158
158
  // TODO: !!!! List here all the versions and annotate + put into script
159
159
 
160
160
  /**
@@ -751,7 +751,7 @@
751
751
  });
752
752
  }
753
753
 
754
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-26",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-26",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0-26",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by 
comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-26",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0-26",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> 
{knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-26",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0-26",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. 
The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-26",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
754
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-28",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-28",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0-28",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by 
comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-28",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0-28",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> 
{knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-28",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0-28",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. 
The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-28",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
755
755
 
756
756
  /**
757
757
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
@@ -2480,17 +2480,17 @@
2480
2480
  */
2481
2481
  function createPipelineExecutor(options) {
2482
2482
  var _this = this;
2483
- var rawPipeline = options.pipeline, tools = options.tools, _a = options.settings, settings = _a === void 0 ? {} : _a;
2484
- var _b = settings.maxExecutionAttempts, maxExecutionAttempts = _b === void 0 ? MAX_EXECUTION_ATTEMPTS : _b, _c = settings.maxParallelCount, maxParallelCount = _c === void 0 ? MAX_PARALLEL_COUNT : _c, _d = settings.isVerbose, isVerbose = _d === void 0 ? false : _d;
2485
- validatePipeline(rawPipeline);
2483
+ var pipeline = options.pipeline, tools = options.tools, _a = options.settings, settings = _a === void 0 ? {} : _a;
2484
+ var _b = settings.maxExecutionAttempts, maxExecutionAttempts = _b === void 0 ? MAX_EXECUTION_ATTEMPTS : _b, _c = settings.maxParallelCount, maxParallelCount = _c === void 0 ? MAX_PARALLEL_COUNT : _c, _d = settings.isVerbose, isVerbose = _d === void 0 ? false : _d, _e = settings.isNotPreparedWarningSupressed, isNotPreparedWarningSupressed = _e === void 0 ? false : _e;
2485
+ validatePipeline(pipeline);
2486
2486
  var llmTools = joinLlmExecutionTools.apply(void 0, __spreadArray([], __read(arrayableToArray(tools.llm)), false));
2487
- var pipeline;
2488
- if (isPipelinePrepared(rawPipeline)) {
2489
- pipeline = rawPipeline;
2487
+ var preparedPipeline;
2488
+ if (isPipelinePrepared(pipeline)) {
2489
+ preparedPipeline = pipeline;
2490
2490
  }
2491
- else {
2492
- // TODO: !!!!! This should be maybe warning in report
2493
- console.warn(spaceTrim.spaceTrim("\n Pipeline ".concat(rawPipeline.pipelineUrl || rawPipeline.sourceFile || rawPipeline.title, " is not prepared\n\n ").concat(rawPipeline.sourceFile, "\n\n It will be prepared ad-hoc before the first execution\n But it is recommended to prepare the pipeline during collection preparation\n\n @see more at https://ptbk.io/prepare-pipeline\n ")));
2491
+ else if (isNotPreparedWarningSupressed !== true) {
2492
+ // TODO: !!!!! Test that this work as intended together with prepared pipeline
2493
+ console.warn(spaceTrim.spaceTrim("\n Pipeline ".concat(pipeline.pipelineUrl || pipeline.sourceFile || pipeline.title, " is not prepared\n\n ").concat(pipeline.sourceFile, "\n\n It will be prepared ad-hoc before the first execution and **returned as `preparedPipeline` in `PipelineExecutorResult`**\n But it is recommended to prepare the pipeline during collection preparation\n\n @see more at https://ptbk.io/prepare-pipeline\n ")));
2494
2494
  }
2495
2495
  var pipelineExecutor = function (inputParameters, onProgress) { return __awaiter(_this, void 0, void 0, function () {
2496
2496
  // TODO: !!! Extract to separate functions and files - ALL FUNCTIONS BELOW
@@ -2507,9 +2507,9 @@
2507
2507
  template) {
2508
2508
  return __awaiter(this, void 0, void 0, function () {
2509
2509
  return __generator(this, function (_a) {
2510
- // TODO: [โ™จ] Implement Better - use real index and keyword search
2510
+ // TODO: [โ™จ] Implement Better - use real index and keyword search from `template` and {samples}
2511
2511
  TODO_USE(template);
2512
- return [2 /*return*/, pipeline.knowledgePieces.map(function (_a) {
2512
+ return [2 /*return*/, preparedPipeline.knowledgePieces.map(function (_a) {
2513
2513
  var content = _a.content;
2514
2514
  return "- ".concat(content);
2515
2515
  }).join('\n')];
@@ -2582,7 +2582,7 @@
2582
2582
  case 0:
2583
2583
  name = "pipeline-executor-frame-".concat(currentTemplate.name);
2584
2584
  title = currentTemplate.title;
2585
- priority = pipeline.promptTemplates.length - pipeline.promptTemplates.indexOf(currentTemplate);
2585
+ priority = preparedPipeline.promptTemplates.length - preparedPipeline.promptTemplates.indexOf(currentTemplate);
2586
2586
  if (!onProgress /* <- [3] */) return [3 /*break*/, 2]; /* <- [3] */
2587
2587
  return [4 /*yield*/, onProgress({
2588
2588
  name: name,
@@ -2686,13 +2686,13 @@
2686
2686
  case 7:
2687
2687
  prompt = {
2688
2688
  title: currentTemplate.title,
2689
- pipelineUrl: "".concat(pipeline.pipelineUrl
2690
- ? pipeline.pipelineUrl
2689
+ pipelineUrl: "".concat(preparedPipeline.pipelineUrl
2690
+ ? preparedPipeline.pipelineUrl
2691
2691
  : 'anonymous' /* <- TODO: [๐Ÿง ] How to deal with anonymous pipelines, do here some auto-url like SHA-256 based ad-hoc identifier? */, "#").concat(currentTemplate.name),
2692
2692
  parameters: parameters,
2693
2693
  content: preparedContent,
2694
2694
  modelRequirements: currentTemplate.modelRequirements,
2695
- expectations: __assign(__assign({}, (pipeline.personas.find(function (_a) {
2695
+ expectations: __assign(__assign({}, (preparedPipeline.personas.find(function (_a) {
2696
2696
  var name = _a.name;
2697
2697
  return name === currentTemplate.personaName;
2698
2698
  }) || {})), currentTemplate.expectations),
@@ -2984,7 +2984,7 @@
2984
2984
  ) {
2985
2985
  // TODO: [๐Ÿง ] Maybe put other blockTypes into report
2986
2986
  executionReport.promptExecutions.push({
2987
- prompt: __assign(__assign({ '!!! All information': null }, prompt), { '!!! Wanted information': null, title: currentTemplate.title /* <- Note: If title in pipeline contains emojis, pass it innto report */, content: prompt.content, modelRequirements: prompt.modelRequirements, expectations: prompt.expectations, expectFormat: prompt.expectFormat }),
2987
+ prompt: __assign({}, prompt),
2988
2988
  result: result || undefined,
2989
2989
  error: expectError || undefined,
2990
2990
  });
@@ -3025,7 +3025,7 @@
3025
3025
  var outputParameters = {};
3026
3026
  try {
3027
3027
  // Note: Filter ONLY output parameters
3028
- for (var _b = __values(pipeline.parameters.filter(function (_a) {
3028
+ for (var _b = __values(preparedPipeline.parameters.filter(function (_a) {
3029
3029
  var isOutput = _a.isOutput;
3030
3030
  return isOutput;
3031
3031
  })), _c = _b.next(); !_c.done; _c = _b.next()) {
@@ -3052,29 +3052,29 @@
3052
3052
  return __generator(this, function (_g) {
3053
3053
  switch (_g.label) {
3054
3054
  case 0:
3055
- if (!(pipeline === undefined)) return [3 /*break*/, 2];
3056
- return [4 /*yield*/, preparePipeline(rawPipeline, {
3055
+ if (!(preparedPipeline === undefined)) return [3 /*break*/, 2];
3056
+ return [4 /*yield*/, preparePipeline(pipeline, {
3057
3057
  llmTools: llmTools,
3058
3058
  isVerbose: isVerbose,
3059
3059
  maxParallelCount: maxParallelCount,
3060
3060
  })];
3061
3061
  case 1:
3062
- pipeline = _g.sent();
3062
+ preparedPipeline = _g.sent();
3063
3063
  _g.label = 2;
3064
3064
  case 2:
3065
3065
  errors = [];
3066
3066
  warnings = [];
3067
3067
  executionReport = {
3068
- pipelineUrl: pipeline.pipelineUrl,
3069
- title: pipeline.title,
3068
+ pipelineUrl: preparedPipeline.pipelineUrl,
3069
+ title: preparedPipeline.title,
3070
3070
  promptbookUsedVersion: PROMPTBOOK_VERSION,
3071
- promptbookRequestedVersion: pipeline.promptbookVersion,
3072
- description: pipeline.description,
3071
+ promptbookRequestedVersion: preparedPipeline.promptbookVersion,
3072
+ description: preparedPipeline.description,
3073
3073
  promptExecutions: [],
3074
3074
  };
3075
3075
  try {
3076
3076
  // Note: Check that all input input parameters are defined
3077
- for (_a = __values(pipeline.parameters.filter(function (_a) {
3077
+ for (_a = __values(preparedPipeline.parameters.filter(function (_a) {
3078
3078
  var isInput = _a.isInput;
3079
3079
  return isInput;
3080
3080
  })), _b = _a.next(); !_b.done; _b = _a.next()) {
@@ -3089,6 +3089,7 @@
3089
3089
  executionReport: executionReport,
3090
3090
  outputParameters: {},
3091
3091
  usage: ZERO_USAGE,
3092
+ preparedPipeline: preparedPipeline,
3092
3093
  })];
3093
3094
  }
3094
3095
  }
@@ -3101,7 +3102,7 @@
3101
3102
  finally { if (e_1) throw e_1.error; }
3102
3103
  }
3103
3104
  _loop_1 = function (parameterName) {
3104
- var parameter = pipeline.parameters.find(function (_a) {
3105
+ var parameter = preparedPipeline.parameters.find(function (_a) {
3105
3106
  var name = _a.name;
3106
3107
  return name === parameterName;
3107
3108
  });
@@ -3118,6 +3119,7 @@
3118
3119
  executionReport: executionReport,
3119
3120
  outputParameters: {},
3120
3121
  usage: ZERO_USAGE,
3122
+ preparedPipeline: preparedPipeline,
3121
3123
  }) };
3122
3124
  }
3123
3125
  };
@@ -3141,7 +3143,7 @@
3141
3143
  _g.label = 3;
3142
3144
  case 3:
3143
3145
  _g.trys.push([3, 8, , 9]);
3144
- resovedParameterNames_1 = pipeline.parameters
3146
+ resovedParameterNames_1 = preparedPipeline.parameters
3145
3147
  .filter(function (_a) {
3146
3148
  var isInput = _a.isInput;
3147
3149
  return isInput;
@@ -3150,7 +3152,7 @@
3150
3152
  var name = _a.name;
3151
3153
  return name;
3152
3154
  });
3153
- unresovedTemplates_1 = __spreadArray([], __read(pipeline.promptTemplates), false);
3155
+ unresovedTemplates_1 = __spreadArray([], __read(preparedPipeline.promptTemplates), false);
3154
3156
  resolving_1 = [];
3155
3157
  loopLimit = LOOP_LIMIT;
3156
3158
  _loop_2 = function () {
@@ -3227,6 +3229,7 @@
3227
3229
  usage: usage_1,
3228
3230
  executionReport: executionReport,
3229
3231
  outputParameters: outputParameters_1,
3232
+ preparedPipeline: preparedPipeline,
3230
3233
  })];
3231
3234
  case 9:
3232
3235
  usage = addUsage.apply(void 0, __spreadArray([], __read(executionReport.promptExecutions.map(function (_a) {
@@ -3241,6 +3244,7 @@
3241
3244
  usage: usage,
3242
3245
  executionReport: executionReport,
3243
3246
  outputParameters: outputParameters,
3247
+ preparedPipeline: preparedPipeline,
3244
3248
  })];
3245
3249
  }
3246
3250
  });
@@ -3248,8 +3252,6 @@
3248
3252
  return pipelineExecutor;
3249
3253
  }
3250
3254
  /**
3251
- * TODO: !!!!! return `preparedPipeline` from execution
3252
- * TODO: !!!!! `isNotPreparedWarningSupressed`
3253
3255
  * TODO: Use isVerbose here (not only pass to `preparePipeline`)
3254
3256
  * TODO: [๐Ÿช‚] Use maxParallelCount here (not only pass to `preparePipeline`)
3255
3257
  * TODO: [โ™ˆ] Probbably move expectations from templates to parameters
@@ -3309,6 +3311,7 @@
3309
3311
  outputParameters = result.outputParameters;
3310
3312
  knowledgePiecesRaw = outputParameters.knowledgePieces;
3311
3313
  knowledgeTextPieces = (knowledgePiecesRaw || '').split('\n---\n');
3314
+ // <- TODO: !!!!! Smarter split and filter out empty pieces
3312
3315
  if (isVerbose) {
3313
3316
  console.info('knowledgeTextPieces:', knowledgeTextPieces);
3314
3317
  }
@@ -3534,7 +3537,7 @@
3534
3537
  case 0:
3535
3538
  _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? MAX_PARALLEL_COUNT : _a;
3536
3539
  promptTemplates = pipeline.promptTemplates, parameters = pipeline.parameters, knowledgePiecesCount = pipeline.knowledgePiecesCount;
3537
- // TODO: !!!!!! Apply samples to each template (if missing and is for the template defined)
3540
+ // TODO: !!!!! Apply samples to each template (if missing and is for the template defined)
3538
3541
  TODO_USE(parameters);
3539
3542
  promptTemplatesPrepared = new Array(promptTemplates.length);
3540
3543
  return [4 /*yield*/, forEachAsync(promptTemplates, { maxParallelCount: maxParallelCount /* <- TODO: [๐Ÿช‚] When there are subtasks, this maximul limit can be broken */ }, function (template, index) { return __awaiter(_this, void 0, void 0, function () {
@@ -3643,7 +3646,7 @@
3643
3646
  * TODO: Write tests for `preparePipeline`
3644
3647
  * TODO: [๐Ÿ] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
3645
3648
  * TODO: [๐ŸงŠ] In future one preparation can take data from previous preparation and save tokens and time
3646
- * TODO: [๐ŸŽ] !!!!!! Use here countTotalUsage
3649
+ * TODO: [๐ŸŽ] !!!!! Use here countTotalUsage
3647
3650
  * TODO: [๐Ÿ› ] Actions, instruments (and maybe knowledge) => Functions and tools
3648
3651
  */
3649
3652
 
@@ -6238,7 +6241,7 @@
6238
6241
  */
6239
6242
  AnthropicClaudeExecutionTools.prototype.callChatModel = function (prompt) {
6240
6243
  return __awaiter(this, void 0, void 0, function () {
6241
- var content, parameters, modelRequirements, modelName, rawRequest, start, complete, rawResponse, resultContent, usage;
6244
+ var content, parameters, modelRequirements, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
6242
6245
  return __generator(this, function (_a) {
6243
6246
  switch (_a.label) {
6244
6247
  case 0:
@@ -6251,6 +6254,7 @@
6251
6254
  throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
6252
6255
  }
6253
6256
  modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
6257
+ rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
6254
6258
  rawRequest = {
6255
6259
  model: modelRequirements.modelName || this.getDefaultChatModel().modelName,
6256
6260
  max_tokens: modelRequirements.maxTokens || 4096,
@@ -6262,7 +6266,7 @@
6262
6266
  messages: [
6263
6267
  {
6264
6268
  role: 'user',
6265
- content: replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName })),
6269
+ content: rawPromptContent,
6266
6270
  },
6267
6271
  ],
6268
6272
  // TODO: Is here some equivalent of user identification?> user: this.options.user,
@@ -6299,8 +6303,10 @@
6299
6303
  complete: complete,
6300
6304
  },
6301
6305
  usage: usage,
6306
+ rawPromptContent: rawPromptContent,
6307
+ rawRequest: rawRequest,
6302
6308
  rawResponse: rawResponse,
6303
- // <- [๐Ÿคนโ€โ™‚๏ธ]
6309
+ // <- [๐Ÿ—ฏ]
6304
6310
  }];
6305
6311
  }
6306
6312
  });
@@ -6333,7 +6339,7 @@
6333
6339
 
6334
6340
  const rawRequest: xxxx.Completions.CompletionCreateParamsNonStreaming = {
6335
6341
  ...modelSettings,
6336
- prompt: replaceParameters(content, { ...parameters, modelName }),
6342
+ prompt: rawPromptContent,
6337
6343
  user: this.options.user,
6338
6344
  };
6339
6345
  const start: string_date_iso8601 = getCurrentIsoDate();
@@ -6372,7 +6378,7 @@
6372
6378
  },
6373
6379
  usage,
6374
6380
  rawResponse,
6375
- // <- [๐Ÿคนโ€โ™‚๏ธ]
6381
+ // <- [๐Ÿ—ฏ]
6376
6382
  };
6377
6383
  }
6378
6384
  */
@@ -6840,7 +6846,7 @@
6840
6846
  */
6841
6847
  OpenAiExecutionTools.prototype.callChatModel = function (prompt) {
6842
6848
  return __awaiter(this, void 0, void 0, function () {
6843
- var content, parameters, modelRequirements, expectFormat, modelName, modelSettings, rawRequest, start, complete, rawResponse, resultContent, usage;
6849
+ var content, parameters, modelRequirements, expectFormat, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
6844
6850
  return __generator(this, function (_a) {
6845
6851
  switch (_a.label) {
6846
6852
  case 0:
@@ -6866,6 +6872,7 @@
6866
6872
  type: 'json_object',
6867
6873
  };
6868
6874
  }
6875
+ rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
6869
6876
  rawRequest = __assign(__assign({}, modelSettings), { messages: __spreadArray(__spreadArray([], __read((modelRequirements.systemMessage === undefined
6870
6877
  ? []
6871
6878
  : [
@@ -6876,7 +6883,7 @@
6876
6883
  ])), false), [
6877
6884
  {
6878
6885
  role: 'user',
6879
- content: replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName })),
6886
+ content: rawPromptContent,
6880
6887
  },
6881
6888
  ], false), user: this.options.user });
6882
6889
  start = getCurrentIsoDate();
@@ -6911,8 +6918,10 @@
6911
6918
  complete: complete,
6912
6919
  },
6913
6920
  usage: usage,
6921
+ rawPromptContent: rawPromptContent,
6922
+ rawRequest: rawRequest,
6914
6923
  rawResponse: rawResponse,
6915
- // <- [๐Ÿคนโ€โ™‚๏ธ]
6924
+ // <- [๐Ÿ—ฏ]
6916
6925
  }];
6917
6926
  }
6918
6927
  });
@@ -6923,7 +6932,7 @@
6923
6932
  */
6924
6933
  OpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
6925
6934
  return __awaiter(this, void 0, void 0, function () {
6926
- var content, parameters, modelRequirements, modelName, modelSettings, rawRequest, start, complete, rawResponse, resultContent, usage;
6935
+ var content, parameters, modelRequirements, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
6927
6936
  return __generator(this, function (_a) {
6928
6937
  switch (_a.label) {
6929
6938
  case 0:
@@ -6944,7 +6953,8 @@
6944
6953
  // <- TODO: [๐Ÿˆ] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
6945
6954
  // <- Note: [๐Ÿง†]
6946
6955
  };
6947
- rawRequest = __assign(__assign({}, modelSettings), { prompt: replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName })), user: this.options.user });
6956
+ rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
6957
+ rawRequest = __assign(__assign({}, modelSettings), { prompt: rawPromptContent, user: this.options.user });
6948
6958
  start = getCurrentIsoDate();
6949
6959
  if (this.options.isVerbose) {
6950
6960
  console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
@@ -6974,8 +6984,10 @@
6974
6984
  complete: complete,
6975
6985
  },
6976
6986
  usage: usage,
6987
+ rawPromptContent: rawPromptContent,
6988
+ rawRequest: rawRequest,
6977
6989
  rawResponse: rawResponse,
6978
- // <- [๐Ÿคนโ€โ™‚๏ธ]
6990
+ // <- [๐Ÿ—ฏ]
6979
6991
  }];
6980
6992
  }
6981
6993
  });
@@ -6986,7 +6998,7 @@
6986
6998
  */
6987
6999
  OpenAiExecutionTools.prototype.callEmbeddingModel = function (prompt) {
6988
7000
  return __awaiter(this, void 0, void 0, function () {
6989
- var content, parameters, modelRequirements, modelName, rawRequest, start, complete, rawResponse, resultContent, usage;
7001
+ var content, parameters, modelRequirements, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
6990
7002
  return __generator(this, function (_a) {
6991
7003
  switch (_a.label) {
6992
7004
  case 0:
@@ -6999,8 +7011,9 @@
6999
7011
  throw new PipelineExecutionError('Use embed only for EMBEDDING variant');
7000
7012
  }
7001
7013
  modelName = modelRequirements.modelName || this.getDefaultEmbeddingModel().modelName;
7014
+ rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
7002
7015
  rawRequest = {
7003
- input: replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName })),
7016
+ input: rawPromptContent,
7004
7017
  model: modelName,
7005
7018
  };
7006
7019
  start = getCurrentIsoDate();
@@ -7028,8 +7041,10 @@
7028
7041
  complete: complete,
7029
7042
  },
7030
7043
  usage: usage,
7044
+ rawPromptContent: rawPromptContent,
7045
+ rawRequest: rawRequest,
7031
7046
  rawResponse: rawResponse,
7032
- // <- [๐Ÿคนโ€โ™‚๏ธ]
7047
+ // <- [๐Ÿ—ฏ]
7033
7048
  }];
7034
7049
  }
7035
7050
  });