@promptbook/node 0.61.0-22 → 0.61.0-24
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/esm/index.es.js +186 -61
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/types.index.d.ts +2 -2
- package/esm/typings/src/config.d.ts +8 -4
- package/esm/typings/src/conversion/pipelineStringToJsonSync.d.ts +1 -0
- package/esm/typings/src/conversion/utils/stringifyPipelineJson.d.ts +1 -0
- package/esm/typings/src/execution/PipelineExecutor.d.ts +32 -24
- package/esm/typings/src/execution/PromptResultUsage.d.ts +3 -0
- package/esm/typings/src/prepare/isPipelinePrepared.d.ts +4 -0
- package/esm/typings/src/prepare/preparePipeline.d.ts +0 -1
- package/esm/typings/src/prepare/prepareTemplates.d.ts +31 -0
- package/esm/typings/src/prepare/unpreparePipeline.d.ts +2 -0
- package/esm/typings/src/types/PipelineJson/KnowledgePieceJson.d.ts +3 -0
- package/esm/typings/src/types/PipelineJson/KnowledgeSourceJson.d.ts +3 -0
- package/esm/typings/src/types/PipelineJson/LlmTemplateJson.d.ts +1 -0
- package/esm/typings/src/types/PipelineJson/PersonaJson.d.ts +3 -0
- package/esm/typings/src/types/PipelineJson/PipelineJson.d.ts +1 -0
- package/esm/typings/src/types/PipelineJson/PreparationJson.d.ts +1 -0
- package/esm/typings/src/types/PipelineJson/PromptDialogJson.d.ts +4 -3
- package/esm/typings/src/types/PipelineJson/PromptTemplateJsonCommon.d.ts +10 -0
- package/esm/typings/src/types/PipelineJson/PromptTemplateParameterJson.d.ts +1 -0
- package/esm/typings/src/types/PipelineJson/ScriptJson.d.ts +5 -2
- package/esm/typings/src/types/PipelineJson/SimpleTemplateJson.d.ts +5 -2
- package/package.json +2 -2
- package/umd/index.umd.js +186 -61
- package/umd/index.umd.js.map +1 -1
- package/umd/typings/src/_packages/types.index.d.ts +2 -2
- package/umd/typings/src/config.d.ts +8 -4
- package/umd/typings/src/conversion/pipelineStringToJsonSync.d.ts +1 -0
- package/umd/typings/src/conversion/utils/stringifyPipelineJson.d.ts +1 -0
- package/umd/typings/src/execution/PipelineExecutor.d.ts +32 -24
- package/umd/typings/src/execution/PromptResultUsage.d.ts +3 -0
- package/umd/typings/src/prepare/isPipelinePrepared.d.ts +4 -0
- package/umd/typings/src/prepare/preparePipeline.d.ts +0 -1
- package/umd/typings/src/prepare/prepareTemplates.d.ts +31 -0
- package/umd/typings/src/prepare/unpreparePipeline.d.ts +2 -0
- package/umd/typings/src/types/PipelineJson/KnowledgePieceJson.d.ts +3 -0
- package/umd/typings/src/types/PipelineJson/KnowledgeSourceJson.d.ts +3 -0
- package/umd/typings/src/types/PipelineJson/LlmTemplateJson.d.ts +1 -0
- package/umd/typings/src/types/PipelineJson/PersonaJson.d.ts +3 -0
- package/umd/typings/src/types/PipelineJson/PipelineJson.d.ts +1 -0
- package/umd/typings/src/types/PipelineJson/PreparationJson.d.ts +1 -0
- package/umd/typings/src/types/PipelineJson/PromptDialogJson.d.ts +4 -3
- package/umd/typings/src/types/PipelineJson/PromptTemplateJsonCommon.d.ts +10 -0
- package/umd/typings/src/types/PipelineJson/PromptTemplateParameterJson.d.ts +1 -0
- package/umd/typings/src/types/PipelineJson/ScriptJson.d.ts +5 -2
- package/umd/typings/src/types/PipelineJson/SimpleTemplateJson.d.ts +5 -2
package/esm/index.es.js
CHANGED
@@ -186,15 +186,26 @@ var MAX_EXECUTION_ATTEMPTS = 3;
  * The name of the builded pipeline collection made by CLI `ptbk make` and for lookup in `createCollectionFromDirectory`
  */
 var PIPELINE_COLLECTION_BASE_FILENAME = "index";
+/**
+ * Nonce which is used for replacing things in strings
+ */
+var REPLACING_NONCE = 'u$k42k%!V2zo34w7Fu#@QUHYPW';
 /**
  * The names of the parameters that are reserved for special purposes
  */
 var RESERVED_PARAMETER_NAMES = deepFreeze([
     'context',
+    'knowledge',
+    'samples',
+    'modelName',
     'currentDate',
     // <- TODO: Add more like 'date', 'modelName',...
     // <- TODO: Add [emoji] + instructions ACRY when adding new reserved parameter
 ]);
+/**
+ * @@@
+ */
+var RESERVED_PARAMETER_MISSING_VALUE = 'MISSING-' + REPLACING_NONCE;
 /*
 TODO: !!! Just testing false-negative detection of [🟡][🟢][🔵][⚪] leak
 */
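Note: the hunk above introduces a nonce-based sentinel. Every reserved parameter that has no real value yet is filled with `RESERVED_PARAMETER_MISSING_VALUE`, so an accidental leak into a prompt is detectable later at replacement time. A minimal standalone sketch of the pattern (illustrative TypeScript, not the package's exported API):

```ts
// Illustrative sketch, not the package's exported API.
// The random nonce makes the "missing" marker practically collision-free,
// so a reserved parameter that was never resolved is detectable at replace time.
const REPLACING_NONCE = 'u$k42k%!V2zo34w7Fu#@QUHYPW';
const RESERVED_PARAMETER_MISSING_VALUE = 'MISSING-' + REPLACING_NONCE;

function assertNoMissingValues(parameters: Record<string, string>): void {
    for (const [name, value] of Object.entries(parameters)) {
        if (value === RESERVED_PARAMETER_MISSING_VALUE) {
            throw new Error(`Parameter {${name}} has missing value`);
        }
    }
}

// A sentinel that leaked through to execution is caught here:
assertNoMissingValues({ context: 'Some context', modelName: RESERVED_PARAMETER_MISSING_VALUE });
// -> throws: Parameter {modelName} has missing value
```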
@@ -638,7 +649,7 @@ function forEachAsync(array, options, callbackfunction) {
     });
 }

-var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-
+var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-23",parameters:[{name:"content",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {content}",dependentParameterNames:["content"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-23",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0-23",parameters:[{name:"content",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {content}",dependentParameterNames:["content"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-23",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0-23",parameters:[{name:"content",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {content}",expectations:{words:{min:1,max:8}},dependentParameterNames:["content"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-23",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0-23",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-23",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];

 /**
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
@@ -906,7 +917,7 @@ function validatePipeline(pipeline) {
         throw new PipelineLogicError("Parameter {".concat(template.resultingParameterName, "} is defined multiple times"));
     }
     if (RESERVED_PARAMETER_NAMES.includes(template.resultingParameterName)) {
-        throw new PipelineLogicError("Parameter name {".concat(template.resultingParameterName, "} is reserved, please use
+        throw new PipelineLogicError("Parameter name {".concat(template.resultingParameterName, "} is reserved, please use different name"));
     }
     definedParameters.add(template.resultingParameterName);
     if (template.blockType === 'PROMPT_TEMPLATE' && template.modelRequirements.modelVariant === undefined) {
@@ -1076,14 +1087,17 @@ var ReferenceError$1 = /** @class */ (function (_super) {
  * Unprepare just strips the preparation data of the pipeline
  */
 function unpreparePipeline(pipeline) {
-    var personas = pipeline.personas, knowledgeSources = pipeline.knowledgeSources;
+    var personas = pipeline.personas, knowledgeSources = pipeline.knowledgeSources, promptTemplates = pipeline.promptTemplates;
     personas = personas.map(function (persona) { return (__assign(__assign({}, persona), { modelRequirements: undefined, preparationIds: undefined })); });
     knowledgeSources = knowledgeSources.map(function (knowledgeSource) { return (__assign(__assign({}, knowledgeSource), { preparationIds: undefined })); });
-    return __assign(__assign({},
+    promptTemplates = promptTemplates.map(function (promptTemplate) { return (__assign(__assign({}, promptTemplate), { preparedContent: undefined })); });
+    return __assign(__assign({}, pipeline), { promptTemplates: promptTemplates, knowledgeSources: knowledgeSources, knowledgePieces: [], personas: personas, preparations: [] });
 }
 /**
  * TODO: [🔼] !!! Export via `@promptbook/core`
+ * TODO: [🧿] Maybe do same process with same granularity and subfinctions as `preparePipeline`
  * TODO: Write tests for `preparePipeline`
+ * TODO: [🍙] Make some standart order of json properties
  */

 /**
@@ -1985,22 +1999,27 @@ function isPipelinePrepared(pipeline) {
     // Note: Ignoring `pipeline.preparations` @@@
     // Note: Ignoring `pipeline.knowledgePieces` @@@
     if (!pipeline.personas.every(function (persona) { return persona.modelRequirements !== undefined; })) {
-        console.log('!!!!', 'Not all personas have modelRequirements');
         return false;
     }
     if (!pipeline.knowledgeSources.every(function (knowledgeSource) { return knowledgeSource.preparationIds !== undefined; })) {
-        console.log('!!!!', 'Not all knowledgeSources have preparationIds');
         return false;
     }
-
-
-
+    /*
+    TODO: [🧠][🍫] `promptTemplates` can not be determined if they are fully prepared SO ignoring them
+    > if (!pipeline.promptTemplates.every(({ preparedContent }) => preparedContent === undefined)) {
+    >     return false;
+    > }
+    */
     return true;
 }
 /**
  * TODO: [🐠] Maybe base this on `makeValidator`
  * TODO: [🔼] Export via core or utils
  * TODO: [🧊] Pipeline can be partially prepared, this should return true ONLY if fully prepared
+ * TODO: [🧿] Maybe do same process with same granularity and subfinctions as `preparePipeline`
+ *       - Is context in each template
+ *       - Are samples prepared
+ *       - Are templates prepared
  */

 /**
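Note: for reference, the checks `isPipelinePrepared` performs after this hunk, de-compiled into a standalone sketch (the types are reduced stand-ins, not the real `PipelineJson` typings):

```ts
// Reduced stand-in types, not the real PipelineJson typings.
type Persona = { modelRequirements?: object };
type KnowledgeSource = { preparationIds?: ReadonlyArray<number> };
type Pipeline = { personas: ReadonlyArray<Persona>; knowledgeSources: ReadonlyArray<KnowledgeSource> };

function isPipelinePrepared(pipeline: Pipeline): boolean {
    // Every persona must have its model requirements resolved
    if (!pipeline.personas.every((persona) => persona.modelRequirements !== undefined)) {
        return false;
    }
    // Every knowledge source must have been processed by some preparation
    if (!pipeline.knowledgeSources.every((source) => source.preparationIds !== undefined)) {
        return false;
    }
    // promptTemplates are deliberately ignored: an undefined `preparedContent`
    // is also a valid prepared state, so it cannot signal "unprepared" (see the [🍫] note above)
    return true;
}
```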
@@ -2062,6 +2081,22 @@ var LimitReachedError = /** @class */ (function (_super) {
  * @throws {PipelineExecutionError} if parameter is not defined, not closed, or not opened
  */
 function replaceParameters(template, parameters) {
+    var e_1, _a;
+    try {
+        for (var _b = __values(Object.entries(parameters)), _c = _b.next(); !_c.done; _c = _b.next()) {
+            var _d = __read(_c.value, 2), parameterName = _d[0], parameterValue = _d[1];
+            if (parameterValue === RESERVED_PARAMETER_MISSING_VALUE) {
+                throw new UnexpectedError("Parameter {".concat(parameterName, "} has missing value"));
+            }
+        }
+    }
+    catch (e_1_1) { e_1 = { error: e_1_1 }; }
+    finally {
+        try {
+            if (_c && !_c.done && (_a = _b.return)) _a.call(_b);
+        }
+        finally { if (e_1) throw e_1.error; }
+    }
     var replacedTemplate = template;
     var match;
     var loopLimit = LOOP_LIMIT;
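Note: the guard above is the compiled (tslib-downleveled) form of a plain loop over `Object.entries`. A reduced reading of `replaceParameters` with the new guard (the substitution itself is simplified here; the original loops with a `LOOP_LIMIT` guard and throws the package's `UnexpectedError`/`PipelineExecutionError` classes):

```ts
// A reduced reading, not the package's implementation.
const RESERVED_PARAMETER_MISSING_VALUE = 'MISSING-u$k42k%!V2zo34w7Fu#@QUHYPW';

function replaceParameters(template: string, parameters: Record<string, string>): string {
    // New in this version: refuse to splice a "missing value" sentinel into a prompt
    for (const [parameterName, parameterValue] of Object.entries(parameters)) {
        if (parameterValue === RESERVED_PARAMETER_MISSING_VALUE) {
            throw new Error(`Parameter {${parameterName}} has missing value`);
        }
    }
    // Simplified substitution; the original also reports unclosed/unopened {braces}
    return template.replace(/{(\w+)}/g, (_match, name: string) => {
        const value = parameters[name];
        if (value === undefined) {
            throw new Error(`Parameter {${name}} is not defined`);
        }
        return value;
    });
}

replaceParameters('Hello {name}!', { name: 'World' }); // -> 'Hello World!'
```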
@@ -2187,7 +2222,7 @@ function union() {
 /**
  * The version of the Promptbook library
  */
-var PROMPTBOOK_VERSION = '0.61.0-
+var PROMPTBOOK_VERSION = '0.61.0-23';
 // TODO: !!!! List here all the versions and annotate + put into script

 /**
@@ -2317,14 +2352,25 @@ function createPipelineExecutor(options) {
         pipeline = rawPipeline;
     }
     else {
-
+        // TODO: !!!! This should be maybe warning in report
+        console.warn(spaceTrim$1("\n Pipeline ".concat(rawPipeline.pipelineUrl || rawPipeline.sourceFile || rawPipeline.title, " is not prepared\n\n ").concat(rawPipeline.sourceFile, "\n\n It will be prepared ad-hoc before the first execution\n But it is recommended to prepare the pipeline during collection preparation\n\n @see more at https://ptbk.io/prepare-pipeline\n ")));
     }
     var pipelineExecutor = function (inputParameters, onProgress) { return __awaiter(_this, void 0, void 0, function () {
+        // TODO: !!!!! Extract to separate functions and files - ALL FUNCTIONS BELOW
         function getContextForTemplate(// <- TODO: [🧠][🥜]
         template) {
             return __awaiter(this, void 0, void 0, function () {
                 return __generator(this, function (_a) {
-
+                    TODO_USE(template);
+                    return [2 /*return*/, RESERVED_PARAMETER_MISSING_VALUE /* <- TODO: !!!! Implement */];
+                });
+            });
+        }
+        function getKnowledgeForTemplate(// <- TODO: [🧠][🥜]
+        template) {
+            return __awaiter(this, void 0, void 0, function () {
+                return __generator(this, function (_a) {
+                    // TODO: !!!! Implement Better - use real index and keyword search
                     TODO_USE(template);
                     return [2 /*return*/, pipeline.knowledgePieces.map(function (_a) {
                         var content = _a.content;
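Note: the new `console.warn` recommends preparing pipelines during collection preparation instead of ad-hoc before the first execution. A neutral sketch of that flow (the diff's own TODOs note that `preparePipeline` is not yet exported via `@promptbook/core`, so the helpers are injected rather than imported; `PrepareOptions` is a stand-in type):

```ts
// Neutral sketch of "prepare during collection preparation"; helpers are
// injected because the package does not (yet) export them from `@promptbook/core`.
type PrepareOptions = { maxParallelCount?: number }; // <- stand-in for the real options

async function ensurePrepared<TPipeline>(
    rawPipeline: TPipeline,
    isPipelinePrepared: (pipeline: TPipeline) => boolean,
    preparePipeline: (pipeline: TPipeline, options: PrepareOptions) => Promise<TPipeline>,
    options: PrepareOptions = {},
): Promise<TPipeline> {
    // Doing this once at build time means the executor never has to warn
    // "It will be prepared ad-hoc before the first execution"
    return isPipelinePrepared(rawPipeline) ? rawPipeline : preparePipeline(rawPipeline, options);
}
```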
@@ -2333,19 +2379,39 @@ function createPipelineExecutor(options) {
                 });
             });
         }
+        function getSamplesForTemplate(// <- TODO: [🧠][🥜]
+        template) {
+            return __awaiter(this, void 0, void 0, function () {
+                return __generator(this, function (_a) {
+                    // TODO: !!!! Implement Better - use real index and keyword search
+                    TODO_USE(template);
+                    return [2 /*return*/, RESERVED_PARAMETER_MISSING_VALUE /* <- TODO: !!!! Implement */];
+                });
+            });
+        }
         function getReservedParametersForTemplate(template) {
             return __awaiter(this, void 0, void 0, function () {
-                var context, currentDate, reservedParameters, RESERVED_PARAMETER_NAMES_1, RESERVED_PARAMETER_NAMES_1_1, parameterName;
+                var context, knowledge, samples, currentDate, modelName, reservedParameters, RESERVED_PARAMETER_NAMES_1, RESERVED_PARAMETER_NAMES_1_1, parameterName;
                 var e_3, _a;
                 return __generator(this, function (_b) {
                     switch (_b.label) {
                         case 0: return [4 /*yield*/, getContextForTemplate(template)];
                         case 1:
                             context = _b.sent();
+                            return [4 /*yield*/, getKnowledgeForTemplate(template)];
+                        case 2:
+                            knowledge = _b.sent();
+                            return [4 /*yield*/, getSamplesForTemplate(template)];
+                        case 3:
+                            samples = _b.sent();
                             currentDate = new Date().toISOString();
+                            modelName = RESERVED_PARAMETER_MISSING_VALUE;
                             reservedParameters = {
                                 context: context,
+                                knowledge: knowledge,
+                                samples: samples,
                                 currentDate: currentDate,
+                                modelName: modelName,
                             };
                             try {
                                 // Note: Doublecheck that ALL reserved parameters are defined:
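Note: after this hunk, `getReservedParametersForTemplate` resolves all five reserved parameters; `modelName` is still filled with the missing-value sentinel here and only becomes real inside the execution tools (see the `replaceParameters(content, { ...parameters, modelName })` hunks further down). A de-compiled sketch (the resolver functions are stand-ins for the `get...ForTemplate` helpers above):

```ts
// De-compiled sketch; resolver functions are stand-ins for
// getContextForTemplate / getKnowledgeForTemplate / getSamplesForTemplate.
const RESERVED_PARAMETER_NAMES = ['context', 'knowledge', 'samples', 'modelName', 'currentDate'] as const;
const RESERVED_PARAMETER_MISSING_VALUE = 'MISSING-u$k42k%!V2zo34w7Fu#@QUHYPW';

async function getReservedParameters(resolve: {
    context(): Promise<string>;
    knowledge(): Promise<string>;
    samples(): Promise<string>;
}): Promise<Record<string, string>> {
    const reservedParameters: Record<string, string> = {
        context: await resolve.context(),
        knowledge: await resolve.knowledge(),
        samples: await resolve.samples(),
        currentDate: new Date().toISOString(),
        modelName: RESERVED_PARAMETER_MISSING_VALUE, // <- resolved later by the execution tools
    };
    // Doublecheck that ALL reserved parameters are defined (mirrors the note above)
    for (const parameterName of RESERVED_PARAMETER_NAMES) {
        if (reservedParameters[parameterName] === undefined) {
            throw new Error(`Reserved parameter {${parameterName}} is not defined`);
        }
    }
    return reservedParameters;
}
```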
@@ -2370,7 +2436,7 @@ function createPipelineExecutor(options) {
         }
         function executeSingleTemplate(currentTemplate) {
             return __awaiter(this, void 0, void 0, function () {
-                var name, title, priority, usedParameterNames, dependentParameterNames, definedParameters, _a, _b, _c, definedParameterNames, parameters, _d, _e, parameterName, prompt, chatResult, completionResult, embeddingResult, result, resultString, expectError, scriptPipelineExecutionErrors, maxAttempts, jokerParameterNames, attempt, isJokerAttempt, jokerParameterName, _f, _g, _h, _j, scriptTools, error_2, e_4_1, _k, _l, functionName, postprocessingError, _m, _o, scriptTools, error_3, e_5_1, e_6_1, error_4;
+                var name, title, priority, usedParameterNames, dependentParameterNames, definedParameters, _a, _b, _c, definedParameterNames, parameters, _d, _e, parameterName, prompt, chatResult, completionResult, embeddingResult, result, resultString, expectError, scriptPipelineExecutionErrors, maxAttempts, jokerParameterNames, preparedContent, attempt, isJokerAttempt, jokerParameterName, _f, _g, _h, _j, scriptTools, error_2, e_4_1, _k, _l, functionName, postprocessingError, _m, _o, scriptTools, error_3, e_5_1, e_6_1, error_4;
                 var e_7, _p, e_4, _q, e_6, _r, e_5, _s, _t;
                 var _this = this;
                 return __generator(this, function (_u) {
@@ -2440,6 +2506,9 @@ function createPipelineExecutor(options) {
                             expectError = null;
                             maxAttempts = currentTemplate.blockType === 'PROMPT_DIALOG' ? Infinity : maxExecutionAttempts;
                             jokerParameterNames = currentTemplate.jokerParameterNames || [];
+                            preparedContent = (currentTemplate.preparedContent || '{content}')
+                                .split('{content}')
+                                .join(currentTemplate.content);
                             attempt = -jokerParameterNames.length;
                             _u.label = 4;
                         case 4:
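Note: `preparedContent` is a wrapper template that carries a literal `{content}` placeholder (produced by `prepareTemplates`, shown further down); the `split`/`join` above substitutes the template's real content into it without needing regex escaping. Isolated:

```ts
// Isolated `{content}` splice: `preparedContent` is a wrapper produced by
// `prepareTemplates` (or absent), and split/join replaces every occurrence
// of the placeholder without regex escaping.
function applyPreparedContent(preparedContent: string | undefined, content: string): string {
    return (preparedContent || '{content}').split('{content}').join(content);
}

// No preparation: the template content passes through unchanged
applyPreparedContent(undefined, 'Summarize:\n\n> {input}');
// -> 'Summarize:\n\n> {input}'

// With the knowledge scaffold: content is embedded, `{knowledge}` stays for later
applyPreparedContent('{content}\n\n## Knowledge\n\n{knowledge}', 'Summarize:\n\n> {input}');
// -> 'Summarize:\n\n> {input}\n\n## Knowledge\n\n{knowledge}'
```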
@@ -2474,7 +2543,7 @@ function createPipelineExecutor(options) {
                             }
                             return [3 /*break*/, 29];
                         case 6:
-                            resultString = replaceParameters(
+                            resultString = replaceParameters(preparedContent, parameters);
                             return [3 /*break*/, 30];
                         case 7:
                             prompt = {
@@ -2483,7 +2552,7 @@ function createPipelineExecutor(options) {
                                     ? pipeline.pipelineUrl
                                     : 'anonymous' /* <- TODO: [🧠] How to deal with anonymous pipelines, do here some auto-url like SHA-256 based ad-hoc identifier? */, "#").concat(currentTemplate.name),
                                 parameters: parameters,
-                                content:
+                                content: preparedContent,
                                 modelRequirements: currentTemplate.modelRequirements,
                                 expectations: __assign(__assign({}, (pipeline.personas.find(function (_a) {
                                     var name = _a.name;
@@ -2605,7 +2674,7 @@ function createPipelineExecutor(options) {
                             _u.trys.push([19, 21, , 22]);
                             return [4 /*yield*/, scriptTools.execute(deepFreeze({
                                     scriptLanguage: currentTemplate.contentLanguage,
-                                    script:
+                                    script: preparedContent,
                                     parameters: parameters,
                                 }))];
                         case 20:
@@ -2654,7 +2723,7 @@ function createPipelineExecutor(options) {
                             return [4 /*yield*/, tools.userInterface.promptDialog(deepFreeze({
                                     promptTitle: currentTemplate.title,
                                     promptMessage: replaceParameters(currentTemplate.description || '', parameters),
-                                    defaultValue: replaceParameters(
+                                    defaultValue: replaceParameters(preparedContent, parameters),
                                     // TODO: [🧠] !! Figure out how to define placeholder in .ptbk.md file
                                     placeholder: undefined,
                                     priority: priority,
@@ -2832,7 +2901,7 @@ function createPipelineExecutor(options) {
                 var parameter = _c.value;
                 if (parametersToPass[parameter.name] === undefined) {
                     // [4]
-
+                    warnings.push(new PipelineExecutionError("Parameter {".concat(parameter.name, "} should be an output parameter, but it was not generated during pipeline execution")));
                     continue;
                 }
                 outputParameters[parameter.name] = parametersToPass[parameter.name] || '';
@@ -2847,7 +2916,7 @@ function createPipelineExecutor(options) {
             }
             return outputParameters;
         }
-        var executionReport, _a, _b, parameter,
+        var errors, warnings, executionReport, _a, _b, parameter, _loop_1, _c, _d, parameterName, state_1, parametersToPass, resovedParameterNames_1, unresovedTemplates_1, resolving_1, loopLimit, _loop_2, error_1, usage_1, outputParameters_1, usage, outputParameters;
         var e_1, _e, e_2, _f;
         return __generator(this, function (_g) {
             switch (_g.label) {
@@ -2862,6 +2931,8 @@ function createPipelineExecutor(options) {
                     pipeline = _g.sent();
                     _g.label = 2;
                 case 2:
+                    errors = [];
+                    warnings = [];
                     executionReport = {
                         pipelineUrl: pipeline.pipelineUrl,
                         title: pipeline.title,
@@ -2880,10 +2951,10 @@ function createPipelineExecutor(options) {
                         if (inputParameters[parameter.name] === undefined) {
                             return [2 /*return*/, deepFreezeWithSameType({
                                     isSuccessful: false,
-                                    errors: [
-                                        new PipelineExecutionError("Parameter {".concat(parameter.name, "} is required as an input parameter"))
-
-                                    ],
+                                    errors: __spreadArray([
+                                        new PipelineExecutionError("Parameter {".concat(parameter.name, "} is required as an input parameter"))
+                                    ], __read(errors), false),
+                                    warnings: [],
                                     executionReport: executionReport,
                                     outputParameters: {},
                                     usage: ZERO_USAGE,
@@ -2898,22 +2969,21 @@ function createPipelineExecutor(options) {
                         }
                         finally { if (e_1) throw e_1.error; }
                     }
-                    errors = [];
                     _loop_1 = function (parameterName) {
                         var parameter = pipeline.parameters.find(function (_a) {
                             var name = _a.name;
                             return name === parameterName;
                         });
                         if (parameter === undefined) {
-
+                            warnings.push(new PipelineExecutionError("Extra parameter {".concat(parameterName, "} is being passed which is not part of the pipeline.")));
                         }
                         else if (parameter.isInput === false) {
                             return { value: deepFreezeWithSameType({
                                     isSuccessful: false,
-                                    errors: [
-                                        new PipelineExecutionError("Parameter {".concat(parameter.name, "} is passed as input parameter but is not input"))
-
-
+                                    errors: __spreadArray([
+                                        new PipelineExecutionError("Parameter {".concat(parameter.name, "} is passed as input parameter but it is not input"))
+                                    ], __read(errors), false),
+                                    warnings: warnings,
                                     executionReport: executionReport,
                                     outputParameters: {},
                                     usage: ZERO_USAGE,
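Note: from this point on, every executor result carries a `warnings` array next to `errors`: extra input parameters and never-generated output parameters become warnings rather than silent no-ops. A reduced sketch of the resulting shape (a stand-in type, not the exact `PipelineExecutor` typings):

```ts
// Stand-in shape, not the exact PipelineExecutor typings.
type PipelineExecutorResultSketch = {
    isSuccessful: boolean;
    errors: ReadonlyArray<Error>; // fatal: required input missing, non-input passed as input, ...
    warnings: ReadonlyArray<Error>; // non-fatal: extra parameter passed, output parameter never generated, ...
    outputParameters: Record<string, string>;
};

function reportResult(result: PipelineExecutorResultSketch): Record<string, string> {
    for (const warning of result.warnings) {
        console.warn(warning.message); // surfaced, but does not fail the run
    }
    if (!result.isSuccessful) {
        throw new AggregateError([...result.errors], 'Pipeline execution failed');
    }
    return result.outputParameters;
}
```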
@@ -3022,6 +3092,7 @@ function createPipelineExecutor(options) {
                     return [2 /*return*/, deepFreezeWithSameType({
                             isSuccessful: false,
                             errors: __spreadArray([error_1], __read(errors), false),
+                            warnings: warnings,
                             usage: usage_1,
                             executionReport: executionReport,
                             outputParameters: outputParameters_1,
@@ -3035,6 +3106,7 @@ function createPipelineExecutor(options) {
                     return [2 /*return*/, deepFreezeWithSameType({
                             isSuccessful: true,
                             errors: errors,
+                            warnings: warnings,
                             usage: usage,
                             executionReport: executionReport,
                             outputParameters: outputParameters,
@@ -3061,7 +3133,7 @@ function createPipelineExecutor(options) {
  */
 function prepareKnowledgeFromMarkdown(content /* <- TODO: [🖖] (?maybe not) Always the file */, options) {
     return __awaiter(this, void 0, void 0, function () {
-        var llmTools, _a, maxParallelCount, _b, isVerbose, collection, prepareKnowledgeFromMarkdownExecutor, _c, prepareTitleExecutor, _d, prepareKeywordsExecutor, _e, result, outputParameters,
+        var llmTools, _a, maxParallelCount, _b, isVerbose, collection, prepareKnowledgeFromMarkdownExecutor, _c, prepareTitleExecutor, _d, prepareKeywordsExecutor, _e, result, outputParameters, knowledgePiecesRaw, knowledgeTextPieces, knowledge;
         var _f, _g, _h;
         var _this = this;
         return __generator(this, function (_j) {
@@ -3102,8 +3174,8 @@ function prepareKnowledgeFromMarkdown(content /* <- TODO: [🖖] (?maybe not) Always the file */, options) {
                     result = _j.sent();
                     assertsExecutionSuccessful(result);
                     outputParameters = result.outputParameters;
-
-                    knowledgeTextPieces = (
+                    knowledgePiecesRaw = outputParameters.knowledgePieces;
+                    knowledgeTextPieces = (knowledgePiecesRaw || '').split('\n---\n');
                     if (isVerbose) {
                         console.info('knowledgeTextPieces:', knowledgeTextPieces);
                     }
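Note: the split above matches the pipeline's own prompt rules ("Divide the paragraphs by markdown horizontal lines ---"), so the raw `knowledgePieces` output parameter is cut on `\n---\n`. Isolated, with a trim/filter cleanup added here purely for illustration (the compiled code above performs only the split):

```ts
// Isolated split; the trim/filter cleanup is an illustrative addition,
// the compiled code above performs only the split.
function splitKnowledgePieces(knowledgePiecesRaw: string | undefined): string[] {
    return (knowledgePiecesRaw || '')
        .split('\n---\n')
        .map((piece) => piece.trim())
        .filter((piece) => piece !== '');
}

splitKnowledgePieces('First piece of knowledge.\n---\nSecond piece of knowledge.');
// -> ['First piece of knowledge.', 'Second piece of knowledge.']
```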
@@ -3317,6 +3389,53 @@ function preparePersona(personaDescription, options) {
  * TODO: [🏢] !! Check validity of `temperature` in pipeline
  */

+/**
+ * @@@
+ */
+function prepareTemplates(pipeline, options) {
+    return __awaiter(this, void 0, void 0, function () {
+        var _a, maxParallelCount, promptTemplates, parameters, knowledgePiecesCount, promptTemplatesPrepared;
+        var _this = this;
+        return __generator(this, function (_b) {
+            switch (_b.label) {
+                case 0:
+                    _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? MAX_PARALLEL_COUNT : _a;
+                    promptTemplates = pipeline.promptTemplates, parameters = pipeline.parameters, knowledgePiecesCount = pipeline.knowledgePiecesCount;
+                    // TODO: !!!! Apply samples to each template (if missing and is for the template defined)
+                    TODO_USE(parameters);
+                    promptTemplatesPrepared = new Array(promptTemplates.length);
+                    return [4 /*yield*/, forEachAsync(promptTemplates, { maxParallelCount: maxParallelCount /* <- TODO: [🪂] When there are subtasks, this maximul limit can be broken */ }, function (template, index) { return __awaiter(_this, void 0, void 0, function () {
+                            var preparedContent, preparedTemplate;
+                            return __generator(this, function (_a) {
+                                preparedContent = undefined;
+                                if (knowledgePiecesCount > 0) {
+                                    preparedContent = spaceTrim$1("\n {content}\n\n ## Knowledge\n\n {knowledge}\n ");
+                                    // <- TODO: [🧠][🧻] Cutomize shape/language/formatting of the addition to the prompt
+                                }
+                                preparedTemplate = __assign(__assign({}, template), { preparedContent: preparedContent });
+                                promptTemplatesPrepared[index] = preparedTemplate;
+                                return [2 /*return*/];
+                            });
+                        }); })];
+                case 1:
+                    _b.sent();
+                    return [2 /*return*/, { promptTemplatesPrepared: promptTemplatesPrepared }];
+            }
+        });
+    });
+}
+/**
+ * TODO: [🧠] Add context to each template (if missing)
+ * TODO: [🧠] What is better name `prepareTemplate` or `prepareTemplateAndParameters`
+ * TODO: !!!!! Index the samples and maybe templates
+ * TODO: [🔼] !!! Export via `@promptbook/core`
+ * TODO: Write tests for `preparePipeline`
+ * TODO: [🏏] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
+ * TODO: [🧊] In future one preparation can take data from previous preparation and save tokens and time
+ * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
+ * TODO: [🧠][🥜]
+ */
+
 /**
  * Prepare pipeline from string (markdown) format to JSON format
  *
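Note: a de-compiled reading of the new `prepareTemplates`: when the pipeline has prepared knowledge pieces, each template receives a `preparedContent` scaffold that appends a `## Knowledge` section; the `{content}` and `{knowledge}` placeholders are resolved later at execution time. Simplified (the real code iterates with `forEachAsync` and a `maxParallelCount` throttle, which is the only reason it is async):

```ts
// Simplified reading, not the package's implementation.
type TemplateSketch = { content: string; preparedContent?: string };

function prepareTemplatesSketch<TTemplate extends TemplateSketch>(
    promptTemplates: ReadonlyArray<TTemplate>,
    knowledgePiecesCount: number,
): TTemplate[] {
    return promptTemplates.map((template) => {
        // Only pipelines with prepared knowledge get the scaffold
        const preparedContent =
            knowledgePiecesCount > 0 ? '{content}\n\n## Knowledge\n\n{knowledge}' : undefined;
        return { ...template, preparedContent };
    });
}
```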
@@ -3325,18 +3444,18 @@ function preparePersona(personaDescription, options) {
  */
 function preparePipeline(pipeline, options) {
     return __awaiter(this, void 0, void 0, function () {
-        var _a, maxParallelCount,
+        var _a, maxParallelCount, parameters, promptTemplates,
         /*
         <- TODO: [🧠][0] `promptbookVersion` */
         knowledgeSources /*
         <- TODO: [🧊] `knowledgePieces` */, personas /*
-        <- TODO: [🧊] `preparations` */, currentPreparation, preparations, preparedPersonas, knowledgeSourcesPrepared, partialknowledgePiecesPrepared, knowledgePiecesPrepared
+        <- TODO: [🧊] `preparations` */, currentPreparation, preparations, preparedPersonas, knowledgeSourcesPrepared, partialknowledgePiecesPrepared, knowledgePiecesPrepared, promptTemplatesPrepared /* TODO: parameters: parametersPrepared*/;
         var _this = this;
         return __generator(this, function (_b) {
             switch (_b.label) {
                 case 0:
                     _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? MAX_PARALLEL_COUNT : _a;
-                    knowledgeSources = pipeline.knowledgeSources, personas = pipeline.personas;
+                    parameters = pipeline.parameters, promptTemplates = pipeline.promptTemplates, knowledgeSources = pipeline.knowledgeSources, personas = pipeline.personas;
                     currentPreparation = {
                         id: 1,
                         // TODO: [🍥]> date: $currentDate(),
@@ -3369,16 +3488,20 @@ function preparePipeline(pipeline, options) {
                 case 2:
                     partialknowledgePiecesPrepared = _b.sent();
                     knowledgePiecesPrepared = partialknowledgePiecesPrepared.map(function (piece) { return (__assign(__assign({}, piece), { preparationIds: [/* TODO: [🧊] -> */ currentPreparation.id] })); });
-
-
-
-
+                    return [4 /*yield*/, prepareTemplates({
+                            parameters: parameters,
+                            promptTemplates: promptTemplates,
+                            knowledgePiecesCount: knowledgePiecesPrepared.length,
+                        }, options)];
+                case 3:
+                    promptTemplatesPrepared = (_b.sent()).promptTemplatesPrepared;
+                    // ----- /Templates preparation -----
+                    return [2 /*return*/, __assign(__assign({}, pipeline), { promptTemplates: promptTemplatesPrepared, knowledgeSources: knowledgeSourcesPrepared, knowledgePieces: knowledgePiecesPrepared, personas: preparedPersonas, preparations: preparations })];
             }
         });
     });
 }
 /**
- * TODO: !!!!! Index the samples and maybe templates
  * TODO: [🔼] !!! Export via `@promptbook/core`
  * TODO: Write tests for `preparePipeline`
  * TODO: [🏏] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
@@ -5311,6 +5434,7 @@ function pipelineStringToJsonSync(pipelineString) {
  * TODO: [🥞] Not optimal parsing because `splitMarkdownIntoSections` is executed twice with same string, once through `flattenMarkdown` and second directly here
  * TODO: [♈] Probbably move expectations from templates to parameters
  * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
+ * TODO: [🍙] Make some standart order of json properties
  */

 /**
@@ -5894,7 +6018,7 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
      */
     AnthropicClaudeExecutionTools.prototype.callChatModel = function (prompt) {
         return __awaiter(this, void 0, void 0, function () {
-            var content, parameters, modelRequirements, rawRequest, start, complete, rawResponse, resultContent, usage;
+            var content, parameters, modelRequirements, modelName, rawRequest, start, complete, rawResponse, resultContent, usage;
             return __generator(this, function (_a) {
                 switch (_a.label) {
                     case 0:
@@ -5906,6 +6030,7 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
                         if (modelRequirements.modelVariant !== 'CHAT') {
                             throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
                         }
+                        modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
                         rawRequest = {
                             model: modelRequirements.modelName || this.getDefaultChatModel().modelName,
                             max_tokens: modelRequirements.maxTokens || 4096,
@@ -5917,7 +6042,7 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
                             messages: [
                                 {
                                     role: 'user',
-                                    content: replaceParameters(content, parameters),
+                                    content: replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName })),
                                 },
                             ],
                             // TODO: Is here some equivalent of user identification?> user: this.options.user,
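Note: the remaining hunks repeat one pattern across the Anthropic and OpenAI execution tools: resolve the effective model name once (requested or provider default), use it for the request, expose it to the prompt as the reserved `{modelName}` parameter, and fall back to it when the response omits `model`. Isolated (stand-in types; the inline `replaceParameters` is a minimal substitution sufficient for the sketch):

```ts
// Minimal substitution, sufficient for this sketch only.
function replaceParameters(template: string, parameters: Record<string, string>): string {
    return template.replace(/{(\w+)}/g, (match, name: string) => parameters[name] ?? match);
}

function buildRequestContent(
    content: string,
    parameters: Record<string, string>,
    requestedModelName: string | undefined,
    defaultModelName: string,
): { modelName: string; text: string } {
    const modelName = requestedModelName || defaultModelName;
    // Spreading `modelName` last overrides the executor's sentinel value,
    // so prompts may reference the reserved `{modelName}` parameter
    const text = replaceParameters(content, { ...parameters, modelName });
    return { modelName, text };
}

buildRequestContent('Model: {modelName}', {}, undefined, 'gpt-4-turbo');
// -> { modelName: 'gpt-4-turbo', text: 'Model: gpt-4-turbo' }
```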
@@ -5978,9 +6103,9 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
                 throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
             }

-            const
+            const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
             const modelSettings = {
-                model:
+                model: modelName,
                 max_tokens: modelRequirements.maxTokens || 2000, // <- Note: 2000 is for lagacy reasons
                 // <- TODO: [🌾] Make some global max cap for maxTokens
                 // <- TODO: Use here `systemMessage`, `temperature` and `seed`
@@ -5988,7 +6113,7 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {

             const rawRequest: xxxx.Completions.CompletionCreateParamsNonStreaming = {
                 ...modelSettings,
-                prompt: replaceParameters(content, parameters),
+                prompt: replaceParameters(content, { ...parameters, modelName }),
                 user: this.options.user,
             };
             const start: string_date_iso8601 = getCurrentIsoDate();
@@ -6495,7 +6620,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
      */
     OpenAiExecutionTools.prototype.callChatModel = function (prompt) {
         return __awaiter(this, void 0, void 0, function () {
-            var content, parameters, modelRequirements, expectFormat,
+            var content, parameters, modelRequirements, expectFormat, modelName, modelSettings, rawRequest, start, complete, rawResponse, resultContent, usage;
             return __generator(this, function (_a) {
                 switch (_a.label) {
                     case 0:
@@ -6507,9 +6632,9 @@ var OpenAiExecutionTools = /** @class */ (function () {
                         if (modelRequirements.modelVariant !== 'CHAT') {
                             throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
                         }
-
+                        modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
                         modelSettings = {
-                            model:
+                            model: modelName,
                             max_tokens: modelRequirements.maxTokens,
                             // <- TODO: [🌾] Make some global max cap for maxTokens
                             temperature: modelRequirements.temperature,
@@ -6531,7 +6656,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
                         ])), false), [
                             {
                                 role: 'user',
-                                content: replaceParameters(content, parameters),
+                                content: replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName })),
                             },
                         ], false), user: this.options.user });
                         start = getCurrentIsoDate();
@@ -6560,7 +6685,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
                         }
                         return [2 /*return*/, {
                                 content: resultContent,
-                                modelName: rawResponse.model ||
+                                modelName: rawResponse.model || modelName,
                                 timing: {
                                     start: start,
                                     complete: complete,
@@ -6578,7 +6703,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
      */
     OpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
         return __awaiter(this, void 0, void 0, function () {
-            var content, parameters, modelRequirements,
+            var content, parameters, modelRequirements, modelName, modelSettings, rawRequest, start, complete, rawResponse, resultContent, usage;
             return __generator(this, function (_a) {
                 switch (_a.label) {
                     case 0:
@@ -6590,16 +6715,16 @@ var OpenAiExecutionTools = /** @class */ (function () {
                         if (modelRequirements.modelVariant !== 'COMPLETION') {
                             throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
                         }
-
+                        modelName = modelRequirements.modelName || this.getDefaultCompletionModel().modelName;
                        modelSettings = {
-                            model:
+                            model: modelName,
                             max_tokens: modelRequirements.maxTokens || 2000,
                             // <- TODO: [🌾] Make some global max cap for maxTokens
                             temperature: modelRequirements.temperature,
                             // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
                             // <- Note: [🧆]
                         };
-                        rawRequest = __assign(__assign({}, modelSettings), { prompt: replaceParameters(content, parameters), user: this.options.user });
+                        rawRequest = __assign(__assign({}, modelSettings), { prompt: replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName })), user: this.options.user });
                         start = getCurrentIsoDate();
                         if (this.options.isVerbose) {
                             console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
@@ -6623,7 +6748,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
                         usage = computeOpenaiUsage(content, resultContent || '', rawResponse);
                         return [2 /*return*/, {
                                 content: resultContent,
-                                modelName: rawResponse.model ||
+                                modelName: rawResponse.model || modelName,
                                 timing: {
                                     start: start,
                                     complete: complete,
@@ -6641,7 +6766,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
      */
     OpenAiExecutionTools.prototype.callEmbeddingModel = function (prompt) {
         return __awaiter(this, void 0, void 0, function () {
-            var content, parameters, modelRequirements,
+            var content, parameters, modelRequirements, modelName, rawRequest, start, complete, rawResponse, resultContent, usage;
             return __generator(this, function (_a) {
                 switch (_a.label) {
                     case 0:
@@ -6653,10 +6778,10 @@ var OpenAiExecutionTools = /** @class */ (function () {
                         if (modelRequirements.modelVariant !== 'EMBEDDING') {
                             throw new PipelineExecutionError('Use embed only for EMBEDDING variant');
                         }
-
+                        modelName = modelRequirements.modelName || this.getDefaultEmbeddingModel().modelName;
                         rawRequest = {
-                            input: replaceParameters(content, parameters),
-                            model:
+                            input: replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName })),
+                            model: modelName,
                             // TODO: !!!! Test model 3 and dimensions
                         };
                         start = getCurrentIsoDate();
@@ -6678,7 +6803,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
                         usage = computeOpenaiUsage(content, '', rawResponse);
                         return [2 /*return*/, {
                                 content: resultContent,
-                                modelName: rawResponse.model ||
+                                modelName: rawResponse.model || modelName,
                                 timing: {
                                     start: start,
                                     complete: complete,