@promptbook/cli 0.61.0-22 → 0.61.0-24
This diff compares the contents of two publicly released versions of this package, as published to one of the supported registries. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registry.
- package/README.md +2 -4
- package/esm/index.es.js +190 -68
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/types.index.d.ts +2 -2
- package/esm/typings/src/config.d.ts +8 -4
- package/esm/typings/src/conversion/pipelineStringToJsonSync.d.ts +1 -0
- package/esm/typings/src/conversion/utils/stringifyPipelineJson.d.ts +1 -0
- package/esm/typings/src/execution/PipelineExecutor.d.ts +32 -24
- package/esm/typings/src/execution/PromptResultUsage.d.ts +3 -0
- package/esm/typings/src/prepare/isPipelinePrepared.d.ts +4 -0
- package/esm/typings/src/prepare/preparePipeline.d.ts +0 -1
- package/esm/typings/src/prepare/prepareTemplates.d.ts +31 -0
- package/esm/typings/src/prepare/unpreparePipeline.d.ts +2 -0
- package/esm/typings/src/types/PipelineJson/KnowledgePieceJson.d.ts +3 -0
- package/esm/typings/src/types/PipelineJson/KnowledgeSourceJson.d.ts +3 -0
- package/esm/typings/src/types/PipelineJson/LlmTemplateJson.d.ts +1 -0
- package/esm/typings/src/types/PipelineJson/PersonaJson.d.ts +3 -0
- package/esm/typings/src/types/PipelineJson/PipelineJson.d.ts +1 -0
- package/esm/typings/src/types/PipelineJson/PreparationJson.d.ts +1 -0
- package/esm/typings/src/types/PipelineJson/PromptDialogJson.d.ts +4 -3
- package/esm/typings/src/types/PipelineJson/PromptTemplateJsonCommon.d.ts +10 -0
- package/esm/typings/src/types/PipelineJson/PromptTemplateParameterJson.d.ts +1 -0
- package/esm/typings/src/types/PipelineJson/ScriptJson.d.ts +5 -2
- package/esm/typings/src/types/PipelineJson/SimpleTemplateJson.d.ts +5 -2
- package/package.json +2 -2
- package/umd/index.umd.js +190 -68
- package/umd/index.umd.js.map +1 -1
- package/umd/typings/src/_packages/types.index.d.ts +2 -2
- package/umd/typings/src/config.d.ts +8 -4
- package/umd/typings/src/conversion/pipelineStringToJsonSync.d.ts +1 -0
- package/umd/typings/src/conversion/utils/stringifyPipelineJson.d.ts +1 -0
- package/umd/typings/src/execution/PipelineExecutor.d.ts +32 -24
- package/umd/typings/src/execution/PromptResultUsage.d.ts +3 -0
- package/umd/typings/src/prepare/isPipelinePrepared.d.ts +4 -0
- package/umd/typings/src/prepare/preparePipeline.d.ts +0 -1
- package/umd/typings/src/prepare/prepareTemplates.d.ts +31 -0
- package/umd/typings/src/prepare/unpreparePipeline.d.ts +2 -0
- package/umd/typings/src/types/PipelineJson/KnowledgePieceJson.d.ts +3 -0
- package/umd/typings/src/types/PipelineJson/KnowledgeSourceJson.d.ts +3 -0
- package/umd/typings/src/types/PipelineJson/LlmTemplateJson.d.ts +1 -0
- package/umd/typings/src/types/PipelineJson/PersonaJson.d.ts +3 -0
- package/umd/typings/src/types/PipelineJson/PipelineJson.d.ts +1 -0
- package/umd/typings/src/types/PipelineJson/PreparationJson.d.ts +1 -0
- package/umd/typings/src/types/PipelineJson/PromptDialogJson.d.ts +4 -3
- package/umd/typings/src/types/PipelineJson/PromptTemplateJsonCommon.d.ts +10 -0
- package/umd/typings/src/types/PipelineJson/PromptTemplateParameterJson.d.ts +1 -0
- package/umd/typings/src/types/PipelineJson/ScriptJson.d.ts +5 -2
- package/umd/typings/src/types/PipelineJson/SimpleTemplateJson.d.ts +5 -2
package/umd/index.umd.js
CHANGED
```diff
@@ -154,7 +154,7 @@
 /**
  * The version of the Promptbook library
  */
-var PROMPTBOOK_VERSION = '0.61.0-
+var PROMPTBOOK_VERSION = '0.61.0-23';
 // TODO: !!!! List here all the versions and annotate + put into script
 
 /**
@@ -283,19 +283,26 @@
  * The name of the builded pipeline collection made by CLI `ptbk make` and for lookup in `createCollectionFromDirectory`
  */
 var PIPELINE_COLLECTION_BASE_FILENAME = "index";
+/**
+ * Nonce which is used for replacing things in strings
+ */
+var REPLACING_NONCE = 'u$k42k%!V2zo34w7Fu#@QUHYPW';
 /**
  * The names of the parameters that are reserved for special purposes
  */
 var RESERVED_PARAMETER_NAMES = deepFreeze([
     'context',
+    'knowledge',
+    'samples',
+    'modelName',
     'currentDate',
     // <- TODO: Add more like 'date', 'modelName',...
     // <- TODO: Add [emoji] + instructions ACRY when adding new reserved parameter
 ]);
 /**
- *
+ * @@@
  */
-var
+var RESERVED_PARAMETER_MISSING_VALUE = 'MISSING-' + REPLACING_NONCE;
 /*
     TODO: !!! Just testing false-negative detection of [🟡][🟢][🔵][⚪] leak
 */
```
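Two new constants back the reserved-parameter mechanism: `REPLACING_NONCE`, a random string that real content will practically never contain, and `RESERVED_PARAMETER_MISSING_VALUE`, a `MISSING-`-prefixed sentinel built from it. The reserved list itself grows by `knowledge`, `samples`, and `modelName`. A minimal TypeScript sketch of the rule this list enforces (illustrative names, not the package's exported API):

```ts
// Reserved parameters are injected by the executor itself, so a pipeline
// author must not define them as ordinary resulting parameters.
const RESERVED_PARAMETER_NAMES = ['context', 'knowledge', 'samples', 'modelName', 'currentDate'] as const;

function assertNotReserved(parameterName: string): void {
    if ((RESERVED_PARAMETER_NAMES as readonly string[]).includes(parameterName)) {
        // Mirrors the PipelineLogicError thrown by the bundle (see the validation hunk below)
        throw new Error(`Parameter name {${parameterName}} is reserved, please use different name`);
    }
}
```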
```diff
@@ -739,7 +746,7 @@
     });
 }
 
-var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-
+var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-23",parameters:[{name:"content",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {content}",dependentParameterNames:["content"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-23",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0-23",parameters:[{name:"content",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {content}",dependentParameterNames:["content"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-23",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0-23",parameters:[{name:"content",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {content}",expectations:{words:{min:1,max:8}},dependentParameterNames:["content"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-23",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0-23",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-23",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
 
 /**
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
@@ -1007,7 +1014,7 @@
         throw new PipelineLogicError("Parameter {".concat(template.resultingParameterName, "} is defined multiple times"));
     }
     if (RESERVED_PARAMETER_NAMES.includes(template.resultingParameterName)) {
-        throw new PipelineLogicError("Parameter name {".concat(template.resultingParameterName, "} is reserved, please use
+        throw new PipelineLogicError("Parameter name {".concat(template.resultingParameterName, "} is reserved, please use different name"));
     }
     definedParameters.add(template.resultingParameterName);
     if (template.blockType === 'PROMPT_TEMPLATE' && template.modelRequirements.modelVariant === undefined) {
@@ -1177,14 +1184,17 @@
  * Unprepare just strips the preparation data of the pipeline
  */
 function unpreparePipeline(pipeline) {
-    var personas = pipeline.personas, knowledgeSources = pipeline.knowledgeSources;
+    var personas = pipeline.personas, knowledgeSources = pipeline.knowledgeSources, promptTemplates = pipeline.promptTemplates;
     personas = personas.map(function (persona) { return (__assign(__assign({}, persona), { modelRequirements: undefined, preparationIds: undefined })); });
     knowledgeSources = knowledgeSources.map(function (knowledgeSource) { return (__assign(__assign({}, knowledgeSource), { preparationIds: undefined })); });
-    return __assign(__assign({},
+    promptTemplates = promptTemplates.map(function (promptTemplate) { return (__assign(__assign({}, promptTemplate), { preparedContent: undefined })); });
+    return __assign(__assign({}, pipeline), { promptTemplates: promptTemplates, knowledgeSources: knowledgeSources, knowledgePieces: [], personas: personas, preparations: [] });
 }
 /**
  * TODO: [🔼] !!! Export via `@promptbook/core`
+ * TODO: [🧿] Maybe do same process with same granularity and subfinctions as `preparePipeline`
  * TODO: Write tests for `preparePipeline`
+ * TODO: [🍙] Make some standart order of json properties
  */
 
 /**
```
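`unpreparePipeline` now also strips the per-template `preparedContent` introduced in this release (see `PromptTemplateJsonCommon.d.ts` in the file list), keeping unprepare the exact inverse of prepare. A de-transpiled sketch of that contract, assuming prepared data is purely additive (type names are illustrative, not the package's exports):

```ts
type TemplateSketch = { content: string; preparedContent?: string };
type PipelineSketch = {
    personas: Array<{ modelRequirements?: unknown; preparationIds?: ReadonlyArray<number> }>;
    knowledgeSources: Array<{ preparationIds?: ReadonlyArray<number> }>;
    promptTemplates: Array<TemplateSketch>;
    knowledgePieces: Array<unknown>;
    preparations: Array<unknown>;
};

// Dropping every preparation artifact must yield a pipeline equivalent
// to one that was never prepared.
function unprepare(pipeline: PipelineSketch): PipelineSketch {
    return {
        ...pipeline,
        personas: pipeline.personas.map((p) => ({ ...p, modelRequirements: undefined, preparationIds: undefined })),
        knowledgeSources: pipeline.knowledgeSources.map((s) => ({ ...s, preparationIds: undefined })),
        promptTemplates: pipeline.promptTemplates.map((t) => ({ ...t, preparedContent: undefined })),
        knowledgePieces: [],
        preparations: [],
    };
}
```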
```diff
@@ -2086,22 +2096,27 @@
     // Note: Ignoring `pipeline.preparations` @@@
     // Note: Ignoring `pipeline.knowledgePieces` @@@
     if (!pipeline.personas.every(function (persona) { return persona.modelRequirements !== undefined; })) {
-        console.log('!!!!', 'Not all personas have modelRequirements');
         return false;
     }
     if (!pipeline.knowledgeSources.every(function (knowledgeSource) { return knowledgeSource.preparationIds !== undefined; })) {
-        console.log('!!!!', 'Not all knowledgeSources have preparationIds');
         return false;
     }
-
-
-
+    /*
+        TODO: [🧠][🍫] `promptTemplates` can not be determined if they are fully prepared SO ignoring them
+        > if (!pipeline.promptTemplates.every(({ preparedContent }) => preparedContent === undefined)) {
+        >     return false;
+        > }
+    */
     return true;
 }
 /**
  * TODO: [🐠] Maybe base this on `makeValidator`
  * TODO: [🔼] Export via core or utils
  * TODO: [🧊] Pipeline can be partially prepared, this should return true ONLY if fully prepared
+ * TODO: [🧿] Maybe do same process with same granularity and subfinctions as `preparePipeline`
+ *       - Is context in each template
+ *       - Are samples prepared
+ *       - Are templates prepared
  */
 
 /**
```
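`isPipelinePrepared` drops its debug logging and documents why templates are excluded from the check: a template in a pipeline without knowledge legitimately has no `preparedContent`, so preparedness cannot be decided per template. A sketch of the resulting predicate (structural types only; the real signature is in `src/prepare/isPipelinePrepared.d.ts` from the file list):

```ts
function isPipelinePrepared(pipeline: {
    personas: Array<{ modelRequirements?: unknown }>;
    knowledgeSources: Array<{ preparationIds?: ReadonlyArray<number> }>;
}): boolean {
    return (
        pipeline.personas.every((persona) => persona.modelRequirements !== undefined) &&
        pipeline.knowledgeSources.every((source) => source.preparationIds !== undefined)
        // promptTemplates are deliberately not inspected (see the TODO [🍫] above).
    );
}
```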
```diff
@@ -2163,6 +2178,22 @@
  * @throws {PipelineExecutionError} if parameter is not defined, not closed, or not opened
  */
 function replaceParameters(template, parameters) {
+    var e_1, _a;
+    try {
+        for (var _b = __values(Object.entries(parameters)), _c = _b.next(); !_c.done; _c = _b.next()) {
+            var _d = __read(_c.value, 2), parameterName = _d[0], parameterValue = _d[1];
+            if (parameterValue === RESERVED_PARAMETER_MISSING_VALUE) {
+                throw new UnexpectedError("Parameter {".concat(parameterName, "} has missing value"));
+            }
+        }
+    }
+    catch (e_1_1) { e_1 = { error: e_1_1 }; }
+    finally {
+        try {
+            if (_c && !_c.done && (_a = _b.return)) _a.call(_b);
+        }
+        finally { if (e_1) throw e_1.error; }
+    }
     var replacedTemplate = template;
     var match;
     var loopLimit = LOOP_LIMIT;
```
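The transpiled `__values`/`__read` loop above is an ES5 down-compilation of a plain `for...of` guard. A modern-syntax reconstruction (a sketch; the original TypeScript source is not part of this diff, and the bundle throws `UnexpectedError` where this sketch uses `Error`):

```ts
const RESERVED_PARAMETER_MISSING_VALUE = 'MISSING-u$k42k%!V2zo34w7Fu#@QUHYPW';

function assertNoMissingValues(parameters: Record<string, string>): void {
    // Validate before any replacement happens so the sentinel can never
    // leak into the final prompt sent to a model.
    for (const [parameterName, parameterValue] of Object.entries(parameters)) {
        if (parameterValue === RESERVED_PARAMETER_MISSING_VALUE) {
            throw new Error(`Parameter {${parameterName}} has missing value`);
        }
    }
}
```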
```diff
@@ -2412,14 +2443,25 @@
             pipeline = rawPipeline;
         }
         else {
-
+            // TODO: !!!! This should be maybe warning in report
+            console.warn(spaceTrim.spaceTrim("\n Pipeline ".concat(rawPipeline.pipelineUrl || rawPipeline.sourceFile || rawPipeline.title, " is not prepared\n\n ").concat(rawPipeline.sourceFile, "\n\n It will be prepared ad-hoc before the first execution\n But it is recommended to prepare the pipeline during collection preparation\n\n @see more at https://ptbk.io/prepare-pipeline\n ")));
         }
         var pipelineExecutor = function (inputParameters, onProgress) { return __awaiter(_this, void 0, void 0, function () {
+            // TODO: !!!!! Extract to separate functions and files - ALL FUNCTIONS BELOW
             function getContextForTemplate(// <- TODO: [🧠][🥜]
             template) {
                 return __awaiter(this, void 0, void 0, function () {
                     return __generator(this, function (_a) {
-
+                        TODO_USE(template);
+                        return [2 /*return*/, RESERVED_PARAMETER_MISSING_VALUE /* <- TODO: !!!! Implement */];
+                    });
+                });
+            }
+            function getKnowledgeForTemplate(// <- TODO: [🧠][🥜]
+            template) {
+                return __awaiter(this, void 0, void 0, function () {
+                    return __generator(this, function (_a) {
+                        // TODO: !!!! Implement Better - use real index and keyword search
                         TODO_USE(template);
                         return [2 /*return*/, pipeline.knowledgePieces.map(function (_a) {
                             var content = _a.content;
@@ -2428,19 +2470,39 @@
                     });
                 });
             }
+            function getSamplesForTemplate(// <- TODO: [🧠][🥜]
+            template) {
+                return __awaiter(this, void 0, void 0, function () {
+                    return __generator(this, function (_a) {
+                        // TODO: !!!! Implement Better - use real index and keyword search
+                        TODO_USE(template);
+                        return [2 /*return*/, RESERVED_PARAMETER_MISSING_VALUE /* <- TODO: !!!! Implement */];
+                    });
+                });
+            }
             function getReservedParametersForTemplate(template) {
                 return __awaiter(this, void 0, void 0, function () {
-                    var context, currentDate, reservedParameters, RESERVED_PARAMETER_NAMES_1, RESERVED_PARAMETER_NAMES_1_1, parameterName;
+                    var context, knowledge, samples, currentDate, modelName, reservedParameters, RESERVED_PARAMETER_NAMES_1, RESERVED_PARAMETER_NAMES_1_1, parameterName;
                     var e_3, _a;
                     return __generator(this, function (_b) {
                         switch (_b.label) {
                             case 0: return [4 /*yield*/, getContextForTemplate(template)];
                             case 1:
                                 context = _b.sent();
+                                return [4 /*yield*/, getKnowledgeForTemplate(template)];
+                            case 2:
+                                knowledge = _b.sent();
+                                return [4 /*yield*/, getSamplesForTemplate(template)];
+                            case 3:
+                                samples = _b.sent();
                                 currentDate = new Date().toISOString();
+                                modelName = RESERVED_PARAMETER_MISSING_VALUE;
                                 reservedParameters = {
                                     context: context,
+                                    knowledge: knowledge,
+                                    samples: samples,
                                     currentDate: currentDate,
+                                    modelName: modelName,
                                 };
                                 try {
                                     // Note: Doublecheck that ALL reserved parameters are defined:
```
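`getReservedParametersForTemplate` now resolves the three new reserved parameters alongside `context` and `currentDate`. A de-transpiled sketch of the control flow (the `declare`d helpers stand in for the executor-local functions above; `modelName` stays the sentinel here because the LLM adapters fill it in later, as the hunks near the end of this diff show):

```ts
declare const RESERVED_PARAMETER_MISSING_VALUE: string;
declare function getContextForTemplate(template: unknown): Promise<string>;
declare function getKnowledgeForTemplate(template: unknown): Promise<string>;
declare function getSamplesForTemplate(template: unknown): Promise<string>;

async function getReservedParametersForTemplate(template: unknown): Promise<Record<string, string>> {
    return {
        context: await getContextForTemplate(template), // <- sentinel until implemented
        knowledge: await getKnowledgeForTemplate(template), // <- joined knowledge pieces
        samples: await getSamplesForTemplate(template), // <- sentinel until implemented
        currentDate: new Date().toISOString(),
        modelName: RESERVED_PARAMETER_MISSING_VALUE, // <- overridden by the execution tools
    };
}
```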
```diff
@@ -2465,7 +2527,7 @@
             }
             function executeSingleTemplate(currentTemplate) {
                 return __awaiter(this, void 0, void 0, function () {
-                    var name, title, priority, usedParameterNames, dependentParameterNames, definedParameters, _a, _b, _c, definedParameterNames, parameters, _d, _e, parameterName, prompt, chatResult, completionResult, embeddingResult, result, resultString, expectError, scriptPipelineExecutionErrors, maxAttempts, jokerParameterNames, attempt, isJokerAttempt, jokerParameterName, _f, _g, _h, _j, scriptTools, error_2, e_4_1, _k, _l, functionName, postprocessingError, _m, _o, scriptTools, error_3, e_5_1, e_6_1, error_4;
+                    var name, title, priority, usedParameterNames, dependentParameterNames, definedParameters, _a, _b, _c, definedParameterNames, parameters, _d, _e, parameterName, prompt, chatResult, completionResult, embeddingResult, result, resultString, expectError, scriptPipelineExecutionErrors, maxAttempts, jokerParameterNames, preparedContent, attempt, isJokerAttempt, jokerParameterName, _f, _g, _h, _j, scriptTools, error_2, e_4_1, _k, _l, functionName, postprocessingError, _m, _o, scriptTools, error_3, e_5_1, e_6_1, error_4;
                     var e_7, _p, e_4, _q, e_6, _r, e_5, _s, _t;
                     var _this = this;
                     return __generator(this, function (_u) {
@@ -2535,6 +2597,9 @@
                                 expectError = null;
                                 maxAttempts = currentTemplate.blockType === 'PROMPT_DIALOG' ? Infinity : maxExecutionAttempts;
                                 jokerParameterNames = currentTemplate.jokerParameterNames || [];
+                                preparedContent = (currentTemplate.preparedContent || '{content}')
+                                    .split('{content}')
+                                    .join(currentTemplate.content);
                                 attempt = -jokerParameterNames.length;
                                 _u.label = 4;
                             case 4:
```
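The three added lines splice the template's own `content` into the `preparedContent` wrapper produced at preparation time. `split`/`join` is used rather than `String.prototype.replace` so that every `{content}` occurrence is substituted and `$` sequences in the content are never interpreted as replacement patterns. A small sketch; the hunks that follow then thread the result through prompt construction, script execution, and dialog defaults:

```ts
// {content} is a literal slot in the preparation-time wrapper.
function applyPreparedContent(preparedContent: string | undefined, content: string): string {
    return (preparedContent || '{content}').split('{content}').join(content);
}

// Example with the knowledge wrapper produced by prepareTemplates (later in this diff):
applyPreparedContent('{content}\n\n## Knowledge\n\n{knowledge}', 'Summarize the document.');
// -> 'Summarize the document.\n\n## Knowledge\n\n{knowledge}'
```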
```diff
@@ -2569,7 +2634,7 @@
                                 }
                                 return [3 /*break*/, 29];
                             case 6:
-                                resultString = replaceParameters(
+                                resultString = replaceParameters(preparedContent, parameters);
                                 return [3 /*break*/, 30];
                             case 7:
                                 prompt = {
@@ -2578,7 +2643,7 @@
                                         ? pipeline.pipelineUrl
                                         : 'anonymous' /* <- TODO: [🧠] How to deal with anonymous pipelines, do here some auto-url like SHA-256 based ad-hoc identifier? */, "#").concat(currentTemplate.name),
                                     parameters: parameters,
-                                    content:
+                                    content: preparedContent,
                                     modelRequirements: currentTemplate.modelRequirements,
                                     expectations: __assign(__assign({}, (pipeline.personas.find(function (_a) {
                                         var name = _a.name;
@@ -2700,7 +2765,7 @@
                                 _u.trys.push([19, 21, , 22]);
                                 return [4 /*yield*/, scriptTools.execute(deepFreeze({
                                         scriptLanguage: currentTemplate.contentLanguage,
-                                        script:
+                                        script: preparedContent,
                                         parameters: parameters,
                                     }))];
                             case 20:
@@ -2749,7 +2814,7 @@
                                 return [4 /*yield*/, tools.userInterface.promptDialog(deepFreeze({
                                         promptTitle: currentTemplate.title,
                                         promptMessage: replaceParameters(currentTemplate.description || '', parameters),
-                                        defaultValue: replaceParameters(
+                                        defaultValue: replaceParameters(preparedContent, parameters),
                                         // TODO: [🧠] !! Figure out how to define placeholder in .ptbk.md file
                                         placeholder: undefined,
                                         priority: priority,
@@ -2927,7 +2992,7 @@
                     var parameter = _c.value;
                     if (parametersToPass[parameter.name] === undefined) {
                         // [4]
-
+                        warnings.push(new PipelineExecutionError("Parameter {".concat(parameter.name, "} should be an output parameter, but it was not generated during pipeline execution")));
                         continue;
                     }
                     outputParameters[parameter.name] = parametersToPass[parameter.name] || '';
@@ -2942,7 +3007,7 @@
                 }
                 return outputParameters;
             }
-            var executionReport, _a, _b, parameter,
+            var errors, warnings, executionReport, _a, _b, parameter, _loop_1, _c, _d, parameterName, state_1, parametersToPass, resovedParameterNames_1, unresovedTemplates_1, resolving_1, loopLimit, _loop_2, error_1, usage_1, outputParameters_1, usage, outputParameters;
             var e_1, _e, e_2, _f;
             return __generator(this, function (_g) {
                 switch (_g.label) {
@@ -2957,6 +3022,8 @@
                         pipeline = _g.sent();
                         _g.label = 2;
                     case 2:
+                        errors = [];
+                        warnings = [];
                         executionReport = {
                             pipelineUrl: pipeline.pipelineUrl,
                             title: pipeline.title,
@@ -2975,10 +3042,10 @@
                                 if (inputParameters[parameter.name] === undefined) {
                                     return [2 /*return*/, deepFreezeWithSameType({
                                             isSuccessful: false,
-                                            errors: [
-                                                new PipelineExecutionError("Parameter {".concat(parameter.name, "} is required as an input parameter"))
-
-                                            ],
+                                            errors: __spreadArray([
+                                                new PipelineExecutionError("Parameter {".concat(parameter.name, "} is required as an input parameter"))
+                                            ], __read(errors), false),
+                                            warnings: [],
                                             executionReport: executionReport,
                                             outputParameters: {},
                                             usage: ZERO_USAGE,
@@ -2993,22 +3060,21 @@
                             }
                             finally { if (e_1) throw e_1.error; }
                         }
-                        errors = [];
                         _loop_1 = function (parameterName) {
                             var parameter = pipeline.parameters.find(function (_a) {
                                 var name = _a.name;
                                 return name === parameterName;
                             });
                             if (parameter === undefined) {
-
+                                warnings.push(new PipelineExecutionError("Extra parameter {".concat(parameterName, "} is being passed which is not part of the pipeline.")));
                             }
                             else if (parameter.isInput === false) {
                                 return { value: deepFreezeWithSameType({
                                         isSuccessful: false,
-                                        errors: [
-                                            new PipelineExecutionError("Parameter {".concat(parameter.name, "} is passed as input parameter but is not input"))
-
-
+                                        errors: __spreadArray([
+                                            new PipelineExecutionError("Parameter {".concat(parameter.name, "} is passed as input parameter but it is not input"))
+                                        ], __read(errors), false),
+                                        warnings: warnings,
                                         executionReport: executionReport,
                                         outputParameters: {},
                                         usage: ZERO_USAGE,
@@ -3117,6 +3183,7 @@
                         return [2 /*return*/, deepFreezeWithSameType({
                                 isSuccessful: false,
                                 errors: __spreadArray([error_1], __read(errors), false),
+                                warnings: warnings,
                                 usage: usage_1,
                                 executionReport: executionReport,
                                 outputParameters: outputParameters_1,
@@ -3130,6 +3197,7 @@
                        return [2 /*return*/, deepFreezeWithSameType({
                                 isSuccessful: true,
                                 errors: errors,
+                                warnings: warnings,
                                 usage: usage,
                                 executionReport: executionReport,
                                 outputParameters: outputParameters,
```
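The hunks above collect non-fatal problems into a new `warnings` array (extra input parameters, expected outputs that were never generated) instead of dropping them or failing the run, and every returned result now carries both arrays. A sketch of the resulting contract (field names from the diff; the authoritative shape is in `src/execution/PipelineExecutor.d.ts` from the file list above):

```ts
type PipelineExecutorResultSketch = {
    isSuccessful: boolean;
    errors: Array<Error>; // fatal: missing/invalid input parameters, execution failures
    warnings: Array<Error>; // non-fatal: extra inputs, outputs that were never generated
    outputParameters: Record<string, string>;
};
```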
```diff
@@ -3156,7 +3224,7 @@
  */
 function prepareKnowledgeFromMarkdown(content /* <- TODO: [🖖] (?maybe not) Always the file */, options) {
     return __awaiter(this, void 0, void 0, function () {
-        var llmTools, _a, maxParallelCount, _b, isVerbose, collection, prepareKnowledgeFromMarkdownExecutor, _c, prepareTitleExecutor, _d, prepareKeywordsExecutor, _e, result, outputParameters,
+        var llmTools, _a, maxParallelCount, _b, isVerbose, collection, prepareKnowledgeFromMarkdownExecutor, _c, prepareTitleExecutor, _d, prepareKeywordsExecutor, _e, result, outputParameters, knowledgePiecesRaw, knowledgeTextPieces, knowledge;
         var _f, _g, _h;
         var _this = this;
         return __generator(this, function (_j) {
@@ -3197,8 +3265,8 @@
                 result = _j.sent();
                 assertsExecutionSuccessful(result);
                 outputParameters = result.outputParameters;
-
-                knowledgeTextPieces = (
+                knowledgePiecesRaw = outputParameters.knowledgePieces;
+                knowledgeTextPieces = (knowledgePiecesRaw || '').split('\n---\n');
                 if (isVerbose) {
                     console.info('knowledgeTextPieces:', knowledgeTextPieces);
                 }
```
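`prepareKnowledgeFromMarkdown` now names the raw executor output and splits it into pieces. The delimiter is the markdown horizontal rule that the "Prepare Knowledge from Markdown" prompt explicitly asks the model to emit between pieces; a sketch of that convention:

```ts
// Matches the split in the diff above; trimming or filtering of empty pieces
// is not shown in the bundle and would be an extension.
function splitKnowledgePieces(knowledgePiecesRaw: string | undefined): Array<string> {
    return (knowledgePiecesRaw || '').split('\n---\n');
}
```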
```diff
@@ -3412,6 +3480,53 @@
  * TODO: [🏢] !! Check validity of `temperature` in pipeline
  */
 
+/**
+ * @@@
+ */
+function prepareTemplates(pipeline, options) {
+    return __awaiter(this, void 0, void 0, function () {
+        var _a, maxParallelCount, promptTemplates, parameters, knowledgePiecesCount, promptTemplatesPrepared;
+        var _this = this;
+        return __generator(this, function (_b) {
+            switch (_b.label) {
+                case 0:
+                    _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? MAX_PARALLEL_COUNT : _a;
+                    promptTemplates = pipeline.promptTemplates, parameters = pipeline.parameters, knowledgePiecesCount = pipeline.knowledgePiecesCount;
+                    // TODO: !!!! Apply samples to each template (if missing and is for the template defined)
+                    TODO_USE(parameters);
+                    promptTemplatesPrepared = new Array(promptTemplates.length);
+                    return [4 /*yield*/, forEachAsync(promptTemplates, { maxParallelCount: maxParallelCount /* <- TODO: [🪂] When there are subtasks, this maximul limit can be broken */ }, function (template, index) { return __awaiter(_this, void 0, void 0, function () {
+                            var preparedContent, preparedTemplate;
+                            return __generator(this, function (_a) {
+                                preparedContent = undefined;
+                                if (knowledgePiecesCount > 0) {
+                                    preparedContent = spaceTrim.spaceTrim("\n {content}\n\n ## Knowledge\n\n {knowledge}\n ");
+                                    // <- TODO: [🧠][🧻] Cutomize shape/language/formatting of the addition to the prompt
+                                }
+                                preparedTemplate = __assign(__assign({}, template), { preparedContent: preparedContent });
+                                promptTemplatesPrepared[index] = preparedTemplate;
+                                return [2 /*return*/];
+                            });
+                        }); })];
+                case 1:
+                    _b.sent();
+                    return [2 /*return*/, { promptTemplatesPrepared: promptTemplatesPrepared }];
+            }
+        });
+    });
+}
+/**
+ * TODO: [🧠] Add context to each template (if missing)
+ * TODO: [🧠] What is better name `prepareTemplate` or `prepareTemplateAndParameters`
+ * TODO: !!!!! Index the samples and maybe templates
+ * TODO: [🔼] !!! Export via `@promptbook/core`
+ * TODO: Write tests for `preparePipeline`
+ * TODO: [🏏] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
+ * TODO: [🧊] In future one preparation can take data from previous preparation and save tokens and time
+ * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
+ * TODO: [🧠][🥜]
+ */
+
 /**
  * Prepare pipeline from string (markdown) format to JSON format
  *
```
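`prepareTemplates` is new in this release: when the pipeline has any knowledge pieces, every template receives a `preparedContent` wrapper that appends a `## Knowledge` section referencing the reserved `{knowledge}` parameter. A de-transpiled sketch (the real function walks templates with `forEachAsync` under `maxParallelCount`; the parallelism is dropped here):

```ts
async function prepareTemplatesSketch(pipeline: {
    promptTemplates: Array<{ content: string }>;
    knowledgePiecesCount: number;
}): Promise<{ promptTemplatesPrepared: Array<{ content: string; preparedContent?: string }> }> {
    const { promptTemplates, knowledgePiecesCount } = pipeline;
    const promptTemplatesPrepared = promptTemplates.map((template) => ({
        ...template,
        // Only pipelines with knowledge get the wrapper; otherwise preparedContent
        // stays undefined and the raw content is used as-is at execution time.
        preparedContent: knowledgePiecesCount > 0 ? '{content}\n\n## Knowledge\n\n{knowledge}' : undefined,
    }));
    return { promptTemplatesPrepared };
}
```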
```diff
@@ -3420,18 +3535,18 @@
  */
 function preparePipeline(pipeline, options) {
     return __awaiter(this, void 0, void 0, function () {
-        var _a, maxParallelCount,
+        var _a, maxParallelCount, parameters, promptTemplates,
         /*
         <- TODO: [🧠][0] `promptbookVersion` */
         knowledgeSources /*
         <- TODO: [🧊] `knowledgePieces` */, personas /*
-        <- TODO: [🧊] `preparations` */, currentPreparation, preparations, preparedPersonas, knowledgeSourcesPrepared, partialknowledgePiecesPrepared, knowledgePiecesPrepared
+        <- TODO: [🧊] `preparations` */, currentPreparation, preparations, preparedPersonas, knowledgeSourcesPrepared, partialknowledgePiecesPrepared, knowledgePiecesPrepared, promptTemplatesPrepared /* TODO: parameters: parametersPrepared*/;
         var _this = this;
         return __generator(this, function (_b) {
             switch (_b.label) {
                 case 0:
                     _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? MAX_PARALLEL_COUNT : _a;
-                    knowledgeSources = pipeline.knowledgeSources, personas = pipeline.personas;
+                    parameters = pipeline.parameters, promptTemplates = pipeline.promptTemplates, knowledgeSources = pipeline.knowledgeSources, personas = pipeline.personas;
                     currentPreparation = {
                         id: 1,
                         // TODO: [🍥]> date: $currentDate(),
@@ -3464,16 +3579,20 @@
                 case 2:
                     partialknowledgePiecesPrepared = _b.sent();
                     knowledgePiecesPrepared = partialknowledgePiecesPrepared.map(function (piece) { return (__assign(__assign({}, piece), { preparationIds: [/* TODO: [🧊] -> */ currentPreparation.id] })); });
-
-
-
-
+                    return [4 /*yield*/, prepareTemplates({
+                            parameters: parameters,
+                            promptTemplates: promptTemplates,
+                            knowledgePiecesCount: knowledgePiecesPrepared.length,
+                        }, options)];
+                case 3:
+                    promptTemplatesPrepared = (_b.sent()).promptTemplatesPrepared;
+                    // ----- /Templates preparation -----
+                    return [2 /*return*/, __assign(__assign({}, pipeline), { promptTemplates: promptTemplatesPrepared, knowledgeSources: knowledgeSourcesPrepared, knowledgePieces: knowledgePiecesPrepared, personas: preparedPersonas, preparations: preparations })];
             }
         });
     });
 }
 /**
- * TODO: !!!!! Index the samples and maybe templates
  * TODO: [🔼] !!! Export via `@promptbook/core`
  * TODO: Write tests for `preparePipeline`
  * TODO: [🏏] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
@@ -5406,6 +5525,7 @@
  * TODO: [🥞] Not optimal parsing because `splitMarkdownIntoSections` is executed twice with same string, once through `flattenMarkdown` and second directly here
  * TODO: [♈] Probbably move expectations from templates to parameters
  * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
+ * TODO: [🍙] Make some standart order of json properties
  */
 
 /**
@@ -6078,7 +6198,7 @@
  */
 AnthropicClaudeExecutionTools.prototype.callChatModel = function (prompt) {
     return __awaiter(this, void 0, void 0, function () {
-        var content, parameters, modelRequirements, rawRequest, start, complete, rawResponse, resultContent, usage;
+        var content, parameters, modelRequirements, modelName, rawRequest, start, complete, rawResponse, resultContent, usage;
         return __generator(this, function (_a) {
             switch (_a.label) {
                 case 0:
@@ -6090,6 +6210,7 @@
                     if (modelRequirements.modelVariant !== 'CHAT') {
                         throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
                     }
+                    modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
                     rawRequest = {
                         model: modelRequirements.modelName || this.getDefaultChatModel().modelName,
                         max_tokens: modelRequirements.maxTokens || 4096,
@@ -6101,7 +6222,7 @@
                         messages: [
                             {
                                 role: 'user',
-                                content: replaceParameters(content, parameters),
+                                content: replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName })),
                             },
                         ],
                         // TODO: Is here some equivalent of user identification?> user: this.options.user,
```
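The same pattern repeats through the Anthropic and OpenAI hunks above and below: resolve the effective model name once, use it for the API request, report it in the result, and merge it into the parameters so templates can reference the new reserved `{modelName}` placeholder. A condensed sketch (adapter and method names are from the diff; this helper is illustrative):

```ts
declare function replaceParameters(template: string, parameters: Record<string, string>): string;

function buildUserMessage(
    content: string,
    parameters: Record<string, string>,
    modelRequirements: { modelName?: string },
    defaultModelName: string,
): { model: string; message: string } {
    const modelName = modelRequirements.modelName || defaultModelName;
    return {
        model: modelName,
        // {modelName} inside the template now resolves to the effective model:
        message: replaceParameters(content, { ...parameters, modelName }),
    };
}
```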
```diff
@@ -6162,9 +6283,9 @@
             throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
         }
 
-        const
+        const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
         const modelSettings = {
-            model:
+            model: modelName,
             max_tokens: modelRequirements.maxTokens || 2000, // <- Note: 2000 is for lagacy reasons
             // <- TODO: [🌾] Make some global max cap for maxTokens
             // <- TODO: Use here `systemMessage`, `temperature` and `seed`
@@ -6172,7 +6293,7 @@
 
         const rawRequest: xxxx.Completions.CompletionCreateParamsNonStreaming = {
             ...modelSettings,
-            prompt: replaceParameters(content, parameters),
+            prompt: replaceParameters(content, { ...parameters, modelName }),
             user: this.options.user,
         };
         const start: string_date_iso8601 = getCurrentIsoDate();
@@ -6679,7 +6800,7 @@
  */
 OpenAiExecutionTools.prototype.callChatModel = function (prompt) {
     return __awaiter(this, void 0, void 0, function () {
-        var content, parameters, modelRequirements, expectFormat,
+        var content, parameters, modelRequirements, expectFormat, modelName, modelSettings, rawRequest, start, complete, rawResponse, resultContent, usage;
         return __generator(this, function (_a) {
             switch (_a.label) {
                 case 0:
@@ -6691,9 +6812,9 @@
                     if (modelRequirements.modelVariant !== 'CHAT') {
                         throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
                     }
-
+                    modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
                     modelSettings = {
-                        model:
+                        model: modelName,
                         max_tokens: modelRequirements.maxTokens,
                         // <- TODO: [🌾] Make some global max cap for maxTokens
                         temperature: modelRequirements.temperature,
@@ -6715,7 +6836,7 @@
                     ])), false), [
                         {
                             role: 'user',
-                            content: replaceParameters(content, parameters),
+                            content: replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName })),
                         },
                     ], false), user: this.options.user });
                     start = getCurrentIsoDate();
@@ -6744,7 +6865,7 @@
                     }
                     return [2 /*return*/, {
                             content: resultContent,
-                            modelName: rawResponse.model ||
+                            modelName: rawResponse.model || modelName,
                             timing: {
                                 start: start,
                                 complete: complete,
@@ -6762,7 +6883,7 @@
  */
 OpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
     return __awaiter(this, void 0, void 0, function () {
-        var content, parameters, modelRequirements,
+        var content, parameters, modelRequirements, modelName, modelSettings, rawRequest, start, complete, rawResponse, resultContent, usage;
         return __generator(this, function (_a) {
             switch (_a.label) {
                 case 0:
@@ -6774,16 +6895,16 @@
                     if (modelRequirements.modelVariant !== 'COMPLETION') {
                         throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
                     }
-
+                    modelName = modelRequirements.modelName || this.getDefaultCompletionModel().modelName;
                     modelSettings = {
-                        model:
+                        model: modelName,
                         max_tokens: modelRequirements.maxTokens || 2000,
                         // <- TODO: [🌾] Make some global max cap for maxTokens
                         temperature: modelRequirements.temperature,
                         // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
                         // <- Note: [🧆]
                     };
-                    rawRequest = __assign(__assign({}, modelSettings), { prompt: replaceParameters(content, parameters), user: this.options.user });
+                    rawRequest = __assign(__assign({}, modelSettings), { prompt: replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName })), user: this.options.user });
                     start = getCurrentIsoDate();
                     if (this.options.isVerbose) {
                         console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
@@ -6807,7 +6928,7 @@
                     usage = computeOpenaiUsage(content, resultContent || '', rawResponse);
                     return [2 /*return*/, {
                             content: resultContent,
-                            modelName: rawResponse.model ||
+                            modelName: rawResponse.model || modelName,
                             timing: {
                                 start: start,
                                 complete: complete,
@@ -6825,7 +6946,7 @@
  */
 OpenAiExecutionTools.prototype.callEmbeddingModel = function (prompt) {
     return __awaiter(this, void 0, void 0, function () {
-        var content, parameters, modelRequirements,
+        var content, parameters, modelRequirements, modelName, rawRequest, start, complete, rawResponse, resultContent, usage;
         return __generator(this, function (_a) {
             switch (_a.label) {
                 case 0:
@@ -6837,10 +6958,10 @@
                     if (modelRequirements.modelVariant !== 'EMBEDDING') {
                         throw new PipelineExecutionError('Use embed only for EMBEDDING variant');
                     }
-
+                    modelName = modelRequirements.modelName || this.getDefaultEmbeddingModel().modelName;
                     rawRequest = {
-                        input: replaceParameters(content, parameters),
-                        model:
+                        input: replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName })),
+                        model: modelName,
                         // TODO: !!!! Test model 3 and dimensions
                     };
                     start = getCurrentIsoDate();
@@ -6862,7 +6983,7 @@
                     usage = computeOpenaiUsage(content, '', rawResponse);
                     return [2 /*return*/, {
                             content: resultContent,
-                            modelName: rawResponse.model ||
+                            modelName: rawResponse.model || modelName,
                             timing: {
                                 start: start,
                                 complete: complete,
@@ -7185,6 +7306,7 @@
  * TODO: !!!! Not Working propperly @see https://promptbook.studio/samples/mixed-knowledge.ptbk.md
 * TODO: [🧠][0] Maybe rename to `stringifyPipelineJson`, `stringifyIndexedJson`,...
 * TODO: [🧠] Maybe more elegant solution than replacing via regex
+ * TODO: [🍙] Make some standart order of json properties
 */
 
 /**
@@ -7535,7 +7657,7 @@
 prettifyCommand.action(function (filesGlob, _a) {
     var ignore = _a.ignore;
     return __awaiter(_this, void 0, void 0, function () {
-        var filePaths, filePaths_1, filePaths_1_1, filePath,
+        var filePaths, filePaths_1, filePaths_1_1, filePath, pipelineMarkdown, error_1, e_1_1;
         var e_1, _b;
         return __generator(this, function (_c) {
             switch (_c.label) {
@@ -7556,18 +7678,18 @@
                     }
                     return [4 /*yield*/, promises.readFile(filePath, 'utf-8')];
                 case 4:
-
+                    pipelineMarkdown = (_c.sent());
                     _c.label = 5;
                 case 5:
                     _c.trys.push([5, 8, , 9]);
-                    return [4 /*yield*/, prettifyPipelineString(
+                    return [4 /*yield*/, prettifyPipelineString(pipelineMarkdown, {
                         isGraphAdded: true,
                         isPrettifyed: true,
                         // <- [🕌]
                     })];
                 case 6:
-
-                    return [4 /*yield*/, promises.writeFile(filePath,
+                    pipelineMarkdown = _c.sent();
+                    return [4 /*yield*/, promises.writeFile(filePath, pipelineMarkdown)];
                 case 7:
                     _c.sent();
                     console.info(colors__default["default"].green("Prettify ".concat(filePath)));
```
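The final hunks wire the `ptbk prettify` command's read → prettify → write flow through a named `pipelineMarkdown` variable. A simplified sketch of the per-file flow (the real command iterates a glob and reports per-file errors; `prettifyPipelineString` and its options are taken from the diff, the rest is illustrative):

```ts
import { readFile, writeFile } from 'fs/promises';

declare function prettifyPipelineString(
    pipelineString: string,
    options: { isGraphAdded: boolean; isPrettifyed: boolean },
): Promise<string>;

async function prettifyFile(filePath: string): Promise<void> {
    // Read the pipeline markdown, prettify it in memory, write it back in place.
    let pipelineMarkdown = await readFile(filePath, 'utf-8');
    pipelineMarkdown = await prettifyPipelineString(pipelineMarkdown, {
        isGraphAdded: true,
        isPrettifyed: true,
    });
    await writeFile(filePath, pipelineMarkdown);
}
```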