@promptbook/cli 0.61.0-27 → 0.61.0-28
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/esm/index.es.js +30 -18
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/execution/PromptResult.d.ts +15 -0
- package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +1 -1
- package/esm/typings/src/llm-providers/remote/startRemoteServer.d.ts +2 -2
- package/esm/typings/src/prepare/preparePipeline.d.ts +1 -1
- package/package.json +2 -2
- package/umd/index.umd.js +30 -18
- package/umd/index.umd.js.map +1 -1
- package/umd/typings/src/execution/PromptResult.d.ts +15 -0
- package/umd/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +1 -1
- package/umd/typings/src/llm-providers/remote/startRemoteServer.d.ts +2 -2
- package/umd/typings/src/prepare/preparePipeline.d.ts +1 -1
|
@@ -1,5 +1,6 @@
|
|
|
1
1
|
import type { string_date_iso8601 } from '../types/typeAliases';
|
|
2
2
|
import type { string_model_name } from '../types/typeAliases';
|
|
3
|
+
import type { string_prompt } from '../types/typeAliases';
|
|
3
4
|
import type { TODO_object } from '../utils/organization/TODO_object';
|
|
4
5
|
import type { EmbeddingVector } from './EmbeddingVector';
|
|
5
6
|
import type { PromptResultUsage } from './PromptResultUsage';
|
|
@@ -69,8 +70,22 @@ export type CommonPromptResult = {
|
|
|
69
70
|
* Usage of the prompt execution
|
|
70
71
|
*/
|
|
71
72
|
readonly usage: PromptResultUsage;
|
|
73
|
+
/**
|
|
74
|
+
* Exact text of the prompt (with all replacements)
|
|
75
|
+
*
|
|
76
|
+
* Note: This contains redundant information
|
|
77
|
+
*/
|
|
78
|
+
readonly rawPromptContent: string_prompt;
|
|
79
|
+
/**
|
|
80
|
+
* Raw request to the model
|
|
81
|
+
*
|
|
82
|
+
* Note: This contains redundant information
|
|
83
|
+
*/
|
|
84
|
+
readonly rawRequest: TODO_object | null;
|
|
72
85
|
/**
|
|
73
86
|
* Raw response from the model
|
|
87
|
+
*
|
|
88
|
+
* Note: This contains redundant information
|
|
74
89
|
*/
|
|
75
90
|
readonly rawResponse: TODO_object;
|
|
76
91
|
};
|
|
@@ -50,5 +50,5 @@ export declare class RemoteLlmExecutionTools implements LlmExecutionTools {
|
|
|
50
50
|
}
|
|
51
51
|
/**
|
|
52
52
|
* TODO: [๐] Allow to list compatible models with each variant
|
|
53
|
-
* TODO: [
|
|
53
|
+
* TODO: [๐ฏ] RemoteLlmExecutionTools should extend Destroyable and implement IDestroyable
|
|
54
54
|
*/
|
|
@@ -12,8 +12,8 @@ export declare function startRemoteServer(options: RemoteServerOptions): IDestro
|
|
|
12
12
|
/**
|
|
13
13
|
* TODO: [โ] Expose the collection to be able to connect to same collection via createCollectionFromUrl
|
|
14
14
|
* TODO: Handle progress - support streaming
|
|
15
|
-
* TODO: [
|
|
16
|
-
* TODO: [
|
|
15
|
+
* TODO: [๐ฏ] Do not hang up immediately but wait until client closes OR timeout
|
|
16
|
+
* TODO: [๐ฏ] Timeout on chat to free up resources
|
|
17
17
|
* TODO: [๐] Pass here some security token to prevent malitious usage and/or DDoS
|
|
18
18
|
* TODO: [0] Set unavailable models as undefined in `RemoteLlmExecutionTools` NOT throw error here
|
|
19
19
|
*/
|
|
@@ -12,6 +12,6 @@ export declare function preparePipeline(pipeline: PipelineJson, options: Prepare
|
|
|
12
12
|
* TODO: Write tests for `preparePipeline`
|
|
13
13
|
* TODO: [๐] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
|
|
14
14
|
* TODO: [๐ง] In future one preparation can take data from previous preparation and save tokens and time
|
|
15
|
-
* TODO: [๐]
|
|
15
|
+
* TODO: [๐] !!!!! Use here countTotalUsage
|
|
16
16
|
* TODO: [๐ ] Actions, instruments (and maybe knowledge) => Functions and tools
|
|
17
17
|
*/
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@promptbook/cli",
|
|
3
|
-
"version": "0.61.0-27",
|
|
3
|
+
"version": "0.61.0-28",
|
|
4
4
|
"description": "Supercharge your use of large language models",
|
|
5
5
|
"private": false,
|
|
6
6
|
"sideEffects": false,
|
|
@@ -54,7 +54,7 @@
|
|
|
54
54
|
}
|
|
55
55
|
],
|
|
56
56
|
"peerDependencies": {
|
|
57
|
-
"@promptbook/core": "0.61.0-27"
|
|
57
|
+
"@promptbook/core": "0.61.0-28"
|
|
58
58
|
},
|
|
59
59
|
"main": "./umd/index.umd.js",
|
|
60
60
|
"module": "./esm/index.es.js",
|
package/umd/index.umd.js
CHANGED
|
@@ -154,7 +154,7 @@
|
|
|
154
154
|
/**
|
|
155
155
|
* The version of the Promptbook library
|
|
156
156
|
*/
|
|
157
|
-
var PROMPTBOOK_VERSION = '0.61.0-
|
|
157
|
+
var PROMPTBOOK_VERSION = '0.61.0-27';
|
|
158
158
|
// TODO: !!!! List here all the versions and annotate + put into script
|
|
159
159
|
|
|
160
160
|
/**
|
|
@@ -751,7 +751,7 @@
|
|
|
751
751
|
});
|
|
752
752
|
}
|
|
753
753
|
|
|
754
|
-
var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-
|
|
754
|
+
var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-27",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-27",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0-27",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by 
comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-27",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0-27",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> 
{knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-27",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0-27",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. 
The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-27",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
|
|
755
755
|
|
|
756
756
|
/**
|
|
757
757
|
* This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
|
|
@@ -3534,7 +3534,7 @@
|
|
|
3534
3534
|
case 0:
|
|
3535
3535
|
_a = options.maxParallelCount, maxParallelCount = _a === void 0 ? MAX_PARALLEL_COUNT : _a;
|
|
3536
3536
|
promptTemplates = pipeline.promptTemplates, parameters = pipeline.parameters, knowledgePiecesCount = pipeline.knowledgePiecesCount;
|
|
3537
|
-
// TODO:
|
|
3537
|
+
// TODO: !!!!! Apply samples to each template (if missing and is for the template defined)
|
|
3538
3538
|
TODO_USE(parameters);
|
|
3539
3539
|
promptTemplatesPrepared = new Array(promptTemplates.length);
|
|
3540
3540
|
return [4 /*yield*/, forEachAsync(promptTemplates, { maxParallelCount: maxParallelCount /* <- TODO: [๐ช] When there are subtasks, this maximul limit can be broken */ }, function (template, index) { return __awaiter(_this, void 0, void 0, function () {
|
|
@@ -3643,7 +3643,7 @@
|
|
|
3643
3643
|
* TODO: Write tests for `preparePipeline`
|
|
3644
3644
|
* TODO: [๐] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
|
|
3645
3645
|
* TODO: [๐ง] In future one preparation can take data from previous preparation and save tokens and time
|
|
3646
|
-
* TODO: [๐]
|
|
3646
|
+
* TODO: [๐] !!!!! Use here countTotalUsage
|
|
3647
3647
|
* TODO: [๐ ] Actions, instruments (and maybe knowledge) => Functions and tools
|
|
3648
3648
|
*/
|
|
3649
3649
|
|
|
@@ -6238,7 +6238,7 @@
|
|
|
6238
6238
|
*/
|
|
6239
6239
|
AnthropicClaudeExecutionTools.prototype.callChatModel = function (prompt) {
|
|
6240
6240
|
return __awaiter(this, void 0, void 0, function () {
|
|
6241
|
-
var content, parameters, modelRequirements, modelName, rawRequest, start, complete, rawResponse, resultContent, usage;
|
|
6241
|
+
var content, parameters, modelRequirements, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
|
|
6242
6242
|
return __generator(this, function (_a) {
|
|
6243
6243
|
switch (_a.label) {
|
|
6244
6244
|
case 0:
|
|
@@ -6251,6 +6251,7 @@
|
|
|
6251
6251
|
throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
|
|
6252
6252
|
}
|
|
6253
6253
|
modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
|
|
6254
|
+
rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
|
|
6254
6255
|
rawRequest = {
|
|
6255
6256
|
model: modelRequirements.modelName || this.getDefaultChatModel().modelName,
|
|
6256
6257
|
max_tokens: modelRequirements.maxTokens || 4096,
|
|
@@ -6262,7 +6263,7 @@
|
|
|
6262
6263
|
messages: [
|
|
6263
6264
|
{
|
|
6264
6265
|
role: 'user',
|
|
6265
|
-
content:
|
|
6266
|
+
content: rawPromptContent,
|
|
6266
6267
|
},
|
|
6267
6268
|
],
|
|
6268
6269
|
// TODO: Is here some equivalent of user identification?> user: this.options.user,
|
|
@@ -6299,8 +6300,10 @@
|
|
|
6299
6300
|
complete: complete,
|
|
6300
6301
|
},
|
|
6301
6302
|
usage: usage,
|
|
6303
|
+
rawPromptContent: rawPromptContent,
|
|
6304
|
+
rawRequest: rawRequest,
|
|
6302
6305
|
rawResponse: rawResponse,
|
|
6303
|
-
// <- [
|
|
6306
|
+
// <- [๐ฏ]
|
|
6304
6307
|
}];
|
|
6305
6308
|
}
|
|
6306
6309
|
});
|
|
@@ -6333,7 +6336,7 @@
|
|
|
6333
6336
|
|
|
6334
6337
|
const rawRequest: xxxx.Completions.CompletionCreateParamsNonStreaming = {
|
|
6335
6338
|
...modelSettings,
|
|
6336
|
-
prompt:
|
|
6339
|
+
prompt: rawPromptContent,
|
|
6337
6340
|
user: this.options.user,
|
|
6338
6341
|
};
|
|
6339
6342
|
const start: string_date_iso8601 = getCurrentIsoDate();
|
|
@@ -6372,7 +6375,7 @@
|
|
|
6372
6375
|
},
|
|
6373
6376
|
usage,
|
|
6374
6377
|
rawResponse,
|
|
6375
|
-
// <- [
|
|
6378
|
+
// <- [๐ฏ]
|
|
6376
6379
|
};
|
|
6377
6380
|
}
|
|
6378
6381
|
*/
|
|
@@ -6840,7 +6843,7 @@
|
|
|
6840
6843
|
*/
|
|
6841
6844
|
OpenAiExecutionTools.prototype.callChatModel = function (prompt) {
|
|
6842
6845
|
return __awaiter(this, void 0, void 0, function () {
|
|
6843
|
-
var content, parameters, modelRequirements, expectFormat, modelName, modelSettings, rawRequest, start, complete, rawResponse, resultContent, usage;
|
|
6846
|
+
var content, parameters, modelRequirements, expectFormat, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
|
|
6844
6847
|
return __generator(this, function (_a) {
|
|
6845
6848
|
switch (_a.label) {
|
|
6846
6849
|
case 0:
|
|
@@ -6866,6 +6869,7 @@
|
|
|
6866
6869
|
type: 'json_object',
|
|
6867
6870
|
};
|
|
6868
6871
|
}
|
|
6872
|
+
rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
|
|
6869
6873
|
rawRequest = __assign(__assign({}, modelSettings), { messages: __spreadArray(__spreadArray([], __read((modelRequirements.systemMessage === undefined
|
|
6870
6874
|
? []
|
|
6871
6875
|
: [
|
|
@@ -6876,7 +6880,7 @@
|
|
|
6876
6880
|
])), false), [
|
|
6877
6881
|
{
|
|
6878
6882
|
role: 'user',
|
|
6879
|
-
content:
|
|
6883
|
+
content: rawPromptContent,
|
|
6880
6884
|
},
|
|
6881
6885
|
], false), user: this.options.user });
|
|
6882
6886
|
start = getCurrentIsoDate();
|
|
@@ -6911,8 +6915,10 @@
|
|
|
6911
6915
|
complete: complete,
|
|
6912
6916
|
},
|
|
6913
6917
|
usage: usage,
|
|
6918
|
+
rawPromptContent: rawPromptContent,
|
|
6919
|
+
rawRequest: rawRequest,
|
|
6914
6920
|
rawResponse: rawResponse,
|
|
6915
|
-
// <- [
|
|
6921
|
+
// <- [๐ฏ]
|
|
6916
6922
|
}];
|
|
6917
6923
|
}
|
|
6918
6924
|
});
|
|
@@ -6923,7 +6929,7 @@
|
|
|
6923
6929
|
*/
|
|
6924
6930
|
OpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
|
|
6925
6931
|
return __awaiter(this, void 0, void 0, function () {
|
|
6926
|
-
var content, parameters, modelRequirements, modelName, modelSettings, rawRequest, start, complete, rawResponse, resultContent, usage;
|
|
6932
|
+
var content, parameters, modelRequirements, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
|
|
6927
6933
|
return __generator(this, function (_a) {
|
|
6928
6934
|
switch (_a.label) {
|
|
6929
6935
|
case 0:
|
|
@@ -6944,7 +6950,8 @@
|
|
|
6944
6950
|
// <- TODO: [๐] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
|
|
6945
6951
|
// <- Note: [๐ง]
|
|
6946
6952
|
};
|
|
6947
|
-
|
|
6953
|
+
rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
|
|
6954
|
+
rawRequest = __assign(__assign({}, modelSettings), { prompt: rawPromptContent, user: this.options.user });
|
|
6948
6955
|
start = getCurrentIsoDate();
|
|
6949
6956
|
if (this.options.isVerbose) {
|
|
6950
6957
|
console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
|
|
@@ -6974,8 +6981,10 @@
|
|
|
6974
6981
|
complete: complete,
|
|
6975
6982
|
},
|
|
6976
6983
|
usage: usage,
|
|
6984
|
+
rawPromptContent: rawPromptContent,
|
|
6985
|
+
rawRequest: rawRequest,
|
|
6977
6986
|
rawResponse: rawResponse,
|
|
6978
|
-
// <- [
|
|
6987
|
+
// <- [๐ฏ]
|
|
6979
6988
|
}];
|
|
6980
6989
|
}
|
|
6981
6990
|
});
|
|
@@ -6986,7 +6995,7 @@
|
|
|
6986
6995
|
*/
|
|
6987
6996
|
OpenAiExecutionTools.prototype.callEmbeddingModel = function (prompt) {
|
|
6988
6997
|
return __awaiter(this, void 0, void 0, function () {
|
|
6989
|
-
var content, parameters, modelRequirements, modelName, rawRequest, start, complete, rawResponse, resultContent, usage;
|
|
6998
|
+
var content, parameters, modelRequirements, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
|
|
6990
6999
|
return __generator(this, function (_a) {
|
|
6991
7000
|
switch (_a.label) {
|
|
6992
7001
|
case 0:
|
|
@@ -6999,8 +7008,9 @@
|
|
|
6999
7008
|
throw new PipelineExecutionError('Use embed only for EMBEDDING variant');
|
|
7000
7009
|
}
|
|
7001
7010
|
modelName = modelRequirements.modelName || this.getDefaultEmbeddingModel().modelName;
|
|
7011
|
+
rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
|
|
7002
7012
|
rawRequest = {
|
|
7003
|
-
input:
|
|
7013
|
+
input: rawPromptContent,
|
|
7004
7014
|
model: modelName,
|
|
7005
7015
|
};
|
|
7006
7016
|
start = getCurrentIsoDate();
|
|
@@ -7028,8 +7038,10 @@
|
|
|
7028
7038
|
complete: complete,
|
|
7029
7039
|
},
|
|
7030
7040
|
usage: usage,
|
|
7041
|
+
rawPromptContent: rawPromptContent,
|
|
7042
|
+
rawRequest: rawRequest,
|
|
7031
7043
|
rawResponse: rawResponse,
|
|
7032
|
-
// <- [
|
|
7044
|
+
// <- [๐ฏ]
|
|
7033
7045
|
}];
|
|
7034
7046
|
}
|
|
7035
7047
|
});
|