@promptbook/legacy-documents 0.92.0-4 → 0.92.0-6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/esm/index.es.js +114 -8
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/core.index.d.ts +4 -0
- package/esm/typings/src/_packages/utils.index.d.ts +2 -0
- package/esm/typings/src/cli/common/$provideLlmToolsForCli.d.ts +1 -1
- package/esm/typings/src/conversion/archive/loadArchive.d.ts +2 -2
- package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +12 -0
- package/esm/typings/src/execution/createPipelineExecutor/getReservedParametersForTask.d.ts +5 -0
- package/esm/typings/src/formats/json/utils/jsonParse.d.ts +11 -0
- package/esm/typings/src/llm-providers/_common/register/LlmToolsMetadata.d.ts +43 -0
- package/esm/typings/src/remote-server/openapi.d.ts +397 -3
- package/package.json +2 -2
- package/umd/index.umd.js +114 -8
- package/umd/index.umd.js.map +1 -1
package/esm/index.es.js
CHANGED
|
@@ -28,7 +28,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
|
|
|
28
28
|
* @generated
|
|
29
29
|
* @see https://github.com/webgptorg/promptbook
|
|
30
30
|
*/
|
|
31
|
-
const PROMPTBOOK_ENGINE_VERSION = '0.92.0-4';
|
|
31
|
+
const PROMPTBOOK_ENGINE_VERSION = '0.92.0-6';
|
|
32
32
|
/**
|
|
33
33
|
* TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
|
|
34
34
|
* Note: [💞] Ignore a discrepancy between file name and entity name
|
|
@@ -2298,6 +2298,45 @@ function isPipelinePrepared(pipeline) {
|
|
|
2298
2298
|
* - [♨] Are tasks prepared
|
|
2299
2299
|
*/
|
|
2300
2300
|
|
|
2301
|
+
/**
|
|
2302
|
+
* Converts a JavaScript Object Notation (JSON) string into an object.
|
|
2303
|
+
*
|
|
2304
|
+
* Note: This is wrapper around `JSON.parse()` with better error and type handling
|
|
2305
|
+
*
|
|
2306
|
+
* @public exported from `@promptbook/utils`
|
|
2307
|
+
*/
|
|
2308
|
+
function jsonParse(value) {
|
|
2309
|
+
if (value === undefined) {
|
|
2310
|
+
throw new Error(`Can not parse JSON from undefined value.`);
|
|
2311
|
+
}
|
|
2312
|
+
else if (typeof value !== 'string') {
|
|
2313
|
+
console.error('Can not parse JSON from non-string value.', { text: value });
|
|
2314
|
+
throw new Error(spaceTrim$1(`
|
|
2315
|
+
Can not parse JSON from non-string value.
|
|
2316
|
+
|
|
2317
|
+
The value type: ${typeof value}
|
|
2318
|
+
See more in console.
|
|
2319
|
+
`));
|
|
2320
|
+
}
|
|
2321
|
+
try {
|
|
2322
|
+
return JSON.parse(value);
|
|
2323
|
+
}
|
|
2324
|
+
catch (error) {
|
|
2325
|
+
if (!(error instanceof Error)) {
|
|
2326
|
+
throw error;
|
|
2327
|
+
}
|
|
2328
|
+
throw new Error(spaceTrim$1((block) => `
|
|
2329
|
+
${block(error.message)}
|
|
2330
|
+
|
|
2331
|
+
The JSON text:
|
|
2332
|
+
${block(value)}
|
|
2333
|
+
`));
|
|
2334
|
+
}
|
|
2335
|
+
}
|
|
2336
|
+
/**
|
|
2337
|
+
* TODO: !!!! Use in Promptbook.studio
|
|
2338
|
+
*/
|
|
2339
|
+
|
|
2301
2340
|
/**
|
|
2302
2341
|
* Recursively converts JSON strings to JSON objects
|
|
2303
2342
|
|
|
@@ -2316,7 +2355,7 @@ function jsonStringsToJsons(object) {
|
|
|
2316
2355
|
const newObject = { ...object };
|
|
2317
2356
|
for (const [key, value] of Object.entries(object)) {
|
|
2318
2357
|
if (typeof value === 'string' && isValidJsonString(value)) {
|
|
2319
|
-
newObject[key] = JSON.parse(value);
|
|
2358
|
+
newObject[key] = jsonParse(value);
|
|
2320
2359
|
}
|
|
2321
2360
|
else {
|
|
2322
2361
|
newObject[key] = jsonStringsToJsons(value);
|
|
@@ -3163,7 +3202,7 @@ async function preparePersona(personaDescription, tools, options) {
|
|
|
3163
3202
|
}).asPromise();
|
|
3164
3203
|
const { outputParameters } = result;
|
|
3165
3204
|
const { modelsRequirements: modelsRequirementsJson } = outputParameters;
|
|
3166
|
-
const modelsRequirementsUnchecked = JSON.parse(modelsRequirementsJson);
|
|
3205
|
+
const modelsRequirementsUnchecked = jsonParse(modelsRequirementsJson);
|
|
3167
3206
|
if (isVerbose) {
|
|
3168
3207
|
console.info(`PERSONA ${personaDescription}`, modelsRequirementsUnchecked);
|
|
3169
3208
|
}
|
|
@@ -3609,7 +3648,7 @@ async function makeKnowledgeSourceHandler(knowledgeSource, tools, options) {
|
|
|
3609
3648
|
> },
|
|
3610
3649
|
*/
|
|
3611
3650
|
async asJson() {
|
|
3612
|
-
return JSON.parse(await tools.fs.readFile(filename, 'utf-8'));
|
|
3651
|
+
return jsonParse(await tools.fs.readFile(filename, 'utf-8'));
|
|
3613
3652
|
},
|
|
3614
3653
|
async asText() {
|
|
3615
3654
|
return await tools.fs.readFile(filename, 'utf-8');
|
|
@@ -5296,13 +5335,79 @@ async function getExamplesForTask(task) {
|
|
|
5296
5335
|
/**
|
|
5297
5336
|
* @@@
|
|
5298
5337
|
*
|
|
5338
|
+
* Here is the place where RAG (retrieval-augmented generation) happens
|
|
5339
|
+
*
|
|
5299
5340
|
* @private internal utility of `createPipelineExecutor`
|
|
5300
5341
|
*/
|
|
5301
5342
|
async function getKnowledgeForTask(options) {
|
|
5302
|
-
const { preparedPipeline, task } = options;
|
|
5303
|
-
|
|
5343
|
+
const { tools, preparedPipeline, task } = options;
|
|
5344
|
+
const firstKnowlegePiece = preparedPipeline.knowledgePieces[0];
|
|
5345
|
+
const firstKnowlegeIndex = firstKnowlegePiece === null || firstKnowlegePiece === void 0 ? void 0 : firstKnowlegePiece.index[0];
|
|
5346
|
+
// <- TODO: Do not use just first knowledge piece and first index to determine embedding model, use also keyword search
|
|
5347
|
+
if (firstKnowlegePiece === undefined || firstKnowlegeIndex === undefined) {
|
|
5348
|
+
return 'No knowledge pieces found';
|
|
5349
|
+
}
|
|
5350
|
+
// TODO: [🚐] Make arrayable LLMs -> single LLM DRY
|
|
5351
|
+
const _llms = arrayableToArray(tools.llm);
|
|
5352
|
+
const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
|
|
5353
|
+
const taskEmbeddingPrompt = {
|
|
5354
|
+
title: 'Knowledge Search',
|
|
5355
|
+
modelRequirements: {
|
|
5356
|
+
modelVariant: 'EMBEDDING',
|
|
5357
|
+
modelName: firstKnowlegeIndex.modelName,
|
|
5358
|
+
},
|
|
5359
|
+
content: task.content,
|
|
5360
|
+
parameters: {
|
|
5361
|
+
/* !!!!!!!! */
|
|
5362
|
+
},
|
|
5363
|
+
};
|
|
5364
|
+
const taskEmbeddingResult = await llmTools.callEmbeddingModel(taskEmbeddingPrompt);
|
|
5365
|
+
const knowledgePiecesWithRelevance = preparedPipeline.knowledgePieces.map((knowledgePiece) => {
|
|
5366
|
+
const { index } = knowledgePiece;
|
|
5367
|
+
const knowledgePieceIndex = index.find((i) => i.modelName === firstKnowlegeIndex.modelName);
|
|
5368
|
+
// <- TODO: Do not use just first knowledge piece and first index to determine embedding model
|
|
5369
|
+
if (knowledgePieceIndex === undefined) {
|
|
5370
|
+
return {
|
|
5371
|
+
content: knowledgePiece.content,
|
|
5372
|
+
relevance: 0,
|
|
5373
|
+
};
|
|
5374
|
+
}
|
|
5375
|
+
const relevance = computeCosineSimilarity(knowledgePieceIndex.position, taskEmbeddingResult.content);
|
|
5376
|
+
return {
|
|
5377
|
+
content: knowledgePiece.content,
|
|
5378
|
+
relevance,
|
|
5379
|
+
};
|
|
5380
|
+
});
|
|
5381
|
+
const knowledgePiecesSorted = knowledgePiecesWithRelevance.sort((a, b) => a.relevance - b.relevance);
|
|
5382
|
+
const knowledgePiecesLimited = knowledgePiecesSorted.slice(0, 5);
|
|
5383
|
+
console.log('!!! Embedding', {
|
|
5384
|
+
task,
|
|
5385
|
+
taskEmbeddingPrompt,
|
|
5386
|
+
taskEmbeddingResult,
|
|
5387
|
+
firstKnowlegePiece,
|
|
5388
|
+
firstKnowlegeIndex,
|
|
5389
|
+
knowledgePiecesWithRelevance,
|
|
5390
|
+
knowledgePiecesSorted,
|
|
5391
|
+
knowledgePiecesLimited,
|
|
5392
|
+
});
|
|
5393
|
+
return knowledgePiecesLimited.map(({ content }) => `- ${content}`).join('\n');
|
|
5304
5394
|
// <- TODO: [🧠] Some smart aggregation of knowledge pieces, single-line vs multi-line vs mixed
|
|
5305
5395
|
}
|
|
5396
|
+
// TODO: !!!!!! Annotate + to new file
|
|
5397
|
+
function computeCosineSimilarity(embeddingVector1, embeddingVector2) {
|
|
5398
|
+
if (embeddingVector1.length !== embeddingVector2.length) {
|
|
5399
|
+
throw new TypeError('Embedding vectors must have the same length');
|
|
5400
|
+
}
|
|
5401
|
+
const dotProduct = embeddingVector1.reduce((sum, value, index) => sum + value * embeddingVector2[index], 0);
|
|
5402
|
+
const magnitude1 = Math.sqrt(embeddingVector1.reduce((sum, value) => sum + value * value, 0));
|
|
5403
|
+
const magnitude2 = Math.sqrt(embeddingVector2.reduce((sum, value) => sum + value * value, 0));
|
|
5404
|
+
return 1 - dotProduct / (magnitude1 * magnitude2);
|
|
5405
|
+
}
|
|
5406
|
+
/**
|
|
5407
|
+
* TODO: !!!! Verify if this is working
|
|
5408
|
+
* TODO: [♨] Implement Better - use keyword search
|
|
5409
|
+
* TODO: [♨] Examples of values
|
|
5410
|
+
*/
|
|
5306
5411
|
|
|
5307
5412
|
/**
|
|
5308
5413
|
* @@@
|
|
@@ -5310,9 +5415,9 @@ async function getKnowledgeForTask(options) {
|
|
|
5310
5415
|
* @private internal utility of `createPipelineExecutor`
|
|
5311
5416
|
*/
|
|
5312
5417
|
async function getReservedParametersForTask(options) {
|
|
5313
|
-
const { preparedPipeline, task, pipelineIdentification } = options;
|
|
5418
|
+
const { tools, preparedPipeline, task, pipelineIdentification } = options;
|
|
5314
5419
|
const context = await getContextForTask(); // <- [🏍]
|
|
5315
|
-
const knowledge = await getKnowledgeForTask({ preparedPipeline, task });
|
|
5420
|
+
const knowledge = await getKnowledgeForTask({ tools, preparedPipeline, task });
|
|
5316
5421
|
const examples = await getExamplesForTask();
|
|
5317
5422
|
const currentDate = new Date().toISOString(); // <- TODO: [🧠][💩] Better
|
|
5318
5423
|
const modelName = RESERVED_PARAMETER_MISSING_VALUE;
|
|
@@ -5374,6 +5479,7 @@ async function executeTask(options) {
|
|
|
5374
5479
|
}
|
|
5375
5480
|
const definedParameters = Object.freeze({
|
|
5376
5481
|
...(await getReservedParametersForTask({
|
|
5482
|
+
tools,
|
|
5377
5483
|
preparedPipeline,
|
|
5378
5484
|
task: currentTask,
|
|
5379
5485
|
pipelineIdentification,
|